diff --git a/Makefile b/Makefile
index 18657692d..7030eb48d 100644
--- a/Makefile
+++ b/Makefile
@@ -1726,7 +1726,7 @@ endif
 	fi
 	tar xvzf snappy-$(SNAPPY_VER).tar.gz
 	mkdir snappy-$(SNAPPY_VER)/build
-	cd snappy-$(SNAPPY_VER)/build && CFLAGS='${EXTRA_CFLAGS}' CXXFLAGS='${EXTRA_CXXFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' cmake .. && $(MAKE) ${SNAPPY_MAKE_TARGET}
+	cd snappy-$(SNAPPY_VER)/build && CFLAGS='${EXTRA_CFLAGS}' CXXFLAGS='${EXTRA_CXXFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON .. && $(MAKE) ${SNAPPY_MAKE_TARGET}
 	cp snappy-$(SNAPPY_VER)/build/libsnappy.a .
 
 liblz4.a:
diff --git a/include/rocksdb/options.h b/include/rocksdb/options.h
index 311d78983..4691956ee 100644
--- a/include/rocksdb/options.h
+++ b/include/rocksdb/options.h
@@ -709,7 +709,7 @@ struct DBOptions {
   // a limit, a flush will be triggered in the next DB to which the next write
   // is issued.
   //
-  // If the object is only passed to on DB, the behavior is the same as
+  // If the object is only passed to one DB, the behavior is the same as
   // db_write_buffer_size. When write_buffer_manager is set, the value set will
   // override db_write_buffer_size.
   //
@@ -821,7 +821,7 @@ struct DBOptions {
   // Dynamically changeable through SetDBOptions() API.
   uint64_t wal_bytes_per_sync = 0;
 
-  // A vector of EventListeners which callback functions will be called
+  // A vector of EventListeners whose callback functions will be called
   // when specific RocksDB event happens.
   std::vector<std::shared_ptr<EventListener>> listeners;
diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h
index c1cadde7b..9119d8c93 100644
--- a/include/rocksdb/statistics.h
+++ b/include/rocksdb/statistics.h
@@ -22,6 +22,7 @@ namespace rocksdb {
  * 1. Any ticker should be added before TICKER_ENUM_MAX.
  * 2. Add a readable string in TickersNameMap below for the newly added ticker.
  * 3. Add a corresponding enum value to TickerType.java in the java API
+ * 4. Add the enum conversions from Java and C++ to portal.h's toJavaTickerType and toCppTickers
  */
 enum Tickers : uint32_t {
   // total block cache misses
diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index 100586d4e..7e64c341b 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -229,7 +229,7 @@ struct BlockBasedTableOptions {
   // Default: 0 (disabled)
   uint32_t read_amp_bytes_per_bit = 0;
 
-  // We currently have three versions:
+  // We currently have five versions:
   // 0 -- This version is currently written out by all RocksDB's versions by
   // default. Can be read by really old RocksDB's. Doesn't support changing
   // checksum (default is CRC32).
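Annotation (not part of the patch): the table.h comment fix above documents that block-based tables now have five on-disk format versions. A minimal Java sketch of opting into a newer format version follows; it assumes the RocksJava BlockBasedTableConfig#setFormatVersion setter, which mirrors BlockBasedTableOptions::format_version, and is an illustration rather than code from this change.

// Hypothetical usage sketch; path and version value are examples only.
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class FormatVersionExample {
  public static void main(final String[] args) throws Exception {
    RocksDB.loadLibrary();
    // format_version 2+ requires RocksDB 3.10 or newer to read the files.
    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setTableFormatConfig(
                 new BlockBasedTableConfig().setFormatVersion(2));
         final RocksDB db = RocksDB.open(options, "/tmp/format-version-example")) {
      db.put("key".getBytes(), "value".getBytes());
    }
  }
}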
diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt
index 8f4ec9a56..360951834 100644
--- a/java/CMakeLists.txt
+++ b/java/CMakeLists.txt
@@ -11,6 +11,9 @@ set(JNI_NATIVE_SOURCES
   rocksjni/compaction_filter.cc
   rocksjni/compaction_filter_factory.cc
   rocksjni/compaction_filter_factory_jnicallback.cc
+  rocksjni/compaction_job_info.cc
+  rocksjni/compaction_job_stats.cc
+  rocksjni/compaction_options.cc
   rocksjni/compaction_options_fifo.cc
   rocksjni/compaction_options_universal.cc
   rocksjni/compact_range_options.cc
@@ -33,6 +36,7 @@ set(JNI_NATIVE_SOURCES
   rocksjni/optimistic_transaction_options.cc
   rocksjni/options.cc
   rocksjni/options_util.cc
+  rocksjni/persistent_cache.cc
   rocksjni/ratelimiterjni.cc
   rocksjni/remove_emptyvalue_compactionfilterjni.cc
   rocksjni/restorejni.cc
@@ -46,6 +50,11 @@ set(JNI_NATIVE_SOURCES
   rocksjni/statistics.cc
   rocksjni/statisticsjni.cc
   rocksjni/table.cc
+  rocksjni/table_filter.cc
+  rocksjni/table_filter_jnicallback.cc
+  rocksjni/thread_status.cc
+  rocksjni/trace_writer.cc
+  rocksjni/trace_writer_jnicallback.cc
   rocksjni/transaction.cc
   rocksjni/transaction_db.cc
   rocksjni/transaction_db_options.cc
@@ -54,6 +63,8 @@ set(JNI_NATIVE_SOURCES
   rocksjni/transaction_notifier_jnicallback.cc
   rocksjni/transaction_options.cc
   rocksjni/ttl.cc
+  rocksjni/wal_filter.cc
+  rocksjni/wal_filter_jnicallback.cc
   rocksjni/write_batch.cc
   rocksjni/writebatchhandlerjnicallback.cc
   rocksjni/write_batch_test.cc
@@ -69,7 +80,10 @@ set(NATIVE_JAVA_CLASSES
   org.rocksdb.AbstractNativeReference
   org.rocksdb.AbstractRocksIterator
   org.rocksdb.AbstractSlice
+  org.rocksdb.AbstractTableFilter
+  org.rocksdb.AbstractTraceWriter
   org.rocksdb.AbstractTransactionNotifier
+  org.rocksdb.AbstractWalFilter
   org.rocksdb.BackupableDBOptions
   org.rocksdb.BackupEngine
   org.rocksdb.BlockBasedTableConfig
@@ -80,6 +94,9 @@ set(NATIVE_JAVA_CLASSES
   org.rocksdb.ClockCache
   org.rocksdb.ColumnFamilyHandle
   org.rocksdb.ColumnFamilyOptions
+  org.rocksdb.CompactionJobInfo
+  org.rocksdb.CompactionJobStats
+  org.rocksdb.CompactionOptions
   org.rocksdb.CompactionOptionsFIFO
   org.rocksdb.CompactionOptionsUniversal
   org.rocksdb.CompactRangeOptions
@@ -95,6 +112,7 @@ set(NATIVE_JAVA_CLASSES
   org.rocksdb.FlushOptions
   org.rocksdb.HashLinkedListMemTableConfig
   org.rocksdb.HashSkipListMemTableConfig
+  org.rocksdb.HdfsEnv
   org.rocksdb.IngestExternalFileOptions
   org.rocksdb.Logger
   org.rocksdb.LRUCache
@@ -106,6 +124,7 @@ set(NATIVE_JAVA_CLASSES
   org.rocksdb.OptimisticTransactionOptions
   org.rocksdb.Options
   org.rocksdb.OptionsUtil
+  org.rocksdb.PersistentCache
   org.rocksdb.PlainTableConfig
   org.rocksdb.RateLimiter
   org.rocksdb.ReadOptions
@@ -127,6 +146,8 @@ set(NATIVE_JAVA_CLASSES
   org.rocksdb.Statistics
   org.rocksdb.StringAppendOperator
   org.rocksdb.TableFormatConfig
+  org.rocksdb.ThreadStatus
+  org.rocksdb.TimedEnv
   org.rocksdb.Transaction
   org.rocksdb.TransactionDB
   org.rocksdb.TransactionDBOptions
@@ -172,10 +193,14 @@ add_jar(
   src/main/java/org/rocksdb/AbstractCompactionFilter.java
   src/main/java/org/rocksdb/AbstractComparator.java
   src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
+  src/main/java/org/rocksdb/AbstractMutableOptions.java
   src/main/java/org/rocksdb/AbstractNativeReference.java
   src/main/java/org/rocksdb/AbstractRocksIterator.java
   src/main/java/org/rocksdb/AbstractSlice.java
+  src/main/java/org/rocksdb/AbstractTableFilter.java
+  src/main/java/org/rocksdb/AbstractTraceWriter.java
   src/main/java/org/rocksdb/AbstractTransactionNotifier.java
+  src/main/java/org/rocksdb/AbstractWalFilter.java
   src/main/java/org/rocksdb/AbstractWriteBatch.java
   src/main/java/org/rocksdb/AccessHint.java
   src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
@@ -194,11 +219,16 @@ add_jar(
   src/main/java/org/rocksdb/ClockCache.java
   src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
   src/main/java/org/rocksdb/ColumnFamilyHandle.java
+  src/main/java/org/rocksdb/ColumnFamilyMetaData.java
   src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
   src/main/java/org/rocksdb/ColumnFamilyOptions.java
+  src/main/java/org/rocksdb/CompactionJobInfo.java
+  src/main/java/org/rocksdb/CompactionJobStats.java
+  src/main/java/org/rocksdb/CompactionOptions.java
   src/main/java/org/rocksdb/CompactionOptionsFIFO.java
   src/main/java/org/rocksdb/CompactionOptionsUniversal.java
   src/main/java/org/rocksdb/CompactionPriority.java
+  src/main/java/org/rocksdb/CompactionReason.java
   src/main/java/org/rocksdb/CompactRangeOptions.java
   src/main/java/org/rocksdb/CompactionStopStyle.java
   src/main/java/org/rocksdb/CompactionStyle.java
@@ -207,6 +237,7 @@ add_jar(
   src/main/java/org/rocksdb/ComparatorType.java
   src/main/java/org/rocksdb/CompressionOptions.java
   src/main/java/org/rocksdb/CompressionType.java
+  src/main/java/org/rocksdb/DataBlockIndexType.java
   src/main/java/org/rocksdb/DBOptionsInterface.java
   src/main/java/org/rocksdb/DBOptions.java
   src/main/java/org/rocksdb/DbPath.java
@@ -220,26 +251,39 @@ add_jar(
   src/main/java/org/rocksdb/FlushOptions.java
   src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
   src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
+  src/main/java/org/rocksdb/HdfsEnv.java
   src/main/java/org/rocksdb/HistogramData.java
   src/main/java/org/rocksdb/HistogramType.java
   src/main/java/org/rocksdb/IndexType.java
   src/main/java/org/rocksdb/InfoLogLevel.java
   src/main/java/org/rocksdb/IngestExternalFileOptions.java
+  src/main/java/org/rocksdb/LevelMetaData.java
+  src/main/java/org/rocksdb/LiveFileMetaData.java
+  src/main/java/org/rocksdb/LogFile.java
   src/main/java/org/rocksdb/Logger.java
   src/main/java/org/rocksdb/LRUCache.java
   src/main/java/org/rocksdb/MemoryUsageType.java
   src/main/java/org/rocksdb/MemoryUtil.java
   src/main/java/org/rocksdb/MemTableConfig.java
   src/main/java/org/rocksdb/MergeOperator.java
-  src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
   src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
+  src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
+  src/main/java/org/rocksdb/MutableDBOptions.java
+  src/main/java/org/rocksdb/MutableDBOptionsInterface.java
+  src/main/java/org/rocksdb/MutableOptionKey.java
+  src/main/java/org/rocksdb/MutableOptionValue.java
   src/main/java/org/rocksdb/NativeComparatorWrapper.java
   src/main/java/org/rocksdb/NativeLibraryLoader.java
+  src/main/java/org/rocksdb/OperationStage.java
+  src/main/java/org/rocksdb/OperationType.java
   src/main/java/org/rocksdb/OptimisticTransactionDB.java
   src/main/java/org/rocksdb/OptimisticTransactionOptions.java
   src/main/java/org/rocksdb/Options.java
   src/main/java/org/rocksdb/OptionsUtil.java
+  src/main/java/org/rocksdb/PersistentCache.java
   src/main/java/org/rocksdb/PlainTableConfig.java
+  src/main/java/org/rocksdb/Priority.java
+  src/main/java/org/rocksdb/Range.java
   src/main/java/org/rocksdb/RateLimiter.java
   src/main/java/org/rocksdb/RateLimiterMode.java
   src/main/java/org/rocksdb/ReadOptions.java
@@ -255,11 +299,14 @@ add_jar(
   src/main/java/org/rocksdb/RocksMemEnv.java
   src/main/java/org/rocksdb/RocksMutableObject.java
   src/main/java/org/rocksdb/RocksObject.java
+  src/main/java/org/rocksdb/SizeApproximationFlag.java
   src/main/java/org/rocksdb/SkipListMemTableConfig.java
   src/main/java/org/rocksdb/Slice.java
   src/main/java/org/rocksdb/Snapshot.java
   src/main/java/org/rocksdb/SstFileManager.java
+  src/main/java/org/rocksdb/SstFileMetaData.java
   src/main/java/org/rocksdb/SstFileWriter.java
+  src/main/java/org/rocksdb/StateType.java
   src/main/java/org/rocksdb/StatisticsCollectorCallback.java
   src/main/java/org/rocksdb/StatisticsCollector.java
   src/main/java/org/rocksdb/Statistics.java
@@ -267,8 +314,15 @@ add_jar(
   src/main/java/org/rocksdb/StatsLevel.java
   src/main/java/org/rocksdb/Status.java
   src/main/java/org/rocksdb/StringAppendOperator.java
+  src/main/java/org/rocksdb/TableFilter.java
+  src/main/java/org/rocksdb/TableProperties.java
   src/main/java/org/rocksdb/TableFormatConfig.java
+  src/main/java/org/rocksdb/ThreadType.java
+  src/main/java/org/rocksdb/ThreadStatus.java
   src/main/java/org/rocksdb/TickerType.java
+  src/main/java/org/rocksdb/TimedEnv.java
+  src/main/java/org/rocksdb/TraceOptions.java
+  src/main/java/org/rocksdb/TraceWriter.java
   src/main/java/org/rocksdb/TransactionalDB.java
   src/main/java/org/rocksdb/TransactionalOptions.java
   src/main/java/org/rocksdb/TransactionDB.java
@@ -279,6 +333,9 @@ add_jar(
   src/main/java/org/rocksdb/TtlDB.java
   src/main/java/org/rocksdb/TxnDBWritePolicy.java
   src/main/java/org/rocksdb/VectorMemTableConfig.java
+  src/main/java/org/rocksdb/WalFileType.java
+  src/main/java/org/rocksdb/WalFilter.java
+  src/main/java/org/rocksdb/WalProcessingOption.java
   src/main/java/org/rocksdb/WALRecoveryMode.java
   src/main/java/org/rocksdb/WBWIRocksIterator.java
   src/main/java/org/rocksdb/WriteBatchInterface.java
diff --git a/java/Makefile b/java/Makefile
index b3b89eb83..efc9d2b4e 100644
--- a/java/Makefile
+++ b/java/Makefile
@@ -1,7 +1,10 @@
 NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
 	org.rocksdb.AbstractCompactionFilterFactory\
 	org.rocksdb.AbstractSlice\
+	org.rocksdb.AbstractTableFilter\
+	org.rocksdb.AbstractTraceWriter\
 	org.rocksdb.AbstractTransactionNotifier\
+	org.rocksdb.AbstractWalFilter\
 	org.rocksdb.BackupEngine\
 	org.rocksdb.BackupableDBOptions\
 	org.rocksdb.BlockBasedTableConfig\
@@ -12,6 +15,9 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
 	org.rocksdb.CassandraValueMergeOperator\
 	org.rocksdb.ColumnFamilyHandle\
 	org.rocksdb.ColumnFamilyOptions\
+	org.rocksdb.CompactionJobInfo\
+	org.rocksdb.CompactionJobStats\
+	org.rocksdb.CompactionOptions\
 	org.rocksdb.CompactionOptionsFIFO\
 	org.rocksdb.CompactionOptionsUniversal\
 	org.rocksdb.CompactRangeOptions\
@@ -28,6 +34,7 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
 	org.rocksdb.IngestExternalFileOptions\
 	org.rocksdb.HashLinkedListMemTableConfig\
 	org.rocksdb.HashSkipListMemTableConfig\
+	org.rocksdb.HdfsEnv\
 	org.rocksdb.Logger\
 	org.rocksdb.LRUCache\
 	org.rocksdb.MemoryUsageType\
@@ -38,6 +45,7 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
 	org.rocksdb.OptimisticTransactionOptions\
 	org.rocksdb.Options\
 	org.rocksdb.OptionsUtil\
+	org.rocksdb.PersistentCache\
 	org.rocksdb.PlainTableConfig\
 	org.rocksdb.RateLimiter\
 	org.rocksdb.ReadOptions\
@@ -53,6 +61,8 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
 	org.rocksdb.SstFileManager\
 	org.rocksdb.SstFileWriter\
 	org.rocksdb.Statistics\
+	org.rocksdb.ThreadStatus\
+	org.rocksdb.TimedEnv\
 	org.rocksdb.Transaction\
 	org.rocksdb.TransactionDB\
 	org.rocksdb.TransactionDBOptions\
@@ -94,7 +104,10 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
 	org.rocksdb.ClockCacheTest\
 	org.rocksdb.ColumnFamilyOptionsTest\
 	org.rocksdb.ColumnFamilyTest\
-	org.rocksdb.CompactionFilterFactoryTest\
+	org.rocksdb.CompactionFilterFactoryTest\
+	org.rocksdb.CompactionJobInfoTest\
+	org.rocksdb.CompactionJobStatsTest\
+	org.rocksdb.CompactionOptionsTest\
 	org.rocksdb.CompactionOptionsFIFOTest\
 	org.rocksdb.CompactionOptionsUniversalTest\
 	org.rocksdb.CompactionPriorityTest\
@@ -107,6 +120,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
 	org.rocksdb.DirectComparatorTest\
 	org.rocksdb.DirectSliceTest\
 	org.rocksdb.EnvOptionsTest\
+	org.rocksdb.HdfsEnvTest\
 	org.rocksdb.IngestExternalFileOptionsTest\
 	org.rocksdb.util.EnvironmentTest\
 	org.rocksdb.FilterTest\
@@ -120,6 +134,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
 	org.rocksdb.MergeTest\
 	org.rocksdb.MixedOptionsTest\
 	org.rocksdb.MutableColumnFamilyOptionsTest\
+	org.rocksdb.MutableDBOptionsTest\
 	org.rocksdb.NativeComparatorWrapperTest\
 	org.rocksdb.NativeLibraryLoaderTest\
 	org.rocksdb.OptimisticTransactionTest\
@@ -133,7 +148,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
 	org.rocksdb.ReadOptionsTest\
 	org.rocksdb.RocksDBTest\
 	org.rocksdb.RocksDBExceptionTest\
-	org.rocksdb.RocksEnvTest\
+	org.rocksdb.DefaultEnvTest\
 	org.rocksdb.RocksIteratorTest\
 	org.rocksdb.RocksMemEnvTest\
 	org.rocksdb.util.SizeUnitTest\
@@ -141,6 +156,8 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
 	org.rocksdb.SnapshotTest\
 	org.rocksdb.SstFileManagerTest\
 	org.rocksdb.SstFileWriterTest\
+	org.rocksdb.TableFilterTest\
+	org.rocksdb.TimedEnvTest\
 	org.rocksdb.TransactionTest\
 	org.rocksdb.TransactionDBTest\
 	org.rocksdb.TransactionOptionsTest\
@@ -149,6 +166,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
 	org.rocksdb.TtlDBTest\
 	org.rocksdb.StatisticsTest\
 	org.rocksdb.StatisticsCollectorTest\
+	org.rocksdb.WalFilterTest\
 	org.rocksdb.WALRecoveryModeTest\
 	org.rocksdb.WriteBatchHandlerTest\
 	org.rocksdb.WriteBatchTest\
diff --git a/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java b/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
index db69e58cc..67f6a5cc0 100644
--- a/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
+++ b/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
@@ -493,7 +493,7 @@ public class DbBenchmark {
       options.setCreateIfMissing(false);
     }
     if (useMemenv_) {
-      options.setEnv(new RocksMemEnv());
+      options.setEnv(new RocksMemEnv(Env.getDefault()));
     }
     switch (memtable_) {
       case "skip_list":
diff --git a/java/rocksjni/compaction_filter_factory.cc b/java/rocksjni/compaction_filter_factory.cc
index c2fb1b0a1..2ef0a7746 100644
--- a/java/rocksjni/compaction_filter_factory.cc
+++ b/java/rocksjni/compaction_filter_factory.cc
@@ -31,9 +31,8 @@ jlong Java_org_rocksdb_AbstractCompactionFilterFactory_createNewCompactionFilter
  * Signature: (J)V
  */
 void Java_org_rocksdb_AbstractCompactionFilterFactory_disposeInternal(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* ptr_sptr_cff = reinterpret_cast<
       std::shared_ptr<rocksdb::AbstractCompactionFilterFactory>*>(jhandle);
   delete ptr_sptr_cff;
-  // @lint-ignore TXT4 T25377293 Grandfathered in
 }
diff --git a/java/rocksjni/compaction_job_info.cc b/java/rocksjni/compaction_job_info.cc
new file mode 100644
index 000000000..6af6efcb8
--- /dev/null
+++ b/java/rocksjni/compaction_job_info.cc
@@ -0,0 +1,222 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompactionJobInfo.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionJobInfo.h"
+#include "rocksdb/listener.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: newCompactionJobInfo
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_newCompactionJobInfo(
+    JNIEnv*, jclass) {
+  auto* compact_job_info = new rocksdb::CompactionJobInfo();
+  return reinterpret_cast<jlong>(compact_job_info);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobInfo_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  delete compact_job_info;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: columnFamilyName
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobInfo_columnFamilyName(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return rocksdb::JniUtil::copyBytes(
+      env, compact_job_info->cf_name);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: status
+ * Signature: (J)Lorg/rocksdb/Status;
+ */
+jobject Java_org_rocksdb_CompactionJobInfo_status(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return rocksdb::StatusJni::construct(
+      env, compact_job_info->status);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: threadId
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_threadId(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return static_cast<jlong>(compact_job_info->thread_id);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: jobId
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_jobId(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return static_cast<jint>(compact_job_info->job_id);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: baseInputLevel
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_baseInputLevel(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return static_cast<jint>(compact_job_info->base_input_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: outputLevel
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_outputLevel(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return static_cast<jint>(compact_job_info->output_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: inputFiles
+ * Signature: (J)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_CompactionJobInfo_inputFiles(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return rocksdb::JniUtil::toJavaStrings(
+      env, &compact_job_info->input_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: outputFiles
+ * Signature: (J)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_CompactionJobInfo_outputFiles(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return rocksdb::JniUtil::toJavaStrings(
+      env, &compact_job_info->output_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: tableProperties
+ * Signature: (J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_CompactionJobInfo_tableProperties(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  auto* map = &compact_job_info->table_properties;
+
+  jobject jhash_map = rocksdb::HashMapJni::construct(
+      env, static_cast<uint32_t>(map->size()));
+  if (jhash_map == nullptr) {
+    // exception occurred
+    return nullptr;
+  }
+
+  const rocksdb::HashMapJni::FnMapKV<const std::string,
+      std::shared_ptr<const rocksdb::TableProperties>, jobject, jobject>
+      fn_map_kv =
+          [env](const std::pair<const std::string,
+              std::shared_ptr<const rocksdb::TableProperties>>& kv) {
+    jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+    if (env->ExceptionCheck()) {
+      // an error occurred
+      return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+    }
+
+    jobject jtable_properties = rocksdb::TablePropertiesJni::fromCppTableProperties(
+        env, *(kv.second.get()));
+    if (env->ExceptionCheck()) {
+      // an error occurred
+      env->DeleteLocalRef(jkey);
+      return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+    }
+
+    return std::unique_ptr<std::pair<jobject, jobject>>(
+        new std::pair<jobject, jobject>(static_cast<jobject>(jkey),
+            jtable_properties));
+  };
+
+  if (!rocksdb::HashMapJni::putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+    // exception occurred
+    return nullptr;
+  }
+
+  return jhash_map;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: compactionReason
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionJobInfo_compactionReason(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return rocksdb::CompactionReasonJni::toJavaCompactionReason(
+      compact_job_info->compaction_reason);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: compression
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionJobInfo_compression(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  return rocksdb::CompressionTypeJni::toJavaCompressionType(
+      compact_job_info->compression);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: stats
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_stats(
+    JNIEnv *, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+  auto* stats = new rocksdb::CompactionJobStats();
+  stats->Add(compact_job_info->stats);
+  return reinterpret_cast<jlong>(stats);
+}
diff --git a/java/rocksjni/compaction_job_stats.cc b/java/rocksjni/compaction_job_stats.cc
new file mode 100644
index 000000000..7d13dd12f
--- /dev/null
+++ b/java/rocksjni/compaction_job_stats.cc
@@ -0,0 +1,361 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompactionJobStats.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionJobStats.h"
+#include "rocksdb/compaction_job_stats.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: newCompactionJobStats
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_newCompactionJobStats(
+    JNIEnv*, jclass) {
+  auto* compact_job_stats = new rocksdb::CompactionJobStats();
+  return reinterpret_cast<jlong>(compact_job_stats);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobStats_disposeInternal(
+    JNIEnv *, jobject, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  delete compact_job_stats;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: reset
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobStats_reset(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  compact_job_stats->Reset();
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: add
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompactionJobStats_add(
+    JNIEnv*, jclass, jlong jhandle, jlong jother_handle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  auto* other_compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jother_handle);
+  compact_job_stats->Add(*other_compact_job_stats);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: elapsedMicros
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_elapsedMicros(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(compact_job_stats->elapsed_micros);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputRecords(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(compact_job_stats->num_input_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputFiles
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputFiles(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(compact_job_stats->num_input_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputFilesAtOutputLevel
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputFilesAtOutputLevel(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_input_files_at_output_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numOutputRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numOutputRecords(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_output_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numOutputFiles
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numOutputFiles(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_output_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: isManualCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactionJobStats_isManualCompaction(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  if (compact_job_stats->is_manual_compaction) {
+    return JNI_TRUE;
+  } else {
+    return JNI_FALSE;
+  }
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputBytes(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->total_input_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalOutputBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalOutputBytes(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->total_output_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numRecordsReplaced
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numRecordsReplaced(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_records_replaced);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputRawKeyBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputRawKeyBytes(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->total_input_raw_key_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputRawValueBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputRawValueBytes(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->total_input_raw_value_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputDeletionRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputDeletionRecords(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_input_deletion_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numExpiredDeletionRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numExpiredDeletionRecords(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_expired_deletion_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numCorruptKeys
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numCorruptKeys(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_corrupt_keys);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileWriteNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileWriteNanos(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->file_write_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileRangeSyncNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileRangeSyncNanos(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->file_range_sync_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileFsyncNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileFsyncNanos(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->file_fsync_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: filePrepareWriteNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_filePrepareWriteNanos(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->file_prepare_write_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: smallestOutputKeyPrefix
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobStats_smallestOutputKeyPrefix(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return rocksdb::JniUtil::copyBytes(env,
+      compact_job_stats->smallest_output_key_prefix);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: largestOutputKeyPrefix
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobStats_largestOutputKeyPrefix(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return rocksdb::JniUtil::copyBytes(env,
+      compact_job_stats->largest_output_key_prefix);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numSingleDelFallthru
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numSingleDelFallthru(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_single_del_fallthru);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numSingleDelMismatch
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numSingleDelMismatch(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_stats =
+      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+  return static_cast<jlong>(
+      compact_job_stats->num_single_del_mismatch);
+}
\ No newline at end of file
diff --git a/java/rocksjni/compaction_options.cc b/java/rocksjni/compaction_options.cc
new file mode 100644
index 000000000..6aaabea73
--- /dev/null
+++ b/java/rocksjni/compaction_options.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompactionOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionOptions.h"
+#include "rocksdb/options.h"
+#include "rocksjni/portal.h"
+
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: newCompactionOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionOptions_newCompactionOptions(
+    JNIEnv*, jclass) {
+  auto* compact_opts = new rocksdb::CompactionOptions();
+  return reinterpret_cast<jlong>(compact_opts);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionOptions_disposeInternal(
+    JNIEnv *, jobject, jlong jhandle) {
+  auto* compact_opts =
+      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+  delete compact_opts;
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: compression
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionOptions_compression(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_opts =
+      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+  return rocksdb::CompressionTypeJni::toJavaCompressionType(
+      compact_opts->compression);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setCompression
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_CompactionOptions_setCompression(
+    JNIEnv*, jclass, jlong jhandle, jbyte jcompression_type_value) {
+  auto* compact_opts =
+      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+  compact_opts->compression =
+      rocksdb::CompressionTypeJni::toCppCompressionType(
+          jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: outputFileSizeLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionOptions_outputFileSizeLimit(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_opts =
+      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+  return static_cast<jlong>(
+      compact_opts->output_file_size_limit);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setOutputFileSizeLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompactionOptions_setOutputFileSizeLimit(
+    JNIEnv*, jclass, jlong jhandle, jlong joutput_file_size_limit) {
+  auto* compact_opts =
+      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+  compact_opts->output_file_size_limit =
+      static_cast<uint64_t>(joutput_file_size_limit);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: maxSubcompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptions_maxSubcompactions(
+    JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_opts =
+      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+  return static_cast<jint>(
+      compact_opts->max_subcompactions);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setMaxSubcompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptions_setMaxSubcompactions(
+    JNIEnv*, jclass, jlong jhandle, jint jmax_subcompactions) {
+  auto* compact_opts =
+      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+  compact_opts->max_subcompactions =
+      static_cast<uint32_t>(jmax_subcompactions);
+}
\ No newline at end of file
diff --git a/java/rocksjni/compaction_options_fifo.cc b/java/rocksjni/compaction_options_fifo.cc
index 594cb01d8..b7c445fd6 100644
--- a/java/rocksjni/compaction_options_fifo.cc
+++ b/java/rocksjni/compaction_options_fifo.cc
@@ -17,7 +17,7 @@
  * Signature: ()J
  */
 jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
-    JNIEnv* /*env*/, jclass /*jcls*/) {
+    JNIEnv*, jclass) {
   const auto* opt = new rocksdb::CompactionOptionsFIFO();
   return reinterpret_cast<jlong>(opt);
 }
@@ -28,8 +28,7 @@ jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jmax_table_files_size) {
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
   opt->max_table_files_size = static_cast<uint64_t>(jmax_table_files_size);
 }
@@ -39,9 +38,8 @@ void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
  * Method: maxTableFilesSize
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(JNIEnv* /*env*/,
-                                                               jobject /*jobj*/,
-                                                               jlong jhandle) {
+jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
   return static_cast<jlong>(opt->max_table_files_size);
 }
@@ -52,8 +50,7 @@ jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_CompactionOptionsFIFO_setAllowCompaction(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean allow_compaction) {
+    JNIEnv*, jobject, jlong jhandle, jboolean allow_compaction) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
   opt->allow_compaction = static_cast<bool>(allow_compaction);
 }
@@ -64,7 +61,7 @@ void Java_org_rocksdb_CompactionOptionsFIFO_setAllowCompaction(
  * Signature: (J)Z
  */
 jboolean Java_org_rocksdb_CompactionOptionsFIFO_allowCompaction(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
   return static_cast<jboolean>(opt->allow_compaction);
 }
@@ -74,8 +71,7 @@ jboolean Java_org_rocksdb_CompactionOptionsFIFO_allowCompaction(
  * Method: disposeInternal
  * Signature: (J)V
 */
-void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(JNIEnv* /*env*/,
-                                                            jobject /*jobj*/,
-                                                            jlong jhandle) {
+void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
   delete reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
 }
diff --git a/java/rocksjni/compaction_options_universal.cc b/java/rocksjni/compaction_options_universal.cc
index da31bc688..7ca519885 100644
--- a/java/rocksjni/compaction_options_universal.cc
+++ b/java/rocksjni/compaction_options_universal.cc
@@ -18,7 +18,7 @@
  * Signature: ()J
  */
 jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
-    JNIEnv* /*env*/, jclass /*jcls*/) {
+    JNIEnv*, jclass) {
   const auto* opt = new rocksdb::CompactionOptionsUniversal();
   return reinterpret_cast<jlong>(opt);
 }
@@ -29,7 +29,7 @@ jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
  * Signature: (JI)V
  */
 void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jsize_ratio) {
+    JNIEnv*, jobject, jlong jhandle, jint jsize_ratio) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   opt->size_ratio = static_cast<unsigned int>(jsize_ratio);
 }
@@ -39,9 +38,8 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
  * Method: sizeRatio
  * Signature: (J)I
 */
-jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(JNIEnv* /*env*/,
-                                                           jobject /*jobj*/,
-                                                           jlong jhandle) {
+jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   return static_cast<jint>(opt->size_ratio);
 }
@@ -52,7 +51,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jmin_merge_width) {
+    JNIEnv*, jobject, jlong jhandle, jint jmin_merge_width) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   opt->min_merge_width = static_cast<unsigned int>(jmin_merge_width);
 }
@@ -62,9 +61,8 @@ void
Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
  * Method: minMergeWidth
  * Signature: (J)I
 */
-jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(JNIEnv* /*env*/,
-                                                               jobject /*jobj*/,
-                                                               jlong jhandle) {
+jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   return static_cast<jint>(opt->min_merge_width);
 }
@@ -75,7 +73,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jmax_merge_width) {
+    JNIEnv*, jobject, jlong jhandle, jint jmax_merge_width) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   opt->max_merge_width = static_cast<unsigned int>(jmax_merge_width);
 }
@@ -85,9 +83,8 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
  * Method: maxMergeWidth
  * Signature: (J)I
 */
-jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(JNIEnv* /*env*/,
-                                                               jobject /*jobj*/,
-                                                               jlong jhandle) {
+jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   return static_cast<jint>(opt->max_merge_width);
 }
@@ -98,8 +95,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jint jmax_size_amplification_percent) {
+    JNIEnv*, jobject, jlong jhandle, jint jmax_size_amplification_percent) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   opt->max_size_amplification_percent =
       static_cast<unsigned int>(jmax_size_amplification_percent);
@@ -111,7 +107,7 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
  * Signature: (J)I
  */
 jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   return static_cast<jint>(opt->max_size_amplification_percent);
 }
@@ -122,7 +118,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
  * Signature: (JI)V
  */
 void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jint jcompression_size_percent) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   opt->compression_size_percent =
@@ -135,7 +131,7 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
  * Signature: (J)I
  */
 jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   return static_cast<jint>(opt->compression_size_percent);
 }
@@ -146,7 +142,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
  * Signature: (JB)V
  */
 void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jbyte jstop_style_value) {
+    JNIEnv*, jobject, jlong jhandle, jbyte jstop_style_value) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   opt->stop_style = rocksdb::CompactionStopStyleJni::toCppCompactionStopStyle(
       jstop_style_value);
@@ -157,9 +153,8 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
  * Method: stopStyle
  * Signature: (J)B
 */
-jbyte
-jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(JNIEnv* /*env*/,
-                                                            jobject /*jobj*/,
-                                                            jlong jhandle) {
+jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   return rocksdb::CompactionStopStyleJni::toJavaCompactionStopStyle(
       opt->stop_style);
@@ -171,8 +166,7 @@ jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jallow_trivial_move) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jallow_trivial_move) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   opt->allow_trivial_move = static_cast<bool>(jallow_trivial_move);
 }
@@ -183,7 +177,7 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
  * Signature: (J)Z
  */
 jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
   return opt->allow_trivial_move;
 }
@@ -194,6 +188,6 @@ jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
  * Signature: (J)V
  */
 void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   delete reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
 }
diff --git a/java/rocksjni/compression_options.cc b/java/rocksjni/compression_options.cc
index a5598abe1..f0155eb33 100644
--- a/java/rocksjni/compression_options.cc
+++ b/java/rocksjni/compression_options.cc
@@ -17,7 +17,7 @@
  * Signature: ()J
  */
 jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
-    JNIEnv* /*env*/, jclass /*jcls*/) {
+    JNIEnv*, jclass) {
   const auto* opt = new rocksdb::CompressionOptions();
   return reinterpret_cast<jlong>(opt);
 }
@@ -27,10 +27,8 @@ jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
  * Method: setWindowBits
  * Signature: (JI)V
 */
-void Java_org_rocksdb_CompressionOptions_setWindowBits(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jhandle,
-                                                       jint jwindow_bits) {
+void Java_org_rocksdb_CompressionOptions_setWindowBits(
+    JNIEnv*, jobject, jlong jhandle, jint jwindow_bits) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
   opt->window_bits = static_cast<int>(jwindow_bits);
 }
@@ -40,9 +38,8 @@ void Java_org_rocksdb_CompressionOptions_setWindowBits(JNIEnv* /*env*/,
  * Method: windowBits
  * Signature: (J)I
 */
-jint Java_org_rocksdb_CompressionOptions_windowBits(JNIEnv* /*env*/,
-                                                    jobject /*jobj*/,
-                                                    jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_windowBits(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
   return static_cast<jint>(opt->window_bits);
 }
@@ -52,9 +49,8 @@ jint Java_org_rocksdb_CompressionOptions_windowBits(JNIEnv* /*env*/,
  * Method: setLevel
  * Signature: (JI)V
 */
-void Java_org_rocksdb_CompressionOptions_setLevel(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle, jint jlevel) {
+void Java_org_rocksdb_CompressionOptions_setLevel(
+    JNIEnv*, jobject, jlong jhandle, jint jlevel) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
   opt->level = static_cast<int>(jlevel);
 }
@@ -64,9 +60,8 @@ void Java_org_rocksdb_CompressionOptions_setLevel(JNIEnv* /*env*/,
  * Method: level
  * Signature: (J)I
 */
-jint Java_org_rocksdb_CompressionOptions_level(JNIEnv* /*env*/,
-                                               jobject /*jobj*/,
-                                               jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_level(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
   return static_cast<jint>(opt->level);
 }
@@ -76,10 +71,8 @@ jint Java_org_rocksdb_CompressionOptions_level(JNIEnv* /*env*/,
  * Method: setStrategy
  * Signature: (JI)V
 */
-void Java_org_rocksdb_CompressionOptions_setStrategy(JNIEnv* /*env*/,
-                                                     jobject /*jobj*/,
-                                                     jlong jhandle,
-                                                     jint jstrategy) {
+void Java_org_rocksdb_CompressionOptions_setStrategy(
+    JNIEnv*, jobject, jlong jhandle, jint jstrategy) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
   opt->strategy = static_cast<int>(jstrategy);
 }
@@ -89,9 +82,8 @@ void Java_org_rocksdb_CompressionOptions_setStrategy(JNIEnv* /*env*/,
  * Method: strategy
  * Signature: (J)I
 */
-jint Java_org_rocksdb_CompressionOptions_strategy(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_strategy(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
   return static_cast<jint>(opt->strategy);
 }
@@ -101,12 +93,10 @@ jint Java_org_rocksdb_CompressionOptions_strategy(JNIEnv* /*env*/,
  * Method: setMaxDictBytes
  * Signature: (JI)V
 */
-void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle,
-                                                         jint jmax_dict_bytes) {
+void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(
+    JNIEnv*, jobject, jlong jhandle, jint jmax_dict_bytes) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  opt->max_dict_bytes = static_cast<int>(jmax_dict_bytes);
+  opt->max_dict_bytes = static_cast<uint32_t>(jmax_dict_bytes);
 }
 
 /*
@@ -114,44 +104,61 @@ void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(JNIEnv* /*env*/,
  * Method: maxDictBytes
  * Signature: (J)I
 */
-jint Java_org_rocksdb_CompressionOptions_maxDictBytes(JNIEnv* /*env*/,
-                                                      jobject /*jobj*/,
-                                                      jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_maxDictBytes(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
   return static_cast<jint>(opt->max_dict_bytes);
 }
 
 /*
  * Class: org_rocksdb_CompressionOptions
- * Method: setEnabled
+ * Method: setZstdMaxTrainBytes
  * Signature: (JI)V
 */
-void Java_org_rocksdb_CompressionOptions_setEnabled(JNIEnv* /*env*/,
-                                                    jobject /*jobj*/,
-                                                    jlong jhandle,
-                                                    jboolean jenabled) {
+void Java_org_rocksdb_CompressionOptions_setZstdMaxTrainBytes(
+    JNIEnv*, jobject, jlong jhandle, jint jzstd_max_train_bytes) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  opt->enabled = static_cast<bool>(jenabled);
+  opt->zstd_max_train_bytes = static_cast<uint32_t>(jzstd_max_train_bytes);
 }
 
 /*
  * Class: org_rocksdb_CompressionOptions
- * Method: Enabled
+ * Method: zstdMaxTrainBytes
  * Signature: (J)I
 */
-jint Java_org_rocksdb_CompressionOptions_enabled(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_zstdMaxTrainBytes(
+    JNIEnv *, jobject, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  return static_cast<jint>(opt->zstd_max_train_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setEnabled
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompressionOptions_setEnabled(
+    JNIEnv*, jobject, jlong jhandle, jboolean jenabled) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  opt->enabled = jenabled == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: enabled
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompressionOptions_enabled(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  return static_cast<jint>(opt->enabled);
+  return static_cast<jboolean>(opt->enabled);
 }
 
 /*
  * Class: org_rocksdb_CompressionOptions
  * Method: disposeInternal
  * Signature: (J)V
 */
-void Java_org_rocksdb_CompressionOptions_disposeInternal(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+void Java_org_rocksdb_CompressionOptions_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
   delete reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
 }
diff --git a/java/rocksjni/env.cc b/java/rocksjni/env.cc
index 5433faf00..ed54bd36a 100644
--- a/java/rocksjni/env.cc
+++ b/java/rocksjni/env.cc
@@ -6,66 +6,160 @@
 // This file implements the "bridge" between Java and C++ and enables
 // calling c++ rocksdb::Env methods from Java side.
 
+#include <jni.h>
+#include <vector>
+
+#include "portal.h"
 #include "rocksdb/env.h"
 #include "include/org_rocksdb_Env.h"
+#include "include/org_rocksdb_HdfsEnv.h"
 #include "include/org_rocksdb_RocksEnv.h"
 #include "include/org_rocksdb_RocksMemEnv.h"
+#include "include/org_rocksdb_TimedEnv.h"
 
 /*
  * Class: org_rocksdb_Env
  * Method: getDefaultEnvInternal
  * Signature: ()J
 */
-jlong Java_org_rocksdb_Env_getDefaultEnvInternal(JNIEnv* /*env*/,
-                                                 jclass /*jclazz*/) {
+jlong Java_org_rocksdb_Env_getDefaultEnvInternal(
+    JNIEnv*, jclass) {
   return reinterpret_cast<jlong>(rocksdb::Env::Default());
 }
 
+/*
+ * Class: org_rocksdb_RocksEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksEnv_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
+  assert(e != nullptr);
+  delete e;
+}
+
 /*
  * Class: org_rocksdb_Env
  * Method: setBackgroundThreads
- * Signature: (JII)V
+ * Signature: (JIB)V
 */
-void Java_org_rocksdb_Env_setBackgroundThreads(JNIEnv* /*env*/,
-                                               jobject /*jobj*/, jlong jhandle,
-                                               jint num, jint priority) {
+void Java_org_rocksdb_Env_setBackgroundThreads(
+    JNIEnv*, jobject, jlong jhandle, jint jnum, jbyte jpriority_value) {
   auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
-  switch (priority) {
-    case org_rocksdb_Env_FLUSH_POOL:
-      rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::LOW);
-      break;
-    case org_rocksdb_Env_COMPACTION_POOL:
-      rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::HIGH);
-      break;
-  }
+  rocks_env->SetBackgroundThreads(static_cast<int>(jnum),
+      rocksdb::PriorityJni::toCppPriority(jpriority_value));
 }
 
 /*
- * Class: org_rocksdb_sEnv
+ * Class: org_rocksdb_Env
+ * Method: getBackgroundThreads
+ * Signature: (JB)I
+ */
+jint Java_org_rocksdb_Env_getBackgroundThreads(
+    JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+  const int num = rocks_env->GetBackgroundThreads(
+      rocksdb::PriorityJni::toCppPriority(jpriority_value));
+  return static_cast<jint>(num);
+}
+
+/*
+ * Class: org_rocksdb_Env
  * Method: getThreadPoolQueueLen
- * Signature: (JI)I
+ * Signature: (JB)I
+ */
+jint Java_org_rocksdb_Env_getThreadPoolQueueLen(
+    JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+  const int queue_len = rocks_env->GetThreadPoolQueueLen(
+      rocksdb::PriorityJni::toCppPriority(jpriority_value));
+  return static_cast<jint>(queue_len);
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: incBackgroundThreadsIfNeeded
+ * Signature: (JIB)V
+ */
+void Java_org_rocksdb_Env_incBackgroundThreadsIfNeeded(
+    JNIEnv*, jobject, jlong jhandle, jint jnum, jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+  rocks_env->IncBackgroundThreadsIfNeeded(static_cast<int>(jnum),
+      rocksdb::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: lowerThreadPoolIOPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Env_lowerThreadPoolIOPriority(
+    JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+  rocks_env->LowerThreadPoolIOPriority(
+      rocksdb::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: lowerThreadPoolCPUPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Env_lowerThreadPoolCPUPriority(
+    JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+  rocks_env->LowerThreadPoolCPUPriority(
+      rocksdb::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: getThreadList
+ * Signature: (J)[Lorg/rocksdb/ThreadStatus;
 */
-jint Java_org_rocksdb_Env_getThreadPoolQueueLen(JNIEnv* /*env*/,
-                                                jobject /*jobj*/, jlong jhandle,
-                                                jint pool_id) {
+jobjectArray Java_org_rocksdb_Env_getThreadList(
+    JNIEnv* env, jobject, jlong jhandle) {
   auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
-  switch (pool_id) {
-    case org_rocksdb_RocksEnv_FLUSH_POOL:
-      return rocks_env->GetThreadPoolQueueLen(rocksdb::Env::Priority::LOW);
-    case org_rocksdb_RocksEnv_COMPACTION_POOL:
-      return rocks_env->GetThreadPoolQueueLen(rocksdb::Env::Priority::HIGH);
+  std::vector<rocksdb::ThreadStatus> thread_status;
+  rocksdb::Status s = rocks_env->GetThreadList(&thread_status);
+  if (!s.ok()) {
+    // error, throw exception
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  // object[]
+  const jsize len = static_cast<jsize>(thread_status.size());
+  jobjectArray jthread_status =
+      env->NewObjectArray(len, rocksdb::ThreadStatusJni::getJClass(env), nullptr);
+  if (jthread_status == nullptr) {
+    // an exception occurred
+    return nullptr;
+  }
+  for (jsize i = 0; i < len; ++i) {
+    jobject jts =
+        rocksdb::ThreadStatusJni::construct(env, &(thread_status[i]));
+    env->SetObjectArrayElement(jthread_status, i, jts);
+    if (env->ExceptionCheck()) {
+      // exception occurred
+      env->DeleteLocalRef(jthread_status);
+      return nullptr;
+    }
   }
-  return 0;
+
+  return jthread_status;
 }
 
 /*
  * Class: org_rocksdb_RocksMemEnv
  * Method: createMemEnv
- * Signature: ()J
+ * Signature: (J)J
 */
-jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(JNIEnv* /*env*/,
-                                                jclass /*jclazz*/) {
-  return reinterpret_cast<jlong>(rocksdb::NewMemEnv(rocksdb::Env::Default()));
+jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(
+    JNIEnv*, jclass, jlong jbase_env_handle) {
+  auto* base_env = reinterpret_cast<rocksdb::Env*>(jbase_env_handle);
+  return reinterpret_cast<jlong>(rocksdb::NewMemEnv(base_env));
 }
 
 /*
@@ -73,10 +167,68 @@ jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(JNIEnv* /*env*/,
  * Method: disposeInternal
  * Signature: (J)V
 */
-void Java_org_rocksdb_RocksMemEnv_disposeInternal(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+void Java_org_rocksdb_RocksMemEnv_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
+  assert(e != nullptr);
+  delete e;
+}
+
+/*
+ * Class: org_rocksdb_HdfsEnv
+ * Method: createHdfsEnv
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_HdfsEnv_createHdfsEnv(
+    JNIEnv* env, jclass, jstring jfsname) {
+  jboolean has_exception = JNI_FALSE;
+  auto fsname = rocksdb::JniUtil::copyStdString(env, jfsname, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return 0;
+  }
+  rocksdb::Env* hdfs_env;
+  rocksdb::Status s = rocksdb::NewHdfsEnv(&hdfs_env, fsname);
+  if (!s.ok()) {
+    // error occurred
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return 0;
+  }
+  return reinterpret_cast<jlong>(hdfs_env);
+}
+
+/*
+ * Class: org_rocksdb_HdfsEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_HdfsEnv_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
 
 /*
  * Class: org_rocksdb_RocksMemEnv
  * Method: createMemEnv
- * Signature: ()J
+ * Signature: (J)J
 */
-jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(JNIEnv* /*env*/,
-                                                jclass /*jclazz*/) {
-  return reinterpret_cast<jlong>(rocksdb::NewMemEnv(rocksdb::Env::Default()));
+jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(
+    JNIEnv*, jclass, jlong jbase_env_handle) {
+  auto* base_env = reinterpret_cast<rocksdb::Env*>(jbase_env_handle);
+  return reinterpret_cast<jlong>(rocksdb::NewMemEnv(base_env));
 }
 
 /*
@@ -73,10 +167,68 @@ jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(JNIEnv* /*env*/,
  * Method: disposeInternal
  * Signature: (J)V
 */
-void Java_org_rocksdb_RocksMemEnv_disposeInternal(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+void Java_org_rocksdb_RocksMemEnv_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
+  assert(e != nullptr);
+  delete e;
+}
+
+/*
+ * Class: org_rocksdb_HdfsEnv
+ * Method: createHdfsEnv
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_HdfsEnv_createHdfsEnv(
+    JNIEnv* env, jclass, jstring jfsname) {
+  jboolean has_exception = JNI_FALSE;
+  auto fsname = rocksdb::JniUtil::copyStdString(env, jfsname, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return 0;
+  }
+  rocksdb::Env* hdfs_env;
+  rocksdb::Status s = rocksdb::NewHdfsEnv(&hdfs_env, fsname);
+  if (!s.ok()) {
+    // error occurred
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return 0;
+  }
+  return reinterpret_cast<jlong>(hdfs_env);
+}
+
+/*
+ * Class: org_rocksdb_HdfsEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_HdfsEnv_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
+  assert(e != nullptr);
+  delete e;
+}
+
+/*
+ * Class: org_rocksdb_TimedEnv
+ * Method: createTimedEnv
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TimedEnv_createTimedEnv(
+    JNIEnv*, jclass, jlong jbase_env_handle) {
+  auto* base_env = reinterpret_cast<rocksdb::Env*>(jbase_env_handle);
+  return reinterpret_cast<jlong>(rocksdb::NewTimedEnv(base_env));
+}
+
+/*
+ * Class: org_rocksdb_TimedEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TimedEnv_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
+  assert(e != nullptr);
+  delete e;
+}
+
diff --git a/java/rocksjni/env_options.cc b/java/rocksjni/env_options.cc
index 1c0ebe374..9ed330183 100644
--- a/java/rocksjni/env_options.cc
+++ b/java/rocksjni/env_options.cc
@@ -32,20 +32,32 @@
  * Method: newEnvOptions
  * Signature: ()J
 */
-jlong Java_org_rocksdb_EnvOptions_newEnvOptions(JNIEnv * /*env*/,
-                                                jclass /*jcls*/) {
+jlong Java_org_rocksdb_EnvOptions_newEnvOptions__(
+    JNIEnv*, jclass) {
   auto *env_opt = new rocksdb::EnvOptions();
   return reinterpret_cast<jlong>(env_opt);
 }
 
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: newEnvOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_EnvOptions_newEnvOptions__J(
+    JNIEnv*, jclass, jlong jdboptions_handle) {
+  auto* db_options =
+      reinterpret_cast<rocksdb::DBOptions*>(jdboptions_handle);
+  auto* env_opt = new rocksdb::EnvOptions(*db_options);
+  return reinterpret_cast<jlong>(env_opt);
+}
+
 /*
  * Class: org_rocksdb_EnvOptions
  * Method: disposeInternal
  * Signature: (J)V
 */
-void Java_org_rocksdb_EnvOptions_disposeInternal(JNIEnv * /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+void Java_org_rocksdb_EnvOptions_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
   auto *eo = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
   assert(eo != nullptr);
   delete eo;
@@ -53,93 +65,82 @@ void Java_org_rocksdb_EnvOptions_disposeInternal(JNIEnv * /*env*/,
 
 /*
  * Class: org_rocksdb_EnvOptions
- * Method: setUseDirectReads
+ * Method: setUseMmapReads
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_EnvOptions_setUseDirectReads(JNIEnv * /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle,
-                                                   jboolean use_direct_reads) {
-  ENV_OPTIONS_SET_BOOL(jhandle, use_direct_reads);
+void Java_org_rocksdb_EnvOptions_setUseMmapReads(
+    JNIEnv*, jobject, jlong jhandle, jboolean use_mmap_reads) {
+  ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_reads);
 }
 
 /*
  * Class: org_rocksdb_EnvOptions
- * Method: useDirectReads
+ * Method: useMmapReads
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_EnvOptions_useDirectReads(JNIEnv * /*env*/,
-                                                    jobject /*jobj*/,
-                                                    jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, use_direct_reads);
+jboolean Java_org_rocksdb_EnvOptions_useMmapReads(
+    JNIEnv*, jobject, jlong jhandle) {
+  return ENV_OPTIONS_GET(jhandle, use_mmap_reads);
 }
 
 /*
  * Class: org_rocksdb_EnvOptions
- * Method: setUseDirectWrites
+ * Method: setUseMmapWrites
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_EnvOptions_setUseDirectWrites(
-    JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean use_direct_writes) {
-  ENV_OPTIONS_SET_BOOL(jhandle, use_direct_writes);
+void Java_org_rocksdb_EnvOptions_setUseMmapWrites(
+    JNIEnv*, jobject, jlong jhandle, jboolean use_mmap_writes) {
+  ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_writes);
 }
 
 /*
  * Class: org_rocksdb_EnvOptions
- * Method: useDirectWrites
+ * Method: useMmapWrites
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_EnvOptions_useDirectWrites(JNIEnv * /*env*/,
-                                                     jobject /*jobj*/,
-                                                     jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, use_direct_writes);
+jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(
+    JNIEnv*, jobject, jlong jhandle) {
+  return ENV_OPTIONS_GET(jhandle, use_mmap_writes);
 }
 
 /*
  * Class: org_rocksdb_EnvOptions
- * Method: setUseMmapReads
+ * Method: setUseDirectReads
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_EnvOptions_setUseMmapReads(JNIEnv * /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jboolean use_mmap_reads) {
-  ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_reads);
+void Java_org_rocksdb_EnvOptions_setUseDirectReads(
+    JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
+  ENV_OPTIONS_SET_BOOL(jhandle, use_direct_reads);
 }
 
 /*
  * Class: org_rocksdb_EnvOptions
- * Method: useMmapReads
+ * Method: useDirectReads
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_EnvOptions_useMmapReads(JNIEnv * /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, use_mmap_reads);
+jboolean Java_org_rocksdb_EnvOptions_useDirectReads(
+    JNIEnv*, jobject, jlong jhandle) {
+  return ENV_OPTIONS_GET(jhandle, use_direct_reads);
 }
 
 /*
  * Class: org_rocksdb_EnvOptions
- * Method: setUseMmapWrites
+ * Method: setUseDirectWrites
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_EnvOptions_setUseMmapWrites(JNIEnv * /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle,
-                                                  jboolean use_mmap_writes) {
-  ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_writes);
+void Java_org_rocksdb_EnvOptions_setUseDirectWrites(
+    JNIEnv*, jobject, jlong jhandle, jboolean use_direct_writes) {
+  ENV_OPTIONS_SET_BOOL(jhandle, use_direct_writes);
 }
 
 /*
  * Class: org_rocksdb_EnvOptions
- * Method: useMmapWrites
+ * Method: useDirectWrites
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(JNIEnv * /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, use_mmap_writes);
+jboolean Java_org_rocksdb_EnvOptions_useDirectWrites(
+    JNIEnv*, jobject, jlong jhandle) {
+  return ENV_OPTIONS_GET(jhandle, use_direct_writes);
 }
 
 /*
@@ -147,10 +148,8 @@ jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(JNIEnv * /*env*/,
  * Method: setAllowFallocate
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_EnvOptions_setAllowFallocate(JNIEnv * /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle,
-                                                   jboolean allow_fallocate) {
+void Java_org_rocksdb_EnvOptions_setAllowFallocate(
+    JNIEnv*, jobject, jlong jhandle, jboolean allow_fallocate) {
   ENV_OPTIONS_SET_BOOL(jhandle, allow_fallocate);
 }
 
@@ -159,9 +158,8 @@ void Java_org_rocksdb_EnvOptions_setAllowFallocate(JNIEnv * /*env*/,
  * Method: allowFallocate
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_EnvOptions_allowFallocate(JNIEnv * /*env*/,
-                                                    jobject /*jobj*/,
-                                                    jlong jhandle) {
+jboolean Java_org_rocksdb_EnvOptions_allowFallocate(
+    JNIEnv*, jobject, jlong jhandle) {
   return ENV_OPTIONS_GET(jhandle, allow_fallocate);
 }
 
@@ -170,10 +168,8 @@ jboolean Java_org_rocksdb_EnvOptions_allowFallocate(JNIEnv * /*env*/,
  * Method: setSetFdCloexec
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_EnvOptions_setSetFdCloexec(JNIEnv * /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jboolean set_fd_cloexec) {
+void Java_org_rocksdb_EnvOptions_setSetFdCloexec(
+    JNIEnv*, jobject, jlong jhandle, jboolean set_fd_cloexec) {
   ENV_OPTIONS_SET_BOOL(jhandle, set_fd_cloexec);
 }
 
@@ -182,9 +178,8 @@ void Java_org_rocksdb_EnvOptions_setSetFdCloexec(JNIEnv * /*env*/,
  * Method: setFdCloexec
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(JNIEnv * /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(
+    JNIEnv*, jobject, jlong jhandle) {
   return ENV_OPTIONS_GET(jhandle, set_fd_cloexec);
 }
 
@@ -193,10 +188,8 @@ jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(JNIEnv * /*env*/,
  * Method: setBytesPerSync
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_EnvOptions_setBytesPerSync(JNIEnv * /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jlong bytes_per_sync) {
+void Java_org_rocksdb_EnvOptions_setBytesPerSync(
+    JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
   ENV_OPTIONS_SET_UINT64_T(jhandle, bytes_per_sync);
 }
 
@@ -205,9 +198,8 @@ void Java_org_rocksdb_EnvOptions_setBytesPerSync(JNIEnv * /*env*/,
  * Method: bytesPerSync
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_EnvOptions_bytesPerSync(JNIEnv * /*env*/,
-                                               jobject /*jobj*/,
-                                               jlong jhandle) {
+jlong Java_org_rocksdb_EnvOptions_bytesPerSync(
+    JNIEnv*, jobject, jlong jhandle) {
   return ENV_OPTIONS_GET(jhandle, bytes_per_sync);
 }
 
@@ -217,8 +209,7 @@ jlong Java_org_rocksdb_EnvOptions_bytesPerSync(JNIEnv * /*env*/,
  * Signature: (JZ)V
 */
 void Java_org_rocksdb_EnvOptions_setFallocateWithKeepSize(
-    JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean fallocate_with_keep_size) {
+    JNIEnv*, jobject, jlong jhandle, jboolean fallocate_with_keep_size) {
   ENV_OPTIONS_SET_BOOL(jhandle, fallocate_with_keep_size);
 }
 
@@ -227,9 +218,8 @@ void Java_org_rocksdb_EnvOptions_setFallocateWithKeepSize(
  * Method: fallocateWithKeepSize
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(JNIEnv * /*env*/,
-                                                           jobject /*jobj*/,
-                                                           jlong jhandle) {
+jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return ENV_OPTIONS_GET(jhandle, fallocate_with_keep_size);
 }
 
@@ -239,8 +229,7 @@ jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(JNIEnv * /*env*/,
  * Signature: (JJ)V
 */
 void Java_org_rocksdb_EnvOptions_setCompactionReadaheadSize(
-    JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong compaction_readahead_size) {
+    JNIEnv*, jobject, jlong jhandle, jlong compaction_readahead_size) {
   ENV_OPTIONS_SET_SIZE_T(jhandle, compaction_readahead_size);
 }
 
@@ -249,9 +238,8 @@ void Java_org_rocksdb_EnvOptions_setCompactionReadaheadSize(
  * Method: compactionReadaheadSize
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(JNIEnv * /*env*/,
                                                           jobject /*jobj*/,
-                                                          jlong jhandle) {
+jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return ENV_OPTIONS_GET(jhandle, compaction_readahead_size);
 }
 
@@ -261,8 +249,7 @@ jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(JNIEnv * /*env*/,
  * Signature: (JJ)V
 */
 void Java_org_rocksdb_EnvOptions_setRandomAccessMaxBufferSize(
-    JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong random_access_max_buffer_size) {
+    JNIEnv*, jobject, jlong jhandle, jlong random_access_max_buffer_size) {
   ENV_OPTIONS_SET_SIZE_T(jhandle, random_access_max_buffer_size);
 }
 
@@ -271,9 +258,8 @@ void Java_org_rocksdb_EnvOptions_setRandomAccessMaxBufferSize(
  * Method: randomAccessMaxBufferSize
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(JNIEnv * /*env*/,
-                                                            jobject /*jobj*/,
-                                                            jlong jhandle) {
+jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return ENV_OPTIONS_GET(jhandle, random_access_max_buffer_size);
 }
 
@@ -283,8 +269,7 @@ jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(JNIEnv * /*env*/,
  * Signature: (JJ)V
 */
 void Java_org_rocksdb_EnvOptions_setWritableFileMaxBufferSize(
-    JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong writable_file_max_buffer_size) {
+    JNIEnv*, jobject, jlong jhandle, jlong writable_file_max_buffer_size) {
   ENV_OPTIONS_SET_SIZE_T(jhandle, writable_file_max_buffer_size);
 }
 
@@ -293,9 +278,8 @@ void Java_org_rocksdb_EnvOptions_setWritableFileMaxBufferSize(
  * Method: writableFileMaxBufferSize
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(JNIEnv * /*env*/,
-                                                            jobject /*jobj*/,
-                                                            jlong jhandle) {
+jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return ENV_OPTIONS_GET(jhandle, writable_file_max_buffer_size);
 }
 
@@ -304,9 +288,8 @@ jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(JNIEnv * /*env*/,
  * Method: setRateLimiter
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_EnvOptions_setRateLimiter(JNIEnv * /*env*/,
-                                                jobject /*jobj*/, jlong jhandle,
-                                                jlong rl_handle) {
+void Java_org_rocksdb_EnvOptions_setRateLimiter(
+    JNIEnv*, jobject, jlong jhandle, jlong rl_handle) {
   auto *sptr_rate_limiter =
       reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(rl_handle);
   auto *env_opt = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
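Beyond the mechanical signature cleanup, env_options.cc gains a second constructor that seeds an EnvOptions from an existing DBOptions; the mmap/direct accessors are only reordered within the file, their behavior is unchanged. A minimal sketch of the Java surface, assuming the matching EnvOptions(DBOptions) constructor added by this patch (values illustrative):

    import org.rocksdb.DBOptions;
    import org.rocksdb.EnvOptions;

    public class EnvOptionsExample {
      public static void main(final String[] args) {
        try (final DBOptions dbOptions = new DBOptions();
             // new overload: derive env-level I/O settings from the DB options
             final EnvOptions envOptions = new EnvOptions(dbOptions)) {
          envOptions.setUseMmapReads(true);
          envOptions.setBytesPerSync(1024 * 1024);
        }
      }
    }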
diff --git a/java/rocksjni/ingest_external_file_options.cc b/java/rocksjni/ingest_external_file_options.cc
index a26e6f6d5..e0871ff8e 100644
--- a/java/rocksjni/ingest_external_file_options.cc
+++ b/java/rocksjni/ingest_external_file_options.cc
@@ -17,7 +17,7 @@
  * Signature: ()J
 */
 jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__(
-    JNIEnv* /*env*/, jclass /*jclazz*/) {
+    JNIEnv*, jclass) {
   auto* options = new rocksdb::IngestExternalFileOptions();
   return reinterpret_cast<jlong>(options);
 }
@@ -28,7 +28,7 @@ jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__(
  * Signature: (ZZZZ)J
 */
 jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__ZZZZ(
-    JNIEnv* /*env*/, jclass /*jcls*/, jboolean jmove_files,
+    JNIEnv*, jclass, jboolean jmove_files,
     jboolean jsnapshot_consistency, jboolean jallow_global_seqno,
     jboolean jallow_blocking_flush) {
   auto* options = new rocksdb::IngestExternalFileOptions();
@@ -44,9 +44,8 @@ jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__Z
  * Method: moveFiles
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(JNIEnv* /*env*/,
-                                                              jobject /*jobj*/,
-                                                              jlong jhandle) {
+jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   return static_cast<jboolean>(options->move_files);
@@ -58,7 +57,7 @@ jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(JNIEnv* /*env*/,
  * Signature: (JZ)V
 */
 void Java_org_rocksdb_IngestExternalFileOptions_setMoveFiles(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean jmove_files) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jmove_files) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   options->move_files = static_cast<bool>(jmove_files);
@@ -70,7 +69,7 @@ void Java_org_rocksdb_IngestExternalFileOptions_setMoveFiles(
  * Signature: (J)Z
 */
 jboolean Java_org_rocksdb_IngestExternalFileOptions_snapshotConsistency(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   return static_cast<jboolean>(options->snapshot_consistency);
@@ -82,8 +81,7 @@ jboolean Java_org_rocksdb_IngestExternalFileOptions_snapshotConsistency(
  * Signature: (JZ)V
 */
 void Java_org_rocksdb_IngestExternalFileOptions_setSnapshotConsistency(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jsnapshot_consistency) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jsnapshot_consistency) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   options->snapshot_consistency = static_cast<bool>(jsnapshot_consistency);
@@ -95,7 +93,7 @@ void Java_org_rocksdb_IngestExternalFileOptions_setSnapshotConsistency(
  * Signature: (J)Z
 */
 jboolean Java_org_rocksdb_IngestExternalFileOptions_allowGlobalSeqNo(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   return static_cast<jboolean>(options->allow_global_seqno);
@@ -107,8 +105,7 @@ jboolean Java_org_rocksdb_IngestExternalFileOptions_allowGlobalSeqNo(
  * Signature: (JZ)V
 */
 void Java_org_rocksdb_IngestExternalFileOptions_setAllowGlobalSeqNo(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jallow_global_seqno) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jallow_global_seqno) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   options->allow_global_seqno = static_cast<bool>(jallow_global_seqno);
@@ -120,7 +117,7 @@ void Java_org_rocksdb_IngestExternalFileOptions_setAllowGlobalSeqNo(
  * Signature: (J)Z
 */
 jboolean Java_org_rocksdb_IngestExternalFileOptions_allowBlockingFlush(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   return static_cast<jboolean>(options->allow_blocking_flush);
@@ -132,22 +129,68 @@ jboolean Java_org_rocksdb_IngestExternalFileOptions_allowBlockingFlush(
  * Signature: (JZ)V
 */
 void Java_org_rocksdb_IngestExternalFileOptions_setAllowBlockingFlush(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jallow_blocking_flush) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jallow_blocking_flush) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   options->allow_blocking_flush = static_cast<bool>(jallow_blocking_flush);
 }
 
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: ingestBehind
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_ingestBehind(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* options =
+      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
+  return options->ingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setIngestBehind
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setIngestBehind(
+    JNIEnv*, jobject, jlong jhandle, jboolean jingest_behind) {
+  auto* options =
+      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
+  options->ingest_behind = jingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: writeGlobalSeqno
+ * Signature: (J)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_rocksdb_IngestExternalFileOptions_writeGlobalSeqno(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* options =
+      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
+  return options->write_global_seqno == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setWriteGlobalSeqno
+ * Signature: (JZ)V
+ */
+JNIEXPORT void JNICALL Java_org_rocksdb_IngestExternalFileOptions_setWriteGlobalSeqno(
+    JNIEnv*, jobject, jlong jhandle, jboolean jwrite_global_seqno) {
+  auto* options =
+      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
+  options->write_global_seqno = jwrite_global_seqno == JNI_TRUE;
+}
+
 /*
  * Class: org_rocksdb_IngestExternalFileOptions
  * Method: disposeInternal
  * Signature: (J)V
 */
 void Java_org_rocksdb_IngestExternalFileOptions_disposeInternal(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* options =
       reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
   delete options;
-  // @lint-ignore TXT4 T25377293 Grandfathered in
 }
\ No newline at end of file
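The two new option pairs, ingestBehind/setIngestBehind and writeGlobalSeqno/setWriteGlobalSeqno, surface in Java roughly as below; note that ingest-behind also requires the DB to have been opened with allow_ingest_behind. A sketch with illustrative values:

    import org.rocksdb.IngestExternalFileOptions;

    public class IngestOptionsExample {
      public static void main(final String[] args) {
        try (final IngestExternalFileOptions opts =
                 new IngestExternalFileOptions()) {
          opts.setMoveFiles(true);
          // new: place ingested files underneath all existing data
          opts.setIngestBehind(true);
          // new: avoid rewriting the global sequence number in the SST file
          opts.setWriteGlobalSeqno(false);
          // opts would then be passed to RocksDB#ingestExternalFile(...)
        }
      }
    }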
diff --git a/java/rocksjni/memory_util.cc b/java/rocksjni/memory_util.cc
index 9c2bfd04e..043850213 100644
--- a/java/rocksjni/memory_util.cc
+++ b/java/rocksjni/memory_util.cc
@@ -66,7 +66,7 @@ jobject Java_org_rocksdb_MemoryUtil_getApproximateMemoryUsageByType(
     // exception occurred
     return nullptr;
   }
-  const rocksdb::HashMapJni::FnMapKV<const rocksdb::MemoryUtil::UsageType, const uint64_t>
+  const rocksdb::HashMapJni::FnMapKV<const rocksdb::MemoryUtil::UsageType, const uint64_t, jobject, jobject>
       fn_map_kv =
           [env](const std::pair<const rocksdb::MemoryUtil::UsageType, const uint64_t>& pair) {
             // Construct key
diff --git a/java/rocksjni/memtablejni.cc b/java/rocksjni/memtablejni.cc
index effb6eda0..ad704c3b1 100644
--- a/java/rocksjni/memtablejni.cc
+++ b/java/rocksjni/memtablejni.cc
@@ -20,7 +20,7 @@ jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
     JNIEnv* env, jobject /*jobj*/, jlong jbucket_count, jint jheight,
     jint jbranching_factor) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jbucket_count);
+  rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jbucket_count);
   if (s.ok()) {
     return reinterpret_cast<jlong>(rocksdb::NewHashSkipListRepFactory(
         static_cast<size_t>(jbucket_count), static_cast<int32_t>(jheight),
@@ -40,9 +40,9 @@ jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
     jlong jhuge_page_tlb_size, jint jbucket_entries_logging_threshold,
     jboolean jif_log_bucket_dist_when_flash, jint jthreshold_use_skiplist) {
   rocksdb::Status statusBucketCount =
-      rocksdb::check_if_jlong_fits_size_t(jbucket_count);
+      rocksdb::JniUtil::check_if_jlong_fits_size_t(jbucket_count);
   rocksdb::Status statusHugePageTlb =
-      rocksdb::check_if_jlong_fits_size_t(jhuge_page_tlb_size);
+      rocksdb::JniUtil::check_if_jlong_fits_size_t(jhuge_page_tlb_size);
   if (statusBucketCount.ok() && statusHugePageTlb.ok()) {
     return reinterpret_cast<jlong>(rocksdb::NewHashLinkListRepFactory(
         static_cast<size_t>(jbucket_count),
@@ -63,7 +63,7 @@ jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
 */
 jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
     JNIEnv* env, jobject /*jobj*/, jlong jreserved_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jreserved_size);
+  rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jreserved_size);
   if (s.ok()) {
     return reinterpret_cast<jlong>(
         new rocksdb::VectorRepFactory(static_cast<size_t>(jreserved_size)));
@@ -79,7 +79,7 @@ jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
 */
 jlong Java_org_rocksdb_SkipListMemTableConfig_newMemTableFactoryHandle0(
     JNIEnv* env, jobject /*jobj*/, jlong jlookahead) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jlookahead);
+  rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jlookahead);
   if (s.ok()) {
     return reinterpret_cast<jlong>(
         new rocksdb::SkipListFactory(static_cast<size_t>(jlookahead)));
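The memtable bindings now reach the size-check helper through JniUtil, so behavior is unchanged: a jlong that does not fit in size_t is still rejected with an exception on the Java side. For reference, the configuration these handles back looks like this (bucket count and huge-page size illustrative):

    import org.rocksdb.HashLinkedListMemTableConfig;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDBException;

    public class MemTableConfigExample {
      public static void main(final String[] args) throws RocksDBException {
        try (final Options options = new Options()) {
          options.setMemTableConfig(
              new HashLinkedListMemTableConfig()
                  .setBucketCount(100_000)
                  .setHugePageTlbSize(0));
        }
      }
    }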
diff --git a/java/rocksjni/optimistic_transaction_db.cc b/java/rocksjni/optimistic_transaction_db.cc
index 27c8d3822..1505ff989 100644
--- a/java/rocksjni/optimistic_transaction_db.cc
+++ b/java/rocksjni/optimistic_transaction_db.cc
@@ -22,7 +22,7 @@
  * Signature: (JLjava/lang/String;)J
 */
 jlong Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2(
-    JNIEnv* env, jclass /*jcls*/, jlong joptions_handle, jstring jdb_path) {
+    JNIEnv* env, jclass, jlong joptions_handle, jstring jdb_path) {
   const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
   if (db_path == nullptr) {
     // exception thrown: OutOfMemoryError
@@ -50,7 +50,7 @@ jlong Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2(
 */
 jlongArray
 Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2_3_3B_3J(
-    JNIEnv* env, jclass /*jcls*/, jlong jdb_options_handle, jstring jdb_path,
+    JNIEnv* env, jclass, jlong jdb_options_handle, jstring jdb_path,
     jobjectArray jcolumn_names, jlongArray jcolumn_options_handles) {
   const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
   if (db_path == nullptr) {
@@ -150,14 +150,40 @@ Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2_3_3B_3J(
   return nullptr;
 }
 
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_OptimisticTransactionDB_disposeInternal(
+    JNIEnv *, jobject, jlong jhandle) {
+  auto* optimistic_txn_db =
+      reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
+  assert(optimistic_txn_db != nullptr);
+  delete optimistic_txn_db;
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_OptimisticTransactionDB_closeDatabase(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* optimistic_txn_db =
+      reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
+  assert(optimistic_txn_db != nullptr);
+  rocksdb::Status s = optimistic_txn_db->Close();
+  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
 /*
  * Class: org_rocksdb_OptimisticTransactionDB
  * Method: beginTransaction
  * Signature: (JJ)J
 */
 jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJ(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jwrite_options_handle) {
+    JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) {
   auto* optimistic_txn_db =
       reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
   auto* write_options =
@@ -193,8 +219,8 @@ jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJJ(
  * Signature: (JJJ)J
 */
 jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJ(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jwrite_options_handle, jlong jold_txn_handle) {
+    JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+    jlong jold_txn_handle) {
   auto* optimistic_txn_db =
       reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
   auto* write_options =
@@ -218,9 +244,8 @@ jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJ(
  * Signature: (JJJJ)J
 */
 jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJJ(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jwrite_options_handle, jlong joptimistic_txn_options_handle,
-    jlong jold_txn_handle) {
+    JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+    jlong joptimistic_txn_options_handle, jlong jold_txn_handle) {
   auto* optimistic_txn_db =
       reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
   auto* write_options =
@@ -245,21 +270,9 @@ jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJJ(
  * Method: getBaseDB
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_OptimisticTransactionDB_getBaseDB(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jlong Java_org_rocksdb_OptimisticTransactionDB_getBaseDB(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* optimistic_txn_db =
       reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
   return reinterpret_cast<jlong>(optimistic_txn_db->GetBaseDB());
 }
-
-/*
- * Class: org_rocksdb_OptimisticTransactionDB
- * Method: disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_OptimisticTransactionDB_disposeInternal(JNIEnv* /*env*/,
-                                                              jobject /*jobj*/,
-                                                              jlong jhandle) {
-  delete reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
-}
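disposeInternal now asserts on and deletes the correctly typed OptimisticTransactionDB pointer, and the new closeDatabase binding lets Java close the DB explicitly and observe a failing status as a RocksDBException. Typical usage stays try-with-resources; a minimal sketch (path illustrative):

    import org.rocksdb.OptimisticTransactionDB;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDBException;

    public class OptimisticTxnDbExample {
      public static void main(final String[] args) throws RocksDBException {
        try (final Options options = new Options().setCreateIfMissing(true);
             final OptimisticTransactionDB txnDb = OptimisticTransactionDB.open(
                 options, "/tmp/optimistic-txn-example")) {
          // begin/commit transactions here; the native handle is released
          // via disposeInternal when the try block exits
        }
      }
    }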
"rocksjni/statisticsjni.h" +#include "rocksjni/table_filter_jnicallback.h" #include "rocksdb/comparator.h" #include "rocksdb/convenience.h" @@ -40,7 +41,8 @@ * Method: newOptions * Signature: ()J */ -jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* /*env*/, jclass /*jcls*/) { +jlong Java_org_rocksdb_Options_newOptions__( + JNIEnv*, jclass) { auto* op = new rocksdb::Options(); return reinterpret_cast(op); } @@ -50,9 +52,8 @@ jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* /*env*/, jclass /*jcls*/) { * Method: newOptions * Signature: (JJ)J */ -jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* /*env*/, jclass /*jcls*/, - jlong jdboptions, - jlong jcfoptions) { +jlong Java_org_rocksdb_Options_newOptions__JJ( + JNIEnv*, jclass, jlong jdboptions, jlong jcfoptions) { auto* dbOpt = reinterpret_cast(jdboptions); auto* cfOpt = reinterpret_cast(jcfoptions); @@ -65,8 +66,8 @@ jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* /*env*/, jclass /*jcls*/, * Method: copyOptions * Signature: (J)J */ -jlong Java_org_rocksdb_Options_copyOptions(JNIEnv* /*env*/, jclass /*jcls*/, - jlong jhandle) { +jlong Java_org_rocksdb_Options_copyOptions( + JNIEnv*, jclass, jlong jhandle) { auto new_opt = new rocksdb::Options(*(reinterpret_cast(jhandle))); return reinterpret_cast(new_opt); @@ -77,8 +78,8 @@ jlong Java_org_rocksdb_Options_copyOptions(JNIEnv* /*env*/, jclass /*jcls*/, * Method: disposeInternal * Signature: (J)V */ -void Java_org_rocksdb_Options_disposeInternal(JNIEnv* /*env*/, jobject /*jobj*/, - jlong handle) { +void Java_org_rocksdb_Options_disposeInternal( + JNIEnv*, jobject, jlong handle) { auto* op = reinterpret_cast(handle); assert(op != nullptr); delete op; @@ -89,10 +90,8 @@ void Java_org_rocksdb_Options_disposeInternal(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setIncreaseParallelism * Signature: (JI)V */ -void Java_org_rocksdb_Options_setIncreaseParallelism(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jint totalThreads) { +void Java_org_rocksdb_Options_setIncreaseParallelism( + JNIEnv*, jobject, jlong jhandle, jint totalThreads) { reinterpret_cast(jhandle)->IncreaseParallelism( static_cast(totalThreads)); } @@ -102,9 +101,8 @@ void Java_org_rocksdb_Options_setIncreaseParallelism(JNIEnv* /*env*/, * Method: setCreateIfMissing * Signature: (JZ)V */ -void Java_org_rocksdb_Options_setCreateIfMissing(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, jboolean flag) { +void Java_org_rocksdb_Options_setCreateIfMissing( + JNIEnv*, jobject, jlong jhandle, jboolean flag) { reinterpret_cast(jhandle)->create_if_missing = flag; } @@ -113,9 +111,8 @@ void Java_org_rocksdb_Options_setCreateIfMissing(JNIEnv* /*env*/, * Method: createIfMissing * Signature: (J)Z */ -jboolean Java_org_rocksdb_Options_createIfMissing(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_Options_createIfMissing( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->create_if_missing; } @@ -124,10 +121,8 @@ jboolean Java_org_rocksdb_Options_createIfMissing(JNIEnv* /*env*/, * Method: setCreateMissingColumnFamilies * Signature: (JZ)V */ -void Java_org_rocksdb_Options_setCreateMissingColumnFamilies(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean flag) { +void Java_org_rocksdb_Options_setCreateMissingColumnFamilies( + JNIEnv*, jobject, jlong jhandle, jboolean flag) { reinterpret_cast(jhandle)->create_missing_column_families = flag; } @@ -137,9 +132,8 @@ void Java_org_rocksdb_Options_setCreateMissingColumnFamilies(JNIEnv* /*env*/, * Method: 
  * Method: createMissingColumnFamilies
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_Options_createMissingColumnFamilies(JNIEnv* /*env*/,
-                                                              jobject /*jobj*/,
-                                                              jlong jhandle) {
+jboolean Java_org_rocksdb_Options_createMissingColumnFamilies(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->create_missing_column_families;
 }
@@ -149,10 +143,8 @@ jboolean Java_org_rocksdb_Options_createMissingColumnFamilies(JNIEnv* /*env*/,
  * Method: setComparatorHandle
  * Signature: (JI)V
 */
-void Java_org_rocksdb_Options_setComparatorHandle__JI(JNIEnv* /*env*/,
-                                                      jobject /*jobj*/,
-                                                      jlong jhandle,
-                                                      jint builtinComparator) {
+void Java_org_rocksdb_Options_setComparatorHandle__JI(
+    JNIEnv*, jobject, jlong jhandle, jint builtinComparator) {
   switch (builtinComparator) {
     case 1:
       reinterpret_cast<rocksdb::Options*>(jhandle)->comparator =
@@ -170,11 +162,9 @@ void Java_org_rocksdb_Options_setComparatorHandle__JI(JNIEnv* /*env*/,
  * Method: setComparatorHandle
  * Signature: (JJB)V
 */
-void Java_org_rocksdb_Options_setComparatorHandle__JJB(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jopt_handle,
-                                                       jlong jcomparator_handle,
-                                                       jbyte jcomparator_type) {
+void Java_org_rocksdb_Options_setComparatorHandle__JJB(
+    JNIEnv*, jobject, jlong jopt_handle, jlong jcomparator_handle,
+    jbyte jcomparator_type) {
   rocksdb::Comparator* comparator = nullptr;
   switch (jcomparator_type) {
     // JAVA_COMPARATOR
@@ -203,10 +193,8 @@ void Java_org_rocksdb_Options_setComparatorHandle__JJB(JNIEnv* /*env*/,
  * Method: setMergeOperatorName
  * Signature: (JJjava/lang/String)V
 */
-void Java_org_rocksdb_Options_setMergeOperatorName(JNIEnv* env,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle,
-                                                   jstring jop_name) {
+void Java_org_rocksdb_Options_setMergeOperatorName(
+    JNIEnv* env, jobject, jlong jhandle, jstring jop_name) {
   const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
   if (op_name == nullptr) {
     // exception thrown: OutOfMemoryError
@@ -225,9 +213,8 @@ void Java_org_rocksdb_Options_setMergeOperatorName(JNIEnv* env,
  * Method: setMergeOperator
  * Signature: (JJjava/lang/String)V
 */
-void Java_org_rocksdb_Options_setMergeOperator(JNIEnv* /*env*/,
-                                               jobject /*jobj*/, jlong jhandle,
-                                               jlong mergeOperatorHandle) {
+void Java_org_rocksdb_Options_setMergeOperator(
+    JNIEnv*, jobject, jlong jhandle, jlong mergeOperatorHandle) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->merge_operator =
       *(reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>*>(
           mergeOperatorHandle));
@@ -239,7 +226,7 @@ void Java_org_rocksdb_Options_setMergeOperator(JNIEnv* /*env*/,
  * Signature: (JJ)V
 */
 void Java_org_rocksdb_Options_setCompactionFilterHandle(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jopt_handle,
+    JNIEnv*, jobject, jlong jopt_handle,
     jlong jcompactionfilter_handle) {
   reinterpret_cast<rocksdb::Options*>(jopt_handle)->
       compaction_filter = reinterpret_cast<rocksdb::CompactionFilter*>
@@ -252,7 +239,7 @@ void Java_org_rocksdb_Options_setCompactionFilterHandle(
  * Signature: (JJ)V
 */
 void JNICALL Java_org_rocksdb_Options_setCompactionFilterFactoryHandle(
-    JNIEnv* /* env */, jobject /* jobj */, jlong jopt_handle,
+    JNIEnv*, jobject, jlong jopt_handle,
     jlong jcompactionfilterfactory_handle) {
   auto* cff_factory =
       reinterpret_cast<std::shared_ptr<rocksdb::CompactionFilterFactoryJniCallback> *>(
@@ -266,10 +253,10 @@ void JNICALL Java_org_rocksdb_Options_setCompactionFilterFactoryHandle(
  * Method: setWriteBufferSize
  * Signature: (JJ)I
 */
-void Java_org_rocksdb_Options_setWriteBufferSize(JNIEnv* env, jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jlong jwrite_buffer_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jwrite_buffer_size);
+void Java_org_rocksdb_Options_setWriteBufferSize(
+    JNIEnv* env, jobject, jlong jhandle, jlong jwrite_buffer_size) {
+  auto s =
+      rocksdb::JniUtil::check_if_jlong_fits_size_t(jwrite_buffer_size);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->write_buffer_size =
         jwrite_buffer_size;
@@ -283,9 +270,9 @@ void Java_org_rocksdb_Options_setWriteBufferSize(JNIEnv* env, jobject /*jobj*/,
  * Method: setWriteBufferManager
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setWriteBufferManager(JNIEnv* /*env*/, jobject /*jobj*/,
-                                                    jlong joptions_handle,
-                                                    jlong jwrite_buffer_manager_handle) {
+void Java_org_rocksdb_Options_setWriteBufferManager(
+    JNIEnv*, jobject, jlong joptions_handle,
+    jlong jwrite_buffer_manager_handle) {
   auto* write_buffer_manager =
       reinterpret_cast<std::shared_ptr<rocksdb::WriteBufferManager> *>(jwrite_buffer_manager_handle);
   reinterpret_cast<rocksdb::Options*>(joptions_handle)->write_buffer_manager =
@@ -297,9 +284,8 @@ void Java_org_rocksdb_Options_setWriteBufferManager(JNIEnv* /*env*/, jobject /*j
  * Method: writeBufferSize
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_writeBufferSize(JNIEnv* /*env*/,
-                                               jobject /*jobj*/,
-                                               jlong jhandle) {
+jlong Java_org_rocksdb_Options_writeBufferSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->write_buffer_size;
 }
@@ -309,7 +295,7 @@ jlong Java_org_rocksdb_Options_writeBufferSize(JNIEnv* /*env*/,
  * Signature: (JI)V
 */
 void Java_org_rocksdb_Options_setMaxWriteBufferNumber(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jint jmax_write_buffer_number) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_write_buffer_number =
       jmax_write_buffer_number;
@@ -320,9 +306,8 @@ void Java_org_rocksdb_Options_setMaxWriteBufferNumber(
  * Method: setStatistics
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setStatistics(JNIEnv* /*env*/, jobject /*jobj*/,
-                                            jlong jhandle,
-                                            jlong jstatistics_handle) {
+void Java_org_rocksdb_Options_setStatistics(
+    JNIEnv*, jobject, jlong jhandle, jlong jstatistics_handle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   auto* pSptr = reinterpret_cast<std::shared_ptr<rocksdb::StatisticsJni>*>(
       jstatistics_handle);
@@ -334,8 +319,8 @@ void Java_org_rocksdb_Options_setStatistics(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: statistics
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_statistics(JNIEnv* /*env*/, jobject /*jobj*/,
-                                          jlong jhandle) {
+jlong Java_org_rocksdb_Options_statistics(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   std::shared_ptr<rocksdb::Statistics> sptr = opt->statistics;
   if (sptr == nullptr) {
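These hunks keep the statistics handle as a shared_ptr on both sides of the bridge. From Java the wiring is roughly as follows (sizes illustrative, and assuming the existing Statistics class and fluent Options setters):

    import org.rocksdb.Options;
    import org.rocksdb.Statistics;

    public class StatisticsExample {
      public static void main(final String[] args) {
        try (final Statistics stats = new Statistics();
             final Options options = new Options()
                 .setStatistics(stats)
                 .setWriteBufferSize(8 * 1024 * 1024)
                 .setMaxWriteBufferNumber(4)) {
          // stats.toString() dumps tickers and histograms while the DB runs
        }
      }
    }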
@@ -352,9 +337,8 @@ jlong Java_org_rocksdb_Options_statistics(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: maxWriteBufferNumber
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_maxWriteBufferNumber(JNIEnv* /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle) {
+jint Java_org_rocksdb_Options_maxWriteBufferNumber(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_write_buffer_number;
 }
@@ -363,9 +347,8 @@ jint Java_org_rocksdb_Options_maxWriteBufferNumber(JNIEnv* /*env*/,
  * Method: errorIfExists
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_Options_errorIfExists(JNIEnv* /*env*/,
-                                                jobject /*jobj*/,
-                                                jlong jhandle) {
+jboolean Java_org_rocksdb_Options_errorIfExists(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->error_if_exists;
 }
@@ -374,9 +357,8 @@ jboolean Java_org_rocksdb_Options_errorIfExists(JNIEnv* /*env*/,
  * Method: setErrorIfExists
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_Options_setErrorIfExists(JNIEnv* /*env*/,
-                                               jobject /*jobj*/, jlong jhandle,
-                                               jboolean error_if_exists) {
+void Java_org_rocksdb_Options_setErrorIfExists(
+    JNIEnv*, jobject, jlong jhandle, jboolean error_if_exists) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->error_if_exists =
       static_cast<bool>(error_if_exists);
 }
@@ -386,9 +368,8 @@ void Java_org_rocksdb_Options_setErrorIfExists(JNIEnv* /*env*/,
  * Method: paranoidChecks
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_Options_paranoidChecks(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jboolean Java_org_rocksdb_Options_paranoidChecks(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_checks;
 }
@@ -397,9 +378,8 @@ jboolean Java_org_rocksdb_Options_paranoidChecks(JNIEnv* /*env*/,
  * Method: setParanoidChecks
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_Options_setParanoidChecks(JNIEnv* /*env*/,
-                                                jobject /*jobj*/, jlong jhandle,
-                                                jboolean paranoid_checks) {
+void Java_org_rocksdb_Options_setParanoidChecks(
+    JNIEnv*, jobject, jlong jhandle, jboolean paranoid_checks) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_checks =
       static_cast<bool>(paranoid_checks);
 }
@@ -409,8 +389,8 @@ void Java_org_rocksdb_Options_setParanoidChecks(JNIEnv* /*env*/,
  * Method: setEnv
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setEnv(JNIEnv* /*env*/, jobject /*jobj*/,
-                                     jlong jhandle, jlong jenv) {
+void Java_org_rocksdb_Options_setEnv(
+    JNIEnv*, jobject, jlong jhandle, jlong jenv) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->env =
       reinterpret_cast<rocksdb::Env*>(jenv);
 }
@@ -420,10 +400,8 @@ void Java_org_rocksdb_Options_setEnv(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: setMaxTotalWalSize
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setMaxTotalWalSize(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jlong jmax_total_wal_size) {
+void Java_org_rocksdb_Options_setMaxTotalWalSize(
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_total_wal_size) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_total_wal_size =
       static_cast<uint64_t>(jmax_total_wal_size);
 }
@@ -433,9 +411,8 @@ void Java_org_rocksdb_Options_setMaxTotalWalSize(JNIEnv* /*env*/,
  * Method: maxTotalWalSize
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_maxTotalWalSize(JNIEnv* /*env*/,
-                                               jobject /*jobj*/,
-                                               jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxTotalWalSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_total_wal_size;
 }
@@ -444,8 +421,8 @@ jlong Java_org_rocksdb_Options_maxTotalWalSize(JNIEnv* /*env*/,
  * Method: maxOpenFiles
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_maxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/,
-                                           jlong jhandle) {
+jint Java_org_rocksdb_Options_maxOpenFiles(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_open_files;
 }
@@ -454,9 +431,8 @@ jint Java_org_rocksdb_Options_maxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: setMaxOpenFiles
  * Signature: (JI)V
 */
-void Java_org_rocksdb_Options_setMaxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/,
-                                              jlong jhandle,
-                                              jint max_open_files) {
+void Java_org_rocksdb_Options_setMaxOpenFiles(
+    JNIEnv*, jobject, jlong jhandle, jint max_open_files) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_open_files =
       static_cast<int>(max_open_files);
 }
@@ -467,8 +443,7 @@ void Java_org_rocksdb_Options_setMaxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/,
  * Signature: (JI)V
 */
 void Java_org_rocksdb_Options_setMaxFileOpeningThreads(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jint jmax_file_opening_threads) {
+    JNIEnv*, jobject, jlong jhandle, jint jmax_file_opening_threads) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_file_opening_threads =
       static_cast<int>(jmax_file_opening_threads);
 }
@@ -478,9 +453,8 @@ void Java_org_rocksdb_Options_setMaxFileOpeningThreads(
  * Method: maxFileOpeningThreads
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_maxFileOpeningThreads(JNIEnv* /*env*/,
-                                                    jobject /*jobj*/,
-                                                    jlong jhandle) {
+jint Java_org_rocksdb_Options_maxFileOpeningThreads(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<int>(opt->max_file_opening_threads);
 }
@@ -490,8 +464,8 @@ jint Java_org_rocksdb_Options_maxFileOpeningThreads(JNIEnv* /*env*/,
  * Method: useFsync
  * Signature: (J)Z
 */
-jboolean Java_org_rocksdb_Options_useFsync(JNIEnv* /*env*/, jobject /*jobj*/,
-                                           jlong jhandle) {
+jboolean Java_org_rocksdb_Options_useFsync(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->use_fsync;
 }
@@ -500,8 +474,8 @@ jboolean Java_org_rocksdb_Options_useFsync(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: setUseFsync
  * Signature: (JZ)V
 */
-void Java_org_rocksdb_Options_setUseFsync(JNIEnv* /*env*/, jobject /*jobj*/,
-                                          jlong jhandle, jboolean use_fsync) {
+void Java_org_rocksdb_Options_setUseFsync(
+    JNIEnv*, jobject, jlong jhandle, jboolean use_fsync) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->use_fsync =
       static_cast<bool>(use_fsync);
 }
@@ -511,9 +485,9 @@ void Java_org_rocksdb_Options_setUseFsync(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: setDbPaths
  * Signature: (J[Ljava/lang/String;[J)V
 */
-void Java_org_rocksdb_Options_setDbPaths(JNIEnv* env, jobject /*jobj*/,
-                                         jlong jhandle, jobjectArray jpaths,
-                                         jlongArray jtarget_sizes) {
+void Java_org_rocksdb_Options_setDbPaths(
+    JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+    jlongArray jtarget_sizes) {
   std::vector<rocksdb::DbPath> db_paths;
   jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
   if (ptr_jtarget_size == nullptr) {
@@ -557,8 +531,8 @@ void Java_org_rocksdb_Options_setDbPaths(JNIEnv* env, jobject /*jobj*/,
  * Method: dbPathsLen
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_dbPathsLen(JNIEnv* /*env*/, jobject /*jobj*/,
-                                          jlong jhandle) {
+jlong Java_org_rocksdb_Options_dbPathsLen(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jlong>(opt->db_paths.size());
 }
@@ -568,9 +542,9 @@ jlong Java_org_rocksdb_Options_dbPathsLen(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: dbPaths
  * Signature: (J[Ljava/lang/String;[J)V
 */
-void Java_org_rocksdb_Options_dbPaths(JNIEnv* env, jobject /*jobj*/,
-                                      jlong jhandle, jobjectArray jpaths,
-                                      jlongArray jtarget_sizes) {
+void Java_org_rocksdb_Options_dbPaths(
+    JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+    jlongArray jtarget_sizes) {
   jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
   if (ptr_jtarget_size == nullptr) {
     // exception thrown: OutOfMemoryError
@@ -607,8 +581,8 @@ void Java_org_rocksdb_Options_dbPaths(JNIEnv* env, jobject /*jobj*/,
  * Method: dbLogDir
  * Signature: (J)Ljava/lang/String
 */
-jstring Java_org_rocksdb_Options_dbLogDir(JNIEnv* env, jobject /*jobj*/,
-                                          jlong jhandle) {
+jstring Java_org_rocksdb_Options_dbLogDir(
+    JNIEnv* env, jobject, jlong jhandle) {
   return env->NewStringUTF(
       reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.c_str());
 }
@@ -618,8 +592,8 @@ jstring Java_org_rocksdb_Options_dbLogDir(JNIEnv* env, jobject /*jobj*/,
  * Method: setDbLogDir
  * Signature: (JLjava/lang/String)V
 */
-void Java_org_rocksdb_Options_setDbLogDir(JNIEnv* env, jobject /*jobj*/,
-                                          jlong jhandle, jstring jdb_log_dir) {
+void Java_org_rocksdb_Options_setDbLogDir(
+    JNIEnv* env, jobject, jlong jhandle, jstring jdb_log_dir) {
   const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
   if (log_dir == nullptr) {
     // exception thrown: OutOfMemoryError
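setDbPaths/dbPaths marshal a Java collection of DbPath objects into std::vector<rocksdb::DbPath> and back. A minimal sketch (paths and target sizes illustrative):

    import java.nio.file.Paths;
    import java.util.Arrays;
    import org.rocksdb.DbPath;
    import org.rocksdb.Options;

    public class DbPathsExample {
      public static void main(final String[] args) {
        try (final Options options = new Options()) {
          // spread SST files across a fast and a large volume
          options.setDbPaths(Arrays.asList(
              new DbPath(Paths.get("/ssd/db"), 10L * 1024 * 1024 * 1024),
              new DbPath(Paths.get("/hdd/db"), 500L * 1024 * 1024 * 1024)));
        }
      }
    }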
@@ -634,8 +608,8 @@ void Java_org_rocksdb_Options_setDbLogDir(JNIEnv* env, jobject /*jobj*/,
  * Method: walDir
  * Signature: (J)Ljava/lang/String
 */
-jstring Java_org_rocksdb_Options_walDir(JNIEnv* env, jobject /*jobj*/,
-                                        jlong jhandle) {
+jstring Java_org_rocksdb_Options_walDir(
+    JNIEnv* env, jobject, jlong jhandle) {
   return env->NewStringUTF(
       reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.c_str());
 }
@@ -645,8 +619,8 @@ jstring Java_org_rocksdb_Options_walDir(JNIEnv* env, jobject /*jobj*/,
  * Method: setWalDir
  * Signature: (JLjava/lang/String)V
 */
-void Java_org_rocksdb_Options_setWalDir(JNIEnv* env, jobject /*jobj*/,
-                                        jlong jhandle, jstring jwal_dir) {
+void Java_org_rocksdb_Options_setWalDir(
+    JNIEnv* env, jobject, jlong jhandle, jstring jwal_dir) {
   const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
   if (wal_dir == nullptr) {
     // exception thrown: OutOfMemoryError
@@ -661,9 +635,8 @@ void Java_org_rocksdb_Options_setWalDir(JNIEnv* env, jobject /*jobj*/,
  * Method: deleteObsoleteFilesPeriodMicros
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(JNIEnv* /*env*/,
-                                                               jobject /*jobj*/,
-                                                               jlong jhandle) {
+jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->delete_obsolete_files_period_micros;
 }
@@ -674,7 +647,7 @@ jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(JNIEnv* /*env*/,
  * Signature: (JJ)V
 */
 void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong micros) {
+    JNIEnv*, jobject, jlong jhandle, jlong micros) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->delete_obsolete_files_period_micros = static_cast<int64_t>(micros);
 }
@@ -684,10 +657,8 @@ void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros(
  * Method: setBaseBackgroundCompactions
  * Signature: (JI)V
 */
-void Java_org_rocksdb_Options_setBaseBackgroundCompactions(JNIEnv* /*env*/,
-                                                           jobject /*jobj*/,
-                                                           jlong jhandle,
-                                                           jint max) {
+void Java_org_rocksdb_Options_setBaseBackgroundCompactions(
+    JNIEnv*, jobject, jlong jhandle, jint max) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->base_background_compactions =
       static_cast<int>(max);
 }
@@ -697,9 +668,8 @@ void Java_org_rocksdb_Options_setBaseBackgroundCompactions(JNIEnv* /*env*/,
  * Method: baseBackgroundCompactions
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_baseBackgroundCompactions(JNIEnv* /*env*/,
-                                                        jobject /*jobj*/,
-                                                        jlong jhandle) {
+jint Java_org_rocksdb_Options_baseBackgroundCompactions(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->base_background_compactions;
 }
@@ -709,9 +679,8 @@ jint Java_org_rocksdb_Options_baseBackgroundCompactions(JNIEnv* /*env*/,
  * Method: maxBackgroundCompactions
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_maxBackgroundCompactions(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jhandle) {
+jint Java_org_rocksdb_Options_maxBackgroundCompactions(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->max_background_compactions;
 }
@@ -721,10 +690,8 @@ jint Java_org_rocksdb_Options_maxBackgroundCompactions(JNIEnv* /*env*/,
  * Method: setMaxBackgroundCompactions
  * Signature: (JI)V
 */
-void Java_org_rocksdb_Options_setMaxBackgroundCompactions(JNIEnv* /*env*/,
-                                                          jobject /*jobj*/,
-                                                          jlong jhandle,
-                                                          jint max) {
+void Java_org_rocksdb_Options_setMaxBackgroundCompactions(
+    JNIEnv*, jobject, jlong jhandle, jint max) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_compactions =
       static_cast<int>(max);
 }
@@ -734,9 +701,8 @@ void Java_org_rocksdb_Options_setMaxBackgroundCompactions(JNIEnv* /*env*/,
  * Method: setMaxSubcompactions
  * Signature: (JI)V
 */
-void Java_org_rocksdb_Options_setMaxSubcompactions(JNIEnv* /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle, jint max) {
+void Java_org_rocksdb_Options_setMaxSubcompactions(
+    JNIEnv*, jobject, jlong jhandle, jint max) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_subcompactions =
       static_cast<int32_t>(max);
 }
@@ -746,9 +712,8 @@ void Java_org_rocksdb_Options_setMaxSubcompactions(JNIEnv* /*env*/,
  * Method: maxSubcompactions
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_maxSubcompactions(JNIEnv* /*env*/,
-                                                jobject /*jobj*/,
-                                                jlong jhandle) {
+jint Java_org_rocksdb_Options_maxSubcompactions(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_subcompactions;
 }
@@ -757,9 +722,8 @@ jint Java_org_rocksdb_Options_maxSubcompactions(JNIEnv* /*env*/,
  * Method: maxBackgroundFlushes
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_maxBackgroundFlushes(JNIEnv* /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle) {
+jint Java_org_rocksdb_Options_maxBackgroundFlushes(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_flushes;
 }
@@ -769,8 +733,7 @@ jint Java_org_rocksdb_Options_maxBackgroundFlushes(JNIEnv* /*env*/,
  * Signature: (JI)V
 */
 void Java_org_rocksdb_Options_setMaxBackgroundFlushes(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jint max_background_flushes) {
+    JNIEnv*, jobject, jlong jhandle, jint max_background_flushes) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_flushes =
       static_cast<int>(max_background_flushes);
 }
@@ -780,9 +743,8 @@ void Java_org_rocksdb_Options_setMaxBackgroundFlushes(
  * Method: maxBackgroundJobs
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_maxBackgroundJobs(JNIEnv* /*env*/,
-                                                jobject /*jobj*/,
-                                                jlong jhandle) {
+jint Java_org_rocksdb_Options_maxBackgroundJobs(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_jobs;
 }
@@ -791,10 +753,8 @@ jint Java_org_rocksdb_Options_maxBackgroundJobs(JNIEnv* /*env*/,
  * Method: setMaxBackgroundJobs
  * Signature: (JI)V
 */
-void Java_org_rocksdb_Options_setMaxBackgroundJobs(JNIEnv* /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle,
-                                                   jint max_background_jobs) {
+void Java_org_rocksdb_Options_setMaxBackgroundJobs(
+    JNIEnv*, jobject, jlong jhandle, jint max_background_jobs) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_jobs =
       static_cast<int>(max_background_jobs);
 }
@@ -804,8 +764,8 @@ void Java_org_rocksdb_Options_setMaxBackgroundJobs(JNIEnv* /*env*/,
  * Method: maxLogFileSize
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_maxLogFileSize(JNIEnv* /*env*/, jobject /*jobj*/,
-                                              jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxLogFileSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size;
 }
@@ -814,10 +774,9 @@ jlong Java_org_rocksdb_Options_maxLogFileSize(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: setMaxLogFileSize
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setMaxLogFileSize(JNIEnv* env, jobject /*jobj*/,
-                                                jlong jhandle,
-                                                jlong max_log_file_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(max_log_file_size);
+void Java_org_rocksdb_Options_setMaxLogFileSize(
+    JNIEnv* env, jobject, jlong jhandle, jlong max_log_file_size) {
+  auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(max_log_file_size);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size =
         max_log_file_size;
@@ -831,9 +790,8 @@ void Java_org_rocksdb_Options_setMaxLogFileSize(JNIEnv* env, jobject /*jobj*/,
  * Method: logFileTimeToRoll
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_logFileTimeToRoll(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jlong Java_org_rocksdb_Options_logFileTimeToRoll(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll;
 }
@@ -843,9 +801,9 @@ jlong Java_org_rocksdb_Options_logFileTimeToRoll(JNIEnv* /*env*/,
  * Signature: (JJ)V
 */
 void Java_org_rocksdb_Options_setLogFileTimeToRoll(
-    JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong log_file_time_to_roll) {
-  rocksdb::Status s =
-      rocksdb::check_if_jlong_fits_size_t(log_file_time_to_roll);
+    JNIEnv* env, jobject, jlong jhandle, jlong log_file_time_to_roll) {
+  auto s =
+      rocksdb::JniUtil::check_if_jlong_fits_size_t(log_file_time_to_roll);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll =
         log_file_time_to_roll;
@@ -859,8 +817,8 @@ void Java_org_rocksdb_Options_setLogFileTimeToRoll(
  * Method: keepLogFileNum
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_keepLogFileNum(JNIEnv* /*env*/, jobject /*jobj*/,
-                                              jlong jhandle) {
+jlong Java_org_rocksdb_Options_keepLogFileNum(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num;
 }
@@ -869,10 +827,9 @@ jlong Java_org_rocksdb_Options_keepLogFileNum(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: setKeepLogFileNum
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setKeepLogFileNum(JNIEnv* env, jobject /*jobj*/,
-                                                jlong jhandle,
-                                                jlong keep_log_file_num) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(keep_log_file_num);
+void Java_org_rocksdb_Options_setKeepLogFileNum(
+    JNIEnv* env, jobject, jlong jhandle, jlong keep_log_file_num) {
+  auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(keep_log_file_num);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num =
         keep_log_file_num;
@@ -886,9 +843,8 @@ void Java_org_rocksdb_Options_setKeepLogFileNum(JNIEnv* env, jobject /*jobj*/,
  * Method: recycleLogFileNum
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_recycleLogFileNum(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jlong Java_org_rocksdb_Options_recycleLogFileNum(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->recycle_log_file_num;
 }
@@ -897,11 +853,9 @@ jlong Java_org_rocksdb_Options_recycleLogFileNum(JNIEnv* /*env*/,
  * Method: setRecycleLogFileNum
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setRecycleLogFileNum(JNIEnv* env,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle,
-                                                   jlong recycle_log_file_num) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num);
+void Java_org_rocksdb_Options_setRecycleLogFileNum(
+    JNIEnv* env, jobject, jlong jhandle, jlong recycle_log_file_num) {
+  auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(recycle_log_file_num);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->recycle_log_file_num =
         recycle_log_file_num;
@@ -915,9 +869,8 @@ void Java_org_rocksdb_Options_setRecycleLogFileNum(JNIEnv* env,
  * Method: maxManifestFileSize
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_maxManifestFileSize(JNIEnv* /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxManifestFileSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size;
 }
@@ -925,9 +878,8 @@ jlong Java_org_rocksdb_Options_maxManifestFileSize(JNIEnv* /*env*/,
  * Method: memTableFactoryName
  * Signature: (J)Ljava/lang/String
 */
-jstring Java_org_rocksdb_Options_memTableFactoryName(JNIEnv* env,
-                                                     jobject /*jobj*/,
-                                                     jlong jhandle) {
+jstring Java_org_rocksdb_Options_memTableFactoryName(
+    JNIEnv* env, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
@@ -949,8 +901,7 @@ jstring Java_org_rocksdb_Options_memTableFactoryName(JNIEnv* env,
  * Signature: (JJ)V
 */
 void Java_org_rocksdb_Options_setMaxManifestFileSize(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong max_manifest_file_size) {
+    JNIEnv*, jobject, jlong jhandle, jlong max_manifest_file_size) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size =
       static_cast<int64_t>(max_manifest_file_size);
 }
@@ -959,10 +910,8 @@
  * Method: setMemTableFactory
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setMemTableFactory(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jlong jfactory_handle) {
+void Java_org_rocksdb_Options_setMemTableFactory(
+    JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->memtable_factory.reset(
       reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
 }
@@ -972,9 +921,8 @@
  * Method: setRateLimiter
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setRateLimiter(JNIEnv* /*env*/, jobject /*jobj*/,
-                                             jlong jhandle,
-                                             jlong jrate_limiter_handle) {
+void Java_org_rocksdb_Options_setRateLimiter(
+    JNIEnv*, jobject, jlong jhandle, jlong jrate_limiter_handle) {
   std::shared_ptr<rocksdb::RateLimiter>* pRateLimiter =
       reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter>*>(
          jrate_limiter_handle);
@@ -987,8 +935,7 @@ void Java_org_rocksdb_Options_setRateLimiter(JNIEnv* /*env*/, jobject /*jobj*/,
  * Signature: (JJ)V
 */
 void Java_org_rocksdb_Options_setSstFileManager(
-    JNIEnv* /*env*/, jobject /*job*/, jlong jhandle,
-    jlong jsst_file_manager_handle) {
+    JNIEnv*, jobject, jlong jhandle, jlong jsst_file_manager_handle) {
   auto* sptr_sst_file_manager =
       reinterpret_cast<std::shared_ptr<rocksdb::SstFileManager>*>(
          jsst_file_manager_handle);
@@ -1001,8 +948,8 @@
  * Method: setLogger
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setLogger(JNIEnv* /*env*/, jobject /*jobj*/,
-                                        jlong jhandle, jlong jlogger_handle) {
+void Java_org_rocksdb_Options_setLogger(
+    JNIEnv*, jobject, jlong jhandle, jlong jlogger_handle) {
   std::shared_ptr<rocksdb::LoggerJniCallback>* pLogger =
       reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(
          jlogger_handle);
@@ -1014,8 +961,8 @@
  * Method: setInfoLogLevel
  * Signature: (JB)V
 */
-void Java_org_rocksdb_Options_setInfoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/,
-                                              jlong jhandle, jbyte jlog_level) {
+void Java_org_rocksdb_Options_setInfoLogLevel(
+    JNIEnv*, jobject, jlong jhandle, jbyte jlog_level) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->info_log_level =
       static_cast<rocksdb::InfoLogLevel>(jlog_level);
 }
@@ -1025,8 +972,8 @@
  * Method: infoLogLevel
  * Signature: (J)B
 */
-jbyte Java_org_rocksdb_Options_infoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/,
-                                            jlong jhandle) {
+jbyte Java_org_rocksdb_Options_infoLogLevel(
+    JNIEnv*, jobject, jlong jhandle) {
   return static_cast<jbyte>(
       reinterpret_cast<rocksdb::Options*>(jhandle)->info_log_level);
 }
@@ -1036,9 +983,8 @@
  * Method: tableCacheNumshardbits
  * Signature: (J)I
 */
-jint Java_org_rocksdb_Options_tableCacheNumshardbits(JNIEnv* /*env*/,
-                                                     jobject /*jobj*/,
-                                                     jlong jhandle) {
+jint Java_org_rocksdb_Options_tableCacheNumshardbits(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits;
 }
@@ -1048,8 +994,7 @@ jint Java_org_rocksdb_Options_tableCacheNumshardbits(JNIEnv* /*env*/,
  * Signature: (JI)V
 */
 void Java_org_rocksdb_Options_setTableCacheNumshardbits(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jint table_cache_numshardbits) {
+    JNIEnv*, jobject, jlong jhandle, jint table_cache_numshardbits) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits =
       static_cast<int>(table_cache_numshardbits);
 }
@@ -1059,7 +1004,7 @@
  * Signature: (JI)V
 */
 void Java_org_rocksdb_Options_useFixedLengthPrefixExtractor(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jprefix_length) {
+    JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
       rocksdb::NewFixedPrefixTransform(static_cast<int>(jprefix_length)));
 }
@@ -1068,10 +1013,8 @@
  * Method: useCappedPrefixExtractor
  * Signature: (JI)V
 */
-void Java_org_rocksdb_Options_useCappedPrefixExtractor(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jhandle,
-                                                       jint jprefix_length) {
+void Java_org_rocksdb_Options_useCappedPrefixExtractor(
+    JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
       rocksdb::NewCappedPrefixTransform(static_cast<int>(jprefix_length)));
 }
@@ -1081,8 +1024,8 @@
  * Method: walTtlSeconds
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_walTtlSeconds(JNIEnv* /*env*/, jobject /*jobj*/,
-                                             jlong jhandle) {
+jlong Java_org_rocksdb_Options_walTtlSeconds(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_ttl_seconds;
 }
@@ -1091,9 +1034,8 @@
  * Method: setWalTtlSeconds
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setWalTtlSeconds(JNIEnv* /*env*/,
-                                               jobject /*jobj*/, jlong jhandle,
-                                               jlong WAL_ttl_seconds) {
+void Java_org_rocksdb_Options_setWalTtlSeconds(
+    JNIEnv*, jobject, jlong jhandle, jlong WAL_ttl_seconds) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_ttl_seconds =
       static_cast<uint64_t>(WAL_ttl_seconds);
 }
@@ -1103,8 +1045,8 @@
  * Method: walTtlSeconds
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_walSizeLimitMB(JNIEnv* /*env*/, jobject /*jobj*/,
-                                              jlong jhandle) {
+jlong Java_org_rocksdb_Options_walSizeLimitMB(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_size_limit_MB;
 }
@@ -1113,9 +1055,8 @@
  * Method: setWalSizeLimitMB
  * Signature: (JJ)V
 */
-void Java_org_rocksdb_Options_setWalSizeLimitMB(JNIEnv* /*env*/,
-                                                jobject /*jobj*/, jlong jhandle,
-                                                jlong WAL_size_limit_MB) {
+void Java_org_rocksdb_Options_setWalSizeLimitMB(
+    JNIEnv*, jobject, jlong jhandle, jlong WAL_size_limit_MB) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_size_limit_MB =
       static_cast<uint64_t>(WAL_size_limit_MB);
 }
@@ -1125,9 +1066,8 @@
  * Method: manifestPreallocationSize
  * Signature: (J)J
 */
-jlong Java_org_rocksdb_Options_manifestPreallocationSize(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jlong Java_org_rocksdb_Options_manifestPreallocationSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->manifest_preallocation_size;
 }
@@ -1138,8 +1078,8 @@ jlong Java_org_rocksdb_Options_manifestPreallocationSize(JNIEnv* /*env*/,
(JJ)V */ void Java_org_rocksdb_Options_setManifestPreallocationSize( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong preallocation_size) { - rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(preallocation_size); + JNIEnv* env, jobject, jlong jhandle, jlong preallocation_size) { + auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(preallocation_size); if (s.ok()) { reinterpret_cast(jhandle)->manifest_preallocation_size = preallocation_size; @@ -1152,11 +1092,12 @@ void Java_org_rocksdb_Options_setManifestPreallocationSize( * Method: setTableFactory * Signature: (JJ)V */ -void Java_org_rocksdb_Options_setTableFactory(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, - jlong jfactory_handle) { - reinterpret_cast(jhandle)->table_factory.reset( - reinterpret_cast(jfactory_handle)); +void Java_org_rocksdb_Options_setTableFactory( + JNIEnv*, jobject, jlong jhandle, jlong jtable_factory_handle) { + auto* options = reinterpret_cast(jhandle); + auto* table_factory = + reinterpret_cast(jtable_factory_handle); + options->table_factory.reset(table_factory); } /* @@ -1164,9 +1105,8 @@ void Java_org_rocksdb_Options_setTableFactory(JNIEnv* /*env*/, jobject /*jobj*/, * Method: allowMmapReads * Signature: (J)Z */ -jboolean Java_org_rocksdb_Options_allowMmapReads(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_Options_allowMmapReads( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->allow_mmap_reads; } @@ -1175,9 +1115,8 @@ jboolean Java_org_rocksdb_Options_allowMmapReads(JNIEnv* /*env*/, * Method: setAllowMmapReads * Signature: (JZ)V */ -void Java_org_rocksdb_Options_setAllowMmapReads(JNIEnv* /*env*/, - jobject /*jobj*/, jlong jhandle, - jboolean allow_mmap_reads) { +void Java_org_rocksdb_Options_setAllowMmapReads( + JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_reads) { reinterpret_cast(jhandle)->allow_mmap_reads = static_cast(allow_mmap_reads); } @@ -1187,9 +1126,8 @@ void Java_org_rocksdb_Options_setAllowMmapReads(JNIEnv* /*env*/, * Method: allowMmapWrites * Signature: (J)Z */ -jboolean Java_org_rocksdb_Options_allowMmapWrites(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_Options_allowMmapWrites( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->allow_mmap_writes; } @@ -1198,10 +1136,8 @@ jboolean Java_org_rocksdb_Options_allowMmapWrites(JNIEnv* /*env*/, * Method: setAllowMmapWrites * Signature: (JZ)V */ -void Java_org_rocksdb_Options_setAllowMmapWrites(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean allow_mmap_writes) { +void Java_org_rocksdb_Options_setAllowMmapWrites( + JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_writes) { reinterpret_cast(jhandle)->allow_mmap_writes = static_cast(allow_mmap_writes); } @@ -1211,9 +1147,8 @@ void Java_org_rocksdb_Options_setAllowMmapWrites(JNIEnv* /*env*/, * Method: useDirectReads * Signature: (J)Z */ -jboolean Java_org_rocksdb_Options_useDirectReads(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_Options_useDirectReads( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->use_direct_reads; } @@ -1222,9 +1157,8 @@ jboolean Java_org_rocksdb_Options_useDirectReads(JNIEnv* /*env*/, * Method: setUseDirectReads * Signature: (JZ)V */ -void Java_org_rocksdb_Options_setUseDirectReads(JNIEnv* /*env*/, - jobject /*jobj*/, jlong jhandle, - jboolean use_direct_reads) { +void Java_org_rocksdb_Options_setUseDirectReads( + JNIEnv*, jobject, jlong jhandle, jboolean 
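From the Java API this native setter is normally reached via Options#setTableFormatConfig, which hands the configured table factory's handle down to setTableFactory. A minimal sketch (the block size value is illustrative):

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.Options;

public class TableFactoryExample {
  public static void main(String[] args) {
    try (final Options options = new Options()) {
      // Configures the native table_factory behind setTableFactory().
      options.setTableFormatConfig(
          new BlockBasedTableConfig().setBlockSize(16 * 1024));
    }
  }
}
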
@@ -1164,9 +1105,8 @@ void Java_org_rocksdb_Options_setTableFactory(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: allowMmapReads
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_allowMmapReads(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allowMmapReads(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_reads;
 }
@@ -1175,9 +1115,8 @@ jboolean Java_org_rocksdb_Options_allowMmapReads(JNIEnv* /*env*/,
  * Method: setAllowMmapReads
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setAllowMmapReads(JNIEnv* /*env*/,
-                                                jobject /*jobj*/, jlong jhandle,
-                                                jboolean allow_mmap_reads) {
+void Java_org_rocksdb_Options_setAllowMmapReads(
+    JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_reads) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_reads =
       static_cast<bool>(allow_mmap_reads);
 }
@@ -1187,9 +1126,8 @@ void Java_org_rocksdb_Options_setAllowMmapReads(JNIEnv* /*env*/,
  * Method: allowMmapWrites
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_allowMmapWrites(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allowMmapWrites(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_writes;
 }
@@ -1198,10 +1136,8 @@ jboolean Java_org_rocksdb_Options_allowMmapWrites(JNIEnv* /*env*/,
  * Method: setAllowMmapWrites
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setAllowMmapWrites(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jboolean allow_mmap_writes) {
+void Java_org_rocksdb_Options_setAllowMmapWrites(
+    JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_writes) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_writes =
       static_cast<bool>(allow_mmap_writes);
 }
@@ -1211,9 +1147,8 @@ void Java_org_rocksdb_Options_setAllowMmapWrites(JNIEnv* /*env*/,
  * Method: useDirectReads
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_useDirectReads(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jboolean Java_org_rocksdb_Options_useDirectReads(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->use_direct_reads;
 }
@@ -1222,9 +1157,8 @@ jboolean Java_org_rocksdb_Options_useDirectReads(JNIEnv* /*env*/,
  * Method: setUseDirectReads
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setUseDirectReads(JNIEnv* /*env*/,
-                                                jobject /*jobj*/, jlong jhandle,
-                                                jboolean use_direct_reads) {
+void Java_org_rocksdb_Options_setUseDirectReads(
+    JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->use_direct_reads =
       static_cast<bool>(use_direct_reads);
 }
@@ -1235,7 +1169,7 @@ void Java_org_rocksdb_Options_setUseDirectReads(JNIEnv* /*env*/,
  * Signature: (J)Z
  */
 jboolean Java_org_rocksdb_Options_useDirectIoForFlushAndCompaction(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->use_direct_io_for_flush_and_compaction;
 }
@@ -1246,7 +1180,7 @@ jboolean Java_org_rocksdb_Options_useDirectIoForFlushAndCompaction(
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setUseDirectIoForFlushAndCompaction(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jboolean use_direct_io_for_flush_and_compaction) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->use_direct_io_for_flush_and_compaction =
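The mmap and direct-I/O toggles above map one-to-one onto Java setters on org.rocksdb.Options. A minimal sketch enabling O_DIRECT (the flag combination is illustrative; mmap reads and direct reads are alternative read paths and are not normally combined):

import org.rocksdb.Options;

public class DirectIoExample {
  public static void main(String[] args) {
    try (final Options options = new Options()) {
      options.setAllowMmapReads(false);
      options.setUseDirectReads(true);
      options.setUseDirectIoForFlushAndCompaction(true);
    }
  }
}
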
@@ -1258,9 +1192,8 @@ void Java_org_rocksdb_Options_setUseDirectIoForFlushAndCompaction(
  * Method: setAllowFAllocate
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setAllowFAllocate(JNIEnv* /*env*/,
-                                                jobject /*jobj*/, jlong jhandle,
-                                                jboolean jallow_fallocate) {
+void Java_org_rocksdb_Options_setAllowFAllocate(
+    JNIEnv*, jobject, jlong jhandle, jboolean jallow_fallocate) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->allow_fallocate =
       static_cast<bool>(jallow_fallocate);
 }
@@ -1270,9 +1203,8 @@ void Java_org_rocksdb_Options_setAllowFAllocate(JNIEnv* /*env*/,
  * Method: allowFAllocate
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_allowFAllocate(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allowFAllocate(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opt->allow_fallocate);
 }
@@ -1282,9 +1214,8 @@ jboolean Java_org_rocksdb_Options_allowFAllocate(JNIEnv* /*env*/,
  * Method: isFdCloseOnExec
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_isFdCloseOnExec(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jboolean Java_org_rocksdb_Options_isFdCloseOnExec(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->is_fd_close_on_exec;
 }
@@ -1293,10 +1224,8 @@ jboolean Java_org_rocksdb_Options_isFdCloseOnExec(JNIEnv* /*env*/,
  * Method: setIsFdCloseOnExec
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setIsFdCloseOnExec(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jboolean is_fd_close_on_exec) {
+void Java_org_rocksdb_Options_setIsFdCloseOnExec(
+    JNIEnv*, jobject, jlong jhandle, jboolean is_fd_close_on_exec) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->is_fd_close_on_exec =
       static_cast<bool>(is_fd_close_on_exec);
 }
@@ -1306,9 +1235,8 @@ void Java_org_rocksdb_Options_setIsFdCloseOnExec(JNIEnv* /*env*/,
  * Method: statsDumpPeriodSec
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_statsDumpPeriodSec(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jint Java_org_rocksdb_Options_statsDumpPeriodSec(
    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->stats_dump_period_sec;
 }
@@ -1318,7 +1246,7 @@ jint Java_org_rocksdb_Options_statsDumpPeriodSec(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setStatsDumpPeriodSec(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jint stats_dump_period_sec) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->stats_dump_period_sec =
       static_cast<int>(stats_dump_period_sec);
@@ -1329,9 +1257,8 @@ void Java_org_rocksdb_Options_setStatsDumpPeriodSec(
  * Method: adviseRandomOnOpen
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_adviseRandomOnOpen(JNIEnv* /*env*/,
-                                                     jobject /*jobj*/,
-                                                     jlong jhandle) {
+jboolean Java_org_rocksdb_Options_adviseRandomOnOpen(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->advise_random_on_open;
 }
@@ -1341,7 +1268,7 @@ jboolean Java_org_rocksdb_Options_adviseRandomOnOpen(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setAdviseRandomOnOpen(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jboolean advise_random_on_open) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->advise_random_on_open =
       static_cast<bool>(advise_random_on_open);
@@ -1353,7 +1280,7 @@ void Java_org_rocksdb_Options_setAdviseRandomOnOpen(
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setDbWriteBufferSize(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jdb_write_buffer_size) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->db_write_buffer_size = static_cast<size_t>(jdb_write_buffer_size);
@@ -1364,9 +1291,8 @@ void Java_org_rocksdb_Options_setDbWriteBufferSize(
  * Method: dbWriteBufferSize
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_dbWriteBufferSize(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+jlong Java_org_rocksdb_Options_dbWriteBufferSize(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jlong>(opt->db_write_buffer_size);
 }
@@ -1377,7 +1303,7 @@ jlong Java_org_rocksdb_Options_dbWriteBufferSize(JNIEnv* /*env*/,
  * Signature: (JB)V
  */
 void Java_org_rocksdb_Options_setAccessHintOnCompactionStart(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jbyte jaccess_hint_value) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->access_hint_on_compaction_start =
@@ -1389,9 +1315,8 @@ void Java_org_rocksdb_Options_setAccessHintOnCompactionStart(
  * Method: accessHintOnCompactionStart
  * Signature: (J)B
  */
-jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(JNIEnv* /*env*/,
-                                                           jobject /*jobj*/,
-                                                           jlong jhandle) {
+jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return rocksdb::AccessHintJni::toJavaAccessHint(
       opt->access_hint_on_compaction_start);
@@ -1403,7 +1328,7 @@ jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setNewTableReaderForCompactionInputs(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jboolean jnew_table_reader_for_compaction_inputs) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->new_table_reader_for_compaction_inputs =
@@ -1416,7 +1341,7 @@ void Java_org_rocksdb_Options_setNewTableReaderForCompactionInputs(
  * Signature: (J)Z
  */
 jboolean Java_org_rocksdb_Options_newTableReaderForCompactionInputs(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<bool>(opt->new_table_reader_for_compaction_inputs);
 }
@@ -1427,7 +1352,7 @@ jboolean Java_org_rocksdb_Options_newTableReaderForCompactionInputs(
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setCompactionReadaheadSize(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jcompaction_readahead_size) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->compaction_readahead_size =
@@ -1439,9 +1364,8 @@ void Java_org_rocksdb_Options_setCompactionReadaheadSize(
  * Method: compactionReadaheadSize
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_compactionReadaheadSize(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jhandle) {
+jlong Java_org_rocksdb_Options_compactionReadaheadSize(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jlong>(opt->compaction_readahead_size);
 }
@@ -1452,8 +1376,7 @@ jlong Java_org_rocksdb_Options_compactionReadaheadSize(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setRandomAccessMaxBufferSize(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jrandom_access_max_buffer_size) {
+    JNIEnv*, jobject, jlong jhandle, jlong jrandom_access_max_buffer_size) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->random_access_max_buffer_size =
       static_cast<size_t>(jrandom_access_max_buffer_size);
@@ -1464,9 +1387,8 @@ void Java_org_rocksdb_Options_setRandomAccessMaxBufferSize(
  * Method: randomAccessMaxBufferSize
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jlong>(opt->random_access_max_buffer_size);
 }
@@ -1477,7 +1399,7 @@ jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setWritableFileMaxBufferSize(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jwritable_file_max_buffer_size) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->writable_file_max_buffer_size =
@@ -1489,9 +1411,8 @@ void Java_org_rocksdb_Options_setWritableFileMaxBufferSize(
  * Method: writableFileMaxBufferSize
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jlong>(opt->writable_file_max_buffer_size);
 }
@@ -1501,9 +1422,8 @@ jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(JNIEnv* /*env*/,
  * Method: useAdaptiveMutex
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_useAdaptiveMutex(JNIEnv* /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle) {
+jboolean Java_org_rocksdb_Options_useAdaptiveMutex(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->use_adaptive_mutex;
 }
@@ -1512,10 +1432,8 @@ jboolean Java_org_rocksdb_Options_useAdaptiveMutex(JNIEnv* /*env*/,
  * Method: setUseAdaptiveMutex
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setUseAdaptiveMutex(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle,
-                                                  jboolean use_adaptive_mutex) {
+void Java_org_rocksdb_Options_setUseAdaptiveMutex(
+    JNIEnv*, jobject, jlong jhandle, jboolean use_adaptive_mutex) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->use_adaptive_mutex =
       static_cast<bool>(use_adaptive_mutex);
 }
@@ -1525,8 +1443,8 @@ void Java_org_rocksdb_Options_setUseAdaptiveMutex(JNIEnv* /*env*/,
  * Method: bytesPerSync
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_bytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/,
-                                            jlong jhandle) {
+jlong Java_org_rocksdb_Options_bytesPerSync(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->bytes_per_sync;
 }
@@ -1535,9 +1453,8 @@ jlong Java_org_rocksdb_Options_bytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: setBytesPerSync
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_Options_setBytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/,
-                                              jlong jhandle,
-                                              jlong bytes_per_sync) {
+void Java_org_rocksdb_Options_setBytesPerSync(
+    JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->bytes_per_sync =
       static_cast<int64_t>(bytes_per_sync);
 }
@@ -1547,10 +1464,8 @@ void Java_org_rocksdb_Options_setBytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method: setWalBytesPerSync
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_Options_setWalBytesPerSync(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jlong jwal_bytes_per_sync) {
+void Java_org_rocksdb_Options_setWalBytesPerSync(
+    JNIEnv*, jobject, jlong jhandle, jlong jwal_bytes_per_sync) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->wal_bytes_per_sync =
       static_cast<int64_t>(jwal_bytes_per_sync);
 }
@@ -1560,9 +1475,8 @@ void Java_org_rocksdb_Options_setWalBytesPerSync(JNIEnv* /*env*/,
  * Method: walBytesPerSync
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_walBytesPerSync(JNIEnv* /*env*/,
-                                               jobject /*jobj*/,
-                                               jlong jhandle) {
+jlong Java_org_rocksdb_Options_walBytesPerSync(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jlong>(opt->wal_bytes_per_sync);
 }
@@ -1573,8 +1487,7 @@ jlong Java_org_rocksdb_Options_walBytesPerSync(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setEnableThreadTracking(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jenable_thread_tracking) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jenable_thread_tracking) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->enable_thread_tracking = static_cast<bool>(jenable_thread_tracking);
 }
@@ -1584,9 +1497,8 @@ void Java_org_rocksdb_Options_setEnableThreadTracking(
  * Method: enableThreadTracking
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_enableThreadTracking(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jhandle) {
+jboolean Java_org_rocksdb_Options_enableThreadTracking(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opt->enable_thread_tracking);
 }
@@ -1596,10 +1508,8 @@ jboolean Java_org_rocksdb_Options_enableThreadTracking(JNIEnv* /*env*/,
  * Method: setDelayedWriteRate
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_Options_setDelayedWriteRate(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle,
-                                                  jlong jdelayed_write_rate) {
+void Java_org_rocksdb_Options_setDelayedWriteRate(
+    JNIEnv*, jobject, jlong jhandle, jlong jdelayed_write_rate) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
 }
@@ -1609,22 +1519,41 @@ void Java_org_rocksdb_Options_setDelayedWriteRate(JNIEnv* /*env*/,
  * Method: delayedWriteRate
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_delayedWriteRate(JNIEnv* /*env*/,
-                                                jobject /*jobj*/,
-                                                jlong jhandle) {
+jlong Java_org_rocksdb_Options_delayedWriteRate(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jlong>(opt->delayed_write_rate);
 }
 
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setEnablePipelinedWrite
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setEnablePipelinedWrite(
+    JNIEnv*, jobject, jlong jhandle, jboolean jenable_pipelined_write) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->enable_pipelined_write = jenable_pipelined_write == JNI_TRUE;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    enablePipelinedWrite
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_enablePipelinedWrite(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->enable_pipelined_write);
+}
+
 /*
  * Class:     org_rocksdb_Options
  * Method:    setAllowConcurrentMemtableWrite
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setAllowConcurrentMemtableWrite(JNIEnv* /*env*/,
-                                                              jobject /*jobj*/,
-                                                              jlong jhandle,
-                                                              jboolean allow) {
+void Java_org_rocksdb_Options_setAllowConcurrentMemtableWrite(
+    JNIEnv*, jobject, jlong jhandle, jboolean allow) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->allow_concurrent_memtable_write = static_cast<bool>(allow);
 }
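The new pair of bindings above corresponds to DBOptions::enable_pipelined_write, which lets WAL writes and memtable writes proceed in separate pipeline stages. A minimal Java-side sketch:

import org.rocksdb.Options;

public class PipelinedWriteExample {
  public static void main(String[] args) {
    try (final Options options = new Options()) {
      options.setEnablePipelinedWrite(true);
      assert options.enablePipelinedWrite();
    }
  }
}
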
@@ -1634,9 +1563,8 @@ void Java_org_rocksdb_Options_setAllowConcurrentMemtableWrite(JNIEnv* /*env*/,
  * Method:    allowConcurrentMemtableWrite
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_allowConcurrentMemtableWrite(JNIEnv* /*env*/,
-                                                               jobject /*jobj*/,
-                                                               jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allowConcurrentMemtableWrite(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->allow_concurrent_memtable_write;
 }
@@ -1647,7 +1575,7 @@ jboolean Java_org_rocksdb_Options_allowConcurrentMemtableWrite(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setEnableWriteThreadAdaptiveYield(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean yield) {
+    JNIEnv*, jobject, jlong jhandle, jboolean yield) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->enable_write_thread_adaptive_yield = static_cast<bool>(yield);
 }
@@ -1658,7 +1586,7 @@ void Java_org_rocksdb_Options_setEnableWriteThreadAdaptiveYield(
  * Signature: (J)Z
  */
 jboolean Java_org_rocksdb_Options_enableWriteThreadAdaptiveYield(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->enable_write_thread_adaptive_yield;
 }
@@ -1668,10 +1596,8 @@ jboolean Java_org_rocksdb_Options_enableWriteThreadAdaptiveYield(
  * Method:    setWriteThreadMaxYieldUsec
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_Options_setWriteThreadMaxYieldUsec(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle,
-                                                         jlong max) {
+void Java_org_rocksdb_Options_setWriteThreadMaxYieldUsec(
+    JNIEnv*, jobject, jlong jhandle, jlong max) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->write_thread_max_yield_usec =
       static_cast<int64_t>(max);
 }
@@ -1681,9 +1607,8 @@ void Java_org_rocksdb_Options_setWriteThreadMaxYieldUsec(JNIEnv* /*env*/,
  * Method:    writeThreadMaxYieldUsec
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_writeThreadMaxYieldUsec(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jhandle) {
+jlong Java_org_rocksdb_Options_writeThreadMaxYieldUsec(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->write_thread_max_yield_usec;
 }
@@ -1693,10 +1618,8 @@ jlong Java_org_rocksdb_Options_writeThreadMaxYieldUsec(JNIEnv* /*env*/,
  * Method:    setWriteThreadSlowYieldUsec
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_Options_setWriteThreadSlowYieldUsec(JNIEnv* /*env*/,
-                                                          jobject /*jobj*/,
-                                                          jlong jhandle,
-                                                          jlong slow) {
+void Java_org_rocksdb_Options_setWriteThreadSlowYieldUsec(
+    JNIEnv*, jobject, jlong jhandle, jlong slow) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->write_thread_slow_yield_usec =
       static_cast<int64_t>(slow);
 }
@@ -1706,9 +1629,8 @@ void Java_org_rocksdb_Options_setWriteThreadSlowYieldUsec(JNIEnv* /*env*/,
  * Method:    writeThreadSlowYieldUsec
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(JNIEnv* /*env*/,
-                                                        jobject /*jobj*/,
-                                                        jlong jhandle) {
+jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->write_thread_slow_yield_usec;
 }
@@ -1719,7 +1641,7 @@ jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setSkipStatsUpdateOnDbOpen(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jboolean jskip_stats_update_on_db_open) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->skip_stats_update_on_db_open =
@@ -1731,9 +1653,8 @@ void Java_org_rocksdb_Options_setSkipStatsUpdateOnDbOpen(
  * Method:    skipStatsUpdateOnDbOpen
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(JNIEnv* /*env*/,
-                                                          jobject /*jobj*/,
-                                                          jlong jhandle) {
+jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opt->skip_stats_update_on_db_open);
 }
@@ -1744,7 +1665,7 @@ jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(JNIEnv* /*env*/,
  * Signature: (JB)V
  */
 void Java_org_rocksdb_Options_setWalRecoveryMode(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jbyte jwal_recovery_mode_value) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->wal_recovery_mode = rocksdb::WALRecoveryModeJni::toCppWALRecoveryMode(
@@ -1756,9 +1677,8 @@ void Java_org_rocksdb_Options_setWalRecoveryMode(
  * Method:    walRecoveryMode
  * Signature: (J)B
  */
-jbyte Java_org_rocksdb_Options_walRecoveryMode(JNIEnv* /*env*/,
-                                               jobject /*jobj*/,
-                                               jlong jhandle) {
+jbyte Java_org_rocksdb_Options_walRecoveryMode(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return rocksdb::WALRecoveryModeJni::toJavaWALRecoveryMode(
       opt->wal_recovery_mode);
@@ -1769,8 +1689,8 @@ jbyte Java_org_rocksdb_Options_walRecoveryMode(JNIEnv* /*env*/,
  * Method:    setAllow2pc
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setAllow2pc(JNIEnv* /*env*/, jobject /*jobj*/,
-                                          jlong jhandle, jboolean jallow_2pc) {
+void Java_org_rocksdb_Options_setAllow2pc(
+    JNIEnv*, jobject, jlong jhandle, jboolean jallow_2pc) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->allow_2pc = static_cast<bool>(jallow_2pc);
 }
@@ -1780,8 +1700,8 @@ void Java_org_rocksdb_Options_setAllow2pc(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method:    allow2pc
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_allow2pc(JNIEnv* /*env*/, jobject /*jobj*/,
-                                           jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allow2pc(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opt->allow_2pc);
 }
@@ -1791,23 +1711,35 @@ jboolean Java_org_rocksdb_Options_allow2pc(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method:    setRowCache
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_Options_setRowCache(JNIEnv* /*env*/, jobject /*jobj*/,
-                                          jlong jhandle,
-                                          jlong jrow_cache_handle) {
+void Java_org_rocksdb_Options_setRowCache(
+    JNIEnv*, jobject, jlong jhandle, jlong jrow_cache_handle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   auto* row_cache =
       reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(jrow_cache_handle);
   opt->row_cache = *row_cache;
 }
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setWalFilter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalFilter(
+    JNIEnv*, jobject, jlong jhandle, jlong jwal_filter_handle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  auto* wal_filter =
+      reinterpret_cast<rocksdb::WalFilterJniCallback*>(jwal_filter_handle);
+  opt->wal_filter = wal_filter;
+}
+
 /*
  * Class:     org_rocksdb_Options
  * Method:    setFailIfOptionsFileError
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setFailIfOptionsFileError(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jfail_if_options_file_error) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jfail_if_options_file_error) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->fail_if_options_file_error =
       static_cast<bool>(jfail_if_options_file_error);
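Note that setWalFilter stores a raw pointer to the native callback object, so the Java-side filter must stay strongly referenced for as long as the database is open. A sketch, where the AbstractWalFilter subclass itself is left to the application:

import org.rocksdb.AbstractWalFilter;
import org.rocksdb.Options;

public class WalFilterExample {
  // `walFilter` is an application-provided subclass of AbstractWalFilter;
  // keep a reference to it alongside the Options/DB that use it.
  static Options withWalFilter(final AbstractWalFilter walFilter) {
    final Options options = new Options();
    options.setWalFilter(walFilter);
    return options;
  }
}
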
@@ -1818,9 +1750,8 @@ void Java_org_rocksdb_Options_setFailIfOptionsFileError(
  * Method:    failIfOptionsFileError
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_failIfOptionsFileError(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jboolean Java_org_rocksdb_Options_failIfOptionsFileError(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opt->fail_if_options_file_error);
 }
@@ -1830,10 +1761,8 @@ jboolean Java_org_rocksdb_Options_failIfOptionsFileError(JNIEnv* /*env*/,
  * Method:    setDumpMallocStats
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setDumpMallocStats(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jboolean jdump_malloc_stats) {
+void Java_org_rocksdb_Options_setDumpMallocStats(
+    JNIEnv*, jobject, jlong jhandle, jboolean jdump_malloc_stats) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
 }
@@ -1843,9 +1772,8 @@ void Java_org_rocksdb_Options_setDumpMallocStats(JNIEnv* /*env*/,
  * Method:    dumpMallocStats
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_dumpMallocStats(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jboolean Java_org_rocksdb_Options_dumpMallocStats(
+    JNIEnv*, jobject, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opt->dump_malloc_stats);
 }
@@ -1856,8 +1784,7 @@ jboolean Java_org_rocksdb_Options_dumpMallocStats(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setAvoidFlushDuringRecovery(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean javoid_flush_during_recovery) {
+    JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_recovery) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->avoid_flush_during_recovery =
       static_cast<bool>(javoid_flush_during_recovery);
@@ -1868,9 +1795,8 @@ void Java_org_rocksdb_Options_setAvoidFlushDuringRecovery(
  * Method:    avoidFlushDuringRecovery
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(JNIEnv* /*env*/,
-                                                           jobject /*jobj*/,
-                                                           jlong jhandle) {
+jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opt->avoid_flush_during_recovery);
 }
@@ -1881,8 +1807,7 @@ jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setAvoidFlushDuringShutdown(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean javoid_flush_during_shutdown) {
+    JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_shutdown) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   opt->avoid_flush_during_shutdown =
       static_cast<bool>(javoid_flush_during_shutdown);
@@ -1893,19 +1818,128 @@ void Java_org_rocksdb_Options_setAvoidFlushDuringShutdown(
  * Method:    avoidFlushDuringShutdown
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_avoidFlushDuringShutdown(JNIEnv* /*env*/,
-                                                           jobject /*jobj*/,
-                                                           jlong jhandle) {
+jboolean Java_org_rocksdb_Options_avoidFlushDuringShutdown(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opt->avoid_flush_during_shutdown);
 }
 
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setAllowIngestBehind
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllowIngestBehind(
+    JNIEnv*, jobject, jlong jhandle, jboolean jallow_ingest_behind) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->allow_ingest_behind = jallow_ingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    allowIngestBehind
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allowIngestBehind(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->allow_ingest_behind);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setPreserveDeletes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setPreserveDeletes(
+    JNIEnv*, jobject, jlong jhandle, jboolean jpreserve_deletes) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->preserve_deletes = jpreserve_deletes == JNI_TRUE;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    preserveDeletes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_preserveDeletes(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->preserve_deletes);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setTwoWriteQueues
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setTwoWriteQueues(
+    JNIEnv*, jobject, jlong jhandle, jboolean jtwo_write_queues) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->two_write_queues = jtwo_write_queues == JNI_TRUE;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    twoWriteQueues
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_twoWriteQueues(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->two_write_queues);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setManualWalFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setManualWalFlush(
+    JNIEnv*, jobject, jlong jhandle, jboolean jmanual_wal_flush) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->manual_wal_flush = jmanual_wal_flush == JNI_TRUE;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    manualWalFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_manualWalFlush(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->manual_wal_flush);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setAtomicFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAtomicFlush(
+    JNIEnv*, jobject, jlong jhandle, jboolean jatomic_flush) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->atomic_flush = jatomic_flush == JNI_TRUE;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    atomicFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_atomicFlush(
+    JNIEnv *, jobject, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->atomic_flush);
+}
+
 /*
  * Method:    tableFactoryName
  * Signature: (J)Ljava/lang/String
  */
-jstring Java_org_rocksdb_Options_tableFactoryName(JNIEnv* env, jobject /*jobj*/,
-                                                  jlong jhandle) {
+jstring Java_org_rocksdb_Options_tableFactoryName(
+    JNIEnv* env, jobject, jlong jhandle) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
   rocksdb::TableFactory* tf = opt->table_factory.get();
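The block of new bindings above exposes several recent DBOptions flags to Java. A sketch exercising them (values illustrative; setPreserveDeletes(true) additionally requires the application to advance the preserved sequence number on the open DB, and with setManualWalFlush(true) the WAL is persisted only on explicit flush calls):

import org.rocksdb.Options;

public class NewDbFlagsExample {
  public static void main(String[] args) {
    try (final Options options = new Options()) {
      options.setAllowIngestBehind(true);  // reserve bottommost level for ingestion
      options.setTwoWriteQueues(true);     // dedicated queue for WAL-only writes
      options.setManualWalFlush(true);     // WAL flushed only on demand
      options.setAtomicFlush(true);        // flush column families atomically
    }
  }
}
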
@@ -1921,9 +1955,8 @@ jstring Java_org_rocksdb_Options_tableFactoryName(JNIEnv* env, jobject /*jobj*/,
  * Method:    minWriteBufferNumberToMerge
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(JNIEnv* /*env*/,
-                                                          jobject /*jobj*/,
-                                                          jlong jhandle) {
+jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->min_write_buffer_number_to_merge;
 }
@@ -1934,8 +1967,7 @@ jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jint jmin_write_buffer_number_to_merge) {
+    JNIEnv*, jobject, jlong jhandle, jint jmin_write_buffer_number_to_merge) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->min_write_buffer_number_to_merge =
       static_cast<int>(jmin_write_buffer_number_to_merge);
@@ -1945,9 +1977,8 @@ void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
  * Method:    maxWriteBufferNumberToMaintain
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(JNIEnv* /*env*/,
-                                                             jobject /*jobj*/,
-                                                             jlong jhandle) {
+jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->max_write_buffer_number_to_maintain;
 }
@@ -1958,7 +1989,7 @@ jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setMaxWriteBufferNumberToMaintain(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jint jmax_write_buffer_number_to_maintain) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->max_write_buffer_number_to_maintain =
@@ -1971,8 +2002,7 @@ void Java_org_rocksdb_Options_setMaxWriteBufferNumberToMaintain(
  * Signature: (JB)V
  */
 void Java_org_rocksdb_Options_setCompressionType(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jbyte jcompression_type_value) {
+    JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
   opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType(
       jcompression_type_value);
@@ -1983,9 +2013,8 @@ void Java_org_rocksdb_Options_setCompressionType(
  * Method:    compressionType
  * Signature: (J)B
  */
-jbyte Java_org_rocksdb_Options_compressionType(JNIEnv* /*env*/,
-                                               jobject /*jobj*/,
-                                               jlong jhandle) {
+jbyte Java_org_rocksdb_Options_compressionType(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
   return rocksdb::CompressionTypeJni::toJavaCompressionType(opts->compression);
 }
@@ -2001,8 +2030,8 @@ jbyte Java_org_rocksdb_Options_compressionType(JNIEnv* /*env*/,
  * @return A std::unique_ptr to the vector, or std::unique_ptr(nullptr) if a JNI
  *     exception occurs
  */
-std::unique_ptr<std::vector<rocksdb::CompressionType>>
-rocksdb_compression_vector_helper(JNIEnv* env, jbyteArray jcompression_levels) {
+std::unique_ptr<std::vector<rocksdb::CompressionType>> rocksdb_compression_vector_helper(
+    JNIEnv* env, jbyteArray jcompression_levels) {
   jsize len = env->GetArrayLength(jcompression_levels);
   jbyte* jcompression_level =
       env->GetByteArrayElements(jcompression_levels, nullptr);
@@ -2072,8 +2101,7 @@ jbyteArray rocksdb_compression_list_helper(
  * Signature: (J[B)V
  */
 void Java_org_rocksdb_Options_setCompressionPerLevel(
-    JNIEnv* env, jobject /*jobj*/, jlong jhandle,
-    jbyteArray jcompressionLevels) {
+    JNIEnv* env, jobject, jlong jhandle, jbyteArray jcompressionLevels) {
   auto uptr_compression_levels =
       rocksdb_compression_vector_helper(env, jcompressionLevels);
   if (!uptr_compression_levels) {
@@ -2089,9 +2117,8 @@ void Java_org_rocksdb_Options_setCompressionPerLevel(
  * Method:    compressionPerLevel
  * Signature: (J)[B
  */
-jbyteArray Java_org_rocksdb_Options_compressionPerLevel(JNIEnv* env,
                                                        jobject /*jobj*/,
-                                                        jlong jhandle) {
+jbyteArray Java_org_rocksdb_Options_compressionPerLevel(
+    JNIEnv* env, jobject, jlong jhandle) {
   auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
   return rocksdb_compression_list_helper(env, options->compression_per_level);
 }
@@ -2102,8 +2129,7 @@ jbyteArray Java_org_rocksdb_Options_compressionPerLevel(JNIEnv* env,
  * Signature: (JB)V
  */
 void Java_org_rocksdb_Options_setBottommostCompressionType(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jbyte jcompression_type_value) {
+    JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
   auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
   options->bottommost_compression =
       rocksdb::CompressionTypeJni::toCppCompressionType(
@@ -2115,9 +2141,8 @@ void Java_org_rocksdb_Options_setBottommostCompressionType(
  * Method:    bottommostCompressionType
  * Signature: (J)B
  */
-jbyte Java_org_rocksdb_Options_bottommostCompressionType(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jbyte Java_org_rocksdb_Options_bottommostCompressionType(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
   return rocksdb::CompressionTypeJni::toJavaCompressionType(
       options->bottommost_compression);
@@ -2129,7 +2154,7 @@ jbyte Java_org_rocksdb_Options_bottommostCompressionType(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setBottommostCompressionOptions(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jbottommost_compression_options_handle) {
   auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
   auto* bottommost_compression_options =
@@ -2144,8 +2169,7 @@ void Java_org_rocksdb_Options_setBottommostCompressionOptions(
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setCompressionOptions(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jcompression_options_handle) {
+    JNIEnv*, jobject, jlong jhandle, jlong jcompression_options_handle) {
   auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
   auto* compression_options = reinterpret_cast<rocksdb::CompressionOptions*>(
       jcompression_options_handle);
@@ -2157,12 +2181,12 @@ void Java_org_rocksdb_Options_setCompressionOptions(
  * Method:    setCompactionStyle
  * Signature: (JB)V
  */
-void Java_org_rocksdb_Options_setCompactionStyle(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jbyte compaction_style) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style =
-      static_cast<rocksdb::CompactionStyle>(compaction_style);
+void Java_org_rocksdb_Options_setCompactionStyle(
+    JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_style) {
+  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+  options->compaction_style =
+      rocksdb::CompactionStyleJni::toCppCompactionStyle(
+          jcompaction_style);
 }
 
 /*
@@ -2170,10 +2194,11 @@ void Java_org_rocksdb_Options_setCompactionStyle(JNIEnv* /*env*/,
  * Method:    compactionStyle
  * Signature: (J)B
  */
-jbyte Java_org_rocksdb_Options_compactionStyle(JNIEnv* /*env*/,
-                                               jobject /*jobj*/,
-                                               jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style;
+jbyte Java_org_rocksdb_Options_compactionStyle(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return rocksdb::CompactionStyleJni::toJavaCompactionStyle(
+      options->compaction_style);
 }
 
 /*
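setCompactionStyle/compactionStyle now round-trip through CompactionStyleJni instead of a raw static_cast, so the Java enum's byte values no longer need to mirror the C++ enum ordering. Java-side usage is unchanged:

import org.rocksdb.CompactionStyle;
import org.rocksdb.Options;

public class CompactionStyleExample {
  public static void main(String[] args) {
    try (final Options options = new Options()) {
      options.setCompactionStyle(CompactionStyle.UNIVERSAL);
      assert options.compactionStyle() == CompactionStyle.UNIVERSAL;
    }
  }
}
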
@@ -2182,8 +2207,7 @@ jbyte Java_org_rocksdb_Options_compactionStyle(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setMaxTableFilesSizeFIFO(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jmax_table_files_size) {
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->compaction_options_fifo.max_table_files_size =
       static_cast<uint64_t>(jmax_table_files_size);
@@ -2194,9 +2218,8 @@ void Java_org_rocksdb_Options_setMaxTableFilesSizeFIFO(
  * Method:    maxTableFilesSizeFIFO
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(JNIEnv* /*env*/,
-                                                     jobject /*jobj*/,
-                                                     jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->compaction_options_fifo.max_table_files_size;
 }
@@ -2206,8 +2229,8 @@ jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(JNIEnv* /*env*/,
  * Method:    numLevels
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_numLevels(JNIEnv* /*env*/, jobject /*jobj*/,
-                                        jlong jhandle) {
+jint Java_org_rocksdb_Options_numLevels(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels;
 }
@@ -2216,8 +2239,8 @@ jint Java_org_rocksdb_Options_numLevels(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method:    setNumLevels
  * Signature: (JI)V
  */
-void Java_org_rocksdb_Options_setNumLevels(JNIEnv* /*env*/, jobject /*jobj*/,
-                                           jlong jhandle, jint jnum_levels) {
+void Java_org_rocksdb_Options_setNumLevels(
+    JNIEnv*, jobject, jlong jhandle, jint jnum_levels) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels =
       static_cast<int>(jnum_levels);
 }
@@ -2228,7 +2251,7 @@ void Java_org_rocksdb_Options_setNumLevels(JNIEnv* /*env*/, jobject /*jobj*/,
  * Signature: (J)I
  */
 jint Java_org_rocksdb_Options_levelZeroFileNumCompactionTrigger(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->level0_file_num_compaction_trigger;
 }
@@ -2239,7 +2262,7 @@ jint Java_org_rocksdb_Options_levelZeroFileNumCompactionTrigger(
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setLevelZeroFileNumCompactionTrigger(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jint jlevel0_file_num_compaction_trigger) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->level0_file_num_compaction_trigger =
@@ -2251,9 +2274,8 @@ void Java_org_rocksdb_Options_setLevelZeroFileNumCompactionTrigger(
  * Method:    levelZeroSlowdownWritesTrigger
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger(JNIEnv* /*env*/,
-                                                             jobject /*jobj*/,
-                                                             jlong jhandle) {
+jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->level0_slowdown_writes_trigger;
 }
@@ -2264,8 +2286,7 @@ jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setLevelZeroSlowdownWritesTrigger(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jint jlevel0_slowdown_writes_trigger) {
+    JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->level0_slowdown_writes_trigger =
       static_cast<int>(jlevel0_slowdown_writes_trigger);
 }
@@ -2275,9 +2296,8 @@ void Java_org_rocksdb_Options_setLevelZeroSlowdownWritesTrigger(
  * Method:    levelZeroStopWritesTrigger
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->level0_stop_writes_trigger;
 }
@@ -2288,8 +2308,7 @@ jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setLevelZeroStopWritesTrigger(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jint jlevel0_stop_writes_trigger) {
+    JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->level0_stop_writes_trigger =
       static_cast<int>(jlevel0_stop_writes_trigger);
 }
@@ -2299,9 +2318,8 @@ void Java_org_rocksdb_Options_setLevelZeroStopWritesTrigger(
  * Method:    targetFileSizeBase
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_targetFileSizeBase(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jlong Java_org_rocksdb_Options_targetFileSizeBase(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_base;
 }
@@ -2311,8 +2329,7 @@ jlong Java_org_rocksdb_Options_targetFileSizeBase(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setTargetFileSizeBase(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jtarget_file_size_base) {
+    JNIEnv*, jobject, jlong jhandle, jlong jtarget_file_size_base) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_base =
       static_cast<uint64_t>(jtarget_file_size_base);
 }
@@ -2322,9 +2339,8 @@ void Java_org_rocksdb_Options_setTargetFileSizeBase(
  * Method:    targetFileSizeMultiplier
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_targetFileSizeMultiplier(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jhandle) {
+jint Java_org_rocksdb_Options_targetFileSizeMultiplier(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->target_file_size_multiplier;
 }
@@ -2335,8 +2351,7 @@ jint Java_org_rocksdb_Options_targetFileSizeMultiplier(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setTargetFileSizeMultiplier(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jint jtarget_file_size_multiplier) {
+    JNIEnv*, jobject, jlong jhandle, jint jtarget_file_size_multiplier) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_multiplier =
       static_cast<int>(jtarget_file_size_multiplier);
 }
@@ -2346,9 +2361,8 @@ void Java_org_rocksdb_Options_setTargetFileSizeMultiplier(
  * Method:    maxBytesForLevelBase
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_maxBytesForLevelBase(JNIEnv* /*env*/,
-                                                    jobject /*jobj*/,
-                                                    jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxBytesForLevelBase(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_bytes_for_level_base;
 }
@@ -2358,8 +2372,7 @@ jlong Java_org_rocksdb_Options_maxBytesForLevelBase(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setMaxBytesForLevelBase(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jmax_bytes_for_level_base) {
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_bytes_for_level_base) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_bytes_for_level_base =
       static_cast<int64_t>(jmax_bytes_for_level_base);
 }
@@ -2370,7 +2383,7 @@ void Java_org_rocksdb_Options_setMaxBytesForLevelBase(
  * Signature: (J)Z
  */
 jboolean Java_org_rocksdb_Options_levelCompactionDynamicLevelBytes(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->level_compaction_dynamic_level_bytes;
 }
@@ -2381,8 +2394,7 @@ jboolean Java_org_rocksdb_Options_levelCompactionDynamicLevelBytes(
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setLevelCompactionDynamicLevelBytes(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jenable_dynamic_level_bytes) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jenable_dynamic_level_bytes) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->level_compaction_dynamic_level_bytes = (jenable_dynamic_level_bytes);
 }
@@ -2392,9 +2404,8 @@ void Java_org_rocksdb_Options_setLevelCompactionDynamicLevelBytes(
  * Method:    maxBytesForLevelMultiplier
  * Signature: (J)D
  */
-jdouble Java_org_rocksdb_Options_maxBytesForLevelMultiplier(JNIEnv* /*env*/,
-                                                            jobject /*jobj*/,
-                                                            jlong jhandle) {
+jdouble Java_org_rocksdb_Options_maxBytesForLevelMultiplier(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->max_bytes_for_level_multiplier;
 }
@@ -2405,8 +2416,7 @@ jdouble Java_org_rocksdb_Options_maxBytesForLevelMultiplier(JNIEnv* /*env*/,
  * Signature: (JD)V
  */
 void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jdouble jmax_bytes_for_level_multiplier) {
+    JNIEnv*, jobject, jlong jhandle, jdouble jmax_bytes_for_level_multiplier) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_bytes_for_level_multiplier =
       static_cast<double>(jmax_bytes_for_level_multiplier);
 }
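These leveled-compaction sizing knobs map directly onto Java setters. A sketch of a common configuration (all values illustrative):

import org.rocksdb.Options;

public class LevelSizingExample {
  public static void main(String[] args) {
    try (final Options options = new Options()) {
      options.setTargetFileSizeBase(64L * 1024 * 1024);    // 64 MB L1 files
      options.setTargetFileSizeMultiplier(2);
      options.setMaxBytesForLevelBase(512L * 1024 * 1024); // 512 MB for L1
      options.setMaxBytesForLevelMultiplier(10.0);
      options.setLevelCompactionDynamicLevelBytes(true);
    }
  }
}
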
@@ -2416,9 +2426,8 @@ void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier(
  * Method:    maxCompactionBytes
  * Signature: (J)I
  */
-jlong Java_org_rocksdb_Options_maxCompactionBytes(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxCompactionBytes(
+    JNIEnv*, jobject, jlong jhandle) {
   return static_cast<jlong>(
       reinterpret_cast<rocksdb::Options*>(jhandle)->max_compaction_bytes);
 }
@@ -2429,8 +2438,7 @@ jlong Java_org_rocksdb_Options_maxCompactionBytes(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setMaxCompactionBytes(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jmax_compaction_bytes) {
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_compaction_bytes) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_compaction_bytes =
       static_cast<uint64_t>(jmax_compaction_bytes);
 }
@@ -2440,8 +2448,8 @@ void Java_org_rocksdb_Options_setMaxCompactionBytes(
  * Method:    arenaBlockSize
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_arenaBlockSize(JNIEnv* /*env*/, jobject /*jobj*/,
-                                              jlong jhandle) {
+jlong Java_org_rocksdb_Options_arenaBlockSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size;
 }
@@ -2450,10 +2458,9 @@ jlong Java_org_rocksdb_Options_arenaBlockSize(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method:    setArenaBlockSize
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_Options_setArenaBlockSize(JNIEnv* env,
-                                                jobject /*jobj*/, jlong jhandle,
-                                                jlong jarena_block_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jarena_block_size);
+void Java_org_rocksdb_Options_setArenaBlockSize(
+    JNIEnv* env, jobject, jlong jhandle, jlong jarena_block_size) {
+  auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jarena_block_size);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size =
         jarena_block_size;
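check_if_jlong_fits_size_t is now a JniUtil member. A jlong that does not fit the platform's size_t leaves the option untouched and, as the s.ok() branch suggests, surfaces in Java as an IllegalArgumentException (mainly relevant on 32-bit JVMs). Sketch, with arenaBlockSize a hypothetical value:

import org.rocksdb.Options;

public class SizeTCheckExample {
  static void trySetArenaBlockSize(final Options options,
                                   final long arenaBlockSize) {
    try {
      options.setArenaBlockSize(arenaBlockSize);
    } catch (final IllegalArgumentException e) {
      // Thrown when the value does not fit in the platform's size_t.
    }
  }
}
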
@@ -2467,9 +2474,8 @@ void Java_org_rocksdb_Options_setArenaBlockSize(JNIEnv* env,
  * Method:    disableAutoCompactions
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_disableAutoCompactions(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jboolean Java_org_rocksdb_Options_disableAutoCompactions(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->disable_auto_compactions;
 }
@@ -2479,8 +2485,7 @@ jboolean Java_org_rocksdb_Options_disableAutoCompactions(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setDisableAutoCompactions(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jdisable_auto_compactions) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jdisable_auto_compactions) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->disable_auto_compactions =
       static_cast<bool>(jdisable_auto_compactions);
 }
@@ -2490,9 +2495,8 @@ void Java_org_rocksdb_Options_setDisableAutoCompactions(
  * Method:    maxSequentialSkipInIterations
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(JNIEnv* /*env*/,
-                                                             jobject /*jobj*/,
-                                                             jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->max_sequential_skip_in_iterations;
 }
@@ -2503,7 +2507,7 @@ jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setMaxSequentialSkipInIterations(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jmax_sequential_skip_in_iterations) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->max_sequential_skip_in_iterations =
@@ -2515,9 +2519,8 @@ void Java_org_rocksdb_Options_setMaxSequentialSkipInIterations(
  * Method:    inplaceUpdateSupport
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(JNIEnv* /*env*/,
-                                                       jobject /*jobj*/,
-                                                       jlong jhandle) {
+jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_support;
 }
@@ -2527,8 +2530,7 @@ jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setInplaceUpdateSupport(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jinplace_update_support) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jinplace_update_support) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_support =
       static_cast<bool>(jinplace_update_support);
 }
@@ -2538,9 +2540,8 @@ void Java_org_rocksdb_Options_setInplaceUpdateSupport(
  * Method:    inplaceUpdateNumLocks
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(JNIEnv* /*env*/,
-                                                     jobject /*jobj*/,
-                                                     jlong jhandle) {
+jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_num_locks;
 }
@@ -2550,10 +2551,9 @@ jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setInplaceUpdateNumLocks(
-    JNIEnv* env, jobject /*jobj*/, jlong jhandle,
-    jlong jinplace_update_num_locks) {
-  rocksdb::Status s =
-      rocksdb::check_if_jlong_fits_size_t(jinplace_update_num_locks);
+    JNIEnv* env, jobject, jlong jhandle, jlong jinplace_update_num_locks) {
+  auto s =
+      rocksdb::JniUtil::check_if_jlong_fits_size_t(jinplace_update_num_locks);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_num_locks =
         jinplace_update_num_locks;
@@ -2567,9 +2567,8 @@ void Java_org_rocksdb_Options_setInplaceUpdateNumLocks(
  * Method:    memtablePrefixBloomSizeRatio
  * Signature: (J)I
  */
-jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(JNIEnv* /*env*/,
-                                                              jobject /*jobj*/,
-                                                              jlong jhandle) {
+jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->memtable_prefix_bloom_size_ratio;
 }
@@ -2580,7 +2579,7 @@ jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(JNIEnv* /*env*/,
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setMemtablePrefixBloomSizeRatio(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jdouble jmemtable_prefix_bloom_size_ratio) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->memtable_prefix_bloom_size_ratio =
@@ -2592,8 +2591,8 @@ void Java_org_rocksdb_Options_setMemtablePrefixBloomSizeRatio(
  * Method:    bloomLocality
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_bloomLocality(JNIEnv* /*env*/, jobject /*jobj*/,
-                                            jlong jhandle) {
+jint Java_org_rocksdb_Options_bloomLocality(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->bloom_locality;
 }
@@ -2602,9 +2601,8 @@ jint Java_org_rocksdb_Options_bloomLocality(JNIEnv* /*env*/, jobject /*jobj*/,
  * Method:    setBloomLocality
  * Signature: (JI)V
  */
-void Java_org_rocksdb_Options_setBloomLocality(JNIEnv* /*env*/,
-                                               jobject /*jobj*/, jlong jhandle,
-                                               jint jbloom_locality) {
+void Java_org_rocksdb_Options_setBloomLocality(
+    JNIEnv*, jobject, jlong jhandle, jint jbloom_locality) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->bloom_locality =
       static_cast<int32_t>(jbloom_locality);
 }
@@ -2614,9 +2612,8 @@ void Java_org_rocksdb_Options_setBloomLocality(JNIEnv* /*env*/,
  * Method:    maxSuccessiveMerges
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_maxSuccessiveMerges(JNIEnv* /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxSuccessiveMerges(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges;
 }
@@ -2626,10 +2623,9 @@ jlong Java_org_rocksdb_Options_maxSuccessiveMerges(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
-    JNIEnv* env, jobject /*jobj*/, jlong jhandle,
-    jlong jmax_successive_merges) {
-  rocksdb::Status s =
-      rocksdb::check_if_jlong_fits_size_t(jmax_successive_merges);
+    JNIEnv* env, jobject, jlong jhandle, jlong jmax_successive_merges) {
+  auto s =
+      rocksdb::JniUtil::check_if_jlong_fits_size_t(jmax_successive_merges);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges =
         jmax_successive_merges;
@@ -2643,9 +2639,8 @@ void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
  * Method:    optimizeFiltersForHits
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(JNIEnv* /*env*/,
-                                                         jobject /*jobj*/,
-                                                         jlong jhandle) {
+jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->optimize_filters_for_hits;
 }
@@ -2656,8 +2651,7 @@ jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(JNIEnv* /*env*/,
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setOptimizeFiltersForHits(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean joptimize_filters_for_hits) {
+    JNIEnv*, jobject, jlong jhandle, jboolean joptimize_filters_for_hits) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->optimize_filters_for_hits =
       static_cast<bool>(joptimize_filters_for_hits);
 }
@@ -2667,9 +2661,8 @@ void Java_org_rocksdb_Options_setOptimizeFiltersForHits(
  * Method:    optimizeForSmallDb
  * Signature: (J)V
  */
-void Java_org_rocksdb_Options_optimizeForSmallDb(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+void Java_org_rocksdb_Options_optimizeForSmallDb(
+    JNIEnv*, jobject, jlong jhandle) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->OptimizeForSmallDb();
 }
@@ -2679,8 +2672,7 @@ void Java_org_rocksdb_Options_optimizeForSmallDb(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_optimizeForPointLookup(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong block_cache_size_mb) {
+    JNIEnv*, jobject, jlong jhandle, jlong block_cache_size_mb) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->OptimizeForPointLookup(
       block_cache_size_mb);
 }
@@ -2691,8 +2683,7 @@ void Java_org_rocksdb_Options_optimizeForPointLookup(
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_optimizeLevelStyleCompaction(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong memtable_memory_budget) {
+    JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->OptimizeLevelStyleCompaction(
       memtable_memory_budget);
 }
@@ -2703,8 +2694,7 @@ void Java_org_rocksdb_Options_optimizeLevelStyleCompaction(
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_optimizeUniversalStyleCompaction(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong memtable_memory_budget) {
+    JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->OptimizeUniversalStyleCompaction(memtable_memory_budget);
 }
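The Optimize* convenience presets above are also exposed on the Java Options class. Sketch (the point-lookup block cache size is in megabytes; all values illustrative):

import org.rocksdb.Options;

public class OptimizePresetsExample {
  public static void main(String[] args) {
    try (final Options options = new Options()) {
      options.optimizeLevelStyleCompaction();     // default memtable budget
      // Alternatives, depending on workload:
      // options.optimizeForPointLookup(64);      // 64 MB block cache
      // options.optimizeUniversalStyleCompaction();
      // options.optimizeForSmallDb();
    }
  }
}
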
@@ -2714,9 +2704,8 @@ void Java_org_rocksdb_Options_optimizeUniversalStyleCompaction(
  * Method:    prepareForBulkLoad
  * Signature: (J)V
  */
-void Java_org_rocksdb_Options_prepareForBulkLoad(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle) {
+void Java_org_rocksdb_Options_prepareForBulkLoad(
+    JNIEnv*, jobject, jlong jhandle) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->PrepareForBulkLoad();
 }
@@ -2725,9 +2714,8 @@ void Java_org_rocksdb_Options_prepareForBulkLoad(JNIEnv* /*env*/,
  * Method:    memtableHugePageSize
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_memtableHugePageSize(JNIEnv* /*env*/,
-                                                    jobject /*jobj*/,
-                                                    jlong jhandle) {
+jlong Java_org_rocksdb_Options_memtableHugePageSize(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->memtable_huge_page_size;
 }
@@ -2737,10 +2725,9 @@ jlong Java_org_rocksdb_Options_memtableHugePageSize(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setMemtableHugePageSize(
-    JNIEnv* env, jobject /*jobj*/, jlong jhandle,
-    jlong jmemtable_huge_page_size) {
-  rocksdb::Status s =
-      rocksdb::check_if_jlong_fits_size_t(jmemtable_huge_page_size);
+    JNIEnv* env, jobject, jlong jhandle, jlong jmemtable_huge_page_size) {
+  auto s =
+      rocksdb::JniUtil::check_if_jlong_fits_size_t(jmemtable_huge_page_size);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->memtable_huge_page_size =
         jmemtable_huge_page_size;
@@ -2754,9 +2741,8 @@ void Java_org_rocksdb_Options_setMemtableHugePageSize(
  * Method:    softPendingCompactionBytesLimit
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(JNIEnv* /*env*/,
-                                                               jobject /*jobj*/,
-                                                               jlong jhandle) {
+jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->soft_pending_compaction_bytes_limit;
 }
@@ -2767,7 +2753,7 @@ jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setSoftPendingCompactionBytesLimit(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jsoft_pending_compaction_bytes_limit) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->soft_pending_compaction_bytes_limit =
@@ -2779,9 +2765,8 @@ void Java_org_rocksdb_Options_setSoftPendingCompactionBytesLimit(
  * Method:    softHardCompactionBytesLimit
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(JNIEnv* /*env*/,
-                                                               jobject /*jobj*/,
-                                                               jlong jhandle) {
+jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->hard_pending_compaction_bytes_limit;
 }
@@ -2792,7 +2777,7 @@ jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(JNIEnv* /*env*/,
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setHardPendingCompactionBytesLimit(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jhard_pending_compaction_bytes_limit) {
   reinterpret_cast<rocksdb::Options*>(jhandle)
       ->hard_pending_compaction_bytes_limit =
@@ -2804,9 +2789,8 @@ void Java_org_rocksdb_Options_setHardPendingCompactionBytesLimit(
  * Method:    level0FileNumCompactionTrigger
  * Signature: (J)I
  */
-jint Java_org_rocksdb_Options_level0FileNumCompactionTrigger(JNIEnv* /*env*/,
-                                                             jobject /*jobj*/,
-                                                             jlong jhandle) {
+jint Java_org_rocksdb_Options_level0FileNumCompactionTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->level0_file_num_compaction_trigger;
 }
Java_org_rocksdb_Options_setLevel0FileNumCompactionTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jint jlevel0_file_num_compaction_trigger) { reinterpret_cast(jhandle) ->level0_file_num_compaction_trigger = @@ -2829,9 +2813,8 @@ void Java_org_rocksdb_Options_setLevel0FileNumCompactionTrigger( * Method: level0SlowdownWritesTrigger * Signature: (J)I */ -jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level0_slowdown_writes_trigger; } @@ -2842,8 +2825,7 @@ jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger(JNIEnv* /*env*/, * Signature: (JI)V */ void Java_org_rocksdb_Options_setLevel0SlowdownWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jlevel0_slowdown_writes_trigger) { + JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) { reinterpret_cast(jhandle)->level0_slowdown_writes_trigger = static_cast(jlevel0_slowdown_writes_trigger); } @@ -2853,9 +2835,8 @@ void Java_org_rocksdb_Options_setLevel0SlowdownWritesTrigger( * Method: level0StopWritesTrigger * Signature: (J)I */ -jint Java_org_rocksdb_Options_level0StopWritesTrigger(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_Options_level0StopWritesTrigger( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level0_stop_writes_trigger; } @@ -2866,8 +2847,7 @@ jint Java_org_rocksdb_Options_level0StopWritesTrigger(JNIEnv* /*env*/, * Signature: (JI)V */ void Java_org_rocksdb_Options_setLevel0StopWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jlevel0_stop_writes_trigger) { + JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) { reinterpret_cast(jhandle)->level0_stop_writes_trigger = static_cast(jlevel0_stop_writes_trigger); } @@ -2878,7 +2858,7 @@ void Java_org_rocksdb_Options_setLevel0StopWritesTrigger( * Signature: (J)[I */ jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional( - JNIEnv* env, jobject /*jobj*/, jlong jhandle) { + JNIEnv* env, jobject, jlong jhandle) { auto mbflma = reinterpret_cast(jhandle) ->max_bytes_for_level_multiplier_additional; @@ -2916,7 +2896,7 @@ jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional( * Signature: (J[I)V */ void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, + JNIEnv* env, jobject, jlong jhandle, jintArray jmax_bytes_for_level_multiplier_additional) { jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional); jint* additionals = env->GetIntArrayElements( @@ -2942,9 +2922,8 @@ void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional( * Method: paranoidFileChecks * Signature: (J)Z */ -jboolean Java_org_rocksdb_Options_paranoidFileChecks(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_Options_paranoidFileChecks( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->paranoid_file_checks; } @@ -2954,8 +2933,7 @@ jboolean Java_org_rocksdb_Options_paranoidFileChecks(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_Options_setParanoidFileChecks( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jparanoid_file_checks) { + JNIEnv*, jobject, jlong jhandle, jboolean jparanoid_file_checks) { 
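setMaxBytesForLevelMultiplierAdditional above uses the standard JNI pin/copy/release sequence for primitive arrays: pin the Java array, copy the elements into C++ types, then release the pin, typically with JNI_ABORT when the values are only read. The pattern in isolation, with an illustrative helper name:

    #include <jni.h>

    #include <vector>

    static std::vector<int> ToIntVector(JNIEnv* env, jintArray jarr) {
      const jsize len = env->GetArrayLength(jarr);
      std::vector<int> result;
      jint* elems = env->GetIntArrayElements(jarr, nullptr);
      if (elems == nullptr) {
        return result;  // exception pending: OutOfMemoryError
      }
      result.assign(elems, elems + len);
      // Read-only pin: discard changes instead of copying them back.
      env->ReleaseIntArrayElements(jarr, elems, JNI_ABORT);
      return result;
    }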
  reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_file_checks =
      static_cast<bool>(jparanoid_file_checks);
 }
 
@@ -2966,8 +2944,7 @@ void Java_org_rocksdb_Options_setParanoidFileChecks(
  * Signature: (JB)V
  */
 void Java_org_rocksdb_Options_setCompactionPriority(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jbyte jcompaction_priority_value) {
+    JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_priority_value) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
   opts->compaction_pri =
       rocksdb::CompactionPriorityJni::toCppCompactionPriority(
@@ -2979,9 +2956,8 @@ void Java_org_rocksdb_Options_setCompactionPriority(
  * Method:    compactionPriority
  * Signature: (J)B
  */
-jbyte Java_org_rocksdb_Options_compactionPriority(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jbyte Java_org_rocksdb_Options_compactionPriority(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
   return rocksdb::CompactionPriorityJni::toJavaCompactionPriority(
       opts->compaction_pri);
@@ -2992,10 +2968,8 @@ jbyte Java_org_rocksdb_Options_compactionPriority(JNIEnv* /*env*/,
  * Method:    setReportBgIoStats
  * Signature: (JZ)V
  */
-void Java_org_rocksdb_Options_setReportBgIoStats(JNIEnv* /*env*/,
-                                                 jobject /*jobj*/,
-                                                 jlong jhandle,
-                                                 jboolean jreport_bg_io_stats) {
+void Java_org_rocksdb_Options_setReportBgIoStats(
+    JNIEnv*, jobject, jlong jhandle, jboolean jreport_bg_io_stats) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
   opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
 }
@@ -3005,20 +2979,41 @@ void Java_org_rocksdb_Options_setReportBgIoStats(JNIEnv* /*env*/,
  * Method:    reportBgIoStats
  * Signature: (J)Z
  */
-jboolean Java_org_rocksdb_Options_reportBgIoStats(JNIEnv* /*env*/,
-                                                  jobject /*jobj*/,
-                                                  jlong jhandle) {
+jboolean Java_org_rocksdb_Options_reportBgIoStats(
+    JNIEnv*, jobject, jlong jhandle) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
   return static_cast<jboolean>(opts->report_bg_io_stats);
 }
 
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setTtl
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setTtl(
+    JNIEnv*, jobject, jlong jhandle, jlong jttl) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opts->ttl = static_cast<uint64_t>(jttl);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    ttl
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_ttl(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opts->ttl);
+}
+
 /*
  * Class:     org_rocksdb_Options
  * Method:    setCompactionOptionsUniversal
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setCompactionOptionsUniversal(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jcompaction_options_universal_handle) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
   auto* opts_uni = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(
@@ -3032,8 +3027,7 @@ void Java_org_rocksdb_Options_setCompactionOptionsUniversal(
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_Options_setCompactionOptionsFIFO(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jcompaction_options_fifo_handle) {
+    JNIEnv*, jobject, jlong jhandle, jlong jcompaction_options_fifo_handle) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
   auto* opts_fifo = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(
       jcompaction_options_fifo_handle);
@@ -3046,8 +3040,7 @@ void Java_org_rocksdb_Options_setCompactionOptionsFIFO(
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_Options_setForceConsistencyChecks(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jforce_consistency_checks) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jforce_consistency_checks) {
   auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
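The setTtl/ttl hunks above wire the new Java Options.setTtl/ttl pair through to rocksdb::Options::ttl, roughly an age bound in seconds after which data becomes eligible for compaction; 0 disables it. Note that the setter casts the incoming jlong straight to uint64_t, so unlike the size_t-checked setters it never rejects a negative value: a negative Java long wraps to an enormous TTL. On the C++ side it is plain data:

    #include "rocksdb/options.h"

    int main() {
      rocksdb::Options opts;
      // Mirror of what the Java binding does: ttl is a uint64_t of seconds.
      opts.ttl = static_cast<uint64_t>(60 * 60 * 24);  // one day
      return opts.ttl == 86400 ? 0 : 1;
    }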
opts->force_consistency_checks = static_cast(jforce_consistency_checks); } @@ -3057,9 +3050,8 @@ void Java_org_rocksdb_Options_setForceConsistencyChecks( * Method: forceConsistencyChecks * Signature: (J)Z */ -jboolean Java_org_rocksdb_Options_forceConsistencyChecks(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_Options_forceConsistencyChecks( + JNIEnv*, jobject, jlong jhandle) { auto* opts = reinterpret_cast(jhandle); return static_cast(opts->force_consistency_checks); } @@ -3073,7 +3065,7 @@ jboolean Java_org_rocksdb_Options_forceConsistencyChecks(JNIEnv* /*env*/, * Signature: ()J */ jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions( - JNIEnv* /*env*/, jclass /*jcls*/) { + JNIEnv*, jclass) { auto* op = new rocksdb::ColumnFamilyOptions(); return reinterpret_cast(op); } @@ -3084,19 +3076,31 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions( * Signature: (J)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_copyColumnFamilyOptions( - JNIEnv* /*env*/, jclass /*jcls*/, jlong jhandle) { + JNIEnv*, jclass, jlong jhandle) { auto new_opt = new rocksdb::ColumnFamilyOptions( *(reinterpret_cast(jhandle))); return reinterpret_cast(new_opt); } +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: newColumnFamilyOptionsFromOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptionsFromOptions( + JNIEnv*, jclass, jlong joptions_handle) { + auto new_opt = new rocksdb::ColumnFamilyOptions( + *reinterpret_cast(joptions_handle)); + return reinterpret_cast(new_opt); +} + /* * Class: org_rocksdb_ColumnFamilyOptions * Method: getColumnFamilyOptionsFromProps * Signature: (Ljava/util/String;)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps( - JNIEnv* env, jclass /*jclazz*/, jstring jopt_string) { + JNIEnv* env, jclass, jstring jopt_string) { const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr); if (opt_string == nullptr) { // exception thrown: OutOfMemoryError @@ -3126,9 +3130,8 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps( * Method: disposeInternal * Signature: (J)V */ -void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong handle) { +void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal( + JNIEnv*, jobject, jlong handle) { auto* cfo = reinterpret_cast(handle); assert(cfo != nullptr); delete cfo; @@ -3139,9 +3142,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(JNIEnv* /*env*/, * Method: optimizeForSmallDb * Signature: (J)V */ -void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb( + JNIEnv*, jobject, jlong jhandle) { reinterpret_cast(jhandle) ->OptimizeForSmallDb(); } @@ -3152,8 +3154,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_optimizeForPointLookup( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong block_cache_size_mb) { + JNIEnv*, jobject, jlong jhandle, jlong block_cache_size_mb) { reinterpret_cast(jhandle) ->OptimizeForPointLookup(block_cache_size_mb); } @@ -3164,8 +3165,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_optimizeForPointLookup( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_optimizeLevelStyleCompaction( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong memtable_memory_budget) { 
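The newColumnFamilyOptionsFromOptions hunk above relies on rocksdb::Options deriving from both rocksdb::DBOptions and rocksdb::ColumnFamilyOptions, so constructing a ColumnFamilyOptions from an Options handle simply copies the column-family half of the monolithic struct (the DBOptions counterpart appears later in this diff). A sketch of the underlying copy:

    #include "rocksdb/options.h"

    int main() {
      rocksdb::Options options;
      options.write_buffer_size = 8 << 20;  // a column-family-level setting

      // The same copy the JNI binding performs behind the new Java factory:
      rocksdb::ColumnFamilyOptions cf_opts(options);
      return cf_opts.write_buffer_size == options.write_buffer_size ? 0 : 1;
    }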
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) { reinterpret_cast(jhandle) ->OptimizeLevelStyleCompaction(memtable_memory_budget); } @@ -3176,8 +3176,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_optimizeLevelStyleCompaction( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_optimizeUniversalStyleCompaction( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong memtable_memory_budget) { + JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) { reinterpret_cast(jhandle) ->OptimizeUniversalStyleCompaction(memtable_memory_budget); } @@ -3188,7 +3187,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_optimizeUniversalStyleCompaction( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JI( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint builtinComparator) { + JNIEnv*, jobject, jlong jhandle, jint builtinComparator) { switch (builtinComparator) { case 1: reinterpret_cast(jhandle)->comparator = @@ -3207,8 +3206,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JI( * Signature: (JJB)V */ void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJB( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jopt_handle, - jlong jcomparator_handle, jbyte jcomparator_type) { + JNIEnv*, jobject, jlong jopt_handle, jlong jcomparator_handle, + jbyte jcomparator_type) { rocksdb::Comparator* comparator = nullptr; switch (jcomparator_type) { // JAVA_COMPARATOR @@ -3238,7 +3237,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJB( * Signature: (JJjava/lang/String)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, jstring jop_name) { + JNIEnv* env, jobject, jlong jhandle, jstring jop_name) { auto* options = reinterpret_cast(jhandle); const char* op_name = env->GetStringUTFChars(jop_name, nullptr); if (op_name == nullptr) { @@ -3257,8 +3256,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName( * Signature: (JJjava/lang/String)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperator( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong mergeOperatorHandle) { + JNIEnv*, jobject, jlong jhandle, jlong mergeOperatorHandle) { reinterpret_cast(jhandle)->merge_operator = *(reinterpret_cast*>( mergeOperatorHandle)); @@ -3270,8 +3268,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperator( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterHandle( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jopt_handle, - jlong jcompactionfilter_handle) { + JNIEnv*, jobject, jlong jopt_handle, jlong jcompactionfilter_handle) { reinterpret_cast(jopt_handle) ->compaction_filter = reinterpret_cast(jcompactionfilter_handle); @@ -3282,9 +3279,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterHandle( * Method: setCompactionFilterFactoryHandle * Signature: (JJ)V */ -void JNICALL -Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterFactoryHandle( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jopt_handle, +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterFactoryHandle( + JNIEnv*, jobject, jlong jopt_handle, jlong jcompactionfilterfactory_handle) { auto* cff_factory = reinterpret_cast*>( @@ -3299,9 +3295,8 @@ Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterFactoryHandle( * Signature: (JJ)I */ void Java_org_rocksdb_ColumnFamilyOptions_setWriteBufferSize( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, - jlong jwrite_buffer_size) { - rocksdb::Status s = 
rocksdb::check_if_jlong_fits_size_t(jwrite_buffer_size); + JNIEnv* env, jobject, jlong jhandle, jlong jwrite_buffer_size) { + auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jwrite_buffer_size); if (s.ok()) { reinterpret_cast(jhandle) ->write_buffer_size = jwrite_buffer_size; @@ -3315,9 +3310,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setWriteBufferSize( * Method: writeBufferSize * Signature: (J)J */ -jlong Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->write_buffer_size; } @@ -3328,8 +3322,7 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize(JNIEnv* /*env*/, * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumber( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jmax_write_buffer_number) { + JNIEnv*, jobject, jlong jhandle, jint jmax_write_buffer_number) { reinterpret_cast(jhandle) ->max_write_buffer_number = jmax_write_buffer_number; } @@ -3339,9 +3332,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumber( * Method: maxWriteBufferNumber * Signature: (J)I */ -jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->max_write_buffer_number; } @@ -3351,7 +3343,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong jfactory_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) { reinterpret_cast(jhandle) ->memtable_factory.reset( reinterpret_cast(jfactory_handle)); @@ -3363,7 +3355,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory( * Signature: (J)Ljava/lang/String */ jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName( - JNIEnv* env, jobject /*jobj*/, jlong jhandle) { + JNIEnv* env, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get(); @@ -3384,7 +3376,7 @@ jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_useFixedLengthPrefixExtractor( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jprefix_length) { + JNIEnv*, jobject, jlong jhandle, jint jprefix_length) { reinterpret_cast(jhandle) ->prefix_extractor.reset( rocksdb::NewFixedPrefixTransform(static_cast(jprefix_length))); @@ -3395,7 +3387,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_useFixedLengthPrefixExtractor( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_useCappedPrefixExtractor( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jprefix_length) { + JNIEnv*, jobject, jlong jhandle, jint jprefix_length) { reinterpret_cast(jhandle) ->prefix_extractor.reset( rocksdb::NewCappedPrefixTransform(static_cast(jprefix_length))); @@ -3406,7 +3398,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_useCappedPrefixExtractor( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong jfactory_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) { reinterpret_cast(jhandle)->table_factory.reset( 
reinterpret_cast(jfactory_handle)); } @@ -3415,9 +3407,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory( * Method: tableFactoryName * Signature: (J)Ljava/lang/String */ -jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(JNIEnv* env, - jobject /*jobj*/, - jlong jhandle) { +jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName( + JNIEnv* env, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); rocksdb::TableFactory* tf = opt->table_factory.get(); @@ -3434,7 +3425,7 @@ jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(JNIEnv* env, * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_minWriteBufferNumberToMerge( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->min_write_buffer_number_to_merge; } @@ -3445,8 +3436,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_minWriteBufferNumberToMerge( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMinWriteBufferNumberToMerge( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jmin_write_buffer_number_to_merge) { + JNIEnv*, jobject, jlong jhandle, jint jmin_write_buffer_number_to_merge) { reinterpret_cast(jhandle) ->min_write_buffer_number_to_merge = static_cast(jmin_write_buffer_number_to_merge); @@ -3458,7 +3448,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMinWriteBufferNumberToMerge( * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumberToMaintain( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->max_write_buffer_number_to_maintain; } @@ -3469,7 +3459,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumberToMaintain( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumberToMaintain( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jint jmax_write_buffer_number_to_maintain) { reinterpret_cast(jhandle) ->max_write_buffer_number_to_maintain = @@ -3482,8 +3472,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumberToMaintain( * Signature: (JB)V */ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jbyte jcompression_type_value) { + JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) { auto* cf_opts = reinterpret_cast(jhandle); cf_opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType( jcompression_type_value); @@ -3494,9 +3483,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType( * Method: compressionType * Signature: (J)B */ -jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType( + JNIEnv*, jobject, jlong jhandle) { auto* cf_opts = reinterpret_cast(jhandle); return rocksdb::CompressionTypeJni::toJavaCompressionType( cf_opts->compression); @@ -3508,8 +3496,7 @@ jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(JNIEnv* /*env*/, * Signature: (J[B)V */ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, - jbyteArray jcompressionLevels) { + JNIEnv* env, jobject, jlong jhandle, jbyteArray jcompressionLevels) { auto* options = reinterpret_cast(jhandle); auto uptr_compression_levels = rocksdb_compression_vector_helper(env, jcompressionLevels); @@ -3526,7 +3513,7 @@ void 
Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel( * Signature: (J)[B */ jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel( - JNIEnv* env, jobject /*jobj*/, jlong jhandle) { + JNIEnv* env, jobject, jlong jhandle) { auto* cf_options = reinterpret_cast(jhandle); return rocksdb_compression_list_helper(env, cf_options->compression_per_level); @@ -3538,8 +3525,7 @@ jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel( * Signature: (JB)V */ void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionType( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jbyte jcompression_type_value) { + JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) { auto* cf_options = reinterpret_cast(jhandle); cf_options->bottommost_compression = rocksdb::CompressionTypeJni::toCppCompressionType( @@ -3552,7 +3538,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionType( * Signature: (J)B */ jbyte Java_org_rocksdb_ColumnFamilyOptions_bottommostCompressionType( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { auto* cf_options = reinterpret_cast(jhandle); return rocksdb::CompressionTypeJni::toJavaCompressionType( cf_options->bottommost_compression); @@ -3563,7 +3549,7 @@ jbyte Java_org_rocksdb_ColumnFamilyOptions_bottommostCompressionType( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionOptions( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jlong jbottommost_compression_options_handle) { auto* cf_options = reinterpret_cast(jhandle); auto* bottommost_compression_options = @@ -3578,8 +3564,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionOptions( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionOptions( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jcompression_options_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jcompression_options_handle) { auto* cf_options = reinterpret_cast(jhandle); auto* compression_options = reinterpret_cast( jcompression_options_handle); @@ -3592,9 +3577,10 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionOptions( * Signature: (JB)V */ void Java_org_rocksdb_ColumnFamilyOptions_setCompactionStyle( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jbyte compaction_style) { - reinterpret_cast(jhandle)->compaction_style = - static_cast(compaction_style); + JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_style) { + auto* cf_options = reinterpret_cast(jhandle); + cf_options->compaction_style = + rocksdb::CompactionStyleJni::toCppCompactionStyle(jcompaction_style); } /* @@ -3602,11 +3588,11 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompactionStyle( * Method: compactionStyle * Signature: (J)B */ -jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { - return reinterpret_cast(jhandle) - ->compaction_style; +jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle( + JNIEnv*, jobject, jlong jhandle) { + auto* cf_options = reinterpret_cast(jhandle); + return rocksdb::CompactionStyleJni::toJavaCompactionStyle( + cf_options->compaction_style); } /* @@ -3615,8 +3601,7 @@ jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxTableFilesSizeFIFO( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jmax_table_files_size) { + JNIEnv*, jobject, jlong jhandle, jlong 
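The setCompactionStyle/compactionStyle hunks above stop static_casting the raw Java byte into the C++ enum and instead round-trip through rocksdb::CompactionStyleJni in rocksjni/portal.h, matching the existing CompactionPriorityJni and CompressionTypeJni conversions; that isolates the Java-side byte constants from the numeric values of the C++ enum. The conversion has roughly this shape (an illustrative sketch; the byte values are assumed to follow the Java CompactionStyle enum, and the real table lives in portal.h):

    #include <jni.h>

    #include "rocksdb/advanced_options.h"

    // Illustrative byte -> enum mapping in the style of portal.h.
    inline rocksdb::CompactionStyle ToCppCompactionStyle(jbyte jstyle) {
      switch (jstyle) {
        case 0x1:
          return rocksdb::kCompactionStyleUniversal;
        case 0x2:
          return rocksdb::kCompactionStyleFIFO;
        case 0x3:
          return rocksdb::kCompactionStyleNone;
        case 0x0:
        default:
          return rocksdb::kCompactionStyleLevel;
      }
    }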
jmax_table_files_size) { reinterpret_cast(jhandle) ->compaction_options_fifo.max_table_files_size = static_cast(jmax_table_files_size); @@ -3628,7 +3613,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxTableFilesSizeFIFO( * Signature: (J)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_maxTableFilesSizeFIFO( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->compaction_options_fifo.max_table_files_size; } @@ -3638,9 +3623,8 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_maxTableFilesSizeFIFO( * Method: numLevels * Signature: (J)I */ -jint Java_org_rocksdb_ColumnFamilyOptions_numLevels(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_ColumnFamilyOptions_numLevels( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->num_levels; } @@ -3649,10 +3633,8 @@ jint Java_org_rocksdb_ColumnFamilyOptions_numLevels(JNIEnv* /*env*/, * Method: setNumLevels * Signature: (JI)V */ -void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jint jnum_levels) { +void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels( + JNIEnv*, jobject, jlong jhandle, jint jnum_levels) { reinterpret_cast(jhandle)->num_levels = static_cast(jnum_levels); } @@ -3663,7 +3645,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels(JNIEnv* /*env*/, * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroFileNumCompactionTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level0_file_num_compaction_trigger; } @@ -3674,7 +3656,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroFileNumCompactionTrigger( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroFileNumCompactionTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jint jlevel0_file_num_compaction_trigger) { reinterpret_cast(jhandle) ->level0_file_num_compaction_trigger = @@ -3687,7 +3669,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroFileNumCompactionTrigger( * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroSlowdownWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level0_slowdown_writes_trigger; } @@ -3698,8 +3680,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroSlowdownWritesTrigger( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroSlowdownWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jlevel0_slowdown_writes_trigger) { + JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) { reinterpret_cast(jhandle) ->level0_slowdown_writes_trigger = static_cast(jlevel0_slowdown_writes_trigger); @@ -3711,7 +3692,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroSlowdownWritesTrigger( * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroStopWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level0_stop_writes_trigger; } @@ -3722,8 +3703,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroStopWritesTrigger( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jlevel0_stop_writes_trigger) { + JNIEnv*, jobject, jlong jhandle, 
jint jlevel0_stop_writes_trigger) { reinterpret_cast(jhandle) ->level0_stop_writes_trigger = static_cast(jlevel0_stop_writes_trigger); @@ -3734,9 +3714,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger( * Method: targetFileSizeBase * Signature: (J)J */ -jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->target_file_size_base; } @@ -3747,8 +3726,7 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeBase( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jtarget_file_size_base) { + JNIEnv*, jobject, jlong jhandle, jlong jtarget_file_size_base) { reinterpret_cast(jhandle) ->target_file_size_base = static_cast(jtarget_file_size_base); } @@ -3759,7 +3737,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeBase( * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeMultiplier( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->target_file_size_multiplier; } @@ -3770,8 +3748,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeMultiplier( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeMultiplier( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jtarget_file_size_multiplier) { + JNIEnv*, jobject, jlong jhandle, jint jtarget_file_size_multiplier) { reinterpret_cast(jhandle) ->target_file_size_multiplier = static_cast(jtarget_file_size_multiplier); @@ -3783,7 +3760,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeMultiplier( * Signature: (J)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelBase( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->max_bytes_for_level_base; } @@ -3794,8 +3771,7 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelBase( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelBase( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jmax_bytes_for_level_base) { + JNIEnv*, jobject, jlong jhandle, jlong jmax_bytes_for_level_base) { reinterpret_cast(jhandle) ->max_bytes_for_level_base = static_cast(jmax_bytes_for_level_base); @@ -3807,7 +3783,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelBase( * Signature: (J)Z */ jboolean Java_org_rocksdb_ColumnFamilyOptions_levelCompactionDynamicLevelBytes( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level_compaction_dynamic_level_bytes; } @@ -3818,8 +3794,7 @@ jboolean Java_org_rocksdb_ColumnFamilyOptions_levelCompactionDynamicLevelBytes( * Signature: (JZ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setLevelCompactionDynamicLevelBytes( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jenable_dynamic_level_bytes) { + JNIEnv*, jobject, jlong jhandle, jboolean jenable_dynamic_level_bytes) { reinterpret_cast(jhandle) ->level_compaction_dynamic_level_bytes = (jenable_dynamic_level_bytes); } @@ -3830,7 +3805,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevelCompactionDynamicLevelBytes( * Signature: (J)D */ jdouble 
Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplier( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->max_bytes_for_level_multiplier; } @@ -3841,8 +3816,7 @@ jdouble Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplier( * Signature: (JD)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplier( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jdouble jmax_bytes_for_level_multiplier) { + JNIEnv*, jobject, jlong jhandle, jdouble jmax_bytes_for_level_multiplier) { reinterpret_cast(jhandle) ->max_bytes_for_level_multiplier = static_cast(jmax_bytes_for_level_multiplier); @@ -3853,9 +3827,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplier( * Method: maxCompactionBytes * Signature: (J)I */ -jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes( + JNIEnv*, jobject, jlong jhandle) { return static_cast( reinterpret_cast(jhandle) ->max_compaction_bytes); @@ -3867,8 +3840,7 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes(JNIEnv* /*env*/, * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxCompactionBytes( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jmax_compaction_bytes) { + JNIEnv*, jobject, jlong jhandle, jlong jmax_compaction_bytes) { reinterpret_cast(jhandle) ->max_compaction_bytes = static_cast(jmax_compaction_bytes); } @@ -3878,9 +3850,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxCompactionBytes( * Method: arenaBlockSize * Signature: (J)J */ -jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->arena_block_size; } @@ -3891,8 +3862,8 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setArenaBlockSize( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jarena_block_size) { - rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jarena_block_size); + JNIEnv* env, jobject, jlong jhandle, jlong jarena_block_size) { + auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jarena_block_size); if (s.ok()) { reinterpret_cast(jhandle)->arena_block_size = jarena_block_size; @@ -3907,7 +3878,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setArenaBlockSize( * Signature: (J)Z */ jboolean Java_org_rocksdb_ColumnFamilyOptions_disableAutoCompactions( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->disable_auto_compactions; } @@ -3918,8 +3889,7 @@ jboolean Java_org_rocksdb_ColumnFamilyOptions_disableAutoCompactions( * Signature: (JZ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setDisableAutoCompactions( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jdisable_auto_compactions) { + JNIEnv*, jobject, jlong jhandle, jboolean jdisable_auto_compactions) { reinterpret_cast(jhandle) ->disable_auto_compactions = static_cast(jdisable_auto_compactions); } @@ -3930,7 +3900,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setDisableAutoCompactions( * Signature: (J)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_maxSequentialSkipInIterations( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong 
jhandle) { return reinterpret_cast(jhandle) ->max_sequential_skip_in_iterations; } @@ -3941,7 +3911,7 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_maxSequentialSkipInIterations( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxSequentialSkipInIterations( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jlong jmax_sequential_skip_in_iterations) { reinterpret_cast(jhandle) ->max_sequential_skip_in_iterations = @@ -3954,7 +3924,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxSequentialSkipInIterations( * Signature: (J)Z */ jboolean Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateSupport( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->inplace_update_support; } @@ -3965,8 +3935,7 @@ jboolean Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateSupport( * Signature: (JZ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateSupport( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jinplace_update_support) { + JNIEnv*, jobject, jlong jhandle, jboolean jinplace_update_support) { reinterpret_cast(jhandle) ->inplace_update_support = static_cast(jinplace_update_support); } @@ -3977,7 +3946,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateSupport( * Signature: (J)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateNumLocks( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->inplace_update_num_locks; } @@ -3988,10 +3957,9 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateNumLocks( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateNumLocks( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, - jlong jinplace_update_num_locks) { - rocksdb::Status s = - rocksdb::check_if_jlong_fits_size_t(jinplace_update_num_locks); + JNIEnv* env, jobject, jlong jhandle, jlong jinplace_update_num_locks) { + auto s = + rocksdb::JniUtil::check_if_jlong_fits_size_t(jinplace_update_num_locks); if (s.ok()) { reinterpret_cast(jhandle) ->inplace_update_num_locks = jinplace_update_num_locks; @@ -4006,7 +3974,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateNumLocks( * Signature: (J)I */ jdouble Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomSizeRatio( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->memtable_prefix_bloom_size_ratio; } @@ -4017,7 +3985,7 @@ jdouble Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomSizeRatio( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomSizeRatio( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jdouble jmemtable_prefix_bloom_size_ratio) { reinterpret_cast(jhandle) ->memtable_prefix_bloom_size_ratio = @@ -4029,9 +3997,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomSizeRatio( * Method: bloomLocality * Signature: (J)I */ -jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->bloom_locality; } @@ -4042,7 +4009,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality(JNIEnv* /*env*/, * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setBloomLocality( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, 
jint jbloom_locality) { + JNIEnv*, jobject, jlong jhandle, jint jbloom_locality) { reinterpret_cast(jhandle)->bloom_locality = static_cast(jbloom_locality); } @@ -4052,9 +4019,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setBloomLocality( * Method: maxSuccessiveMerges * Signature: (J)J */ -jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->max_successive_merges; } @@ -4065,10 +4031,9 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxSuccessiveMerges( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, - jlong jmax_successive_merges) { - rocksdb::Status s = - rocksdb::check_if_jlong_fits_size_t(jmax_successive_merges); + JNIEnv* env, jobject, jlong jhandle, jlong jmax_successive_merges) { + auto s = + rocksdb::JniUtil::check_if_jlong_fits_size_t(jmax_successive_merges); if (s.ok()) { reinterpret_cast(jhandle) ->max_successive_merges = jmax_successive_merges; @@ -4083,7 +4048,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxSuccessiveMerges( * Signature: (J)Z */ jboolean Java_org_rocksdb_ColumnFamilyOptions_optimizeFiltersForHits( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->optimize_filters_for_hits; } @@ -4094,8 +4059,7 @@ jboolean Java_org_rocksdb_ColumnFamilyOptions_optimizeFiltersForHits( * Signature: (JZ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean joptimize_filters_for_hits) { + JNIEnv*, jobject, jlong jhandle, jboolean joptimize_filters_for_hits) { reinterpret_cast(jhandle) ->optimize_filters_for_hits = static_cast(joptimize_filters_for_hits); @@ -4107,7 +4071,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits( * Signature: (J)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_memtableHugePageSize( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->memtable_huge_page_size; } @@ -4118,10 +4082,9 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_memtableHugePageSize( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMemtableHugePageSize( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, - jlong jmemtable_huge_page_size) { - rocksdb::Status s = - rocksdb::check_if_jlong_fits_size_t(jmemtable_huge_page_size); + JNIEnv* env, jobject, jlong jhandle, jlong jmemtable_huge_page_size) { + auto s = + rocksdb::JniUtil::check_if_jlong_fits_size_t(jmemtable_huge_page_size); if (s.ok()) { reinterpret_cast(jhandle) ->memtable_huge_page_size = jmemtable_huge_page_size; @@ -4136,7 +4099,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMemtableHugePageSize( * Signature: (J)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_softPendingCompactionBytesLimit( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->soft_pending_compaction_bytes_limit; } @@ -4147,7 +4110,7 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_softPendingCompactionBytesLimit( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setSoftPendingCompactionBytesLimit( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jlong 
jsoft_pending_compaction_bytes_limit) { reinterpret_cast(jhandle) ->soft_pending_compaction_bytes_limit = @@ -4160,7 +4123,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setSoftPendingCompactionBytesLimit( * Signature: (J)J */ jlong Java_org_rocksdb_ColumnFamilyOptions_hardPendingCompactionBytesLimit( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->hard_pending_compaction_bytes_limit; } @@ -4171,7 +4134,7 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_hardPendingCompactionBytesLimit( * Signature: (JJ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setHardPendingCompactionBytesLimit( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jlong jhard_pending_compaction_bytes_limit) { reinterpret_cast(jhandle) ->hard_pending_compaction_bytes_limit = @@ -4184,7 +4147,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setHardPendingCompactionBytesLimit( * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_level0FileNumCompactionTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level0_file_num_compaction_trigger; } @@ -4195,7 +4158,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_level0FileNumCompactionTrigger( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setLevel0FileNumCompactionTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jint jlevel0_file_num_compaction_trigger) { reinterpret_cast(jhandle) ->level0_file_num_compaction_trigger = @@ -4208,7 +4171,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevel0FileNumCompactionTrigger( * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_level0SlowdownWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level0_slowdown_writes_trigger; } @@ -4219,8 +4182,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_level0SlowdownWritesTrigger( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setLevel0SlowdownWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jlevel0_slowdown_writes_trigger) { + JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) { reinterpret_cast(jhandle) ->level0_slowdown_writes_trigger = static_cast(jlevel0_slowdown_writes_trigger); @@ -4232,7 +4194,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevel0SlowdownWritesTrigger( * Signature: (J)I */ jint Java_org_rocksdb_ColumnFamilyOptions_level0StopWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->level0_stop_writes_trigger; } @@ -4243,8 +4205,7 @@ jint Java_org_rocksdb_ColumnFamilyOptions_level0StopWritesTrigger( * Signature: (JI)V */ void Java_org_rocksdb_ColumnFamilyOptions_setLevel0StopWritesTrigger( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jlevel0_stop_writes_trigger) { + JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) { reinterpret_cast(jhandle) ->level0_stop_writes_trigger = static_cast(jlevel0_stop_writes_trigger); @@ -4255,9 +4216,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevel0StopWritesTrigger( * Method: maxBytesForLevelMultiplierAdditional * Signature: (J)[I */ -jintArray -Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional( - JNIEnv* env, jobject /*jobj*/, jlong jhandle) { +jintArray 
Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional( + JNIEnv* env, jobject, jlong jhandle) { auto mbflma = reinterpret_cast(jhandle) ->max_bytes_for_level_multiplier_additional; @@ -4294,7 +4254,7 @@ Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional( * Signature: (J[I)V */ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditional( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, + JNIEnv* env, jobject, jlong jhandle, jintArray jmax_bytes_for_level_multiplier_additional) { jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional); jint* additionals = @@ -4321,7 +4281,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditiona * Signature: (J)Z */ jboolean Java_org_rocksdb_ColumnFamilyOptions_paranoidFileChecks( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->paranoid_file_checks; } @@ -4332,8 +4292,7 @@ jboolean Java_org_rocksdb_ColumnFamilyOptions_paranoidFileChecks( * Signature: (JZ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jparanoid_file_checks) { + JNIEnv*, jobject, jlong jhandle, jboolean jparanoid_file_checks) { reinterpret_cast(jhandle) ->paranoid_file_checks = static_cast(jparanoid_file_checks); } @@ -4344,8 +4303,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks( * Signature: (JB)V */ void Java_org_rocksdb_ColumnFamilyOptions_setCompactionPriority( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jbyte jcompaction_priority_value) { + JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_priority_value) { auto* cf_opts = reinterpret_cast(jhandle); cf_opts->compaction_pri = rocksdb::CompactionPriorityJni::toCppCompactionPriority( @@ -4357,9 +4315,8 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompactionPriority( * Method: compactionPriority * Signature: (J)B */ -jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority( + JNIEnv*, jobject, jlong jhandle) { auto* cf_opts = reinterpret_cast(jhandle); return rocksdb::CompactionPriorityJni::toJavaCompactionPriority( cf_opts->compaction_pri); @@ -4371,8 +4328,7 @@ jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_ColumnFamilyOptions_setReportBgIoStats( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jreport_bg_io_stats) { + JNIEnv*, jobject, jlong jhandle, jboolean jreport_bg_io_stats) { auto* cf_opts = reinterpret_cast(jhandle); cf_opts->report_bg_io_stats = static_cast(jreport_bg_io_stats); } @@ -4382,20 +4338,41 @@ void Java_org_rocksdb_ColumnFamilyOptions_setReportBgIoStats( * Method: reportBgIoStats * Signature: (J)Z */ -jboolean Java_org_rocksdb_ColumnFamilyOptions_reportBgIoStats(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ColumnFamilyOptions_reportBgIoStats( + JNIEnv*, jobject, jlong jhandle) { auto* cf_opts = reinterpret_cast(jhandle); return static_cast(cf_opts->report_bg_io_stats); } +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setTtl + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setTtl( + JNIEnv*, jobject, jlong jhandle, jlong jttl) { + auto* cf_opts = reinterpret_cast(jhandle); + cf_opts->ttl = static_cast(jttl); +} + +/* + * Class: 
org_rocksdb_ColumnFamilyOptions
+ * Method:    ttl
+ * Signature: (J)J
+ */
+JNIEXPORT jlong JNICALL Java_org_rocksdb_ColumnFamilyOptions_ttl(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  return static_cast<jlong>(cf_opts->ttl);
+}
+
 /*
  * Class:     org_rocksdb_ColumnFamilyOptions
  * Method:    setCompactionOptionsUniversal
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsUniversal(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    JNIEnv*, jobject, jlong jhandle,
     jlong jcompaction_options_universal_handle) {
   auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
   auto* opts_uni = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(
@@ -4409,8 +4386,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsUniversal(
  * Signature: (JJ)V
  */
 void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsFIFO(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jlong jcompaction_options_fifo_handle) {
+    JNIEnv*, jobject, jlong jhandle, jlong jcompaction_options_fifo_handle) {
   auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
   auto* opts_fifo = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(
       jcompaction_options_fifo_handle);
@@ -4423,8 +4399,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsFIFO(
  * Signature: (JZ)V
  */
 void Java_org_rocksdb_ColumnFamilyOptions_setForceConsistencyChecks(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
-    jboolean jforce_consistency_checks) {
+    JNIEnv*, jobject, jlong jhandle, jboolean jforce_consistency_checks) {
   auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
   cf_opts->force_consistency_checks =
       static_cast<bool>(jforce_consistency_checks);
@@ -4436,7 +4411,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setForceConsistencyChecks(
  * Signature: (J)Z
  */
 jboolean Java_org_rocksdb_ColumnFamilyOptions_forceConsistencyChecks(
-    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+    JNIEnv*, jobject, jlong jhandle) {
   auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
   return static_cast<jboolean>(cf_opts->force_consistency_checks);
 }
@@ -4449,7 +4424,8 @@ jboolean Java_org_rocksdb_ColumnFamilyOptions_forceConsistencyChecks(
  * Method:    newDBOptions
  * Signature: ()J
  */
-jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* /*env*/, jclass /*jcls*/) {
+jlong Java_org_rocksdb_DBOptions_newDBOptions(
+    JNIEnv*, jclass) {
   auto* dbop = new rocksdb::DBOptions();
   return reinterpret_cast<jlong>(dbop);
 }
@@ -4459,21 +4435,32 @@ jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* /*env*/, jclass /*jcls*/)
  * Method:    copyDBOptions
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_DBOptions_copyDBOptions(JNIEnv* /*env*/, jclass /*jcls*/,
-                                               jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_copyDBOptions(
+    JNIEnv*, jclass, jlong jhandle) {
   auto new_opt =
       new rocksdb::DBOptions(*(reinterpret_cast<rocksdb::DBOptions*>(jhandle)));
   return reinterpret_cast<jlong>(new_opt);
 }
 
+/*
+ * Class:     org_rocksdb_DBOptions
+ * Method:    newDBOptionsFromOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_newDBOptionsFromOptions(
+    JNIEnv*, jclass, jlong joptions_handle) {
+  auto new_opt = new rocksdb::DBOptions(
+      *reinterpret_cast<rocksdb::Options*>(joptions_handle));
+  return reinterpret_cast<jlong>(new_opt);
+}
+
 /*
  * Class:     org_rocksdb_DBOptions
  * Method:    getDBOptionsFromProps
  * Signature: (Ljava/util/String;)J
  */
-jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(JNIEnv* env,
-                                                       jclass /*jclazz*/,
-                                                       jstring jopt_string) {
+jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
+    JNIEnv* env, jclass, jstring jopt_string) {
   const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
   if (opt_string == nullptr) {
     // exception thrown: OutOfMemoryError
@@ -4503,9
+4490,8 @@ jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(JNIEnv* env, * Method: disposeInternal * Signature: (J)V */ -void Java_org_rocksdb_DBOptions_disposeInternal(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong handle) { +void Java_org_rocksdb_DBOptions_disposeInternal( + JNIEnv*, jobject, jlong handle) { auto* dbo = reinterpret_cast(handle); assert(dbo != nullptr); delete dbo; @@ -4516,9 +4502,8 @@ void Java_org_rocksdb_DBOptions_disposeInternal(JNIEnv* /*env*/, * Method: optimizeForSmallDb * Signature: (J)V */ -void Java_org_rocksdb_DBOptions_optimizeForSmallDb(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +void Java_org_rocksdb_DBOptions_optimizeForSmallDb( + JNIEnv*, jobject, jlong jhandle) { reinterpret_cast(jhandle)->OptimizeForSmallDb(); } @@ -4527,8 +4512,8 @@ void Java_org_rocksdb_DBOptions_optimizeForSmallDb(JNIEnv* /*env*/, * Method: setEnv * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setEnv(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, jlong jenv_handle) { +void Java_org_rocksdb_DBOptions_setEnv( + JNIEnv*, jobject, jlong jhandle, jlong jenv_handle) { reinterpret_cast(jhandle)->env = reinterpret_cast(jenv_handle); } @@ -4538,10 +4523,8 @@ void Java_org_rocksdb_DBOptions_setEnv(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setIncreaseParallelism * Signature: (JI)V */ -void Java_org_rocksdb_DBOptions_setIncreaseParallelism(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jint totalThreads) { +void Java_org_rocksdb_DBOptions_setIncreaseParallelism( + JNIEnv*, jobject, jlong jhandle, jint totalThreads) { reinterpret_cast(jhandle)->IncreaseParallelism( static_cast(totalThreads)); } @@ -4551,10 +4534,8 @@ void Java_org_rocksdb_DBOptions_setIncreaseParallelism(JNIEnv* /*env*/, * Method: setCreateIfMissing * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setCreateIfMissing(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean flag) { +void Java_org_rocksdb_DBOptions_setCreateIfMissing( + JNIEnv*, jobject, jlong jhandle, jboolean flag) { reinterpret_cast(jhandle)->create_if_missing = flag; } @@ -4563,9 +4544,8 @@ void Java_org_rocksdb_DBOptions_setCreateIfMissing(JNIEnv* /*env*/, * Method: createIfMissing * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_createIfMissing(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_createIfMissing( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->create_if_missing; } @@ -4574,10 +4554,8 @@ jboolean Java_org_rocksdb_DBOptions_createIfMissing(JNIEnv* /*env*/, * Method: setCreateMissingColumnFamilies * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean flag) { +void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies( + JNIEnv*, jobject, jlong jhandle, jboolean flag) { reinterpret_cast(jhandle) ->create_missing_column_families = flag; } @@ -4588,7 +4566,7 @@ void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies(JNIEnv* /*env*/, * Signature: (J)Z */ jboolean Java_org_rocksdb_DBOptions_createMissingColumnFamilies( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->create_missing_column_families; } @@ -4598,10 +4576,8 @@ jboolean Java_org_rocksdb_DBOptions_createMissingColumnFamilies( * Method: setErrorIfExists * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setErrorIfExists(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - 
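The newDBOptionsFromOptions hunk above is the DBOptions twin of newColumnFamilyOptionsFromOptions: it copy-constructs the DBOptions base subobject out of a full rocksdb::Options. Together the two let the Java side split one Options handle into the (DBOptions, ColumnFamilyOptions) pair that the column-family open path expects. Sketch of the underlying copy:

    #include "rocksdb/options.h"

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;  // a DB-level setting

      // The same copy the new JNI binding performs:
      rocksdb::DBOptions db_opts(options);
      return db_opts.create_if_missing ? 0 : 1;
    }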
jboolean error_if_exists) { +void Java_org_rocksdb_DBOptions_setErrorIfExists( + JNIEnv*, jobject, jlong jhandle, jboolean error_if_exists) { reinterpret_cast(jhandle)->error_if_exists = static_cast(error_if_exists); } @@ -4611,9 +4587,8 @@ void Java_org_rocksdb_DBOptions_setErrorIfExists(JNIEnv* /*env*/, * Method: errorIfExists * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_errorIfExists(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_errorIfExists( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->error_if_exists; } @@ -4622,10 +4597,8 @@ jboolean Java_org_rocksdb_DBOptions_errorIfExists(JNIEnv* /*env*/, * Method: setParanoidChecks * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setParanoidChecks(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean paranoid_checks) { +void Java_org_rocksdb_DBOptions_setParanoidChecks( + JNIEnv*, jobject, jlong jhandle, jboolean paranoid_checks) { reinterpret_cast(jhandle)->paranoid_checks = static_cast(paranoid_checks); } @@ -4635,9 +4608,8 @@ void Java_org_rocksdb_DBOptions_setParanoidChecks(JNIEnv* /*env*/, * Method: paranoidChecks * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_paranoidChecks(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_paranoidChecks( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->paranoid_checks; } @@ -4646,9 +4618,8 @@ jboolean Java_org_rocksdb_DBOptions_paranoidChecks(JNIEnv* /*env*/, * Method: setRateLimiter * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setRateLimiter(JNIEnv* /*env*/, - jobject /*jobj*/, jlong jhandle, - jlong jrate_limiter_handle) { +void Java_org_rocksdb_DBOptions_setRateLimiter( + JNIEnv*, jobject, jlong jhandle, jlong jrate_limiter_handle) { std::shared_ptr* pRateLimiter = reinterpret_cast*>( jrate_limiter_handle); @@ -4661,8 +4632,7 @@ void Java_org_rocksdb_DBOptions_setRateLimiter(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setSstFileManager( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jsst_file_manager_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jsst_file_manager_handle) { auto* sptr_sst_file_manager = reinterpret_cast*>( jsst_file_manager_handle); @@ -4675,8 +4645,8 @@ void Java_org_rocksdb_DBOptions_setSstFileManager( * Method: setLogger * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setLogger(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, jlong jlogger_handle) { +void Java_org_rocksdb_DBOptions_setLogger( + JNIEnv*, jobject, jlong jhandle, jlong jlogger_handle) { std::shared_ptr* pLogger = reinterpret_cast*>( jlogger_handle); @@ -4688,9 +4658,8 @@ void Java_org_rocksdb_DBOptions_setLogger(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setInfoLogLevel * Signature: (JB)V */ -void Java_org_rocksdb_DBOptions_setInfoLogLevel(JNIEnv* /*env*/, - jobject /*jobj*/, jlong jhandle, - jbyte jlog_level) { +void Java_org_rocksdb_DBOptions_setInfoLogLevel( + JNIEnv*, jobject, jlong jhandle, jbyte jlog_level) { reinterpret_cast(jhandle)->info_log_level = static_cast(jlog_level); } @@ -4700,8 +4669,8 @@ void Java_org_rocksdb_DBOptions_setInfoLogLevel(JNIEnv* /*env*/, * Method: infoLogLevel * Signature: (J)B */ -jbyte Java_org_rocksdb_DBOptions_infoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jbyte Java_org_rocksdb_DBOptions_infoLogLevel( + JNIEnv*, jobject, jlong jhandle) { return static_cast( reinterpret_cast(jhandle)->info_log_level); } @@ -4711,10 +4680,8 
@@ jbyte Java_org_rocksdb_DBOptions_infoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setMaxTotalWalSize * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setMaxTotalWalSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jlong jmax_total_wal_size) { +void Java_org_rocksdb_DBOptions_setMaxTotalWalSize( + JNIEnv*, jobject, jlong jhandle, jlong jmax_total_wal_size) { reinterpret_cast(jhandle)->max_total_wal_size = static_cast(jmax_total_wal_size); } @@ -4724,9 +4691,8 @@ void Java_org_rocksdb_DBOptions_setMaxTotalWalSize(JNIEnv* /*env*/, * Method: maxTotalWalSize * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_maxTotalWalSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_maxTotalWalSize( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->max_total_wal_size; } @@ -4735,9 +4701,8 @@ jlong Java_org_rocksdb_DBOptions_maxTotalWalSize(JNIEnv* /*env*/, * Method: setMaxOpenFiles * Signature: (JI)V */ -void Java_org_rocksdb_DBOptions_setMaxOpenFiles(JNIEnv* /*env*/, - jobject /*jobj*/, jlong jhandle, - jint max_open_files) { +void Java_org_rocksdb_DBOptions_setMaxOpenFiles( + JNIEnv*, jobject, jlong jhandle, jint max_open_files) { reinterpret_cast(jhandle)->max_open_files = static_cast(max_open_files); } @@ -4747,8 +4712,8 @@ void Java_org_rocksdb_DBOptions_setMaxOpenFiles(JNIEnv* /*env*/, * Method: maxOpenFiles * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_maxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_maxOpenFiles( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->max_open_files; } @@ -4758,8 +4723,7 @@ jint Java_org_rocksdb_DBOptions_maxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/, * Signature: (JI)V */ void Java_org_rocksdb_DBOptions_setMaxFileOpeningThreads( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jmax_file_opening_threads) { + JNIEnv*, jobject, jlong jhandle, jint jmax_file_opening_threads) { reinterpret_cast(jhandle)->max_file_opening_threads = static_cast(jmax_file_opening_threads); } @@ -4769,9 +4733,8 @@ void Java_org_rocksdb_DBOptions_setMaxFileOpeningThreads( * Method: maxFileOpeningThreads * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->max_file_opening_threads); } @@ -4781,9 +4744,8 @@ jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads(JNIEnv* /*env*/, * Method: setStatistics * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setStatistics(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, - jlong jstatistics_handle) { +void Java_org_rocksdb_DBOptions_setStatistics( + JNIEnv*, jobject, jlong jhandle, jlong jstatistics_handle) { auto* opt = reinterpret_cast(jhandle); auto* pSptr = reinterpret_cast*>( jstatistics_handle); @@ -4795,8 +4757,8 @@ void Java_org_rocksdb_DBOptions_setStatistics(JNIEnv* /*env*/, jobject /*jobj*/, * Method: statistics * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_statistics(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_statistics( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); std::shared_ptr sptr = opt->statistics; if (sptr == nullptr) { @@ -4813,8 +4775,8 @@ jlong Java_org_rocksdb_DBOptions_statistics(JNIEnv* /*env*/, jobject /*jobj*/, * Method: 
setUseFsync * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setUseFsync(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, jboolean use_fsync) { +void Java_org_rocksdb_DBOptions_setUseFsync( + JNIEnv*, jobject, jlong jhandle, jboolean use_fsync) { reinterpret_cast(jhandle)->use_fsync = static_cast(use_fsync); } @@ -4824,8 +4786,8 @@ void Java_org_rocksdb_DBOptions_setUseFsync(JNIEnv* /*env*/, jobject /*jobj*/, * Method: useFsync * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_useFsync(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_useFsync( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->use_fsync; } @@ -4834,9 +4796,9 @@ jboolean Java_org_rocksdb_DBOptions_useFsync(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setDbPaths * Signature: (J[Ljava/lang/String;[J)V */ -void Java_org_rocksdb_DBOptions_setDbPaths(JNIEnv* env, jobject /*jobj*/, - jlong jhandle, jobjectArray jpaths, - jlongArray jtarget_sizes) { +void Java_org_rocksdb_DBOptions_setDbPaths( + JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths, + jlongArray jtarget_sizes) { std::vector db_paths; jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr); if (ptr_jtarget_size == nullptr) { @@ -4880,8 +4842,8 @@ void Java_org_rocksdb_DBOptions_setDbPaths(JNIEnv* env, jobject /*jobj*/, * Method: dbPathsLen * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_dbPathsLen(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_dbPathsLen( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->db_paths.size()); } @@ -4891,9 +4853,9 @@ jlong Java_org_rocksdb_DBOptions_dbPathsLen(JNIEnv* /*env*/, jobject /*jobj*/, * Method: dbPaths * Signature: (J[Ljava/lang/String;[J)V */ -void Java_org_rocksdb_DBOptions_dbPaths(JNIEnv* env, jobject /*jobj*/, - jlong jhandle, jobjectArray jpaths, - jlongArray jtarget_sizes) { +void Java_org_rocksdb_DBOptions_dbPaths( + JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths, + jlongArray jtarget_sizes) { jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr); if (ptr_jtarget_size == nullptr) { // exception thrown: OutOfMemoryError @@ -4930,9 +4892,8 @@ void Java_org_rocksdb_DBOptions_dbPaths(JNIEnv* env, jobject /*jobj*/, * Method: setDbLogDir * Signature: (JLjava/lang/String)V */ -void Java_org_rocksdb_DBOptions_setDbLogDir(JNIEnv* env, jobject /*jobj*/, - jlong jhandle, - jstring jdb_log_dir) { +void Java_org_rocksdb_DBOptions_setDbLogDir( + JNIEnv* env, jobject, jlong jhandle, jstring jdb_log_dir) { const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr); if (log_dir == nullptr) { // exception thrown: OutOfMemoryError @@ -4948,8 +4909,8 @@ void Java_org_rocksdb_DBOptions_setDbLogDir(JNIEnv* env, jobject /*jobj*/, * Method: dbLogDir * Signature: (J)Ljava/lang/String */ -jstring Java_org_rocksdb_DBOptions_dbLogDir(JNIEnv* env, jobject /*jobj*/, - jlong jhandle) { +jstring Java_org_rocksdb_DBOptions_dbLogDir( + JNIEnv* env, jobject, jlong jhandle) { return env->NewStringUTF( reinterpret_cast(jhandle)->db_log_dir.c_str()); } @@ -4959,8 +4920,8 @@ jstring Java_org_rocksdb_DBOptions_dbLogDir(JNIEnv* env, jobject /*jobj*/, * Method: setWalDir * Signature: (JLjava/lang/String)V */ -void Java_org_rocksdb_DBOptions_setWalDir(JNIEnv* env, jobject /*jobj*/, - jlong jhandle, jstring jwal_dir) { +void Java_org_rocksdb_DBOptions_setWalDir( + JNIEnv* env, jobject, jlong jhandle, jstring jwal_dir) { const 
char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); reinterpret_cast(jhandle)->wal_dir.assign(wal_dir); env->ReleaseStringUTFChars(jwal_dir, wal_dir); @@ -4971,8 +4932,8 @@ void Java_org_rocksdb_DBOptions_setWalDir(JNIEnv* env, jobject /*jobj*/, * Method: walDir * Signature: (J)Ljava/lang/String */ -jstring Java_org_rocksdb_DBOptions_walDir(JNIEnv* env, jobject /*jobj*/, - jlong jhandle) { +jstring Java_org_rocksdb_DBOptions_walDir( + JNIEnv* env, jobject, jlong jhandle) { return env->NewStringUTF( reinterpret_cast(jhandle)->wal_dir.c_str()); } @@ -4983,7 +4944,7 @@ jstring Java_org_rocksdb_DBOptions_walDir(JNIEnv* env, jobject /*jobj*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setDeleteObsoleteFilesPeriodMicros( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong micros) { + JNIEnv*, jobject, jlong jhandle, jlong micros) { reinterpret_cast(jhandle) ->delete_obsolete_files_period_micros = static_cast(micros); } @@ -4994,7 +4955,7 @@ void Java_org_rocksdb_DBOptions_setDeleteObsoleteFilesPeriodMicros( * Signature: (J)J */ jlong Java_org_rocksdb_DBOptions_deleteObsoleteFilesPeriodMicros( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->delete_obsolete_files_period_micros; } @@ -5004,10 +4965,8 @@ jlong Java_org_rocksdb_DBOptions_deleteObsoleteFilesPeriodMicros( * Method: setBaseBackgroundCompactions * Signature: (JI)V */ -void Java_org_rocksdb_DBOptions_setBaseBackgroundCompactions(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jint max) { +void Java_org_rocksdb_DBOptions_setBaseBackgroundCompactions( + JNIEnv*, jobject, jlong jhandle, jint max) { reinterpret_cast(jhandle)->base_background_compactions = static_cast(max); } @@ -5017,9 +4976,8 @@ void Java_org_rocksdb_DBOptions_setBaseBackgroundCompactions(JNIEnv* /*env*/, * Method: baseBackgroundCompactions * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_baseBackgroundCompactions(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_baseBackgroundCompactions( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->base_background_compactions; } @@ -5029,10 +4987,8 @@ jint Java_org_rocksdb_DBOptions_baseBackgroundCompactions(JNIEnv* /*env*/, * Method: setMaxBackgroundCompactions * Signature: (JI)V */ -void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jint max) { +void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions( + JNIEnv*, jobject, jlong jhandle, jint max) { reinterpret_cast(jhandle)->max_background_compactions = static_cast(max); } @@ -5042,9 +4998,8 @@ void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions(JNIEnv* /*env*/, * Method: maxBackgroundCompactions * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->max_background_compactions; } @@ -5054,9 +5009,8 @@ jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions(JNIEnv* /*env*/, * Method: setMaxSubcompactions * Signature: (JI)V */ -void Java_org_rocksdb_DBOptions_setMaxSubcompactions(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, jint max) { +void Java_org_rocksdb_DBOptions_setMaxSubcompactions( + JNIEnv*, jobject, jlong jhandle, jint max) { reinterpret_cast(jhandle)->max_subcompactions = static_cast(max); } @@ -5066,9 +5020,8 @@ 
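// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the string/array wrappers
// above (setDbPaths, setDbLogDir, setWalDir) rely on the usual JNI
// acquire/check/release discipline -- GetStringUTFChars and
// GetLongArrayElements return nullptr on OutOfMemoryError with a Java
// exception already pending, so the wrapper must bail out immediately, and
// every successful acquire must be paired with the matching Release call.
// Minimal form ("some_dir" is a placeholder member):
void Java_org_rocksdb_DBOptions_setSomeDir(
    JNIEnv* env, jobject, jlong jhandle, jstring jdir) {
  const char* dir = env->GetStringUTFChars(jdir, nullptr);
  if (dir == nullptr) {
    return;  // exception thrown: OutOfMemoryError
  }
  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->some_dir.assign(dir);
  env->ReleaseStringUTFChars(jdir, dir);
}
// ---------------------------------------------------------------------------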
void Java_org_rocksdb_DBOptions_setMaxSubcompactions(JNIEnv* /*env*/, * Method: maxSubcompactions * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_maxSubcompactions(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_maxSubcompactions( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->max_subcompactions; } @@ -5078,8 +5031,7 @@ jint Java_org_rocksdb_DBOptions_maxSubcompactions(JNIEnv* /*env*/, * Signature: (JI)V */ void Java_org_rocksdb_DBOptions_setMaxBackgroundFlushes( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint max_background_flushes) { + JNIEnv*, jobject, jlong jhandle, jint max_background_flushes) { reinterpret_cast(jhandle)->max_background_flushes = static_cast(max_background_flushes); } @@ -5089,9 +5041,8 @@ void Java_org_rocksdb_DBOptions_setMaxBackgroundFlushes( * Method: maxBackgroundFlushes * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_maxBackgroundFlushes(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_maxBackgroundFlushes( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->max_background_flushes; } @@ -5100,10 +5051,8 @@ jint Java_org_rocksdb_DBOptions_maxBackgroundFlushes(JNIEnv* /*env*/, * Method: setMaxBackgroundJobs * Signature: (JI)V */ -void Java_org_rocksdb_DBOptions_setMaxBackgroundJobs(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jint max_background_jobs) { +void Java_org_rocksdb_DBOptions_setMaxBackgroundJobs( + JNIEnv*, jobject, jlong jhandle, jint max_background_jobs) { reinterpret_cast(jhandle)->max_background_jobs = static_cast(max_background_jobs); } @@ -5113,9 +5062,8 @@ void Java_org_rocksdb_DBOptions_setMaxBackgroundJobs(JNIEnv* /*env*/, * Method: maxBackgroundJobs * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_maxBackgroundJobs(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_maxBackgroundJobs( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->max_background_jobs; } @@ -5124,11 +5072,9 @@ jint Java_org_rocksdb_DBOptions_maxBackgroundJobs(JNIEnv* /*env*/, * Method: setMaxLogFileSize * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setMaxLogFileSize(JNIEnv* env, - jobject /*jobj*/, - jlong jhandle, - jlong max_log_file_size) { - rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(max_log_file_size); +void Java_org_rocksdb_DBOptions_setMaxLogFileSize( + JNIEnv* env, jobject, jlong jhandle, jlong max_log_file_size) { + auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(max_log_file_size); if (s.ok()) { reinterpret_cast(jhandle)->max_log_file_size = max_log_file_size; @@ -5142,9 +5088,8 @@ void Java_org_rocksdb_DBOptions_setMaxLogFileSize(JNIEnv* env, * Method: maxLogFileSize * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_maxLogFileSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_maxLogFileSize( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->max_log_file_size; } @@ -5154,10 +5099,9 @@ jlong Java_org_rocksdb_DBOptions_maxLogFileSize(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setLogFileTimeToRoll( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, - jlong log_file_time_to_roll) { - rocksdb::Status s = - rocksdb::check_if_jlong_fits_size_t(log_file_time_to_roll); + JNIEnv* env, jobject, jlong jhandle, jlong log_file_time_to_roll) { + auto s = + rocksdb::JniUtil::check_if_jlong_fits_size_t(log_file_time_to_roll); if (s.ok()) { 
reinterpret_cast(jhandle)->log_file_time_to_roll = log_file_time_to_roll; @@ -5171,9 +5115,8 @@ void Java_org_rocksdb_DBOptions_setLogFileTimeToRoll( * Method: logFileTimeToRoll * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->log_file_time_to_roll; } @@ -5182,11 +5125,9 @@ jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll(JNIEnv* /*env*/, * Method: setKeepLogFileNum * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setKeepLogFileNum(JNIEnv* env, - jobject /*jobj*/, - jlong jhandle, - jlong keep_log_file_num) { - rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(keep_log_file_num); +void Java_org_rocksdb_DBOptions_setKeepLogFileNum( + JNIEnv* env, jobject, jlong jhandle, jlong keep_log_file_num) { + auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(keep_log_file_num); if (s.ok()) { reinterpret_cast(jhandle)->keep_log_file_num = keep_log_file_num; @@ -5200,9 +5141,8 @@ void Java_org_rocksdb_DBOptions_setKeepLogFileNum(JNIEnv* env, * Method: keepLogFileNum * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_keepLogFileNum(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_keepLogFileNum( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->keep_log_file_num; } @@ -5212,9 +5152,8 @@ jlong Java_org_rocksdb_DBOptions_keepLogFileNum(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setRecycleLogFileNum( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, - jlong recycle_log_file_num) { - rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num); + JNIEnv* env, jobject, jlong jhandle, jlong recycle_log_file_num) { + auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(recycle_log_file_num); if (s.ok()) { reinterpret_cast(jhandle)->recycle_log_file_num = recycle_log_file_num; @@ -5228,9 +5167,8 @@ void Java_org_rocksdb_DBOptions_setRecycleLogFileNum( * Method: recycleLogFileNum * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_recycleLogFileNum( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->recycle_log_file_num; } @@ -5240,8 +5178,7 @@ jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setMaxManifestFileSize( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong max_manifest_file_size) { + JNIEnv*, jobject, jlong jhandle, jlong max_manifest_file_size) { reinterpret_cast(jhandle)->max_manifest_file_size = static_cast(max_manifest_file_size); } @@ -5251,9 +5188,8 @@ void Java_org_rocksdb_DBOptions_setMaxManifestFileSize( * Method: maxManifestFileSize * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_maxManifestFileSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_maxManifestFileSize( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->max_manifest_file_size; } @@ -5263,8 +5199,7 @@ jlong Java_org_rocksdb_DBOptions_maxManifestFileSize(JNIEnv* /*env*/, * Signature: (JI)V */ void Java_org_rocksdb_DBOptions_setTableCacheNumshardbits( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint table_cache_numshardbits) { + JNIEnv*, jobject, jlong jhandle, jint table_cache_numshardbits) { 
reinterpret_cast(jhandle)->table_cache_numshardbits = static_cast(table_cache_numshardbits); } @@ -5274,9 +5209,8 @@ void Java_org_rocksdb_DBOptions_setTableCacheNumshardbits( * Method: tableCacheNumshardbits * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->table_cache_numshardbits; } @@ -5286,10 +5220,8 @@ jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(JNIEnv* /*env*/, * Method: setWalTtlSeconds * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setWalTtlSeconds(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jlong WAL_ttl_seconds) { +void Java_org_rocksdb_DBOptions_setWalTtlSeconds( + JNIEnv*, jobject, jlong jhandle, jlong WAL_ttl_seconds) { reinterpret_cast(jhandle)->WAL_ttl_seconds = static_cast(WAL_ttl_seconds); } @@ -5299,9 +5231,8 @@ void Java_org_rocksdb_DBOptions_setWalTtlSeconds(JNIEnv* /*env*/, * Method: walTtlSeconds * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_walTtlSeconds(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_walTtlSeconds( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->WAL_ttl_seconds; } @@ -5310,10 +5241,8 @@ jlong Java_org_rocksdb_DBOptions_walTtlSeconds(JNIEnv* /*env*/, * Method: setWalSizeLimitMB * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setWalSizeLimitMB(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jlong WAL_size_limit_MB) { +void Java_org_rocksdb_DBOptions_setWalSizeLimitMB( + JNIEnv*, jobject, jlong jhandle, jlong WAL_size_limit_MB) { reinterpret_cast(jhandle)->WAL_size_limit_MB = static_cast(WAL_size_limit_MB); } @@ -5323,9 +5252,8 @@ void Java_org_rocksdb_DBOptions_setWalSizeLimitMB(JNIEnv* /*env*/, * Method: walSizeLimitMB * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_walSizeLimitMB(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_walSizeLimitMB( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->WAL_size_limit_MB; } @@ -5335,9 +5263,8 @@ jlong Java_org_rocksdb_DBOptions_walSizeLimitMB(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setManifestPreallocationSize( - JNIEnv* env, jobject /*jobj*/, jlong jhandle, - jlong preallocation_size) { - rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(preallocation_size); + JNIEnv* env, jobject, jlong jhandle, jlong preallocation_size) { + auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(preallocation_size); if (s.ok()) { reinterpret_cast(jhandle) ->manifest_preallocation_size = preallocation_size; @@ -5351,9 +5278,8 @@ void Java_org_rocksdb_DBOptions_setManifestPreallocationSize( * Method: manifestPreallocationSize * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->manifest_preallocation_size; } @@ -5363,9 +5289,8 @@ jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize(JNIEnv* /*env*/, * Method: useDirectReads * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_useDirectReads(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_useDirectReads( + JNIEnv*, jobject, jlong jhandle) { return
reinterpret_cast(jhandle)->use_direct_reads; } @@ -5374,10 +5299,8 @@ jboolean Java_org_rocksdb_DBOptions_useDirectReads(JNIEnv* /*env*/, * Method: setUseDirectReads * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setUseDirectReads(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean use_direct_reads) { +void Java_org_rocksdb_DBOptions_setUseDirectReads( + JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) { reinterpret_cast(jhandle)->use_direct_reads = static_cast(use_direct_reads); } @@ -5388,7 +5311,7 @@ void Java_org_rocksdb_DBOptions_setUseDirectReads(JNIEnv* /*env*/, * Signature: (J)Z */ jboolean Java_org_rocksdb_DBOptions_useDirectIoForFlushAndCompaction( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->use_direct_io_for_flush_and_compaction; } @@ -5399,7 +5322,7 @@ jboolean Java_org_rocksdb_DBOptions_useDirectIoForFlushAndCompaction( * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setUseDirectIoForFlushAndCompaction( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jboolean use_direct_io_for_flush_and_compaction) { reinterpret_cast(jhandle) ->use_direct_io_for_flush_and_compaction = @@ -5411,10 +5334,8 @@ void Java_org_rocksdb_DBOptions_setUseDirectIoForFlushAndCompaction( * Method: setAllowFAllocate * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setAllowFAllocate(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean jallow_fallocate) { +void Java_org_rocksdb_DBOptions_setAllowFAllocate( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_fallocate) { reinterpret_cast(jhandle)->allow_fallocate = static_cast(jallow_fallocate); } @@ -5424,9 +5345,8 @@ void Java_org_rocksdb_DBOptions_setAllowFAllocate(JNIEnv* /*env*/, * Method: allowFAllocate * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_allowFAllocate(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_allowFAllocate( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->allow_fallocate); } @@ -5436,10 +5356,8 @@ jboolean Java_org_rocksdb_DBOptions_allowFAllocate(JNIEnv* /*env*/, * Method: setAllowMmapReads * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setAllowMmapReads(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean allow_mmap_reads) { +void Java_org_rocksdb_DBOptions_setAllowMmapReads( + JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_reads) { reinterpret_cast(jhandle)->allow_mmap_reads = static_cast(allow_mmap_reads); } @@ -5449,9 +5367,8 @@ void Java_org_rocksdb_DBOptions_setAllowMmapReads(JNIEnv* /*env*/, * Method: allowMmapReads * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_allowMmapReads(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_allowMmapReads( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->allow_mmap_reads; } @@ -5460,10 +5377,8 @@ jboolean Java_org_rocksdb_DBOptions_allowMmapReads(JNIEnv* /*env*/, * Method: setAllowMmapWrites * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setAllowMmapWrites(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean allow_mmap_writes) { +void Java_org_rocksdb_DBOptions_setAllowMmapWrites( + JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_writes) { reinterpret_cast(jhandle)->allow_mmap_writes = static_cast(allow_mmap_writes); } @@ -5473,9 +5388,8 @@ void 
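// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the size_t-valued setters
// above (setMaxLogFileSize, setLogFileTimeToRoll, setKeepLogFileNum,
// setRecycleLogFileNum, setManifestPreallocationSize) now all route through
// rocksdb::JniUtil::check_if_jlong_fits_size_t. The guard amounts to the
// following (the real helper lives in rocksjni/portal.h; the exact status
// message is assumed here):
#include <limits>
rocksdb::Status check_if_jlong_fits_size_t_sketch(const jlong& jvalue) {
  rocksdb::Status s = rocksdb::Status::OK();
  if (static_cast<uint64_t>(jvalue) > std::numeric_limits<size_t>::max()) {
    s = rocksdb::Status::InvalidArgument(
        rocksdb::Slice("jlong overflows platform size_t"));
  }
  return s;
}
// On !s.ok() the setters skip the assignment and raise a Java
// IllegalArgumentException instead of silently truncating the value, which
// is why those wrappers keep their JNIEnv* parameter named.
// ---------------------------------------------------------------------------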
Java_org_rocksdb_DBOptions_setAllowMmapWrites(JNIEnv* /*env*/, * Method: allowMmapWrites * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_allowMmapWrites(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_allowMmapWrites( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->allow_mmap_writes; } @@ -5485,8 +5399,7 @@ jboolean Java_org_rocksdb_DBOptions_allowMmapWrites(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setIsFdCloseOnExec( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean is_fd_close_on_exec) { + JNIEnv*, jobject, jlong jhandle, jboolean is_fd_close_on_exec) { reinterpret_cast(jhandle)->is_fd_close_on_exec = static_cast(is_fd_close_on_exec); } @@ -5496,9 +5409,8 @@ void Java_org_rocksdb_DBOptions_setIsFdCloseOnExec( * Method: isFdCloseOnExec * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->is_fd_close_on_exec; } @@ -5508,8 +5420,7 @@ jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec(JNIEnv* /*env*/, * Signature: (JI)V */ void Java_org_rocksdb_DBOptions_setStatsDumpPeriodSec( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint stats_dump_period_sec) { + JNIEnv*, jobject, jlong jhandle, jint stats_dump_period_sec) { reinterpret_cast(jhandle)->stats_dump_period_sec = static_cast(stats_dump_period_sec); } @@ -5519,9 +5430,8 @@ void Java_org_rocksdb_DBOptions_setStatsDumpPeriodSec( * Method: statsDumpPeriodSec * Signature: (J)I */ -jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->stats_dump_period_sec; } @@ -5531,8 +5441,7 @@ jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setAdviseRandomOnOpen( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean advise_random_on_open) { + JNIEnv*, jobject, jlong jhandle, jboolean advise_random_on_open) { reinterpret_cast(jhandle)->advise_random_on_open = static_cast(advise_random_on_open); } @@ -5542,9 +5451,8 @@ void Java_org_rocksdb_DBOptions_setAdviseRandomOnOpen( * Method: adviseRandomOnOpen * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->advise_random_on_open; } @@ -5554,8 +5462,7 @@ jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setDbWriteBufferSize( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jdb_write_buffer_size) { + JNIEnv*, jobject, jlong jhandle, jlong jdb_write_buffer_size) { auto* opt = reinterpret_cast(jhandle); opt->db_write_buffer_size = static_cast(jdb_write_buffer_size); } @@ -5565,9 +5472,9 @@ void Java_org_rocksdb_DBOptions_setDbWriteBufferSize( * Method: setWriteBufferManager * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setWriteBufferManager(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jdb_options_handle, - jlong jwrite_buffer_manager_handle) { +void Java_org_rocksdb_DBOptions_setWriteBufferManager( + JNIEnv*, jobject, jlong 
jdb_options_handle, + jlong jwrite_buffer_manager_handle) { auto* write_buffer_manager = reinterpret_cast *>(jwrite_buffer_manager_handle); reinterpret_cast(jdb_options_handle)->write_buffer_manager = @@ -5579,9 +5486,8 @@ void Java_org_rocksdb_DBOptions_setWriteBufferManager(JNIEnv* /*env*/, jobject / * Method: dbWriteBufferSize * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_dbWriteBufferSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_dbWriteBufferSize( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->db_write_buffer_size); } @@ -5592,8 +5498,7 @@ jlong Java_org_rocksdb_DBOptions_dbWriteBufferSize(JNIEnv* /*env*/, * Signature: (JB)V */ void Java_org_rocksdb_DBOptions_setAccessHintOnCompactionStart( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jbyte jaccess_hint_value) { + JNIEnv*, jobject, jlong jhandle, jbyte jaccess_hint_value) { auto* opt = reinterpret_cast(jhandle); opt->access_hint_on_compaction_start = rocksdb::AccessHintJni::toCppAccessHint(jaccess_hint_value); @@ -5604,9 +5509,8 @@ void Java_org_rocksdb_DBOptions_setAccessHintOnCompactionStart( * Method: accessHintOnCompactionStart * Signature: (J)B */ -jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return rocksdb::AccessHintJni::toJavaAccessHint( opt->access_hint_on_compaction_start); @@ -5618,7 +5522,7 @@ jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setNewTableReaderForCompactionInputs( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jboolean jnew_table_reader_for_compaction_inputs) { auto* opt = reinterpret_cast(jhandle); opt->new_table_reader_for_compaction_inputs = @@ -5631,7 +5535,7 @@ void Java_org_rocksdb_DBOptions_setNewTableReaderForCompactionInputs( * Signature: (J)Z */ jboolean Java_org_rocksdb_DBOptions_newTableReaderForCompactionInputs( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->new_table_reader_for_compaction_inputs); } @@ -5642,8 +5546,7 @@ jboolean Java_org_rocksdb_DBOptions_newTableReaderForCompactionInputs( * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setCompactionReadaheadSize( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jcompaction_readahead_size) { + JNIEnv*, jobject, jlong jhandle, jlong jcompaction_readahead_size) { auto* opt = reinterpret_cast(jhandle); opt->compaction_readahead_size = static_cast(jcompaction_readahead_size); @@ -5654,9 +5557,8 @@ void Java_org_rocksdb_DBOptions_setCompactionReadaheadSize( * Method: compactionReadaheadSize * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->compaction_readahead_size); } @@ -5667,8 +5569,7 @@ jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setRandomAccessMaxBufferSize( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jrandom_access_max_buffer_size) { + 
JNIEnv*, jobject, jlong jhandle, jlong jrandom_access_max_buffer_size) { auto* opt = reinterpret_cast(jhandle); opt->random_access_max_buffer_size = static_cast(jrandom_access_max_buffer_size); @@ -5679,9 +5580,8 @@ void Java_org_rocksdb_DBOptions_setRandomAccessMaxBufferSize( * Method: randomAccessMaxBufferSize * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->random_access_max_buffer_size); } @@ -5692,8 +5592,7 @@ jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize(JNIEnv* /*env*/, * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setWritableFileMaxBufferSize( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jwritable_file_max_buffer_size) { + JNIEnv*, jobject, jlong jhandle, jlong jwritable_file_max_buffer_size) { auto* opt = reinterpret_cast(jhandle); opt->writable_file_max_buffer_size = static_cast(jwritable_file_max_buffer_size); @@ -5704,9 +5603,8 @@ void Java_org_rocksdb_DBOptions_setWritableFileMaxBufferSize( * Method: writableFileMaxBufferSize * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->writable_file_max_buffer_size); } @@ -5717,8 +5615,7 @@ jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setUseAdaptiveMutex( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean use_adaptive_mutex) { + JNIEnv*, jobject, jlong jhandle, jboolean use_adaptive_mutex) { reinterpret_cast(jhandle)->use_adaptive_mutex = static_cast(use_adaptive_mutex); } @@ -5728,9 +5625,8 @@ void Java_org_rocksdb_DBOptions_setUseAdaptiveMutex( * Method: useAdaptiveMutex * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->use_adaptive_mutex; } @@ -5739,9 +5635,8 @@ jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex(JNIEnv* /*env*/, * Method: setBytesPerSync * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setBytesPerSync(JNIEnv* /*env*/, - jobject /*jobj*/, jlong jhandle, - jlong bytes_per_sync) { +void Java_org_rocksdb_DBOptions_setBytesPerSync( + JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) { reinterpret_cast(jhandle)->bytes_per_sync = static_cast(bytes_per_sync); } @@ -5751,8 +5646,8 @@ void Java_org_rocksdb_DBOptions_setBytesPerSync(JNIEnv* /*env*/, * Method: bytesPerSync * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_bytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_bytesPerSync( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->bytes_per_sync; } @@ -5761,10 +5656,8 @@ jlong Java_org_rocksdb_DBOptions_bytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setWalBytesPerSync * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setWalBytesPerSync(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jlong jwal_bytes_per_sync) { +void Java_org_rocksdb_DBOptions_setWalBytesPerSync( + JNIEnv*, jobject, jlong 
jhandle, jlong jwal_bytes_per_sync) { reinterpret_cast(jhandle)->wal_bytes_per_sync = static_cast(jwal_bytes_per_sync); } @@ -5774,60 +5667,76 @@ void Java_org_rocksdb_DBOptions_setWalBytesPerSync(JNIEnv* /*env*/, * Method: walBytesPerSync * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_walBytesPerSync(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_walBytesPerSync( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->wal_bytes_per_sync); } /* * Class: org_rocksdb_DBOptions - * Method: setEnableThreadTracking + * Method: setDelayedWriteRate + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setDelayedWriteRate( + JNIEnv*, jobject, jlong jhandle, jlong jdelayed_write_rate) { + auto* opt = reinterpret_cast(jhandle); + opt->delayed_write_rate = static_cast(jdelayed_write_rate); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: delayedWriteRate + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_delayedWriteRate( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->delayed_write_rate); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setEnablePipelinedWrite * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setEnableThreadTracking( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jenable_thread_tracking) { +void Java_org_rocksdb_DBOptions_setEnablePipelinedWrite( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_pipelined_write) { auto* opt = reinterpret_cast(jhandle); - opt->enable_thread_tracking = static_cast(jenable_thread_tracking); + opt->enable_pipelined_write = jenable_pipelined_write == JNI_TRUE; } /* * Class: org_rocksdb_DBOptions - * Method: enableThreadTracking + * Method: enablePipelinedWrite * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_enableThreadTracking(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_enablePipelinedWrite( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); - return static_cast(opt->enable_thread_tracking); + return static_cast(opt->enable_pipelined_write); } /* * Class: org_rocksdb_DBOptions - * Method: setDelayedWriteRate - * Signature: (JJ)V + * Method: setEnableThreadTracking + * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setDelayedWriteRate(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jlong jdelayed_write_rate) { +void Java_org_rocksdb_DBOptions_setEnableThreadTracking( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_thread_tracking) { auto* opt = reinterpret_cast(jhandle); - opt->delayed_write_rate = static_cast(jdelayed_write_rate); + opt->enable_thread_tracking = jenable_thread_tracking == JNI_TRUE; } /* * Class: org_rocksdb_DBOptions - * Method: delayedWriteRate - * Signature: (J)J + * Method: enableThreadTracking + * Signature: (J)Z */ -jlong Java_org_rocksdb_DBOptions_delayedWriteRate(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_enableThreadTracking( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); - return static_cast(opt->delayed_write_rate); + return static_cast(opt->enable_thread_tracking); } /* @@ -5836,7 +5745,7 @@ jlong Java_org_rocksdb_DBOptions_delayedWriteRate(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setAllowConcurrentMemtableWrite( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean allow) { + JNIEnv*, jobject, jlong jhandle, jboolean 
allow) { reinterpret_cast(jhandle) ->allow_concurrent_memtable_write = static_cast(allow); } @@ -5847,7 +5756,7 @@ void Java_org_rocksdb_DBOptions_setAllowConcurrentMemtableWrite( * Signature: (J)Z */ jboolean Java_org_rocksdb_DBOptions_allowConcurrentMemtableWrite( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->allow_concurrent_memtable_write; } @@ -5858,7 +5767,7 @@ jboolean Java_org_rocksdb_DBOptions_allowConcurrentMemtableWrite( * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setEnableWriteThreadAdaptiveYield( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean yield) { + JNIEnv*, jobject, jlong jhandle, jboolean yield) { reinterpret_cast(jhandle) ->enable_write_thread_adaptive_yield = static_cast(yield); } @@ -5869,7 +5778,7 @@ void Java_org_rocksdb_DBOptions_setEnableWriteThreadAdaptiveYield( * Signature: (J)Z */ jboolean Java_org_rocksdb_DBOptions_enableWriteThreadAdaptiveYield( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->enable_write_thread_adaptive_yield; } @@ -5879,10 +5788,8 @@ jboolean Java_org_rocksdb_DBOptions_enableWriteThreadAdaptiveYield( * Method: setWriteThreadMaxYieldUsec * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setWriteThreadMaxYieldUsec(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jlong max) { +void Java_org_rocksdb_DBOptions_setWriteThreadMaxYieldUsec( + JNIEnv*, jobject, jlong jhandle, jlong max) { reinterpret_cast(jhandle)->write_thread_max_yield_usec = static_cast(max); } @@ -5892,9 +5799,8 @@ void Java_org_rocksdb_DBOptions_setWriteThreadMaxYieldUsec(JNIEnv* /*env*/, * Method: writeThreadMaxYieldUsec * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_writeThreadMaxYieldUsec(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_writeThreadMaxYieldUsec( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->write_thread_max_yield_usec; } @@ -5904,10 +5810,8 @@ jlong Java_org_rocksdb_DBOptions_writeThreadMaxYieldUsec(JNIEnv* /*env*/, * Method: setWriteThreadSlowYieldUsec * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setWriteThreadSlowYieldUsec(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jlong slow) { +void Java_org_rocksdb_DBOptions_setWriteThreadSlowYieldUsec( + JNIEnv*, jobject, jlong jhandle, jlong slow) { reinterpret_cast(jhandle)->write_thread_slow_yield_usec = static_cast(slow); } @@ -5917,9 +5821,8 @@ void Java_org_rocksdb_DBOptions_setWriteThreadSlowYieldUsec(JNIEnv* /*env*/, * Method: writeThreadSlowYieldUsec * Signature: (J)J */ -jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->write_thread_slow_yield_usec; } @@ -5930,8 +5833,7 @@ jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setSkipStatsUpdateOnDbOpen( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jskip_stats_update_on_db_open) { + JNIEnv*, jobject, jlong jhandle, jboolean jskip_stats_update_on_db_open) { auto* opt = reinterpret_cast(jhandle); opt->skip_stats_update_on_db_open = static_cast(jskip_stats_update_on_db_open); @@ -5942,9 +5844,8 @@ void Java_org_rocksdb_DBOptions_setSkipStatsUpdateOnDbOpen( * Method: skipStatsUpdateOnDbOpen * 
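// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the reordered bindings above
// (setDelayedWriteRate, setEnablePipelinedWrite) and the concurrent-write
// knobs map one-to-one onto existing C++ DBOptions fields. A typical
// configuration from the C++ side (values are arbitrary examples):
rocksdb::Options example_write_path_options() {
  rocksdb::Options options;
  options.enable_pipelined_write = true;  // separate WAL / memtable queues
  options.delayed_write_rate = 16 * 1024 * 1024;  // 16MB/s throttle on stall
  options.allow_concurrent_memtable_write = true;  // default skiplist
                                                   // memtable supports it
  options.enable_write_thread_adaptive_yield = true;
  return options;
}
// ---------------------------------------------------------------------------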
Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->skip_stats_update_on_db_open); } @@ -5955,8 +5856,7 @@ jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen(JNIEnv* /*env*/, * Signature: (JB)V */ void Java_org_rocksdb_DBOptions_setWalRecoveryMode( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jbyte jwal_recovery_mode_value) { + JNIEnv*, jobject, jlong jhandle, jbyte jwal_recovery_mode_value) { auto* opt = reinterpret_cast(jhandle); opt->wal_recovery_mode = rocksdb::WALRecoveryModeJni::toCppWALRecoveryMode( jwal_recovery_mode_value); @@ -5967,9 +5867,8 @@ void Java_org_rocksdb_DBOptions_setWalRecoveryMode( * Method: walRecoveryMode * Signature: (J)B */ -jbyte Java_org_rocksdb_DBOptions_walRecoveryMode(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jbyte Java_org_rocksdb_DBOptions_walRecoveryMode( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return rocksdb::WALRecoveryModeJni::toJavaWALRecoveryMode( opt->wal_recovery_mode); @@ -5980,9 +5879,8 @@ jbyte Java_org_rocksdb_DBOptions_walRecoveryMode(JNIEnv* /*env*/, * Method: setAllow2pc * Signature: (JZ)V */ -void Java_org_rocksdb_DBOptions_setAllow2pc(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, - jboolean jallow_2pc) { +void Java_org_rocksdb_DBOptions_setAllow2pc( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_2pc) { auto* opt = reinterpret_cast(jhandle); opt->allow_2pc = static_cast(jallow_2pc); } @@ -5992,8 +5890,8 @@ void Java_org_rocksdb_DBOptions_setAllow2pc(JNIEnv* /*env*/, jobject /*jobj*/, * Method: allow2pc * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_allow2pc(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_allow2pc( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->allow_2pc); } @@ -6003,23 +5901,34 @@ jboolean Java_org_rocksdb_DBOptions_allow2pc(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setRowCache * Signature: (JJ)V */ -void Java_org_rocksdb_DBOptions_setRowCache(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, - jlong jrow_cache_handle) { +void Java_org_rocksdb_DBOptions_setRowCache( + JNIEnv*, jobject, jlong jhandle, jlong jrow_cache_handle) { auto* opt = reinterpret_cast(jhandle); auto* row_cache = reinterpret_cast*>(jrow_cache_handle); opt->row_cache = *row_cache; } +/* + * Class: org_rocksdb_DBOptions + * Method: setWalFilter + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWalFilter( + JNIEnv*, jobject, jlong jhandle, jlong jwal_filter_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* wal_filter = + reinterpret_cast(jwal_filter_handle); + opt->wal_filter = wal_filter; +} + /* * Class: org_rocksdb_DBOptions * Method: setFailIfOptionsFileError * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setFailIfOptionsFileError( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jfail_if_options_file_error) { + JNIEnv*, jobject, jlong jhandle, jboolean jfail_if_options_file_error) { auto* opt = reinterpret_cast(jhandle); opt->fail_if_options_file_error = static_cast(jfail_if_options_file_error); @@ -6030,9 +5939,8 @@ void Java_org_rocksdb_DBOptions_setFailIfOptionsFileError( * Method: failIfOptionsFileError * Signature: (J)Z */ -jboolean 
Java_org_rocksdb_DBOptions_failIfOptionsFileError(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_failIfOptionsFileError( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->fail_if_options_file_error); } @@ -6043,8 +5951,7 @@ jboolean Java_org_rocksdb_DBOptions_failIfOptionsFileError(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setDumpMallocStats( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jdump_malloc_stats) { + JNIEnv*, jobject, jlong jhandle, jboolean jdump_malloc_stats) { auto* opt = reinterpret_cast(jhandle); opt->dump_malloc_stats = static_cast(jdump_malloc_stats); } @@ -6054,9 +5961,8 @@ void Java_org_rocksdb_DBOptions_setDumpMallocStats( * Method: dumpMallocStats * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_dumpMallocStats(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_dumpMallocStats( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->dump_malloc_stats); } @@ -6067,8 +5973,7 @@ jboolean Java_org_rocksdb_DBOptions_dumpMallocStats(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setAvoidFlushDuringRecovery( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean javoid_flush_during_recovery) { + JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_recovery) { auto* opt = reinterpret_cast(jhandle); opt->avoid_flush_during_recovery = static_cast(javoid_flush_during_recovery); @@ -6079,21 +5984,129 @@ void Java_org_rocksdb_DBOptions_setAvoidFlushDuringRecovery( * Method: avoidFlushDuringRecovery * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringRecovery(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringRecovery( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->avoid_flush_during_recovery); } +/* + * Class: org_rocksdb_DBOptions + * Method: setAllowIngestBehind + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAllowIngestBehind( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_ingest_behind) { + auto* opt = reinterpret_cast(jhandle); + opt->allow_ingest_behind = jallow_ingest_behind == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: allowIngestBehind + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_allowIngestBehind( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->allow_ingest_behind); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setPreserveDeletes + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setPreserveDeletes( + JNIEnv*, jobject, jlong jhandle, jboolean jpreserve_deletes) { + auto* opt = reinterpret_cast(jhandle); + opt->preserve_deletes = jpreserve_deletes == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: preserveDeletes + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_preserveDeletes( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->preserve_deletes); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setTwoWriteQueues + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setTwoWriteQueues( + JNIEnv*, jobject, jlong jhandle, jboolean jtwo_write_queues) { + auto* opt = reinterpret_cast(jhandle); + opt->two_write_queues = jtwo_write_queues == JNI_TRUE; +} + +/* 
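// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): setWalFilter above stores a
// raw rocksdb::WalFilter* into DBOptions::wal_filter, so the Java side must
// keep the filter object alive for the lifetime of the DB. A minimal C++
// filter, assuming this era's WalFilter interface (see
// include/rocksdb/wal_filter.h for the exact virtuals):
class PassThroughWalFilter : public rocksdb::WalFilter {
 public:
  WalProcessingOption LogRecord(const rocksdb::WriteBatch& /*batch*/,
                                rocksdb::WriteBatch* /*new_batch*/,
                                bool* /*batch_changed*/) const override {
    return WalProcessingOption::kContinueProcessing;  // keep every record
  }
  const char* Name() const override { return "PassThroughWalFilter"; }
};
// Wired up as: options.wal_filter = &my_filter;  (raw pointer, not owned)
// ---------------------------------------------------------------------------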
+ * Class: org_rocksdb_DBOptions + * Method: twoWriteQueues + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_twoWriteQueues( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->two_write_queues); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setManualWalFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setManualWalFlush( + JNIEnv*, jobject, jlong jhandle, jboolean jmanual_wal_flush) { + auto* opt = reinterpret_cast(jhandle); + opt->manual_wal_flush = jmanual_wal_flush == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: manualWalFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_manualWalFlush( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->manual_wal_flush); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAtomicFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAtomicFlush( + JNIEnv*, jobject, jlong jhandle, jboolean jatomic_flush) { + auto* opt = reinterpret_cast(jhandle); + opt->atomic_flush = jatomic_flush == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: atomicFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_atomicFlush( + JNIEnv *, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->atomic_flush); +} + /* * Class: org_rocksdb_DBOptions * Method: setAvoidFlushDuringShutdown * Signature: (JZ)V */ void Java_org_rocksdb_DBOptions_setAvoidFlushDuringShutdown( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean javoid_flush_during_shutdown) { + JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_shutdown) { auto* opt = reinterpret_cast(jhandle); opt->avoid_flush_during_shutdown = static_cast(javoid_flush_during_shutdown); @@ -6104,9 +6117,8 @@ void Java_org_rocksdb_DBOptions_setAvoidFlushDuringShutdown( * Method: avoidFlushDuringShutdown * Signature: (J)Z */ -jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->avoid_flush_during_shutdown); } @@ -6119,8 +6131,8 @@ jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown(JNIEnv* /*env*/, * Method: newWriteOptions * Signature: ()J */ -jlong Java_org_rocksdb_WriteOptions_newWriteOptions(JNIEnv* /*env*/, - jclass /*jcls*/) { +jlong Java_org_rocksdb_WriteOptions_newWriteOptions( + JNIEnv*, jclass) { auto* op = new rocksdb::WriteOptions(); return reinterpret_cast(op); } @@ -6130,9 +6142,8 @@ jlong Java_org_rocksdb_WriteOptions_newWriteOptions(JNIEnv* /*env*/, * Method: copyWriteOptions * Signature: (J)J */ -jlong Java_org_rocksdb_WriteOptions_copyWriteOptions(JNIEnv* /*env*/, - jclass /*jcls*/, - jlong jhandle) { +jlong Java_org_rocksdb_WriteOptions_copyWriteOptions( + JNIEnv*, jclass, jlong jhandle) { auto new_opt = new rocksdb::WriteOptions( *(reinterpret_cast(jhandle))); return reinterpret_cast(new_opt); @@ -6143,9 +6154,8 @@ jlong Java_org_rocksdb_WriteOptions_copyWriteOptions(JNIEnv* /*env*/, * Method: disposeInternal * Signature: ()V */ -void Java_org_rocksdb_WriteOptions_disposeInternal(JNIEnv* /*env*/, - jobject /*jwrite_options*/, - jlong jhandle) { +void Java_org_rocksdb_WriteOptions_disposeInternal( + JNIEnv*, jobject, jlong jhandle) { auto* write_options = reinterpret_cast(jhandle); assert(write_options != 
nullptr); delete write_options; @@ -6156,9 +6166,8 @@ void Java_org_rocksdb_WriteOptions_disposeInternal(JNIEnv* /*env*/, * Method: setSync * Signature: (JZ)V */ -void Java_org_rocksdb_WriteOptions_setSync(JNIEnv* /*env*/, - jobject /*jwrite_options*/, - jlong jhandle, jboolean jflag) { +void Java_org_rocksdb_WriteOptions_setSync( + JNIEnv*, jobject, jlong jhandle, jboolean jflag) { reinterpret_cast(jhandle)->sync = jflag; } @@ -6167,9 +6176,8 @@ void Java_org_rocksdb_WriteOptions_setSync(JNIEnv* /*env*/, * Method: sync * Signature: (J)Z */ -jboolean Java_org_rocksdb_WriteOptions_sync(JNIEnv* /*env*/, - jobject /*jwrite_options*/, - jlong jhandle) { +jboolean Java_org_rocksdb_WriteOptions_sync( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->sync; } @@ -6178,10 +6186,8 @@ jboolean Java_org_rocksdb_WriteOptions_sync(JNIEnv* /*env*/, * Method: setDisableWAL * Signature: (JZ)V */ -void Java_org_rocksdb_WriteOptions_setDisableWAL(JNIEnv* /*env*/, - jobject /*jwrite_options*/, - jlong jhandle, - jboolean jflag) { +void Java_org_rocksdb_WriteOptions_setDisableWAL( + JNIEnv*, jobject, jlong jhandle, jboolean jflag) { reinterpret_cast(jhandle)->disableWAL = jflag; } @@ -6190,9 +6196,8 @@ void Java_org_rocksdb_WriteOptions_setDisableWAL(JNIEnv* /*env*/, * Method: disableWAL * Signature: (J)Z */ -jboolean Java_org_rocksdb_WriteOptions_disableWAL(JNIEnv* /*env*/, - jobject /*jwrite_options*/, - jlong jhandle) { +jboolean Java_org_rocksdb_WriteOptions_disableWAL( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->disableWAL; } @@ -6202,7 +6207,7 @@ jboolean Java_org_rocksdb_WriteOptions_disableWAL(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_WriteOptions_setIgnoreMissingColumnFamilies( - JNIEnv* /*env*/, jobject /*jwrite_options*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jboolean jignore_missing_column_families) { reinterpret_cast(jhandle) ->ignore_missing_column_families = @@ -6215,7 +6220,7 @@ void Java_org_rocksdb_WriteOptions_setIgnoreMissingColumnFamilies( * Signature: (J)Z */ jboolean Java_org_rocksdb_WriteOptions_ignoreMissingColumnFamilies( - JNIEnv* /*env*/, jobject /*jwrite_options*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->ignore_missing_column_families; } @@ -6225,10 +6230,8 @@ jboolean Java_org_rocksdb_WriteOptions_ignoreMissingColumnFamilies( * Method: setNoSlowdown * Signature: (JZ)V */ -void Java_org_rocksdb_WriteOptions_setNoSlowdown(JNIEnv* /*env*/, - jobject /*jwrite_options*/, - jlong jhandle, - jboolean jno_slowdown) { +void Java_org_rocksdb_WriteOptions_setNoSlowdown( + JNIEnv*, jobject, jlong jhandle, jboolean jno_slowdown) { reinterpret_cast(jhandle)->no_slowdown = static_cast(jno_slowdown); } @@ -6238,12 +6241,32 @@ void Java_org_rocksdb_WriteOptions_setNoSlowdown(JNIEnv* /*env*/, * Method: noSlowdown * Signature: (J)Z */ -jboolean Java_org_rocksdb_WriteOptions_noSlowdown(JNIEnv* /*env*/, - jobject /*jwrite_options*/, - jlong jhandle) { +jboolean Java_org_rocksdb_WriteOptions_noSlowdown( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->no_slowdown; } +/* + * Class: org_rocksdb_WriteOptions + * Method: setLowPri + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setLowPri( + JNIEnv*, jobject, jlong jhandle, jboolean jlow_pri) { + reinterpret_cast(jhandle)->low_pri = + static_cast(jlow_pri); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: lowPri + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_lowPri( + 
JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->low_pri; +} + ///////////////////////////////////////////////////////////////////// // rocksdb::ReadOptions @@ -6252,19 +6275,32 @@ jboolean Java_org_rocksdb_WriteOptions_noSlowdown(JNIEnv* /*env*/, * Method: newReadOptions * Signature: ()J */ -jlong Java_org_rocksdb_ReadOptions_newReadOptions(JNIEnv* /*env*/, - jclass /*jcls*/) { +jlong Java_org_rocksdb_ReadOptions_newReadOptions__( + JNIEnv*, jclass) { auto* read_options = new rocksdb::ReadOptions(); return reinterpret_cast(read_options); } +/* + * Class: org_rocksdb_ReadOptions + * Method: newReadOptions + * Signature: (ZZ)J + */ +jlong Java_org_rocksdb_ReadOptions_newReadOptions__ZZ( + JNIEnv*, jclass, jboolean jverify_checksums, jboolean jfill_cache) { + auto* read_options = + new rocksdb::ReadOptions(static_cast(jverify_checksums), + static_cast(jfill_cache)); + return reinterpret_cast(read_options); +} + /* * Class: org_rocksdb_ReadOptions * Method: copyReadOptions * Signature: (J)J */ -jlong Java_org_rocksdb_ReadOptions_copyReadOptions(JNIEnv* /*env*/, jclass /*jcls*/, - jlong jhandle) { +jlong Java_org_rocksdb_ReadOptions_copyReadOptions( + JNIEnv*, jclass, jlong jhandle) { auto new_opt = new rocksdb::ReadOptions( *(reinterpret_cast(jhandle))); return reinterpret_cast(new_opt); @@ -6275,9 +6311,8 @@ jlong Java_org_rocksdb_ReadOptions_copyReadOptions(JNIEnv* /*env*/, jclass /*jcl * Method: disposeInternal * Signature: (J)V */ -void Java_org_rocksdb_ReadOptions_disposeInternal(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +void Java_org_rocksdb_ReadOptions_disposeInternal( + JNIEnv*, jobject, jlong jhandle) { auto* read_options = reinterpret_cast(jhandle); assert(read_options != nullptr); delete read_options; @@ -6289,8 +6324,7 @@ void Java_org_rocksdb_ReadOptions_disposeInternal(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_ReadOptions_setVerifyChecksums( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jverify_checksums) { + JNIEnv*, jobject, jlong jhandle, jboolean jverify_checksums) { reinterpret_cast(jhandle)->verify_checksums = static_cast(jverify_checksums); } @@ -6300,9 +6334,8 @@ void Java_org_rocksdb_ReadOptions_setVerifyChecksums( * Method: verifyChecksums * Signature: (J)Z */ -jboolean Java_org_rocksdb_ReadOptions_verifyChecksums(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ReadOptions_verifyChecksums( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->verify_checksums; } @@ -6311,9 +6344,8 @@ jboolean Java_org_rocksdb_ReadOptions_verifyChecksums(JNIEnv* /*env*/, * Method: setFillCache * Signature: (JZ)V */ -void Java_org_rocksdb_ReadOptions_setFillCache(JNIEnv* /*env*/, - jobject /*jobj*/, jlong jhandle, - jboolean jfill_cache) { +void Java_org_rocksdb_ReadOptions_setFillCache( + JNIEnv*, jobject, jlong jhandle, jboolean jfill_cache) { reinterpret_cast(jhandle)->fill_cache = static_cast(jfill_cache); } @@ -6323,9 +6355,8 @@ void Java_org_rocksdb_ReadOptions_setFillCache(JNIEnv* /*env*/, * Method: fillCache * Signature: (J)Z */ -jboolean Java_org_rocksdb_ReadOptions_fillCache(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ReadOptions_fillCache( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->fill_cache; } @@ -6334,8 +6365,8 @@ jboolean Java_org_rocksdb_ReadOptions_fillCache(JNIEnv* /*env*/, * Method: setTailing * Signature: (JZ)V */ -void Java_org_rocksdb_ReadOptions_setTailing(JNIEnv* /*env*/, 
jobject /*jobj*/, - jlong jhandle, jboolean jtailing) { +void Java_org_rocksdb_ReadOptions_setTailing( + JNIEnv*, jobject, jlong jhandle, jboolean jtailing) { reinterpret_cast(jhandle)->tailing = static_cast(jtailing); } @@ -6345,8 +6376,8 @@ void Java_org_rocksdb_ReadOptions_setTailing(JNIEnv* /*env*/, jobject /*jobj*/, * Method: tailing * Signature: (J)Z */ -jboolean Java_org_rocksdb_ReadOptions_tailing(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ReadOptions_tailing( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->tailing; } @@ -6355,8 +6386,8 @@ jboolean Java_org_rocksdb_ReadOptions_tailing(JNIEnv* /*env*/, jobject /*jobj*/, * Method: managed * Signature: (J)Z */ -jboolean Java_org_rocksdb_ReadOptions_managed(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ReadOptions_managed( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->managed; } @@ -6365,8 +6396,8 @@ jboolean Java_org_rocksdb_ReadOptions_managed(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setManaged * Signature: (JZ)V */ -void Java_org_rocksdb_ReadOptions_setManaged(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, jboolean jmanaged) { +void Java_org_rocksdb_ReadOptions_setManaged( + JNIEnv*, jobject, jlong jhandle, jboolean jmanaged) { reinterpret_cast(jhandle)->managed = static_cast(jmanaged); } @@ -6376,9 +6407,8 @@ void Java_org_rocksdb_ReadOptions_setManaged(JNIEnv* /*env*/, jobject /*jobj*/, * Method: totalOrderSeek * Signature: (J)Z */ -jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->total_order_seek; } @@ -6388,8 +6418,7 @@ jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_ReadOptions_setTotalOrderSeek( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jtotal_order_seek) { + JNIEnv*, jobject, jlong jhandle, jboolean jtotal_order_seek) { reinterpret_cast(jhandle)->total_order_seek = static_cast(jtotal_order_seek); } @@ -6399,9 +6428,8 @@ void Java_org_rocksdb_ReadOptions_setTotalOrderSeek( * Method: prefixSameAsStart * Signature: (J)Z */ -jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->prefix_same_as_start; } @@ -6411,8 +6439,7 @@ jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_ReadOptions_setPrefixSameAsStart( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jprefix_same_as_start) { + JNIEnv*, jobject, jlong jhandle, jboolean jprefix_same_as_start) { reinterpret_cast(jhandle)->prefix_same_as_start = static_cast(jprefix_same_as_start); } @@ -6422,8 +6449,8 @@ void Java_org_rocksdb_ReadOptions_setPrefixSameAsStart( * Method: pinData * Signature: (J)Z */ -jboolean Java_org_rocksdb_ReadOptions_pinData(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ReadOptions_pinData( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle)->pin_data; } @@ -6432,9 +6459,8 @@ jboolean Java_org_rocksdb_ReadOptions_pinData(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setPinData * Signature: (JZ)V */ -void Java_org_rocksdb_ReadOptions_setPinData(JNIEnv* 
/*env*/, jobject /*jobj*/, - jlong jhandle, - jboolean jpin_data) { +void Java_org_rocksdb_ReadOptions_setPinData( + JNIEnv*, jobject, jlong jhandle, jboolean jpin_data) { reinterpret_cast(jhandle)->pin_data = static_cast(jpin_data); } @@ -6445,7 +6471,7 @@ void Java_org_rocksdb_ReadOptions_setPinData(JNIEnv* /*env*/, jobject /*jobj*/, * Signature: (J)Z */ jboolean Java_org_rocksdb_ReadOptions_backgroundPurgeOnIteratorCleanup( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->background_purge_on_iterator_cleanup); } @@ -6456,7 +6482,7 @@ jboolean Java_org_rocksdb_ReadOptions_backgroundPurgeOnIteratorCleanup( * Signature: (JZ)V */ void Java_org_rocksdb_ReadOptions_setBackgroundPurgeOnIteratorCleanup( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + JNIEnv*, jobject, jlong jhandle, jboolean jbackground_purge_on_iterator_cleanup) { auto* opt = reinterpret_cast(jhandle); opt->background_purge_on_iterator_cleanup = @@ -6468,9 +6494,8 @@ void Java_org_rocksdb_ReadOptions_setBackgroundPurgeOnIteratorCleanup( * Method: readaheadSize * Signature: (J)J */ -jlong Java_org_rocksdb_ReadOptions_readaheadSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ReadOptions_readaheadSize( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->readahead_size); } @@ -6480,22 +6505,42 @@ jlong Java_org_rocksdb_ReadOptions_readaheadSize(JNIEnv* /*env*/, * Method: setReadaheadSize * Signature: (JJ)V */ -void Java_org_rocksdb_ReadOptions_setReadaheadSize(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jlong jreadahead_size) { +void Java_org_rocksdb_ReadOptions_setReadaheadSize( + JNIEnv*, jobject, jlong jhandle, jlong jreadahead_size) { auto* opt = reinterpret_cast(jhandle); opt->readahead_size = static_cast(jreadahead_size); } +/* + * Class: org_rocksdb_ReadOptions + * Method: maxSkippableInternalKeys + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_maxSkippableInternalKeys( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->max_skippable_internal_keys); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setMaxSkippableInternalKeys + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setMaxSkippableInternalKeys( + JNIEnv*, jobject, jlong jhandle, jlong jmax_skippable_internal_keys) { + auto* opt = reinterpret_cast(jhandle); + opt->max_skippable_internal_keys = + static_cast(jmax_skippable_internal_keys); +} + /* * Class: org_rocksdb_ReadOptions * Method: ignoreRangeDeletions * Signature: (J)Z */ -jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions( + JNIEnv*, jobject, jlong jhandle) { auto* opt = reinterpret_cast(jhandle); return static_cast(opt->ignore_range_deletions); } @@ -6506,8 +6551,7 @@ jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_ReadOptions_setIgnoreRangeDeletions( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean jignore_range_deletions) { + JNIEnv*, jobject, jlong jhandle, jboolean jignore_range_deletions) { auto* opt = reinterpret_cast(jhandle); opt->ignore_range_deletions = static_cast(jignore_range_deletions); } @@ -6517,8 +6561,8 @@ void Java_org_rocksdb_ReadOptions_setIgnoreRangeDeletions( * Method: setSnapshot * Signature: 
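// For context: max_skippable_internal_keys, bridged above, caps how many
// internal (hidden) entries, such as tombstones, an iterator may skip
// before giving up; past the limit the iterator is expected to become
// invalid with an Incomplete status. A sketch, with db as a hypothetical
// placeholder:
//
//   rocksdb::ReadOptions read_opts;
//   read_opts.max_skippable_internal_keys = 10000;
//   std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(read_opts));
//   for (it->SeekToFirst(); it->Valid(); it->Next()) { /* ... */ }
//   if (it->status().IsIncomplete()) { /* hit the skip limit */ }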
(JJ)V */ -void Java_org_rocksdb_ReadOptions_setSnapshot(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, jlong jsnapshot) { +void Java_org_rocksdb_ReadOptions_setSnapshot( + JNIEnv*, jobject, jlong jhandle, jlong jsnapshot) { reinterpret_cast(jhandle)->snapshot = reinterpret_cast(jsnapshot); } @@ -6528,8 +6572,8 @@ void Java_org_rocksdb_ReadOptions_setSnapshot(JNIEnv* /*env*/, jobject /*jobj*/, * Method: snapshot * Signature: (J)J */ -jlong Java_org_rocksdb_ReadOptions_snapshot(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ReadOptions_snapshot( + JNIEnv*, jobject, jlong jhandle) { auto& snapshot = reinterpret_cast(jhandle)->snapshot; return reinterpret_cast(snapshot); } @@ -6539,8 +6583,8 @@ jlong Java_org_rocksdb_ReadOptions_snapshot(JNIEnv* /*env*/, jobject /*jobj*/, * Method: readTier * Signature: (J)B */ -jbyte Java_org_rocksdb_ReadOptions_readTier(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jbyte Java_org_rocksdb_ReadOptions_readTier( + JNIEnv*, jobject, jlong jhandle) { return static_cast( reinterpret_cast(jhandle)->read_tier); } @@ -6550,8 +6594,8 @@ jbyte Java_org_rocksdb_ReadOptions_readTier(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setReadTier * Signature: (JB)V */ -void Java_org_rocksdb_ReadOptions_setReadTier(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle, jbyte jread_tier) { +void Java_org_rocksdb_ReadOptions_setReadTier( + JNIEnv*, jobject, jlong jhandle, jbyte jread_tier) { reinterpret_cast(jhandle)->read_tier = static_cast(jread_tier); } @@ -6562,8 +6606,7 @@ void Java_org_rocksdb_ReadOptions_setReadTier(JNIEnv* /*env*/, jobject /*jobj*/, * Signature: (JJ)I */ void Java_org_rocksdb_ReadOptions_setIterateUpperBound( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jupper_bound_slice_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jupper_bound_slice_handle) { reinterpret_cast(jhandle)->iterate_upper_bound = reinterpret_cast(jupper_bound_slice_handle); } @@ -6573,9 +6616,8 @@ void Java_org_rocksdb_ReadOptions_setIterateUpperBound( * Method: iterateUpperBound * Signature: (J)J */ -jlong Java_org_rocksdb_ReadOptions_iterateUpperBound(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ReadOptions_iterateUpperBound( + JNIEnv*, jobject, jlong jhandle) { auto& upper_bound_slice_handle = reinterpret_cast(jhandle)->iterate_upper_bound; return reinterpret_cast(upper_bound_slice_handle); @@ -6587,8 +6629,7 @@ jlong Java_org_rocksdb_ReadOptions_iterateUpperBound(JNIEnv* /*env*/, * Signature: (JJ)I */ void Java_org_rocksdb_ReadOptions_setIterateLowerBound( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jlower_bound_slice_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jlower_bound_slice_handle) { reinterpret_cast(jhandle)->iterate_lower_bound = reinterpret_cast(jlower_bound_slice_handle); } @@ -6598,14 +6639,48 @@ void Java_org_rocksdb_ReadOptions_setIterateLowerBound( * Method: iterateLowerBound * Signature: (J)J */ -jlong Java_org_rocksdb_ReadOptions_iterateLowerBound(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jlong Java_org_rocksdb_ReadOptions_iterateLowerBound( + JNIEnv*, jobject, jlong jhandle) { auto& lower_bound_slice_handle = reinterpret_cast(jhandle)->iterate_lower_bound; return reinterpret_cast(lower_bound_slice_handle); } +/* + * Class: org_rocksdb_ReadOptions + * Method: setTableFilter + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setTableFilter( + JNIEnv*, jobject, jlong jhandle, jlong jjni_table_filter_handle) { + auto* opt = 
reinterpret_cast(jhandle); + auto* jni_table_filter = + reinterpret_cast(jjni_table_filter_handle); + opt->table_filter = jni_table_filter->GetTableFilterFunction(); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setIterStartSeqnum + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setIterStartSeqnum( + JNIEnv*, jobject, jlong jhandle, jlong jiter_start_seqnum) { + auto* opt = reinterpret_cast(jhandle); + opt->iter_start_seqnum = static_cast(jiter_start_seqnum); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: iterStartSeqnum + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_iterStartSeqnum( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->iter_start_seqnum); +} + ///////////////////////////////////////////////////////////////////// // rocksdb::ComparatorOptions @@ -6614,8 +6689,8 @@ jlong Java_org_rocksdb_ReadOptions_iterateLowerBound(JNIEnv* /*env*/, * Method: newComparatorOptions * Signature: ()J */ -jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(JNIEnv* /*env*/, - jclass /*jcls*/) { +jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions( + JNIEnv*, jclass) { auto* comparator_opt = new rocksdb::ComparatorJniCallbackOptions(); return reinterpret_cast(comparator_opt); } @@ -6625,9 +6700,8 @@ jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(JNIEnv* /*env*/, * Method: useAdaptiveMutex * Signature: (J)Z */ -jboolean Java_org_rocksdb_ComparatorOptions_useAdaptiveMutex(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +jboolean Java_org_rocksdb_ComparatorOptions_useAdaptiveMutex( + JNIEnv*, jobject, jlong jhandle) { return reinterpret_cast(jhandle) ->use_adaptive_mutex; } @@ -6638,8 +6712,7 @@ jboolean Java_org_rocksdb_ComparatorOptions_useAdaptiveMutex(JNIEnv* /*env*/, * Signature: (JZ)V */ void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jboolean juse_adaptive_mutex) { + JNIEnv*, jobject, jlong jhandle, jboolean juse_adaptive_mutex) { reinterpret_cast(jhandle) ->use_adaptive_mutex = static_cast(juse_adaptive_mutex); } @@ -6649,9 +6722,8 @@ void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex( * Method: disposeInternal * Signature: (J)V */ -void Java_org_rocksdb_ComparatorOptions_disposeInternal(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +void Java_org_rocksdb_ComparatorOptions_disposeInternal( + JNIEnv*, jobject, jlong jhandle) { auto* comparator_opt = reinterpret_cast(jhandle); assert(comparator_opt != nullptr); @@ -6666,8 +6738,8 @@ void Java_org_rocksdb_ComparatorOptions_disposeInternal(JNIEnv* /*env*/, * Method: newFlushOptions * Signature: ()J */ -jlong Java_org_rocksdb_FlushOptions_newFlushOptions(JNIEnv* /*env*/, - jclass /*jcls*/) { +jlong Java_org_rocksdb_FlushOptions_newFlushOptions( + JNIEnv*, jclass) { auto* flush_opt = new rocksdb::FlushOptions(); return reinterpret_cast(flush_opt); } @@ -6677,10 +6749,8 @@ jlong Java_org_rocksdb_FlushOptions_newFlushOptions(JNIEnv* /*env*/, * Method: setWaitForFlush * Signature: (JZ)V */ -void Java_org_rocksdb_FlushOptions_setWaitForFlush(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jboolean jwait) { +void Java_org_rocksdb_FlushOptions_setWaitForFlush( + JNIEnv*, jobject, jlong jhandle, jboolean jwait) { reinterpret_cast(jhandle)->wait = static_cast(jwait); } @@ -6690,20 +6760,40 @@ void Java_org_rocksdb_FlushOptions_setWaitForFlush(JNIEnv* /*env*/, * Method: waitForFlush * Signature: (J)Z */ -jboolean 
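// For context: the two ReadOptions members bridged above are, on the native
// side, a per-SST-file predicate and a sequence-number floor (the latter
// supports differential snapshots). A minimal sketch:
//
//   rocksdb::ReadOptions read_opts;
//   read_opts.table_filter =
//       [](const rocksdb::TableProperties& /*props*/) {
//         return true;  // a real filter would inspect the properties
//       };
//   read_opts.iter_start_seqnum = 1;  // skip entries below this seqnum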
Java_org_rocksdb_FlushOptions_waitForFlush(JNIEnv* /*env*/,
-                                                    jobject /*jobj*/,
-                                                    jlong jhandle) {
+jboolean Java_org_rocksdb_FlushOptions_waitForFlush(
+    JNIEnv*, jobject, jlong jhandle) {
  return reinterpret_cast<rocksdb::FlushOptions*>(jhandle)->wait;
 }

+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: setAllowWriteStall
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_FlushOptions_setAllowWriteStall(
+    JNIEnv*, jobject, jlong jhandle, jboolean jallow_write_stall) {
+  auto* flush_options = reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
+  flush_options->allow_write_stall = jallow_write_stall == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: allowWriteStall
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_FlushOptions_allowWriteStall(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* flush_options = reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
+  return static_cast<jboolean>(flush_options->allow_write_stall);
+}
+
 /*
 * Class: org_rocksdb_FlushOptions
 * Method: disposeInternal
 * Signature: (J)V
 */
-void Java_org_rocksdb_FlushOptions_disposeInternal(JNIEnv* /*env*/,
-                                                   jobject /*jobj*/,
-                                                   jlong jhandle) {
+void Java_org_rocksdb_FlushOptions_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
  auto* flush_opt = reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
  assert(flush_opt != nullptr);
  delete flush_opt;
diff --git a/java/rocksjni/options_util.cc b/java/rocksjni/options_util.cc
index 2e057c407..7dd007845 100644
--- a/java/rocksjni/options_util.cc
+++ b/java/rocksjni/options_util.cc
@@ -7,6 +7,7 @@
 // calling C++ rocksdb::OptionsUtil methods from Java side.

 #include <jni.h>
+#include <string>

 #include "include/org_rocksdb_OptionsUtil.h"

@@ -56,19 +57,23 @@ void build_column_family_descriptor_list(
 void Java_org_rocksdb_OptionsUtil_loadLatestOptions(
    JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle,
    jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) {
-  const char* db_path = env->GetStringUTFChars(jdbpath, nullptr);
+  jboolean has_exception = JNI_FALSE;
+  auto db_path = rocksdb::JniUtil::copyStdString(env, jdbpath, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return;
+  }
  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
  rocksdb::Status s = rocksdb::LoadLatestOptions(
      db_path, reinterpret_cast<rocksdb::Env*>(jenv_handle),
      reinterpret_cast<rocksdb::DBOptions*>(jdb_opts_handle), &cf_descs,
      ignore_unknown_options);
-  env->ReleaseStringUTFChars(jdbpath, db_path);
-
  if (!s.ok()) {
+    // error, raise an exception
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  } else {
+    build_column_family_descriptor_list(env, jcfds, cf_descs);
  }
-
-  build_column_family_descriptor_list(env, jcfds, cf_descs);
 }

 /*
@@ -79,19 +84,23 @@ void Java_org_rocksdb_OptionsUtil_loadLatestOptions(
 void Java_org_rocksdb_OptionsUtil_loadOptionsFromFile(
    JNIEnv* env, jclass /*jcls*/, jstring jopts_file_name, jlong jenv_handle,
    jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) {
-  const char* opts_file_name = env->GetStringUTFChars(jopts_file_name, nullptr);
+  jboolean has_exception = JNI_FALSE;
+  auto opts_file_name = rocksdb::JniUtil::copyStdString(env, jopts_file_name, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return;
+  }
  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
  rocksdb::Status s = rocksdb::LoadOptionsFromFile(
      opts_file_name, reinterpret_cast<rocksdb::Env*>(jenv_handle),
      reinterpret_cast<rocksdb::DBOptions*>(jdb_opts_handle), &cf_descs,
      ignore_unknown_options);
-  env->ReleaseStringUTFChars(jopts_file_name, opts_file_name);
-
  if (!s.ok()) {
+    // error, raise an exception
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  } else {
+    build_column_family_descriptor_list(env, jcfds, cf_descs);
  }
-
-  build_column_family_descriptor_list(env, jcfds, cf_descs);
 }

 /*
@@ -101,14 +110,21 @@ void Java_org_rocksdb_OptionsUtil_loadOptionsFromFile(
 */
 jstring Java_org_rocksdb_OptionsUtil_getLatestOptionsFileName(
    JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle) {
-  const char* db_path = env->GetStringUTFChars(jdbpath, nullptr);
+  jboolean has_exception = JNI_FALSE;
+  auto db_path = rocksdb::JniUtil::copyStdString(env, jdbpath, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return nullptr;
+  }
  std::string options_file_name;
-  if (db_path != nullptr) {
-    rocksdb::GetLatestOptionsFileName(
-        db_path, reinterpret_cast<rocksdb::Env*>(jenv_handle),
-        &options_file_name);
+  rocksdb::Status s = rocksdb::GetLatestOptionsFileName(
+      db_path, reinterpret_cast<rocksdb::Env*>(jenv_handle),
+      &options_file_name);
+  if (!s.ok()) {
+    // error, raise an exception
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  } else {
+    return env->NewStringUTF(options_file_name.c_str());
  }
-  env->ReleaseStringUTFChars(jdbpath, db_path);
-
-  return env->NewStringUTF(options_file_name.c_str());
 }
diff --git a/java/rocksjni/persistent_cache.cc b/java/rocksjni/persistent_cache.cc
new file mode 100644
index 000000000..2b6fc60ba
--- /dev/null
+++ b/java/rocksjni/persistent_cache.cc
@@ -0,0 +1,53 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::PersistentCache.
+
+#include <jni.h>
+#include <string>
+
+#include "include/org_rocksdb_PersistentCache.h"
+#include "rocksdb/persistent_cache.h"
+#include "loggerjnicallback.h"
+#include "portal.h"
+
+/*
+ * Class: org_rocksdb_PersistentCache
+ * Method: newPersistentCache
+ * Signature: (JLjava/lang/String;JJZ)J
+ */
+jlong Java_org_rocksdb_PersistentCache_newPersistentCache(
+    JNIEnv* env, jclass, jlong jenv_handle, jstring jpath,
+    jlong jsz, jlong jlogger_handle, jboolean joptimized_for_nvm) {
+  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jenv_handle);
+  jboolean has_exception = JNI_FALSE;
+  std::string path = rocksdb::JniUtil::copyStdString(env, jpath, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    return 0;
+  }
+  auto* logger =
+      reinterpret_cast<std::shared_ptr<rocksdb::Logger>*>(jlogger_handle);
+  auto* cache = new std::shared_ptr<rocksdb::PersistentCache>(nullptr);
+  rocksdb::Status s = rocksdb::NewPersistentCache(
+      rocks_env, path, static_cast<uint64_t>(jsz), *logger,
+      static_cast<bool>(joptimized_for_nvm), cache);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+  return reinterpret_cast<jlong>(cache);
+}
+
+/*
+ * Class: org_rocksdb_PersistentCache
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_PersistentCache_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* cache =
+      reinterpret_cast<std::shared_ptr<rocksdb::PersistentCache>*>(jhandle);
+  delete cache;  // delete std::shared_ptr
+}
diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h
index c24ff1688..70e67653e 100644
--- a/java/rocksjni/portal.h
+++ b/java/rocksjni/portal.h
@@ -10,11 +10,12 @@
 #ifndef JAVA_ROCKSJNI_PORTAL_H_
 #define JAVA_ROCKSJNI_PORTAL_H_

+#include <cstring>
 #include <jni.h>
-#include <limits>
 #include <functional>
 #include <iostream>
 #include <iterator>
+#include <limits>
 #include <memory>
 #include <string>
 #include <type_traits>
@@ -25,6 +26,7 @@
 #include "rocksdb/filter_policy.h"
 #include "rocksdb/rate_limiter.h"
 #include "rocksdb/status.h"
+#include "rocksdb/table.h"
 #include "rocksdb/utilities/backupable_db.h"
 #include
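// For context: the bridge above heap-allocates the cache as a
// std::shared_ptr and hands that pointer to Java as the handle. A minimal
// sketch of the same factory used natively; the path and logger here are
// hypothetical placeholders:
//
//   std::shared_ptr<rocksdb::PersistentCache> pcache;
//   rocksdb::Status s = rocksdb::NewPersistentCache(
//       rocksdb::Env::Default(), "/tmp/rocksdb_pcache",
//       1024 * 1024 * 1024 /* 1 GiB */, logger,
//       /*optimized_for_nvm=*/true, &pcache);
//   if (s.ok()) {
//     rocksdb::BlockBasedTableOptions table_opts;
//     table_opts.persistent_cache = pcache;  // attach to table options
//   }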
"rocksdb/utilities/memory_util.h" #include "rocksdb/utilities/transaction_db.h" @@ -32,7 +34,10 @@ #include "rocksjni/compaction_filter_factory_jnicallback.h" #include "rocksjni/comparatorjnicallback.h" #include "rocksjni/loggerjnicallback.h" +#include "rocksjni/table_filter_jnicallback.h" +#include "rocksjni/trace_writer_jnicallback.h" #include "rocksjni/transaction_notifier_jnicallback.h" +#include "rocksjni/wal_filter_jnicallback.h" #include "rocksjni/writebatchhandlerjnicallback.h" // Remove macro on windows @@ -42,15 +47,6 @@ namespace rocksdb { -// Detect if jlong overflows size_t -inline Status check_if_jlong_fits_size_t(const jlong& jvalue) { - Status s = Status::OK(); - if (static_cast(jvalue) > std::numeric_limits::max()) { - s = Status::InvalidArgument(Slice("jlong overflows 32 bit value.")); - } - return s; -} - class JavaClass { public: /** @@ -159,11 +155,12 @@ template class JavaException : public JavaClass { } }; -// The portal class for org.rocksdb.RocksDB -class RocksDBJni : public RocksDBNativeClass { +// The portal class for java.lang.IllegalArgumentException +class IllegalArgumentExceptionJni : + public JavaException { public: /** - * Get the Java Class org.rocksdb.RocksDB + * Get the Java Class java.lang.IllegalArgumentException * * @param env A pointer to the Java environment * @@ -172,7 +169,34 @@ class RocksDBJni : public RocksDBNativeClass { * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksDB"); + return JavaException::getJClass(env, "java/lang/IllegalArgumentException"); + } + + /** + * Create and throw a Java IllegalArgumentException with the provided status + * + * If s.ok() == true, then this function will not throw any exception. + * + * @param env A pointer to the Java environment + * @param s The status for the exception + * + * @return true if an exception was thrown, false otherwise + */ + static bool ThrowNew(JNIEnv* env, const Status& s) { + assert(!s.ok()); + if (s.ok()) { + return false; + } + + // get the IllegalArgumentException class + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + std::cerr << "IllegalArgumentExceptionJni::ThrowNew/class - Error: unexpected exception!" 
<< std::endl; + return env->ExceptionCheck(); + } + + return JavaException::ThrowNew(env, s.ToString()); } }; @@ -473,6 +497,100 @@ class StatusJni : public RocksDBNativeClass { } } + static std::unique_ptr toCppStatus( + const jbyte jcode_value, const jbyte jsub_code_value) { + std::unique_ptr status; + switch (jcode_value) { + case 0x0: + //Ok + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::OK())); + break; + case 0x1: + //NotFound + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::NotFound( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x2: + //Corruption + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::Corruption( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x3: + //NotSupported + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::NotSupported( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x4: + //InvalidArgument + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::InvalidArgument( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x5: + //IOError + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::IOError( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x6: + //MergeInProgress + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::MergeInProgress( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x7: + //Incomplete + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::Incomplete( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x8: + //ShutdownInProgress + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::ShutdownInProgress( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x9: + //TimedOut + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::TimedOut( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xA: + //Aborted + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::Aborted( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xB: + //Busy + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::Busy( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xC: + //Expired + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::Expired( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xD: + //TryAgain + status = std::unique_ptr( + new rocksdb::Status(rocksdb::Status::TryAgain( + rocksdb::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x7F: + default: + return nullptr; + } + return status; + } + // Returns the equivalent rocksdb::Status for the Java org.rocksdb.Status static std::unique_ptr toCppStatus(JNIEnv* env, const jobject jstatus) { jmethodID mid_code = getCodeMethod(env); @@ -514,14 +632,14 @@ class StatusJni : public RocksDBNativeClass { return nullptr; } - jbyte jsubCode_value = 0x0; // None + jbyte jsub_code_value = 0x0; // None if (jsubCode != nullptr) { jmethodID mid_subCode_value = rocksdb::SubCodeJni::getValueMethod(env); if (mid_subCode_value == nullptr) { // exception occurred return nullptr; } - jsubCode_value =env->CallByteMethod(jsubCode, mid_subCode_value); + jsub_code_value = env->CallByteMethod(jsubCode, mid_subCode_value); if (env->ExceptionCheck()) { // exception occurred if (jcode != nullptr) { @@ -548,68 +666,8 @@ class StatusJni : public RocksDBNativeClass { return 
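// For context: the new toCppStatus(jbyte, jbyte) overload above lets the
// byte-to-Status switch be reused without a JNI round trip. A minimal
// sketch:
//
//   std::unique_ptr<rocksdb::Status> s =
//       rocksdb::StatusJni::toCppStatus(0x1 /*NotFound*/, 0x0 /*None*/);
//   assert(s != nullptr && s->IsNotFound());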
nullptr; } - std::unique_ptr status; - switch (jcode_value) { - case 0x0: - //Ok - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::OK())); - break; - case 0x1: - //NotFound - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::NotFound(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x2: - //Corruption - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::Corruption(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x3: - //NotSupported - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::NotSupported(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x4: - //InvalidArgument - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::InvalidArgument(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x5: - //IOError - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::IOError(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x6: - //MergeInProgress - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::MergeInProgress(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x7: - //Incomplete - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::Incomplete(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x8: - //ShutdownInProgress - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::ShutdownInProgress(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x9: - //TimedOut - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::TimedOut(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0xA: - //Aborted - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::Aborted(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0xB: - //Busy - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::Busy(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0xC: - //Expired - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::Expired(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0xD: - //TryAgain - status = std::unique_ptr(new rocksdb::Status(rocksdb::Status::TryAgain(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value)))); - break; - case 0x7F: - default: - return nullptr; - } + std::unique_ptr status = + toCppStatus(jcode_value, jsub_code_value); // delete all local refs if (jstate != nullptr) { @@ -680,7 +738,6 @@ class RocksDBExceptionJni : * @return true if an exception was thrown, false otherwise */ static bool ThrowNew(JNIEnv* env, const Status& s) { - assert(!s.ok()); if (s.ok()) { return false; } @@ -895,12 +952,11 @@ class RocksDBExceptionJni : } }; -// The portal class for java.lang.IllegalArgumentException -class IllegalArgumentExceptionJni : - public JavaException { +// The portal class for java.util.List +class ListJni : public JavaClass { public: /** - * Get the Java Class java.lang.IllegalArgumentException + * Get the Java Class java.util.List * * @param env A pointer to the Java environment * @@ -908,45 +964,25 @@ class IllegalArgumentExceptionJni : * ClassFormatError, ClassCircularityError, NoClassDefFoundError, * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jclass getJClass(JNIEnv* env) { - return JavaException::getJClass(env, "java/lang/IllegalArgumentException"); + static jclass getListClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/util/List"); } /** - * Create and throw a Java 
IllegalArgumentException with the provided status
-   *
-   * If s.ok() == true, then this function will not throw any exception.
+   * Get the Java Class java.util.ArrayList
    *
    * @param env A pointer to the Java environment
-   * @param s The status for the exception
    *
-   * @return true if an exception was thrown, false otherwise
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
    */
-  static bool ThrowNew(JNIEnv* env, const Status& s) {
-    assert(!s.ok());
-    if (s.ok()) {
-      return false;
-    }
-
-    // get the IllegalArgumentException class
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      std::cerr << "IllegalArgumentExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    return JavaException::ThrowNew(env, s.ToString());
+  static jclass getArrayListClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "java/util/ArrayList");
   }
-};
-
-// The portal class for org.rocksdb.Options
-class OptionsJni : public RocksDBNativeClass<
-    rocksdb::Options*, OptionsJni> {
- public:
   /**
-   * Get the Java Class org.rocksdb.Options
+   * Get the Java Class java.util.Iterator
    *
    * @param env A pointer to the Java environment
    *
@@ -954,87 +990,119 @@ class OptionsJni : public RocksDBNativeClass<
    *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
    *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
    */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Options");
+  static jclass getIteratorClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "java/util/Iterator");
   }
-};

-// The portal class for org.rocksdb.DBOptions
-class DBOptionsJni : public RocksDBNativeClass<
-    rocksdb::DBOptions*, DBOptionsJni> {
- public:
   /**
-   * Get the Java Class org.rocksdb.DBOptions
+   * Get the Java Method: List#iterator
    *
    * @param env A pointer to the Java environment
    *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
    */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/DBOptions");
+  static jmethodID getIteratorMethod(JNIEnv* env) {
+    jclass jlist_clazz = getListClass(env);
+    if(jlist_clazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid =
+        env->GetMethodID(jlist_clazz, "iterator", "()Ljava/util/Iterator;");
+    assert(mid != nullptr);
+    return mid;
   }
-};

-// The portal class for org.rocksdb.ColumnFamilyOptions
-class ColumnFamilyOptionsJni
-    : public RocksDBNativeClass<rocksdb::ColumnFamilyOptions*,
-                                ColumnFamilyOptionsJni> {
- public:
   /**
-   * Get the Java Class org.rocksdb.ColumnFamilyOptions
+   * Get the Java Method: Iterator#hasNext
    *
    * @param env A pointer to the Java environment
    *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
    */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env,
-        "org/rocksdb/ColumnFamilyOptions");
+  static jmethodID getHasNextMethod(JNIEnv* env) {
+    jclass jiterator_clazz = getIteratorClass(env);
+    if(jiterator_clazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(jiterator_clazz, "hasNext", "()Z");
+    assert(mid != nullptr);
+    return mid;
   }

   /**
-   * Create a new Java org.rocksdb.ColumnFamilyOptions object with the same
-   * properties as the provided C++ rocksdb::ColumnFamilyOptions object
+   * Get the Java Method: Iterator#next
    *
    * @param env A pointer to the Java environment
-   * @param cfoptions A pointer to rocksdb::ColumnFamilyOptions object
    *
-   * @return A reference to a Java org.rocksdb.ColumnFamilyOptions object, or
-   *     nullptr if an an exception occurs
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
    */
-  static jobject construct(JNIEnv* env, const ColumnFamilyOptions* cfoptions) {
-    auto* cfo = new rocksdb::ColumnFamilyOptions(*cfoptions);
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
+  static jmethodID getNextMethod(JNIEnv* env) {
+    jclass jiterator_clazz = getIteratorClass(env);
+    if(jiterator_clazz == nullptr) {
      // exception occurred accessing class
      return nullptr;
    }
-    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
-    if (mid == nullptr) {
-      // exception thrown: NoSuchMethodException or OutOfMemoryError
+    static jmethodID mid =
+        env->GetMethodID(jiterator_clazz, "next", "()Ljava/lang/Object;");
+    assert(mid != nullptr);
+    return mid;
+  }
+
+  /**
+   * Get the Java Method: ArrayList constructor
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getArrayListConstructorMethodId(JNIEnv* env) {
+    jclass jarray_list_clazz = getArrayListClass(env);
+    if(jarray_list_clazz == nullptr) {
+      // exception occurred accessing class
      return nullptr;
    }
+    static jmethodID mid =
+        env->GetMethodID(jarray_list_clazz, "<init>", "(I)V");
+    assert(mid != nullptr);
+    return mid;
+  }

-    jobject jcfd = env->NewObject(jclazz, mid, reinterpret_cast<jlong>(cfo));
-    if (env->ExceptionCheck()) {
+  /**
+   * Get the Java Method: List#add
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getListAddMethodId(JNIEnv* env) {
+    jclass jlist_clazz = getListClass(env);
+    if(jlist_clazz == nullptr) {
+      // exception occurred accessing class
      return nullptr;
    }
-    return jcfd;
+    static jmethodID mid =
+        env->GetMethodID(jlist_clazz, "add", "(Ljava/lang/Object;)Z");
+    assert(mid != nullptr);
+    return mid;
   }
 };

-// The portal class for org.rocksdb.WriteOptions
-class WriteOptionsJni : public RocksDBNativeClass<
-    rocksdb::WriteOptions*, WriteOptionsJni> {
+// The portal class for java.lang.Byte
+class ByteJni : public JavaClass {
 public:
   /**
-   * Get the Java Class org.rocksdb.WriteOptions
+   * Get the Java Class java.lang.Byte
    *
    * @param env A pointer to the Java environment
    *
@@ -1043,16 +1111,11 @@ class WriteOptionsJni : public RocksDBNativeClass<
    *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
    */
   static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteOptions");
+    return JavaClass::getJClass(env, "java/lang/Byte");
   }
-};

-// The portal class for org.rocksdb.ReadOptions
-class ReadOptionsJni : public RocksDBNativeClass<
-    rocksdb::ReadOptions*, ReadOptionsJni> {
- public:
   /**
-   * Get the Java Class org.rocksdb.ReadOptions
+   * Get the Java
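// For context: together the ListJni helpers above are enough to build and
// fill a java.util.ArrayList from native code. A minimal sketch; env and
// jelem are hypothetical placeholders, and null/exception checks are
// elided:
//
//   jobject jlist = env->NewObject(
//       rocksdb::ListJni::getArrayListClass(env),
//       rocksdb::ListJni::getArrayListConstructorMethodId(env),
//       static_cast<jint>(16));  // initial capacity
//   env->CallBooleanMethod(
//       jlist, rocksdb::ListJni::getListAddMethodId(env), jelem);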
Class byte[] * * @param env A pointer to the Java environment * @@ -1060,66 +1123,87 @@ class ReadOptionsJni : public RocksDBNativeClass< * ClassFormatError, ClassCircularityError, NoClassDefFoundError, * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/ReadOptions"); + static jclass getArrayJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "[B"); } -}; -// The portal class for org.rocksdb.WriteBatch -class WriteBatchJni : public RocksDBNativeClass< - rocksdb::WriteBatch*, WriteBatchJni> { - public: /** - * Get the Java Class org.rocksdb.WriteBatch + * Creates a new 2-dimensional Java Byte Array byte[][] * * @param env A pointer to the Java environment + * @param len The size of the first dimension * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return A reference to the Java byte[][] or nullptr if an exception occurs */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteBatch"); + static jobjectArray new2dByteArray(JNIEnv* env, const jsize len) { + jclass clazz = getArrayJClass(env); + if(clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + return env->NewObjectArray(len, clazz, nullptr); } /** - * Create a new Java org.rocksdb.WriteBatch object + * Get the Java Method: Byte#byteValue * * @param env A pointer to the Java environment - * @param wb A pointer to rocksdb::WriteBatch object * - * @return A reference to a Java org.rocksdb.WriteBatch object, or - * nullptr if an an exception occurs + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved */ - static jobject construct(JNIEnv* env, const WriteBatch* wb) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { + static jmethodID getByteValueMethod(JNIEnv* env) { + jclass clazz = getJClass(env); + if(clazz == nullptr) { // exception occurred accessing class return nullptr; } - jmethodID mid = env->GetMethodID(jclazz, "", "(J)V"); + static jmethodID mid = env->GetMethodID(clazz, "byteValue", "()B"); + assert(mid != nullptr); + return mid; + } + + /** + * Calls the Java Method: Byte#valueOf, returning a constructed Byte jobject + * + * @param env A pointer to the Java environment + * + * @return A constructing Byte object or nullptr if the class or method id could not + * be retrieved, or an exception occurred + */ + static jobject valueOf(JNIEnv* env, jbyte jprimitive_byte) { + jclass clazz = getJClass(env); + if (clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetStaticMethodID(clazz, "valueOf", "(B)Ljava/lang/Byte;"); if (mid == nullptr) { // exception thrown: NoSuchMethodException or OutOfMemoryError return nullptr; } - jobject jwb = env->NewObject(jclazz, mid, reinterpret_cast(wb)); + const jobject jbyte_obj = + env->CallStaticObjectMethod(clazz, mid, jprimitive_byte); if (env->ExceptionCheck()) { + // exception occurred return nullptr; } - return jwb; + return jbyte_obj; } + }; -// The portal class for org.rocksdb.WriteBatch.Handler -class WriteBatchHandlerJni : public RocksDBNativeClass< - const rocksdb::WriteBatchHandlerJniCallback*, - WriteBatchHandlerJni> { +// The portal class for java.lang.Integer +class IntegerJni : public JavaClass { public: /** - * Get 
the Java Class org.rocksdb.WriteBatch.Handler + * Get the Java Class java.lang.Integer * * @param env A pointer to the Java environment * @@ -1128,446 +1212,1375 @@ class WriteBatchHandlerJni : public RocksDBNativeClass< * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, - "org/rocksdb/WriteBatch$Handler"); + return JavaClass::getJClass(env, "java/lang/Integer"); } - /** - * Get the Java Method: WriteBatch.Handler#put - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getPutCfMethodId(JNIEnv* env) { + static jobject valueOf(JNIEnv* env, jint jprimitive_int) { jclass jclazz = getJClass(env); - if(jclazz == nullptr) { + if (jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "put", "(I[B[B)V"); - assert(mid != nullptr); - return mid; - } + jmethodID mid = + env->GetStaticMethodID(jclazz, "valueOf", "(I)Ljava/lang/Integer;"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } - /** - * Get the Java Method: WriteBatch.Handler#put - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getPutMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class + const jobject jinteger_obj = + env->CallStaticObjectMethod(jclazz, mid, jprimitive_int); + if (env->ExceptionCheck()) { + // exception occurred return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "put", "([B[B)V"); - assert(mid != nullptr); - return mid; + return jinteger_obj; } +}; +// The portal class for java.lang.Long +class LongJni : public JavaClass { + public: /** - * Get the Java Method: WriteBatch.Handler#merge + * Get the Java Class java.lang.Long * * @param env A pointer to the Java environment * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jmethodID getMergeCfMethodId(JNIEnv* env) { + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/lang/Long"); + } + + static jobject valueOf(JNIEnv* env, jlong jprimitive_long) { jclass jclazz = getJClass(env); - if(jclazz == nullptr) { + if (jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "merge", "(I[B[B)V"); - assert(mid != nullptr); - return mid; - } + jmethodID mid = + env->GetStaticMethodID(jclazz, "valueOf", "(J)Ljava/lang/Long;"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } - /** - * Get the Java Method: WriteBatch.Handler#merge - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getMergeMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class + const jobject jlong_obj = + env->CallStaticObjectMethod(jclazz, mid, jprimitive_long); + 
if (env->ExceptionCheck()) { + // exception occurred return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "merge", "([B[B)V"); - assert(mid != nullptr); - return mid; + return jlong_obj; } +}; +// The portal class for java.lang.StringBuilder +class StringBuilderJni : public JavaClass { + public: /** - * Get the Java Method: WriteBatch.Handler#delete + * Get the Java Class java.lang.StringBuilder * * @param env A pointer to the Java environment * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jmethodID getDeleteCfMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; - } - - static jmethodID mid = env->GetMethodID(jclazz, "delete", "(I[B)V"); - assert(mid != nullptr); - return mid; + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/lang/StringBuilder"); } /** - * Get the Java Method: WriteBatch.Handler#delete + * Get the Java Method: StringBuilder#append * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getDeleteMethodId(JNIEnv* env) { + static jmethodID getListAddMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "delete", "([B)V"); + static jmethodID mid = + env->GetMethodID(jclazz, "append", + "(Ljava/lang/String;)Ljava/lang/StringBuilder;"); assert(mid != nullptr); return mid; } /** - * Get the Java Method: WriteBatch.Handler#singleDelete - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getSingleDeleteCfMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; - } - - static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "(I[B)V"); - assert(mid != nullptr); - return mid; - } - - /** - * Get the Java Method: WriteBatch.Handler#singleDelete + * Appends a C-style string to a StringBuilder * * @param env A pointer to the Java environment + * @param jstring_builder Reference to a java.lang.StringBuilder + * @param c_str A C-style string to append to the StringBuilder * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved + * @return A reference to the updated StringBuilder, or a nullptr if + * an exception occurs */ - static jmethodID getSingleDeleteMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class + static jobject append(JNIEnv* env, jobject jstring_builder, + const char* c_str) { + jmethodID mid = getListAddMethodId(env); + if(mid == nullptr) { + // exception occurred accessing class or method return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "([B)V"); - assert(mid != nullptr); - return mid; - } - - /** - * Get the Java Method: WriteBatch.Handler#deleteRange - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - 
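// For context: ByteJni, IntegerJni and LongJni all follow the same boxing
// pattern: look up the static valueOf(primitive) factory, call it, and
// return nullptr when a Java exception is pending. A minimal sketch of the
// Long variant:
//
//   jobject boxed = rocksdb::LongJni::valueOf(env, static_cast<jlong>(42));
//   if (boxed == nullptr) {
//     return;  // e.g. OutOfMemoryError is already pending on env
//   }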
*/ - static jmethodID getDeleteRangeCfMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if (jclazz == nullptr) { - // exception occurred accessing class + jstring new_value_str = env->NewStringUTF(c_str); + if(new_value_str == nullptr) { + // exception thrown: OutOfMemoryError return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "(I[B[B)V"); - assert(mid != nullptr); - return mid; - } - - /** - * Get the Java Method: WriteBatch.Handler#deleteRange - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getDeleteRangeMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if (jclazz == nullptr) { - // exception occurred accessing class + jobject jresult_string_builder = + env->CallObjectMethod(jstring_builder, mid, new_value_str); + if(env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(new_value_str); return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "([B[B)V"); - assert(mid != nullptr); - return mid; + return jresult_string_builder; } +}; - /** - * Get the Java Method: WriteBatch.Handler#logData - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getLogDataMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; +// various utility functions for working with RocksDB and JNI +class JniUtil { + public: + /** + * Detect if jlong overflows size_t + * + * @param jvalue the jlong value + * + * @return + */ + inline static Status check_if_jlong_fits_size_t(const jlong& jvalue) { + Status s = Status::OK(); + if (static_cast(jvalue) > std::numeric_limits::max()) { + s = Status::InvalidArgument(Slice("jlong overflows 32 bit value.")); + } + return s; } - static jmethodID mid = env->GetMethodID(jclazz, "logData", "([B)V"); - assert(mid != nullptr); - return mid; - } - - /** - * Get the Java Method: WriteBatch.Handler#putBlobIndex - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getPutBlobIndexCfMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; - } + /** + * Obtains a reference to the JNIEnv from + * the JVM + * + * If the current thread is not attached to the JavaVM + * then it will be attached so as to retrieve the JNIEnv + * + * If a thread is attached, it must later be manually + * released by calling JavaVM::DetachCurrentThread. 
+   * This can be handled by always matching calls to this
+   * function with calls to {@link JniUtil::releaseJniEnv(JavaVM*, jboolean)}
+   *
+   * @param jvm (IN) A pointer to the JavaVM instance
+   * @param attached (OUT) A pointer to a boolean which
+   *     will be set to JNI_TRUE if we had to attach the thread
+   *
+   * @return A pointer to the JNIEnv or nullptr if a fatal error
+   *     occurs and the JNIEnv cannot be retrieved
+   */
+  static JNIEnv* getJniEnv(JavaVM* jvm, jboolean* attached) {
+    assert(jvm != nullptr);

-    static jmethodID mid = env->GetMethodID(jclazz, "putBlobIndex", "(I[B[B)V");
-    assert(mid != nullptr);
-    return mid;
-  }
+    JNIEnv *env;
+    const jint env_rs = jvm->GetEnv(reinterpret_cast<void**>(&env),
+        JNI_VERSION_1_2);

-  /**
-   * Get the Java Method: WriteBatch.Handler#markBeginPrepare
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getMarkBeginPrepareMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
+    if(env_rs == JNI_OK) {
+      // current thread is already attached, return the JNIEnv
+      *attached = JNI_FALSE;
+      return env;
+    } else if(env_rs == JNI_EDETACHED) {
+      // current thread is not attached, attempt to attach
+      const jint rs_attach = jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
+      if(rs_attach == JNI_OK) {
+        *attached = JNI_TRUE;
+        return env;
+      } else {
+        // error, could not attach the thread
+        std::cerr << "JniUtil::getJniEnv - Fatal: could not attach current thread to JVM!" << std::endl;
+        return nullptr;
+      }
+    } else if(env_rs == JNI_EVERSION) {
+      // error, JDK does not support JNI_VERSION_1_2+
+      std::cerr << "JniUtil::getJniEnv - Fatal: JDK does not support JNI_VERSION_1_2" << std::endl;
+      return nullptr;
+    } else {
+      std::cerr << "JniUtil::getJniEnv - Fatal: Unknown error: env_rs=" << env_rs << std::endl;
+      return nullptr;
+    }
   }

-    static jmethodID mid = env->GetMethodID(jclazz, "markBeginPrepare", "()V");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: WriteBatch.Handler#markEndPrepare
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getMarkEndPrepareMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
+  /**
+   * Counterpart to {@link JniUtil::getJniEnv(JavaVM*, jboolean*)}
+   *
+   * Detaches the current thread from the JVM if it was previously
+   * attached
+   *
+   * @param jvm (IN) A pointer to the JavaVM instance
+   * @param attached (IN) JNI_TRUE if we previously had to attach the thread
+   *     to the JavaVM to get the JNIEnv
+   */
+  static void releaseJniEnv(JavaVM* jvm, jboolean& attached) {
+    assert(jvm != nullptr);
+    if(attached == JNI_TRUE) {
+      const jint rs_detach = jvm->DetachCurrentThread();
+      assert(rs_detach == JNI_OK);
+      if(rs_detach != JNI_OK) {
+        std::cerr << "JniUtil::getJniEnv - Warn: Unable to detach current thread from JVM!"
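// For context: getJniEnv()/releaseJniEnv() above are designed as a matched
// pair for native-to-Java callbacks. A minimal sketch of the call pattern,
// with m_jvm as a hypothetical member holding the JavaVM*:
//
//   jboolean attached_thread = JNI_FALSE;
//   JNIEnv* env = rocksdb::JniUtil::getJniEnv(m_jvm, &attached_thread);
//   if (env != nullptr) {
//     // ... call back into Java through env ...
//     rocksdb::JniUtil::releaseJniEnv(m_jvm, attached_thread);
//   }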
<< std::endl; + } + } } - static jmethodID mid = env->GetMethodID(jclazz, "markEndPrepare", "([B)V"); - assert(mid != nullptr); - return mid; - } - - /** - * Get the Java Method: WriteBatch.Handler#markNoop - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getMarkNoopMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; + /** + * Copies a Java String[] to a C++ std::vector + * + * @param env (IN) A pointer to the java environment + * @param jss (IN) The Java String array to copy + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError or ArrayIndexOutOfBoundsException + * exception occurs + * + * @return A std::vector containing copies of the Java strings + */ + static std::vector copyStrings(JNIEnv* env, + jobjectArray jss, jboolean* has_exception) { + return rocksdb::JniUtil::copyStrings(env, jss, + env->GetArrayLength(jss), has_exception); } - static jmethodID mid = env->GetMethodID(jclazz, "markNoop", "(Z)V"); - assert(mid != nullptr); - return mid; - } - - /** - * Get the Java Method: WriteBatch.Handler#markRollback - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getMarkRollbackMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; - } + /** + * Copies a Java String[] to a C++ std::vector + * + * @param env (IN) A pointer to the java environment + * @param jss (IN) The Java String array to copy + * @param jss_len (IN) The length of the Java String array to copy + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError or ArrayIndexOutOfBoundsException + * exception occurs + * + * @return A std::vector containing copies of the Java strings + */ + static std::vector copyStrings(JNIEnv* env, + jobjectArray jss, const jsize jss_len, jboolean* has_exception) { + std::vector strs; + strs.reserve(jss_len); + for (jsize i = 0; i < jss_len; i++) { + jobject js = env->GetObjectArrayElement(jss, i); + if(env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + *has_exception = JNI_TRUE; + return strs; + } - static jmethodID mid = env->GetMethodID(jclazz, "markRollback", "([B)V"); - assert(mid != nullptr); - return mid; - } + jstring jstr = static_cast(js); + const char* str = env->GetStringUTFChars(jstr, nullptr); + if(str == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(js); + *has_exception = JNI_TRUE; + return strs; + } - /** - * Get the Java Method: WriteBatch.Handler#markCommit - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getMarkCommitMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; - } + strs.push_back(std::string(str)); - static jmethodID mid = env->GetMethodID(jclazz, "markCommit", "([B)V"); - assert(mid != nullptr); - return mid; - } + env->ReleaseStringUTFChars(jstr, str); + env->DeleteLocalRef(js); + } - /** - * Get the Java Method: WriteBatch.Handler#shouldContinue - * - * @param env A pointer to the Java environment - * - * @return The Java Method 
ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getContinueMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; + *has_exception = JNI_FALSE; + return strs; } - static jmethodID mid = env->GetMethodID(jclazz, "shouldContinue", "()Z"); - assert(mid != nullptr); - return mid; - } -}; - -class WriteBatchSavePointJni : public JavaClass { - public: - /** - * Get the Java Class org.rocksdb.WriteBatch.SavePoint - * - * @param env A pointer to the Java environment - * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown - */ - static jclass getJClass(JNIEnv* env) { - return JavaClass::getJClass(env, "org/rocksdb/WriteBatch$SavePoint"); - } + /** + * Copies a jstring to a C-style null-terminated byte string + * and releases the original jstring + * + * The jstring is copied as UTF-8 + * + * If an exception occurs, then JNIEnv::ExceptionCheck() + * will have been called + * + * @param env (IN) A pointer to the java environment + * @param js (IN) The java string to copy + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError exception occurs + * + * @return A pointer to the copied string, or a + * nullptr if has_exception == JNI_TRUE + */ + static std::unique_ptr copyString(JNIEnv* env, jstring js, + jboolean* has_exception) { + const char *utf = env->GetStringUTFChars(js, nullptr); + if(utf == nullptr) { + // exception thrown: OutOfMemoryError + env->ExceptionCheck(); + *has_exception = JNI_TRUE; + return nullptr; + } else if(env->ExceptionCheck()) { + // exception thrown + env->ReleaseStringUTFChars(js, utf); + *has_exception = JNI_TRUE; + return nullptr; + } - /** - * Get the Java Method: HistogramData constructor - * - * @param env A pointer to the Java environment - * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved - */ - static jmethodID getConstructorMethodId(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; + const jsize utf_len = env->GetStringUTFLength(js); + std::unique_ptr str(new char[utf_len + 1]); // Note: + 1 is needed for the c_str null terminator + std::strcpy(str.get(), utf); + env->ReleaseStringUTFChars(js, utf); + *has_exception = JNI_FALSE; + return str; } - static jmethodID mid = env->GetMethodID(jclazz, "", "(JJJ)V"); - assert(mid != nullptr); - return mid; - } + /** + * Copies a jstring to a std::string + * and releases the original jstring + * + * If an exception occurs, then JNIEnv::ExceptionCheck() + * will have been called + * + * @param env (IN) A pointer to the java environment + * @param js (IN) The java string to copy + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError exception occurs + * + * @return A std:string copy of the jstring, or an + * empty std::string if has_exception == JNI_TRUE + */ + static std::string copyStdString(JNIEnv* env, jstring js, + jboolean* has_exception) { + const char *utf = env->GetStringUTFChars(js, nullptr); + if(utf == nullptr) { + // exception thrown: OutOfMemoryError + env->ExceptionCheck(); + *has_exception = JNI_TRUE; + return std::string(); + } else if(env->ExceptionCheck()) { + // exception thrown + env->ReleaseStringUTFChars(js, utf); + *has_exception = JNI_TRUE; + return 
std::string();
+  }

-  /**
-   * Create a new Java org.rocksdb.WriteBatch.SavePoint object
-   *
-   * @param env A pointer to the Java environment
-   * @param savePoint A pointer to rocksdb::WriteBatch::SavePoint object
-   *
-   * @return A reference to a Java org.rocksdb.WriteBatch.SavePoint object, or
-   * nullptr if an exception occurs
-   */
-  static jobject construct(JNIEnv* env, const SavePoint &save_point) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
+    std::string name(utf);
+    env->ReleaseStringUTFChars(js, utf);
+    *has_exception = JNI_FALSE;
+    return name;
   }

-    jmethodID mid = getConstructorMethodId(env);
-    if (mid == nullptr) {
-      // exception thrown: NoSuchMethodException or OutOfMemoryError
-      return nullptr;
+  /**
+   * Copies bytes from a std::string to a jbyteArray
+   *
+   * @param env A pointer to the java environment
+   * @param bytes The bytes to copy
+   *
+   * @return the Java byte[], or nullptr if an exception occurs
+   *
+   * @throws RocksDBException thrown
+   *     if the memory size to copy exceeds the general Java array size limitation.
+   */
+  static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
+    return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
   }

-    jobject jsave_point = env->NewObject(jclazz, mid,
-        static_cast<jlong>(save_point.size),
-        static_cast<jlong>(save_point.count),
-        static_cast<jlong>(save_point.content_flags));
-    if (env->ExceptionCheck()) {
-      return nullptr;
-    }
+  /**
+   * Given a Java byte[][] which is an array of java.lang.Strings
+   * where each String is a byte[], the passed function `string_fn`
+   * will be called on each String; the result is then collected by
+   * calling the passed function `collector_fn`
+   *
+   * @param env (IN) A pointer to the java environment
+   * @param jbyte_strings (IN) A Java array of Strings expressed as bytes
+   * @param string_fn (IN) A transform function to call for each String
+   * @param collector_fn (IN) A collector which is called for the result
+   *     of each `string_fn`
+   * @param has_exception (OUT) will be set to JNI_TRUE
+   *     if an ArrayIndexOutOfBoundsException or OutOfMemoryError
+   *     exception occurs
+   */
+  template <typename T> static void byteStrings(JNIEnv* env,
+      jobjectArray jbyte_strings,
+      std::function<T(const char*, const size_t)> string_fn,
+      std::function<void(size_t, T)> collector_fn,
+      jboolean *has_exception) {
+    const jsize jlen = env->GetArrayLength(jbyte_strings);

-    return jsave_point;
-  }
-};
+    for(jsize i = 0; i < jlen; i++) {
+      jobject jbyte_string_obj = env->GetObjectArrayElement(jbyte_strings, i);
+      if(env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        *has_exception = JNI_TRUE;  // signal error
+        return;
+      }

-// The portal class for org.rocksdb.WriteBatchWithIndex
-class WriteBatchWithIndexJni : public RocksDBNativeClass<
-    rocksdb::WriteBatchWithIndex*, WriteBatchWithIndexJni> {
+      jbyteArray jbyte_string_ary =
+          reinterpret_cast<jbyteArray>(jbyte_string_obj);
+      T result = byteString(env, jbyte_string_ary, string_fn, has_exception);
+
+      env->DeleteLocalRef(jbyte_string_obj);
+
+      if(*has_exception == JNI_TRUE) {
+        // exception thrown: OutOfMemoryError
+        return;
+      }
+
+      collector_fn(i, result);
+    }
+
+    *has_exception = JNI_FALSE;
+  }
+
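To make the intended use of `byteStrings` concrete, here is a minimal sketch under stated assumptions: `collect_byte_strings_sketch` is a hypothetical helper, not part of this patch; only `rocksdb::JniUtil::byteStrings` above is real.

#include <jni.h>
#include <string>
#include <vector>
#include "rocksjni/portal.h"

// Collect the contents of a Java byte[][] into C++ strings.
std::vector<std::string> collect_byte_strings_sketch(
    JNIEnv* env, jobjectArray jbyte_strings) {
  std::vector<std::string> out(
      static_cast<size_t>(env->GetArrayLength(jbyte_strings)));
  jboolean has_exception = JNI_FALSE;
  rocksdb::JniUtil::byteStrings<std::string>(env, jbyte_strings,
      // string_fn: materialise each byte-string as a std::string
      [](const char* str, const size_t str_len) {
        return std::string(str, str_len);
      },
      // collector_fn: store each result at its index
      [&out](size_t i, std::string s) { out[i] = std::move(s); },
      &has_exception);
  if (has_exception == JNI_TRUE) {
    out.clear();  // a Java exception is pending; unwind with nothing
  }
  return out;
}
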
+  /**
+   * Given a Java String which is expressed as a Java Byte Array byte[],
+   * the passed function `string_fn` will be called on the String
+   * and the result returned
+   *
+   * @param env (IN) A pointer to the java environment
+   * @param jbyte_string_ary (IN) A Java String expressed in bytes
+   * @param string_fn (IN) A transform function to call on the String
+   * @param has_exception (OUT) will be set to JNI_TRUE
+   *     if an OutOfMemoryError exception occurs
+   */
+  template <typename T> static T byteString(JNIEnv* env,
+      jbyteArray jbyte_string_ary,
+      std::function<T(const char*, const size_t)> string_fn,
+      jboolean* has_exception) {
+    const jsize jbyte_string_len = env->GetArrayLength(jbyte_string_ary);
+    return byteString(env, jbyte_string_ary, jbyte_string_len, string_fn,
+        has_exception);
+  }
+
+  /**
+   * Given a Java String which is expressed as a Java Byte Array byte[],
+   * the passed function `string_fn` will be called on the String
+   * and the result returned
+   *
+   * @param env (IN) A pointer to the java environment
+   * @param jbyte_string_ary (IN) A Java String expressed in bytes
+   * @param jbyte_string_len (IN) The length of the Java String
+   *     expressed in bytes
+   * @param string_fn (IN) A transform function to call on the String
+   * @param has_exception (OUT) will be set to JNI_TRUE
+   *     if an OutOfMemoryError exception occurs
+   */
+  template <typename T> static T byteString(JNIEnv* env,
+      jbyteArray jbyte_string_ary, const jsize jbyte_string_len,
+      std::function<T(const char*, const size_t)> string_fn,
+      jboolean* has_exception) {
+    jbyte* jbyte_string =
+        env->GetByteArrayElements(jbyte_string_ary, nullptr);
+    if(jbyte_string == nullptr) {
+      // exception thrown: OutOfMemoryError
+      *has_exception = JNI_TRUE;
+      return nullptr;  // signal error
+    }
+
+    T result =
+        string_fn(reinterpret_cast<char*>(jbyte_string), jbyte_string_len);
+
+    env->ReleaseByteArrayElements(jbyte_string_ary, jbyte_string, JNI_ABORT);
+
+    *has_exception = JNI_FALSE;
+    return result;
+  }
+
+  /**
+   * Converts a std::vector<std::string> to a Java byte[][] where each
+   * Java String is expressed as a Java Byte Array byte[].
+   *
+   * @param env A pointer to the java environment
+   * @param strings A vector of Strings
+   *
+   * @return A Java array of Strings expressed as bytes,
+   *     or nullptr if an exception is thrown
+   */
+  static jobjectArray stringsBytes(JNIEnv* env, std::vector<std::string> strings) {
+    jclass jcls_ba = ByteJni::getArrayJClass(env);
+    if(jcls_ba == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const jsize len = static_cast<jsize>(strings.size());
+
+    jobjectArray jbyte_strings = env->NewObjectArray(len, jcls_ba, nullptr);
+    if(jbyte_strings == nullptr) {
+      // exception thrown: OutOfMemoryError
+      return nullptr;
+    }
+
+    for (jsize i = 0; i < len; i++) {
+      std::string *str = &strings[i];
+      const jsize str_len = static_cast<jsize>(str->size());
+
+      jbyteArray jbyte_string_ary = env->NewByteArray(str_len);
+      if(jbyte_string_ary == nullptr) {
+        // exception thrown: OutOfMemoryError
+        env->DeleteLocalRef(jbyte_strings);
+        return nullptr;
+      }
+
+      env->SetByteArrayRegion(
+          jbyte_string_ary, 0, str_len,
+          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(str->c_str())));
+      if(env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        env->DeleteLocalRef(jbyte_string_ary);
+        env->DeleteLocalRef(jbyte_strings);
+        return nullptr;
+      }
+
+      env->SetObjectArrayElement(jbyte_strings, i, jbyte_string_ary);
+      if(env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        // or ArrayStoreException
+        env->DeleteLocalRef(jbyte_string_ary);
+        env->DeleteLocalRef(jbyte_strings);
+        return nullptr;
+      }
+
+      env->DeleteLocalRef(jbyte_string_ary);
+    }
+
+    return jbyte_strings;
+  }
+
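The two directions compose; here is a sketch of a hypothetical JNI entry point (`Java_org_rocksdb_Example_roundTrip` and its owning class are invented for illustration) that copies a Java String[] into C++ and hands it back as byte[][] via `copyStrings` and `stringsBytes`:

#include <jni.h>
#include "rocksjni/portal.h"

JNIEXPORT jobjectArray JNICALL Java_org_rocksdb_Example_roundTrip(
    JNIEnv* env, jclass /*jcls*/, jobjectArray jstrs) {
  jboolean has_exception = JNI_FALSE;
  std::vector<std::string> strs =
      rocksdb::JniUtil::copyStrings(env, jstrs, &has_exception);
  if (has_exception == JNI_TRUE) {
    return nullptr;  // a Java exception is already pending
  }
  return rocksdb::JniUtil::stringsBytes(env, strs);
}
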
+  /**
+   * Converts a std::vector<std::string> to a Java String[].
+   *
+   * @param env A pointer to the java environment
+   * @param strings A vector of Strings
+   *
+   * @return A Java array of Strings,
+   *     or nullptr if an exception is thrown
+   */
+  static jobjectArray toJavaStrings(JNIEnv* env,
+      const std::vector<std::string>* strings) {
+    jclass jcls_str = env->FindClass("java/lang/String");
+    if(jcls_str == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const jsize len = static_cast<jsize>(strings->size());
+
+    jobjectArray jstrings = env->NewObjectArray(len, jcls_str, nullptr);
+    if(jstrings == nullptr) {
+      // exception thrown: OutOfMemoryError
+      return nullptr;
+    }
+
+    for (jsize i = 0; i < len; i++) {
+      const std::string *str = &((*strings)[i]);
+      jstring js = rocksdb::JniUtil::toJavaString(env, str);
+      if (js == nullptr) {
+        env->DeleteLocalRef(jstrings);
+        return nullptr;
+      }
+
+      env->SetObjectArrayElement(jstrings, i, js);
+      if(env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        // or ArrayStoreException
+        env->DeleteLocalRef(js);
+        env->DeleteLocalRef(jstrings);
+        return nullptr;
+      }
+    }
+
+    return jstrings;
+  }
+
+  /**
+   * Creates a Java UTF String from a C++ std::string
+   *
+   * @param env A pointer to the java environment
+   * @param string the C++ std::string
+   * @param treat_empty_as_null true if empty strings should be treated as null
+   *
+   * @return the Java UTF string, or nullptr if the provided string
+   *     is null (or empty and treat_empty_as_null is set), or if an
+   *     exception occurs allocating the Java String.
+   */
+  static jstring toJavaString(JNIEnv* env, const std::string* string,
+      const bool treat_empty_as_null = false) {
+    if (string == nullptr) {
+      return nullptr;
+    }
+
+    if (treat_empty_as_null && string->empty()) {
+      return nullptr;
+    }
+
+    return env->NewStringUTF(string->c_str());
+  }
+
+  /**
+   * Copies bytes to a new jbyteArray, with a check against the Java array
+   * size limitation.
+   *
+   * @param bytes pointer to the memory to copy into a new jbyteArray
+   * @param size number of bytes to copy
+   *
+   * @return the Java byte[], or nullptr if an exception occurs
+   *
+   * @throws RocksDBException thrown
+   *     if the memory size to copy exceeds the general Java array size
+   *     limitation, to avoid overflow.
+ */ + static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env, const char* bytes, const size_t size) { + // Limitation for java array size is vm specific + // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1) + // Current HotSpot VM limitation for array size is Integer.MAX_VALUE - 5 (2^31 - 1 - 5) + // It means that the next call to env->NewByteArray can still end with + // OutOfMemoryError("Requested array size exceeds VM limit") coming from VM + static const size_t MAX_JARRAY_SIZE = (static_cast(1)) << 31; + if(size > MAX_JARRAY_SIZE) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, "Requested array size exceeds VM limit"); + return nullptr; + } + + const jsize jlen = static_cast(size); + jbyteArray jbytes = env->NewByteArray(jlen); + if(jbytes == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetByteArrayRegion(jbytes, 0, jlen, + const_cast(reinterpret_cast(bytes))); + if(env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jbytes); + return nullptr; + } + + return jbytes; + } + + /** + * Copies bytes from a rocksdb::Slice to a jByteArray + * + * @param env A pointer to the java environment + * @param bytes The bytes to copy + * + * @return the Java byte[] or nullptr if an exception occurs + * + * @throws RocksDBException thrown + * if memory size to copy exceeds general java specific array size limitation. + */ + static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) { + return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size()); + } + + /* + * Helper for operations on a key and value + * for example WriteBatch->Put + * + * TODO(AR) could be used for RocksDB->Put etc. + */ + static std::unique_ptr kv_op( + std::function op, + JNIEnv* env, jobject /*jobj*/, + jbyteArray jkey, jint jkey_len, + jbyteArray jvalue, jint jvalue_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if(env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + jbyte* value = env->GetByteArrayElements(jvalue, nullptr); + if(env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + if(key != nullptr) { + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + } + return nullptr; + } + + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_len); + rocksdb::Slice value_slice(reinterpret_cast(value), + jvalue_len); + + auto status = op(key_slice, value_slice); + + if(value != nullptr) { + env->ReleaseByteArrayElements(jvalue, value, JNI_ABORT); + } + if(key != nullptr) { + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + } + + return std::unique_ptr(new rocksdb::Status(status)); + } + + /* + * Helper for operations on a key + * for example WriteBatch->Delete + * + * TODO(AR) could be used for RocksDB->Delete etc. 
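The `kv_op` helper centralises the pin/release choreography for a key and a value; below is a condensed sketch of the call pattern (the free function shown is illustrative, not part of the patch):

#include "rocksjni/portal.h"
#include "rocksdb/write_batch.h"

// Bind a put(byte[], byte[]) style JNI call to WriteBatch::Put.
void write_batch_put_sketch(JNIEnv* env, jobject jobj, jlong jwb_handle,
    jbyteArray jkey, jint jkey_len, jbyteArray jval, jint jval_len) {
  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
  auto put = [wb](rocksdb::Slice key, rocksdb::Slice value) {
    return wb->Put(key, value);
  };
  // kv_op pins the Java byte arrays, runs `put`, then releases them.
  std::unique_ptr<rocksdb::Status> status = rocksdb::JniUtil::kv_op(
      put, env, jobj, jkey, jkey_len, jval, jval_len);
  if (status != nullptr && !status->ok()) {
    rocksdb::RocksDBExceptionJni::ThrowNew(env, *status);
  }
}
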
+ */ + static std::unique_ptr k_op( + std::function op, + JNIEnv* env, jobject /*jobj*/, + jbyteArray jkey, jint jkey_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if(env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_len); + + auto status = op(key_slice); + + if(key != nullptr) { + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + } + + return std::unique_ptr(new rocksdb::Status(status)); + } + + /* + * Helper for operations on a value + * for example WriteBatchWithIndex->GetFromBatch + */ + static jbyteArray v_op( + std::function op, + JNIEnv* env, jbyteArray jkey, jint jkey_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if(env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_len); + + std::string value; + rocksdb::Status s = op(key_slice, &value); + + if(key != nullptr) { + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + } + + if (s.IsNotFound()) { + return nullptr; + } + + if (s.ok()) { + jbyteArray jret_value = + env->NewByteArray(static_cast(value.size())); + if(jret_value == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetByteArrayRegion(jret_value, 0, static_cast(value.size()), + const_cast(reinterpret_cast(value.c_str()))); + if(env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + if(jret_value != nullptr) { + env->DeleteLocalRef(jret_value); + } + return nullptr; + } + + return jret_value; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + /** + * Creates a vector of C++ pointers from + * a Java array of C++ pointer addresses. + * + * @param env (IN) A pointer to the java environment + * @param pointers (IN) A Java array of C++ pointer addresses + * @param has_exception (OUT) will be set to JNI_TRUE + * if an ArrayIndexOutOfBoundsException or OutOfMemoryError + * exception occurs. + * + * @return A vector of C++ pointers. + */ + template static std::vector fromJPointers( + JNIEnv* env, jlongArray jptrs, jboolean *has_exception) { + const jsize jptrs_len = env->GetArrayLength(jptrs); + std::vector ptrs; + jlong* jptr = env->GetLongArrayElements(jptrs, nullptr); + if (jptr == nullptr) { + // exception thrown: OutOfMemoryError + *has_exception = JNI_TRUE; + return ptrs; + } + ptrs.reserve(jptrs_len); + for (jsize i = 0; i < jptrs_len; i++) { + ptrs.push_back(reinterpret_cast(jptr[i])); + } + env->ReleaseLongArrayElements(jptrs, jptr, JNI_ABORT); + return ptrs; + } + + /** + * Creates a Java array of C++ pointer addresses + * from a vector of C++ pointers. + * + * @param env (IN) A pointer to the java environment + * @param pointers (IN) A vector of C++ pointers + * @param has_exception (OUT) will be set to JNI_TRUE + * if an ArrayIndexOutOfBoundsException or OutOfMemoryError + * exception occurs + * + * @return Java array of C++ pointer addresses. 
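A sketch of `fromJPointers` in use, recovering typed pointers from the `nativeHandle_` longs that the Java objects carry (the helper function name is hypothetical):

#include "rocksjni/portal.h"

std::vector<rocksdb::ColumnFamilyHandle*> resolve_handles_sketch(
    JNIEnv* env, jlongArray jcf_handles) {
  jboolean has_exception = JNI_FALSE;
  auto handles = rocksdb::JniUtil::fromJPointers<rocksdb::ColumnFamilyHandle>(
      env, jcf_handles, &has_exception);
  if (has_exception == JNI_TRUE) {
    handles.clear();  // exception pending; caller should unwind
  }
  return handles;  // toJPointers performs the inverse conversion
}
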
+   */
+  template <typename T> static jlongArray toJPointers(JNIEnv* env,
+      const std::vector<T*> &pointers,
+      jboolean *has_exception) {
+    const jsize len = static_cast<jsize>(pointers.size());
+    std::unique_ptr<jlong[]> results(new jlong[len]);
+    std::transform(pointers.begin(), pointers.end(), results.get(), [](T* pointer) -> jlong {
+      return reinterpret_cast<jlong>(pointer);
+    });
+
+    jlongArray jpointers = env->NewLongArray(len);
+    if (jpointers == nullptr) {
+      // exception thrown: OutOfMemoryError
+      *has_exception = JNI_TRUE;
+      return nullptr;
+    }
+
+    env->SetLongArrayRegion(jpointers, 0, len, results.get());
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      *has_exception = JNI_TRUE;
+      env->DeleteLocalRef(jpointers);
+      return nullptr;
+    }
+
+    *has_exception = JNI_FALSE;
+
+    return jpointers;
+  }
+};
+
+class MapJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class java.util.Map
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "java/util/Map");
+  }
+
+  /**
+   * Get the Java Method: Map#put
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getMapPutMethodId(JNIEnv* env) {
+    jclass jlist_clazz = getJClass(env);
+    if(jlist_clazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid =
+        env->GetMethodID(jlist_clazz, "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
+    assert(mid != nullptr);
+    return mid;
+  }
+};
+
+class HashMapJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class java.util.HashMap
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "java/util/HashMap");
+  }
+
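MapJni caches the Map#put method ID; a sketch of the resulting call pattern from native code (the wrapper function is illustrative only):

#include "rocksjni/portal.h"

// Put a (key, value) pair into a java.util.Map from C++.
bool map_put_sketch(JNIEnv* env, jobject jmap, jobject jkey, jobject jval) {
  jmethodID mid_put = rocksdb::MapJni::getMapPutMethodId(env);
  if (mid_put == nullptr) {
    return false;  // exception pending
  }
  jobject jprev = env->CallObjectMethod(jmap, mid_put, jkey, jval);
  if (env->ExceptionCheck()) {
    return false;
  }
  if (jprev != nullptr) {
    env->DeleteLocalRef(jprev);  // Map#put returns the previous value
  }
  return true;
}
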
+  /**
+   * Create a new Java java.util.HashMap object.
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return A reference to a Java java.util.HashMap object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject construct(JNIEnv* env, const uint32_t initial_capacity = 16) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(I)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }
+
+    jobject jhash_map = env->NewObject(jclazz, mid, static_cast<jint>(initial_capacity));
+    if (env->ExceptionCheck()) {
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+
+  /**
+   * A function which maps a std::pair<K, V> to a std::pair<jobject, jobject>
+   *
+   * @return Either a pointer to a std::pair<jobject, jobject>, or nullptr
+   *     if an error occurs during the mapping
+   */
+  template <typename K, typename V>
+  using FnMapKV = std::function<std::unique_ptr<std::pair<jobject, jobject>> (const std::pair<K, V>&)>;
+
+  // template <class I, typename std::enable_if<std::is_same<typename std::iterator_traits<I>::value_type, std::pair<const std::string, const std::string>>::value, int32_t>::type = 0>
+  // static void putAll(JNIEnv* env, const jobject jhash_map, I iterator, const FnMapKV<const std::string, const std::string> &fn_map_kv) {
+  /**
+   * Returns true if it succeeds, false if an error occurs
+   */
+  template <typename K, typename V, typename iterator_type>
+  static bool putAll(JNIEnv* env, const jobject jhash_map, iterator_type iterator, iterator_type end, const FnMapKV<K, V> &fn_map_kv) {
+    const jmethodID jmid_put = rocksdb::MapJni::getMapPutMethodId(env);
+    if (jmid_put == nullptr) {
+      return false;
+    }
+
+    for (auto it = iterator; it != end; ++it) {
+      const std::unique_ptr<std::pair<jobject, jobject>> result = fn_map_kv(*it);
+      if (result == nullptr) {
+        // an error occurred during fn_map_kv
+        return false;
+      }
+      env->CallObjectMethod(jhash_map, jmid_put, result->first, result->second);
+      if (env->ExceptionCheck()) {
+        // exception occurred
+        env->DeleteLocalRef(result->second);
+        env->DeleteLocalRef(result->first);
+        return false;
+      }
+
+      // release local references
+      env->DeleteLocalRef(result->second);
+      env->DeleteLocalRef(result->first);
+    }
+
+    return true;
+  }
+
+  /**
+   * Creates a java.util.Map<String, String> from a
+   * std::map<std::string, std::string>
+   *
+   * @param env A pointer to the Java environment
+   * @param map the C++ map
+   *
+   * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+   */
+  static jobject fromCppMap(JNIEnv* env, const std::map<std::string, std::string>* map) {
+    if (map == nullptr) {
+      return nullptr;
+    }
+
+    jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+    if (jhash_map == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const rocksdb::HashMapJni::FnMapKV<const std::string, const std::string> fn_map_kv =
+        [env](const std::pair<const std::string, const std::string>& kv) {
+      jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+      if (env->ExceptionCheck()) {
+        // an error occurred
+        return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+      }
+
+      jstring jvalue = rocksdb::JniUtil::toJavaString(env, &(kv.second), true);
+      if (env->ExceptionCheck()) {
+        // an error occurred
+        env->DeleteLocalRef(jkey);
+        return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+      }
+
+      return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), static_cast<jobject>(jvalue)));
+    };
+
+    if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+      // exception occurred
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+
+  /**
+   * Creates a java.util.Map<String, Integer> from a
+   * std::map<std::string, uint32_t>
+   *
+   * @param env A pointer to the Java environment
+   * @param map the C++ map
+   *
+   * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+   */
+  static jobject fromCppMap(JNIEnv* env, const std::map<std::string, uint32_t>* map) {
+    if (map == nullptr) {
+      return nullptr;
+    }
+
+    jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+    if (jhash_map == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const rocksdb::HashMapJni::FnMapKV<const std::string, const uint32_t> fn_map_kv =
+        [env](const std::pair<const std::string, const uint32_t>& kv) {
+      jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+      if (env->ExceptionCheck()) {
+        // an error occurred
+        return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+      }
+
+      jobject jvalue = rocksdb::IntegerJni::valueOf(env, static_cast<jint>(kv.second));
+      if (env->ExceptionCheck()) {
+        // an error occurred
+        env->DeleteLocalRef(jkey);
+        return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+      }
+
+      return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), jvalue));
+    };
+
+    if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+      // exception occurred
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+
+  /**
+   * Creates a java.util.Map<String, Long> from a
+   * std::map<std::string, uint64_t>
+   *
+   * @param env A pointer to the Java environment
+   * @param map the C++ map
+   *
+   * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+   */
+  static jobject fromCppMap(JNIEnv* env, const std::map<std::string, uint64_t>* map) {
+    if (map == nullptr) {
+      return nullptr;
+    }
+
+    jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+    if (jhash_map == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const rocksdb::HashMapJni::FnMapKV<const std::string, const uint64_t> fn_map_kv =
+        [env](const std::pair<const std::string, const uint64_t>& kv) {
+      jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+      if (env->ExceptionCheck()) {
+        // an error occurred
+        return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+      }
+
+      jobject jvalue = rocksdb::LongJni::valueOf(env, static_cast<jlong>(kv.second));
+      if (env->ExceptionCheck()) {
+        // an error occurred
+        env->DeleteLocalRef(jkey);
+        return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+      }
+
+      return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), jvalue));
+    };
+
+    if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+      // exception occurred
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+
+  /**
+   * Creates a java.util.Map<Integer, Long> from a
+   * std::map<uint32_t, uint64_t>
+   *
+   * @param env A pointer to the Java environment
+   * @param map the C++ map
+   *
+   * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+   */
+  static jobject fromCppMap(JNIEnv* env, const std::map<uint32_t, uint64_t>* map) {
+    if (map == nullptr) {
+      return nullptr;
+    }
+
+    jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+    if (jhash_map == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const rocksdb::HashMapJni::FnMapKV<const uint32_t, const uint64_t> fn_map_kv =
+        [env](const std::pair<const uint32_t, const uint64_t>& kv) {
+      jobject jkey = rocksdb::IntegerJni::valueOf(env, static_cast<jint>(kv.first));
+      if (env->ExceptionCheck()) {
+        // an error occurred
+        return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+      }
+
+      jobject jvalue = rocksdb::LongJni::valueOf(env, static_cast<jlong>(kv.second));
+      if (env->ExceptionCheck()) {
+        // an error occurred
+        env->DeleteLocalRef(jkey);
+        return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+      }
+
+      return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), jvalue));
+    };
+
+    if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+      // exception occurred
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+};
+
+// The portal class for org.rocksdb.RocksDB
+class RocksDBJni : public RocksDBNativeClass<rocksdb::DB*, RocksDBJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.RocksDB
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static
jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksDB"); + } +}; + +// The portal class for org.rocksdb.Options +class OptionsJni : public RocksDBNativeClass< + rocksdb::Options*, OptionsJni> { + public: + /** + * Get the Java Class org.rocksdb.Options + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/Options"); + } +}; + +// The portal class for org.rocksdb.DBOptions +class DBOptionsJni : public RocksDBNativeClass< + rocksdb::DBOptions*, DBOptionsJni> { + public: + /** + * Get the Java Class org.rocksdb.DBOptions + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/DBOptions"); + } +}; + +// The portal class for org.rocksdb.ColumnFamilyOptions +class ColumnFamilyOptionsJni + : public RocksDBNativeClass { + public: + /** + * Get the Java Class org.rocksdb.ColumnFamilyOptions + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/ColumnFamilyOptions"); + } + + /** + * Create a new Java org.rocksdb.ColumnFamilyOptions object with the same + * properties as the provided C++ rocksdb::ColumnFamilyOptions object + * + * @param env A pointer to the Java environment + * @param cfoptions A pointer to rocksdb::ColumnFamilyOptions object + * + * @return A reference to a Java org.rocksdb.ColumnFamilyOptions object, or + * nullptr if an an exception occurs + */ + static jobject construct(JNIEnv* env, const ColumnFamilyOptions* cfoptions) { + auto* cfo = new rocksdb::ColumnFamilyOptions(*cfoptions); + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID(jclazz, "", "(J)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jobject jcfd = env->NewObject(jclazz, mid, reinterpret_cast(cfo)); + if (env->ExceptionCheck()) { + return nullptr; + } + + return jcfd; + } +}; + +// The portal class for org.rocksdb.WriteOptions +class WriteOptionsJni : public RocksDBNativeClass< + rocksdb::WriteOptions*, WriteOptionsJni> { + public: + /** + * Get the Java Class org.rocksdb.WriteOptions + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteOptions"); + } +}; + +// The portal class for org.rocksdb.ReadOptions +class ReadOptionsJni : public RocksDBNativeClass< + rocksdb::ReadOptions*, ReadOptionsJni> { + public: + /** + * Get the 
Java Class org.rocksdb.ReadOptions + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/ReadOptions"); + } +}; + +// The portal class for org.rocksdb.WriteBatch +class WriteBatchJni : public RocksDBNativeClass< + rocksdb::WriteBatch*, WriteBatchJni> { + public: + /** + * Get the Java Class org.rocksdb.WriteBatch + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteBatch"); + } + + /** + * Create a new Java org.rocksdb.WriteBatch object + * + * @param env A pointer to the Java environment + * @param wb A pointer to rocksdb::WriteBatch object + * + * @return A reference to a Java org.rocksdb.WriteBatch object, or + * nullptr if an an exception occurs + */ + static jobject construct(JNIEnv* env, const WriteBatch* wb) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID(jclazz, "", "(J)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jobject jwb = env->NewObject(jclazz, mid, reinterpret_cast(wb)); + if (env->ExceptionCheck()) { + return nullptr; + } + + return jwb; + } +}; + +// The portal class for org.rocksdb.WriteBatch.Handler +class WriteBatchHandlerJni : public RocksDBNativeClass< + const rocksdb::WriteBatchHandlerJniCallback*, + WriteBatchHandlerJni> { public: /** - * Get the Java Class org.rocksdb.WriteBatchWithIndex + * Get the Java Class org.rocksdb.WriteBatch.Handler * * @param env A pointer to the Java environment * @@ -1577,376 +2590,445 @@ class WriteBatchWithIndexJni : public RocksDBNativeClass< */ static jclass getJClass(JNIEnv* env) { return RocksDBNativeClass::getJClass(env, - "org/rocksdb/WriteBatchWithIndex"); + "org/rocksdb/WriteBatch$Handler"); } -}; -// The portal class for org.rocksdb.HistogramData -class HistogramDataJni : public JavaClass { - public: /** - * Get the Java Class org.rocksdb.HistogramData + * Get the Java Method: WriteBatch.Handler#put * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return JavaClass::getJClass(env, "org/rocksdb/HistogramData"); + static jmethodID getPutCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "put", "(I[B[B)V"); + assert(mid != nullptr); + return mid; } /** - * Get the Java Method: HistogramData constructor + * Get the Java Method: WriteBatch.Handler#put * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be 
retieved */ - static jmethodID getConstructorMethodId(JNIEnv* env) { + static jmethodID getPutMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "", "(DDDDDDJJD)V"); + static jmethodID mid = env->GetMethodID(jclazz, "put", "([B[B)V"); assert(mid != nullptr); return mid; } -}; -// The portal class for org.rocksdb.BackupableDBOptions -class BackupableDBOptionsJni : public RocksDBNativeClass< - rocksdb::BackupableDBOptions*, BackupableDBOptionsJni> { - public: /** - * Get the Java Class org.rocksdb.BackupableDBOptions + * Get the Java Method: WriteBatch.Handler#merge * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, - "org/rocksdb/BackupableDBOptions"); + static jmethodID getMergeCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "merge", "(I[B[B)V"); + assert(mid != nullptr); + return mid; } -}; -// The portal class for org.rocksdb.BackupEngine -class BackupEngineJni : public RocksDBNativeClass< - rocksdb::BackupEngine*, BackupEngineJni> { - public: /** - * Get the Java Class org.rocksdb.BackupableEngine + * Get the Java Method: WriteBatch.Handler#merge * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/BackupEngine"); + static jmethodID getMergeMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "merge", "([B[B)V"); + assert(mid != nullptr); + return mid; } -}; -// The portal class for org.rocksdb.RocksIterator -class IteratorJni : public RocksDBNativeClass< - rocksdb::Iterator*, IteratorJni> { - public: /** - * Get the Java Class org.rocksdb.RocksIterator + * Get the Java Method: WriteBatch.Handler#delete * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksIterator"); + static jmethodID getDeleteCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "delete", "(I[B)V"); + assert(mid != nullptr); + return mid; } -}; -// The portal class for org.rocksdb.Filter -class FilterJni : public 
RocksDBNativeClass< - std::shared_ptr*, FilterJni> { - public: /** - * Get the Java Class org.rocksdb.Filter + * Get the Java Method: WriteBatch.Handler#delete * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/Filter"); + static jmethodID getDeleteMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "delete", "([B)V"); + assert(mid != nullptr); + return mid; } -}; -// The portal class for org.rocksdb.ColumnFamilyHandle -class ColumnFamilyHandleJni : public RocksDBNativeClass< - rocksdb::ColumnFamilyHandle*, ColumnFamilyHandleJni> { - public: /** - * Get the Java Class org.rocksdb.ColumnFamilyHandle + * Get the Java Method: WriteBatch.Handler#singleDelete * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, - "org/rocksdb/ColumnFamilyHandle"); + static jmethodID getSingleDeleteCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "(I[B)V"); + assert(mid != nullptr); + return mid; } -}; -// The portal class for org.rocksdb.FlushOptions -class FlushOptionsJni : public RocksDBNativeClass< - rocksdb::FlushOptions*, FlushOptionsJni> { - public: /** - * Get the Java Class org.rocksdb.FlushOptions + * Get the Java Method: WriteBatch.Handler#singleDelete * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/FlushOptions"); + static jmethodID getSingleDeleteMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "([B)V"); + assert(mid != nullptr); + return mid; } -}; -// The portal class for org.rocksdb.ComparatorOptions -class ComparatorOptionsJni : public RocksDBNativeClass< - rocksdb::ComparatorJniCallbackOptions*, ComparatorOptionsJni> { - public: /** - * Get the Java Class org.rocksdb.ComparatorOptions + * Get the Java Method: WriteBatch.Handler#deleteRange * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java 
Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/ComparatorOptions"); + static jmethodID getDeleteRangeCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "(I[B[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#deleteRange + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved + */ + static jmethodID getDeleteRangeMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "([B[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#logData + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved + */ + static jmethodID getLogDataMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "logData", "([B)V"); + assert(mid != nullptr); + return mid; } -}; -// The portal class for org.rocksdb.AbstractCompactionFilterFactory -class AbstractCompactionFilterFactoryJni : public RocksDBNativeClass< - const rocksdb::CompactionFilterFactoryJniCallback*, - AbstractCompactionFilterFactoryJni> { - public: /** - * Get the Java Class org.rocksdb.AbstractCompactionFilterFactory + * Get the Java Method: WriteBatch.Handler#putBlobIndex * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, - "org/rocksdb/AbstractCompactionFilterFactory"); + static jmethodID getPutBlobIndexCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "putBlobIndex", "(I[B[B)V"); + assert(mid != nullptr); + return mid; } /** - * Get the Java Method: AbstractCompactionFilterFactory#name + * Get the Java Method: WriteBatch.Handler#markBeginPrepare * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getNameMethodId(JNIEnv* env) { + static jmethodID getMarkBeginPrepareMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID( - jclazz, "name", "()Ljava/lang/String;"); + static jmethodID mid = env->GetMethodID(jclazz, "markBeginPrepare", "()V"); assert(mid != nullptr); return mid; } /** - * Get the Java Method: AbstractCompactionFilterFactory#createCompactionFilter + * Get the Java Method: WriteBatch.Handler#markEndPrepare * * @param env A pointer to 
the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getCreateCompactionFilterMethodId(JNIEnv* env) { + static jmethodID getMarkEndPrepareMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, - "createCompactionFilter", - "(ZZ)J"); + static jmethodID mid = env->GetMethodID(jclazz, "markEndPrepare", "([B)V"); assert(mid != nullptr); return mid; } -}; - -// The portal class for org.rocksdb.AbstractTransactionNotifier -class AbstractTransactionNotifierJni : public RocksDBNativeClass< - const rocksdb::TransactionNotifierJniCallback*, - AbstractTransactionNotifierJni> { - public: - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, - "org/rocksdb/AbstractTransactionNotifier"); - } - // Get the java method `snapshotCreated` - // of org.rocksdb.AbstractTransactionNotifier. - static jmethodID getSnapshotCreatedMethodId(JNIEnv* env) { + /** + * Get the Java Method: WriteBatch.Handler#markNoop + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved + */ + static jmethodID getMarkNoopMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "snapshotCreated", "(J)V"); + static jmethodID mid = env->GetMethodID(jclazz, "markNoop", "(Z)V"); assert(mid != nullptr); return mid; } -}; -// The portal class for org.rocksdb.AbstractComparator -class AbstractComparatorJni : public RocksDBNativeClass< - const rocksdb::BaseComparatorJniCallback*, - AbstractComparatorJni> { - public: /** - * Get the Java Class org.rocksdb.AbstractComparator + * Get the Java Method: WriteBatch.Handler#markRollback * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, - "org/rocksdb/AbstractComparator"); + static jmethodID getMarkRollbackMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "markRollback", "([B)V"); + assert(mid != nullptr); + return mid; } /** - * Get the Java Method: Comparator#name + * Get the Java Method: WriteBatch.Handler#markCommit * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getNameMethodId(JNIEnv* env) { + static jmethodID getMarkCommitMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = - env->GetMethodID(jclazz, "name", "()Ljava/lang/String;"); + static jmethodID mid = env->GetMethodID(jclazz, "markCommit", "([B)V"); assert(mid != nullptr); return mid; } /** - * Get the Java Method: Comparator#compare + * Get the Java Method: WriteBatch.Handler#shouldContinue * * @param env A pointer to the Java environment * * 
@return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getCompareMethodId(JNIEnv* env) { + static jmethodID getContinueMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = - env->GetMethodID(jclazz, "compare", - "(Lorg/rocksdb/AbstractSlice;Lorg/rocksdb/AbstractSlice;)I"); + static jmethodID mid = env->GetMethodID(jclazz, "shouldContinue", "()Z"); assert(mid != nullptr); return mid; } +}; +class WriteBatchSavePointJni : public JavaClass { + public: /** - * Get the Java Method: Comparator#findShortestSeparator + * Get the Java Class org.rocksdb.WriteBatch.SavePoint + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/WriteBatch$SavePoint"); + } + + /** + * Get the Java Method: HistogramData constructor * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getFindShortestSeparatorMethodId(JNIEnv* env) { + static jmethodID getConstructorMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = - env->GetMethodID(jclazz, "findShortestSeparator", - "(Ljava/lang/String;Lorg/rocksdb/AbstractSlice;)Ljava/lang/String;"); + static jmethodID mid = env->GetMethodID(jclazz, "", "(JJJ)V"); assert(mid != nullptr); return mid; } /** - * Get the Java Method: Comparator#findShortSuccessor + * Create a new Java org.rocksdb.WriteBatch.SavePoint object * * @param env A pointer to the Java environment + * @param savePoint A pointer to rocksdb::WriteBatch::SavePoint object * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved + * @return A reference to a Java org.rocksdb.WriteBatch.SavePoint object, or + * nullptr if an an exception occurs */ - static jmethodID getFindShortSuccessorMethodId(JNIEnv* env) { + static jobject construct(JNIEnv* env, const SavePoint &save_point) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = - env->GetMethodID(jclazz, "findShortSuccessor", - "(Ljava/lang/String;)Ljava/lang/String;"); - assert(mid != nullptr); - return mid; + jmethodID mid = getConstructorMethodId(env); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jobject jsave_point = env->NewObject(jclazz, mid, + static_cast(save_point.size), + static_cast(save_point.count), + static_cast(save_point.content_flags)); + if (env->ExceptionCheck()) { + return nullptr; + } + + return jsave_point; } }; -// The portal class for org.rocksdb.AbstractSlice -class AbstractSliceJni : public NativeRocksMutableObject< - const rocksdb::Slice*, AbstractSliceJni> { +// The portal class for org.rocksdb.WriteBatchWithIndex +class WriteBatchWithIndexJni : public RocksDBNativeClass< + rocksdb::WriteBatchWithIndex*, WriteBatchWithIndexJni> { public: /** - * Get the Java Class org.rocksdb.AbstractSlice + * Get the Java Class org.rocksdb.WriteBatchWithIndex * * @param env A pointer to the Java environment 
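All of these getters follow the same pattern: resolve the class, cache the jmethodID in a function-local static, and let the native callback invoke it. A sketch of how a WriteBatchHandlerJniCallback might forward Handler#put (the surrounding function and the `m_jcallback_obj` global reference are assumptions, not code from this patch):

#include "rocksjni/portal.h"

void forward_put_sketch(JNIEnv* env, jobject m_jcallback_obj,
    jbyteArray jkey, jbyteArray jvalue) {
  jmethodID mid = rocksdb::WriteBatchHandlerJni::getPutMethodId(env);
  if (mid == nullptr) {
    return;  // exception pending
  }
  env->CallVoidMethod(m_jcallback_obj, mid, jkey, jvalue);
}
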
* @@ -1955,16 +3037,16 @@ class AbstractSliceJni : public NativeRocksMutableObject< * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractSlice"); + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/WriteBatchWithIndex"); } }; -// The portal class for org.rocksdb.Slice -class SliceJni : public NativeRocksMutableObject< - const rocksdb::Slice*, AbstractSliceJni> { +// The portal class for org.rocksdb.HistogramData +class HistogramDataJni : public JavaClass { public: /** - * Get the Java Class org.rocksdb.Slice + * Get the Java Class org.rocksdb.HistogramData * * @param env A pointer to the Java environment * @@ -1973,45 +3055,91 @@ class SliceJni : public NativeRocksMutableObject< * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/Slice"); + return JavaClass::getJClass(env, "org/rocksdb/HistogramData"); } /** - * Constructs a Slice object + * Get the Java Method: HistogramData constructor * * @param env A pointer to the Java environment * - * @return A reference to a Java Slice object, or a nullptr if an - * exception occurs + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jobject construct0(JNIEnv* env) { + static jmethodID getConstructorMethodId(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID(jclazz, "", "()V"); - if(mid == nullptr) { - // exception occurred accessing method - return nullptr; - } + static jmethodID mid = env->GetMethodID(jclazz, "", "(DDDDDDJJD)V"); + assert(mid != nullptr); + return mid; + } +}; - jobject jslice = env->NewObject(jclazz, mid); - if(env->ExceptionCheck()) { - return nullptr; - } +// The portal class for org.rocksdb.BackupableDBOptions +class BackupableDBOptionsJni : public RocksDBNativeClass< + rocksdb::BackupableDBOptions*, BackupableDBOptionsJni> { + public: + /** + * Get the Java Class org.rocksdb.BackupableDBOptions + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/BackupableDBOptions"); + } +}; - return jslice; +// The portal class for org.rocksdb.BackupEngine +class BackupEngineJni : public RocksDBNativeClass< + rocksdb::BackupEngine*, BackupEngineJni> { + public: + /** + * Get the Java Class org.rocksdb.BackupableEngine + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/BackupEngine"); } }; -// The portal class for org.rocksdb.DirectSlice -class DirectSliceJni : public NativeRocksMutableObject< - const rocksdb::Slice*, AbstractSliceJni> { +// The portal class for org.rocksdb.RocksIterator +class IteratorJni : public RocksDBNativeClass< + rocksdb::Iterator*, IteratorJni> { + public: + /** + * Get the Java Class org.rocksdb.RocksIterator 
+ * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksIterator"); + } +}; + +// The portal class for org.rocksdb.Filter +class FilterJni : public RocksDBNativeClass< + std::shared_ptr*, FilterJni> { public: /** - * Get the Java Class org.rocksdb.DirectSlice + * Get the Java Class org.rocksdb.Filter * * @param env A pointer to the Java environment * @@ -2020,44 +3148,35 @@ class DirectSliceJni : public NativeRocksMutableObject< * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ static jclass getJClass(JNIEnv* env) { - return RocksDBNativeClass::getJClass(env, "org/rocksdb/DirectSlice"); + return RocksDBNativeClass::getJClass(env, "org/rocksdb/Filter"); } +}; +// The portal class for org.rocksdb.ColumnFamilyHandle +class ColumnFamilyHandleJni : public RocksDBNativeClass< + rocksdb::ColumnFamilyHandle*, ColumnFamilyHandleJni> { + public: /** - * Constructs a DirectSlice object + * Get the Java Class org.rocksdb.ColumnFamilyHandle * * @param env A pointer to the Java environment * - * @return A reference to a Java DirectSlice object, or a nullptr if an - * exception occurs + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jobject construct0(JNIEnv* env) { - jclass jclazz = getJClass(env); - if(jclazz == nullptr) { - // exception occurred accessing class - return nullptr; - } - - static jmethodID mid = env->GetMethodID(jclazz, "", "()V"); - if(mid == nullptr) { - // exception occurred accessing method - return nullptr; - } - - jobject jdirect_slice = env->NewObject(jclazz, mid); - if(env->ExceptionCheck()) { - return nullptr; - } - - return jdirect_slice; + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/ColumnFamilyHandle"); } }; -// The portal class for java.util.List -class ListJni : public JavaClass { +// The portal class for org.rocksdb.FlushOptions +class FlushOptionsJni : public RocksDBNativeClass< + rocksdb::FlushOptions*, FlushOptionsJni> { public: /** - * Get the Java Class java.util.List + * Get the Java Class org.rocksdb.FlushOptions * * @param env A pointer to the Java environment * @@ -2065,12 +3184,17 @@ class ListJni : public JavaClass { * ClassFormatError, ClassCircularityError, NoClassDefFoundError, * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jclass getListClass(JNIEnv* env) { - return JavaClass::getJClass(env, "java/util/List"); + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/FlushOptions"); } +}; +// The portal class for org.rocksdb.ComparatorOptions +class ComparatorOptionsJni : public RocksDBNativeClass< + rocksdb::ComparatorJniCallbackOptions*, ComparatorOptionsJni> { + public: /** - * Get the Java Class java.util.ArrayList + * Get the Java Class org.rocksdb.ComparatorOptions * * @param env A pointer to the Java environment * @@ -2078,12 +3202,18 @@ class ListJni : public JavaClass { * ClassFormatError, ClassCircularityError, NoClassDefFoundError, * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jclass getArrayListClass(JNIEnv* 
env) { - return JavaClass::getJClass(env, "java/util/ArrayList"); + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/ComparatorOptions"); } +}; +// The portal class for org.rocksdb.AbstractCompactionFilterFactory +class AbstractCompactionFilterFactoryJni : public RocksDBNativeClass< + const rocksdb::CompactionFilterFactoryJniCallback*, + AbstractCompactionFilterFactoryJni> { + public: /** - * Get the Java Class java.util.Iterator + * Get the Java Class org.rocksdb.AbstractCompactionFilterFactory * * @param env A pointer to the Java environment * @@ -2091,132 +3221,193 @@ class ListJni : public JavaClass { * ClassFormatError, ClassCircularityError, NoClassDefFoundError, * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jclass getIteratorClass(JNIEnv* env) { - return JavaClass::getJClass(env, "java/util/Iterator"); + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/AbstractCompactionFilterFactory"); } /** - * Get the Java Method: List#iterator + * Get the Java Method: AbstractCompactionFilterFactory#name * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getIteratorMethod(JNIEnv* env) { - jclass jlist_clazz = getListClass(env); - if(jlist_clazz == nullptr) { + static jmethodID getNameMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = - env->GetMethodID(jlist_clazz, "iterator", "()Ljava/util/Iterator;"); + static jmethodID mid = env->GetMethodID( + jclazz, "name", "()Ljava/lang/String;"); assert(mid != nullptr); return mid; } /** - * Get the Java Method: Iterator#hasNext + * Get the Java Method: AbstractCompactionFilterFactory#createCompactionFilter * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getHasNextMethod(JNIEnv* env) { - jclass jiterator_clazz = getIteratorClass(env); - if(jiterator_clazz == nullptr) { + static jmethodID getCreateCompactionFilterMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = env->GetMethodID(jiterator_clazz, "hasNext", "()Z"); + static jmethodID mid = env->GetMethodID(jclazz, + "createCompactionFilter", + "(ZZ)J"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.AbstractTransactionNotifier +class AbstractTransactionNotifierJni : public RocksDBNativeClass< + const rocksdb::TransactionNotifierJniCallback*, + AbstractTransactionNotifierJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/AbstractTransactionNotifier"); + } + + // Get the java method `snapshotCreated` + // of org.rocksdb.AbstractTransactionNotifier. 
+ static jmethodID getSnapshotCreatedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "snapshotCreated", "(J)V"); assert(mid != nullptr); return mid; } +}; + +// The portal class for org.rocksdb.AbstractComparator +class AbstractComparatorJni : public RocksDBNativeClass< + const rocksdb::BaseComparatorJniCallback*, + AbstractComparatorJni> { + public: + /** + * Get the Java Class org.rocksdb.AbstractComparator + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/AbstractComparator"); + } /** - * Get the Java Method: Iterator#next + * Get the Java Method: Comparator#name * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getNextMethod(JNIEnv* env) { - jclass jiterator_clazz = getIteratorClass(env); - if(jiterator_clazz == nullptr) { + static jmethodID getNameMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } static jmethodID mid = - env->GetMethodID(jiterator_clazz, "next", "()Ljava/lang/Object;"); + env->GetMethodID(jclazz, "name", "()Ljava/lang/String;"); assert(mid != nullptr); return mid; } /** - * Get the Java Method: ArrayList constructor + * Get the Java Method: Comparator#compare * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getArrayListConstructorMethodId(JNIEnv* env) { - jclass jarray_list_clazz = getArrayListClass(env); - if(jarray_list_clazz == nullptr) { + static jmethodID getCompareMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } + static jmethodID mid = - env->GetMethodID(jarray_list_clazz, "", "(I)V"); + env->GetMethodID(jclazz, "compare", + "(Lorg/rocksdb/AbstractSlice;Lorg/rocksdb/AbstractSlice;)I"); assert(mid != nullptr); return mid; } /** - * Get the Java Method: List#add + * Get the Java Method: Comparator#findShortestSeparator * * @param env A pointer to the Java environment * * @return The Java Method ID or nullptr if the class or method id could not * be retieved */ - static jmethodID getListAddMethodId(JNIEnv* env) { - jclass jlist_clazz = getListClass(env); - if(jlist_clazz == nullptr) { + static jmethodID getFindShortestSeparatorMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } static jmethodID mid = - env->GetMethodID(jlist_clazz, "add", "(Ljava/lang/Object;)Z"); + env->GetMethodID(jclazz, "findShortestSeparator", + "(Ljava/lang/String;Lorg/rocksdb/AbstractSlice;)Ljava/lang/String;"); assert(mid != nullptr); return mid; } -}; -// The portal class for java.lang.Byte -class ByteJni : public JavaClass { - public: /** - * Get the Java Class java.lang.Byte + * Get the Java Method: Comparator#findShortSuccessor * * @param env A pointer to the Java environment * - * @return The Java Class or nullptr if one of the - * ClassFormatError, 
ClassCircularityError, NoClassDefFoundError, - * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved */ - static jclass getJClass(JNIEnv* env) { - return JavaClass::getJClass(env, "java/lang/Byte"); + static jmethodID getFindShortSuccessorMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "findShortSuccessor", + "(Ljava/lang/String;)Ljava/lang/String;"); + assert(mid != nullptr); + return mid; } +}; +// The portal class for org.rocksdb.AbstractSlice +class AbstractSliceJni : public NativeRocksMutableObject< + const rocksdb::Slice*, AbstractSliceJni> { + public: /** - * Get the Java Class byte[] + * Get the Java Class org.rocksdb.AbstractSlice * * @param env A pointer to the Java environment * @@ -2224,87 +3415,64 @@ class ByteJni : public JavaClass { * ClassFormatError, ClassCircularityError, NoClassDefFoundError, * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ - static jclass getArrayJClass(JNIEnv* env) { - return JavaClass::getJClass(env, "[B"); - } - - /** - * Creates a new 2-dimensional Java Byte Array byte[][] - * - * @param env A pointer to the Java environment - * @param len The size of the first dimension - * - * @return A reference to the Java byte[][] or nullptr if an exception occurs - */ - static jobjectArray new2dByteArray(JNIEnv* env, const jsize len) { - jclass clazz = getArrayJClass(env); - if(clazz == nullptr) { - // exception occurred accessing class - return nullptr; - } - - return env->NewObjectArray(len, clazz, nullptr); + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractSlice"); } +}; +// The portal class for org.rocksdb.Slice +class SliceJni : public NativeRocksMutableObject< + const rocksdb::Slice*, AbstractSliceJni> { + public: /** - * Get the Java Method: Byte#byteValue + * Get the Java Class org.rocksdb.Slice * * @param env A pointer to the Java environment * - * @return The Java Method ID or nullptr if the class or method id could not - * be retrieved - */ - static jmethodID getByteValueMethod(JNIEnv* env) { - jclass clazz = getJClass(env); - if(clazz == nullptr) { - // exception occurred accessing class - return nullptr; - } - - static jmethodID mid = env->GetMethodID(clazz, "byteValue", "()B"); - assert(mid != nullptr); - return mid; + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/Slice"); } /** - * Calls the Java Method: Byte#valueOf, returning a constructed Byte jobject + * Constructs a Slice object * * @param env A pointer to the Java environment * - * @return A constructing Byte object or nullptr if the class or method id could not - * be retrieved, or an exception occurred + * @return A reference to a Java Slice object, or a nullptr if an + * exception occurs */ - static jobject valueOf(JNIEnv* env, jbyte jprimitive_byte) { - jclass clazz = getJClass(env); - if (clazz == nullptr) { + static jobject construct0(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = - 
env->GetStaticMethodID(clazz, "valueOf", "(B)Ljava/lang/Byte;"); - if (mid == nullptr) { - // exception thrown: NoSuchMethodException or OutOfMemoryError + static jmethodID mid = env->GetMethodID(jclazz, "", "()V"); + if(mid == nullptr) { + // exception occurred accessing method return nullptr; } - const jobject jbyte_obj = - env->CallStaticObjectMethod(clazz, mid, jprimitive_byte); - if (env->ExceptionCheck()) { - // exception occurred + jobject jslice = env->NewObject(jclazz, mid); + if(env->ExceptionCheck()) { return nullptr; } - return jbyte_obj; + return jslice; } - }; -// The portal class for java.lang.StringBuilder -class StringBuilderJni : public JavaClass { - public: +// The portal class for org.rocksdb.DirectSlice +class DirectSliceJni : public NativeRocksMutableObject< + const rocksdb::Slice*, AbstractSliceJni> { + public: /** - * Get the Java Class java.lang.StringBuilder + * Get the Java Class org.rocksdb.DirectSlice * * @param env A pointer to the Java environment * @@ -2313,64 +3481,36 @@ class StringBuilderJni : public JavaClass { * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown */ static jclass getJClass(JNIEnv* env) { - return JavaClass::getJClass(env, "java/lang/StringBuilder"); + return RocksDBNativeClass::getJClass(env, "org/rocksdb/DirectSlice"); } /** - * Get the Java Method: StringBuilder#append + * Constructs a DirectSlice object * * @param env A pointer to the Java environment * - * @return The Java Method ID or nullptr if the class or method id could not - * be retieved + * @return A reference to a Java DirectSlice object, or a nullptr if an + * exception occurs */ - static jmethodID getListAddMethodId(JNIEnv* env) { + static jobject construct0(JNIEnv* env) { jclass jclazz = getJClass(env); if(jclazz == nullptr) { // exception occurred accessing class return nullptr; } - static jmethodID mid = - env->GetMethodID(jclazz, "append", - "(Ljava/lang/String;)Ljava/lang/StringBuilder;"); - assert(mid != nullptr); - return mid; - } - - /** - * Appends a C-style string to a StringBuilder - * - * @param env A pointer to the Java environment - * @param jstring_builder Reference to a java.lang.StringBuilder - * @param c_str A C-style string to append to the StringBuilder - * - * @return A reference to the updated StringBuilder, or a nullptr if - * an exception occurs - */ - static jobject append(JNIEnv* env, jobject jstring_builder, - const char* c_str) { - jmethodID mid = getListAddMethodId(env); + static jmethodID mid = env->GetMethodID(jclazz, "", "()V"); if(mid == nullptr) { - // exception occurred accessing class or method - return nullptr; - } - - jstring new_value_str = env->NewStringUTF(c_str); - if(new_value_str == nullptr) { - // exception thrown: OutOfMemoryError + // exception occurred accessing method return nullptr; } - jobject jresult_string_builder = - env->CallObjectMethod(jstring_builder, mid, new_value_str); + jobject jdirect_slice = env->NewObject(jclazz, mid); if(env->ExceptionCheck()) { - // exception occurred - env->DeleteLocalRef(new_value_str); return nullptr; } - return jresult_string_builder; + return jdirect_slice; } }; @@ -4438,704 +5578,1399 @@ class DeadlockPathJni : public JavaClass { } }; -// various utility functions for working with RocksDB and JNI -class JniUtil { +class AbstractTableFilterJni : public RocksDBNativeClass { public: - /** - * Obtains a reference to the JNIEnv from - * the JVM - * - * If the current thread is not attached to the JavaVM - * then it will be attached so as to retrieve the JNIEnv - * - * If 
a thread is attached, it must later be manually
-   * released by calling JavaVM::DetachCurrentThread.
-   * This can be handled by always matching calls to this
-   * function with calls to {@link JniUtil::releaseJniEnv(JavaVM*, jboolean)}
-   *
-   * @param jvm (IN) A pointer to the JavaVM instance
-   * @param attached (OUT) A pointer to a boolean which
-   *     will be set to JNI_TRUE if we had to attach the thread
-   *
-   * @return A pointer to the JNIEnv or nullptr if a fatal error
-   *     occurs and the JNIEnv cannot be retrieved
-   */
-  static JNIEnv* getJniEnv(JavaVM* jvm, jboolean* attached) {
-    assert(jvm != nullptr);
+  /**
+   * Get the Java Method: TableFilter#filter(TableProperties)
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getFilterMethod(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if(jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }

-    JNIEnv *env;
-    const jint env_rs = jvm->GetEnv(reinterpret_cast<void**>(&env),
-        JNI_VERSION_1_2);
+    static jmethodID mid =
+        env->GetMethodID(jclazz, "filter", "(Lorg/rocksdb/TableProperties;)Z");
+    assert(mid != nullptr);
+    return mid;
+  }

-    if(env_rs == JNI_OK) {
-      // current thread is already attached, return the JNIEnv
-      *attached = JNI_FALSE;
-      return env;
-    } else if(env_rs == JNI_EDETACHED) {
-      // current thread is not attached, attempt to attach
-      const jint rs_attach = jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
-      if(rs_attach == JNI_OK) {
-        *attached = JNI_TRUE;
-        return env;
-      } else {
-        // error, could not attach the thread
-        std::cerr << "JniUtil::getJinEnv - Fatal: could not attach current thread to JVM!" << std::endl;
-        return nullptr;
-      }
-    } else if(env_rs == JNI_EVERSION) {
-      // error, JDK does not support JNI_VERSION_1_2+
-      std::cerr << "JniUtil::getJinEnv - Fatal: JDK does not support JNI_VERSION_1_2" << std::endl;
-      return nullptr;
-    } else {
-      std::cerr << "JniUtil::getJinEnv - Fatal: Unknown error: env_rs=" << env_rs << std::endl;
-      return nullptr;
-    }
+ private:
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/TableFilter");
+  }
+};
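The getJniEnv/releaseJniEnv pair in the removed lines above appears to be relocated rather than deleted by this patch; together they encode the attach/detach contract that every rocksjni callback has to honour. A minimal sketch of a caller, assuming a callback object that caches a JavaVM pointer (`m_jvm`, `m_jcallback_obj` and `mid` are hypothetical placeholders, not part of this patch):

    // Hedged sketch: the usual shape of a native-to-Java callback in rocksjni.
    void InvokeJavaCallback(JavaVM* m_jvm, jobject m_jcallback_obj, jmethodID mid) {
      jboolean attached_thread = JNI_FALSE;
      // may attach the current thread to the JVM if it is not attached yet
      JNIEnv* env = rocksdb::JniUtil::getJniEnv(m_jvm, &attached_thread);
      if (env == nullptr) {
        return;  // fatal: no JNIEnv could be obtained
      }
      env->CallVoidMethod(m_jcallback_obj, mid);
      if (env->ExceptionCheck()) {
        env->ExceptionDescribe();  // log rather than crash a DB background thread
        env->ExceptionClear();
      }
      // every getJniEnv() must be balanced by a releaseJniEnv()
      rocksdb::JniUtil::releaseJniEnv(m_jvm, attached_thread);
    }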
+
+class TablePropertiesJni : public JavaClass {
+ public:
+  /**
+   * Create a new Java org.rocksdb.TableProperties object.
+   *
+   * @param env A pointer to the Java environment
+   * @param table_properties A Cpp table properties object
+   *
+   * @return A reference to a Java org.rocksdb.TableProperties object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject fromCppTableProperties(JNIEnv* env,
+      const rocksdb::TableProperties& table_properties) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = env->GetMethodID(jclazz, "<init>",
+        "(JJJJJJJJJJJJJJJJJJJ[BLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/util/Map;Ljava/util/Map;Ljava/util/Map;)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }
+
+    jbyteArray jcolumn_family_name = rocksdb::JniUtil::copyBytes(
+        env, table_properties.column_family_name);
+    if (jcolumn_family_name == nullptr) {
+      // exception occurred creating java string
+      return nullptr;
+    }
+
+    jstring jfilter_policy_name = rocksdb::JniUtil::toJavaString(
+        env, &table_properties.filter_policy_name, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jcolumn_family_name);
+      return nullptr;
+    }
+
+    jstring jcomparator_name = rocksdb::JniUtil::toJavaString(
+        env, &table_properties.comparator_name, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfilter_policy_name);
+      return nullptr;
+    }
+
+    jstring jmerge_operator_name = rocksdb::JniUtil::toJavaString(
+        env, &table_properties.merge_operator_name, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfilter_policy_name);
+      env->DeleteLocalRef(jcomparator_name);
+      return nullptr;
+    }
+
+    jstring jprefix_extractor_name = rocksdb::JniUtil::toJavaString(
+        env, &table_properties.prefix_extractor_name, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfilter_policy_name);
+      env->DeleteLocalRef(jcomparator_name);
+      env->DeleteLocalRef(jmerge_operator_name);
+      return nullptr;
+    }
+
+    jstring jproperty_collectors_names = rocksdb::JniUtil::toJavaString(
+        env, &table_properties.property_collectors_names, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfilter_policy_name);
+      env->DeleteLocalRef(jcomparator_name);
+      env->DeleteLocalRef(jmerge_operator_name);
+      env->DeleteLocalRef(jprefix_extractor_name);
+      return nullptr;
+    }
+
+    jstring jcompression_name = rocksdb::JniUtil::toJavaString(
+        env, &table_properties.compression_name, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfilter_policy_name);
+      env->DeleteLocalRef(jcomparator_name);
+      env->DeleteLocalRef(jmerge_operator_name);
+      env->DeleteLocalRef(jprefix_extractor_name);
+      env->DeleteLocalRef(jproperty_collectors_names);
+      return nullptr;
+    }
+
+    // Map<String, String>
+    jobject juser_collected_properties = rocksdb::HashMapJni::fromCppMap(
+        env, &table_properties.user_collected_properties);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java map
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfilter_policy_name);
+      env->DeleteLocalRef(jcomparator_name);
+      env->DeleteLocalRef(jmerge_operator_name);
+      env->DeleteLocalRef(jprefix_extractor_name);
+      env->DeleteLocalRef(jproperty_collectors_names);
+      env->DeleteLocalRef(jcompression_name);
+      return nullptr;
+    }
+
+    // Map<String, String>
+    jobject jreadable_properties = rocksdb::HashMapJni::fromCppMap(
+        env, &table_properties.readable_properties);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java map
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfilter_policy_name);
+      env->DeleteLocalRef(jcomparator_name);
+      env->DeleteLocalRef(jmerge_operator_name);
+      env->DeleteLocalRef(jprefix_extractor_name);
+      env->DeleteLocalRef(jproperty_collectors_names);
+      env->DeleteLocalRef(jcompression_name);
+      env->DeleteLocalRef(juser_collected_properties);
+      return nullptr;
+    }
+
+    // Map<String, Long>
+    jobject jproperties_offsets = rocksdb::HashMapJni::fromCppMap(
+        env, &table_properties.properties_offsets);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java map
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfilter_policy_name);
+      env->DeleteLocalRef(jcomparator_name);
+      env->DeleteLocalRef(jmerge_operator_name);
+      env->DeleteLocalRef(jprefix_extractor_name);
+      env->DeleteLocalRef(jproperty_collectors_names);
+      env->DeleteLocalRef(jcompression_name);
+      env->DeleteLocalRef(juser_collected_properties);
+      env->DeleteLocalRef(jreadable_properties);
+      return nullptr;
+    }
+
+    jobject jtable_properties = env->NewObject(jclazz, mid,
+        static_cast<jlong>(table_properties.data_size),
+        static_cast<jlong>(table_properties.index_size),
+        static_cast<jlong>(table_properties.index_partitions),
+        static_cast<jlong>(table_properties.top_level_index_size),
+        static_cast<jlong>(table_properties.index_key_is_user_key),
+        static_cast<jlong>(table_properties.index_value_is_delta_encoded),
+        static_cast<jlong>(table_properties.filter_size),
+        static_cast<jlong>(table_properties.raw_key_size),
+        static_cast<jlong>(table_properties.raw_value_size),
+        static_cast<jlong>(table_properties.num_data_blocks),
+        static_cast<jlong>(table_properties.num_entries),
+        static_cast<jlong>(table_properties.num_deletions),
+        static_cast<jlong>(table_properties.num_merge_operands),
+        static_cast<jlong>(table_properties.num_range_deletions),
+        static_cast<jlong>(table_properties.format_version),
+        static_cast<jlong>(table_properties.fixed_key_len),
+        static_cast<jlong>(table_properties.column_family_id),
+        static_cast<jlong>(table_properties.creation_time),
+        static_cast<jlong>(table_properties.oldest_key_time),
+        jcolumn_family_name,
+        jfilter_policy_name,
+        jcomparator_name,
+        jmerge_operator_name,
+        jprefix_extractor_name,
+        jproperty_collectors_names,
+        jcompression_name,
+        juser_collected_properties,
+        jreadable_properties,
+        jproperties_offsets
+    );
+
+    if (env->ExceptionCheck()) {
+      return nullptr;
+    }
+
+    return jtable_properties;
+  }
+
+ private:
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/TableProperties");
+  }
+};
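fromCppTableProperties shows the cost of manual local-reference management: every new field extends each subsequent error path by one more DeleteLocalRef. Purely as an illustration of an alternative (this patch deliberately keeps the explicit ladders, matching the rest of portal.h), a small RAII guard could collapse the cleanup:

    // Illustrative sketch only -- not part of this diff.
    class LocalRefGuard {
     public:
      LocalRefGuard(JNIEnv* env, jobject obj) : env_(env), obj_(obj) {}
      ~LocalRefGuard() {
        if (obj_ != nullptr) {
          env_->DeleteLocalRef(obj_);  // runs on every early return
        }
      }
      jobject release() {  // keep the reference alive past the guard
        jobject obj = obj_;
        obj_ = nullptr;
        return obj;
      }
     private:
      JNIEnv* env_;
      jobject obj_;
    };

With one guard declared per local reference, each `return nullptr;` would clean up automatically and `release()` would be called only on the success path.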
+
+class ColumnFamilyDescriptorJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.ColumnFamilyDescriptor
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyDescriptor");
+  }
+
+  /**
+   * Create a new Java org.rocksdb.ColumnFamilyDescriptor object with the same
+   * properties as the provided C++ rocksdb::ColumnFamilyDescriptor object
+   *
+   * @param env A pointer to the Java environment
+   * @param cfd A pointer to rocksdb::ColumnFamilyDescriptor object
+   *
+   * @return A reference to a Java org.rocksdb.ColumnFamilyDescriptor object,
+   *     or nullptr if an exception occurs
+   */
+  static jobject construct(JNIEnv* env, ColumnFamilyDescriptor* cfd) {
+    jbyteArray jcf_name = JniUtil::copyBytes(env, cfd->name);
+    jobject cfopts = ColumnFamilyOptionsJni::construct(env, &(cfd->options));
+
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = env->GetMethodID(jclazz, "<init>",
+        "([BLorg/rocksdb/ColumnFamilyOptions;)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      env->DeleteLocalRef(jcf_name);
+      return nullptr;
    }

-    /**
-     * Counterpart to {@link JniUtil::getJniEnv(JavaVM*, jboolean*)}
-     *
-     * Detaches the current thread from the JVM if it was previously
-     * attached
-     *
-     * @param jvm (IN) A pointer to the JavaVM instance
-     * @param attached (IN) JNI_TRUE if we previously had to attach the thread
-     *     to the JavaVM to get the JNIEnv
-     */
-    static void releaseJniEnv(JavaVM* jvm, jboolean& attached) {
-      assert(jvm != nullptr);
-      if(attached == JNI_TRUE) {
-        const jint rs_detach = jvm->DetachCurrentThread();
-        assert(rs_detach == JNI_OK);
-        if(rs_detach != JNI_OK) {
-          std::cerr << "JniUtil::getJinEnv - Warn: Unable to detach current thread from JVM!" << std::endl;
-        }
-      }
+    jobject jcfd = env->NewObject(jclazz, mid, jcf_name, cfopts);
+    if (env->ExceptionCheck()) {
+      env->DeleteLocalRef(jcf_name);
+      return nullptr;
    }

-    /**
-     * Copies a Java String[] to a C++ std::vector<std::string>
-     *
-     * @param env (IN) A pointer to the java environment
-     * @param jss (IN) The Java String array to copy
-     * @param has_exception (OUT) will be set to JNI_TRUE
-     *     if an OutOfMemoryError or ArrayIndexOutOfBoundsException
-     *     exception occurs
-     *
-     * @return A std::vector<std::string> containing copies of the Java strings
-     */
-    static std::vector<std::string> copyStrings(JNIEnv* env,
-        jobjectArray jss, jboolean* has_exception) {
-      return rocksdb::JniUtil::copyStrings(env, jss,
-          env->GetArrayLength(jss), has_exception);
+    return jcfd;
+  }
+
+  /**
+   * Get the Java Method: ColumnFamilyDescriptor#columnFamilyName
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getColumnFamilyNameMethod(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
    }

-    /**
-     * Copies a Java String[] to a C++ std::vector<std::string>
-     *
-     * @param env (IN) A pointer to the java environment
-     * @param jss (IN) The Java String array to copy
-     * @param jss_len (IN) The length of the Java String array to copy
-     * @param has_exception (OUT) will be set to JNI_TRUE
-     *     if an OutOfMemoryError or ArrayIndexOutOfBoundsException
-     *     exception occurs
-     *
-     * @return A std::vector<std::string> containing copies of the Java strings
-     */
-    static std::vector<std::string> copyStrings(JNIEnv* env,
-        jobjectArray jss, const jsize jss_len, jboolean* has_exception) {
-      std::vector<std::string> strs;
-      for (jsize i = 0; i < jss_len; i++) {
-        jobject js = env->GetObjectArrayElement(jss, i);
-        if(env->ExceptionCheck()) {
-          // exception thrown: ArrayIndexOutOfBoundsException
-          *has_exception = JNI_TRUE;
-          return strs;
-        }
+    static jmethodID mid = env->GetMethodID(jclazz, "columnFamilyName", "()[B");
+    assert(mid !=
nullptr); + return mid; + } - jstring jstr = static_cast(js); - const char* str = env->GetStringUTFChars(jstr, nullptr); - if(str == nullptr) { - // exception thrown: OutOfMemoryError - env->DeleteLocalRef(js); - *has_exception = JNI_TRUE; - return strs; - } + /** + * Get the Java Method: ColumnFamilyDescriptor#columnFamilyOptions + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved + */ + static jmethodID getColumnFamilyOptionsMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } - strs.push_back(std::string(str)); + static jmethodID mid = env->GetMethodID( + jclazz, "columnFamilyOptions", "()Lorg/rocksdb/ColumnFamilyOptions;"); + assert(mid != nullptr); + return mid; + } +}; - env->ReleaseStringUTFChars(jstr, str); - env->DeleteLocalRef(js); - } +// The portal class for org.rocksdb.IndexType +class IndexTypeJni { + public: + // Returns the equivalent org.rocksdb.IndexType for the provided + // C++ rocksdb::IndexType enum + static jbyte toJavaIndexType( + const rocksdb::BlockBasedTableOptions::IndexType& index_type) { + switch(index_type) { + case rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch: + return 0x0; + case rocksdb::BlockBasedTableOptions::IndexType::kHashSearch: + return 0x1; + case rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch: + return 0x2; + default: + return 0x7F; // undefined + } + } - *has_exception = JNI_FALSE; - return strs; - } + // Returns the equivalent C++ rocksdb::IndexType enum for the + // provided Java org.rocksdb.IndexType + static rocksdb::BlockBasedTableOptions::IndexType toCppIndexType( + jbyte jindex_type) { + switch(jindex_type) { + case 0x0: + return rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch; + case 0x1: + return rocksdb::BlockBasedTableOptions::IndexType::kHashSearch; + case 0x2: + return rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch; + default: + // undefined/default + return rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch; + } + } +}; - /** - * Copies a jstring to a C-style null-terminated byte string - * and releases the original jstring - * - * The jstring is copied as UTF-8 - * - * If an exception occurs, then JNIEnv::ExceptionCheck() - * will have been called - * - * @param env (IN) A pointer to the java environment - * @param js (IN) The java string to copy - * @param has_exception (OUT) will be set to JNI_TRUE - * if an OutOfMemoryError exception occurs - * - * @return A pointer to the copied string, or a - * nullptr if has_exception == JNI_TRUE - */ - static std::unique_ptr copyString(JNIEnv* env, jstring js, - jboolean* has_exception) { - const char *utf = env->GetStringUTFChars(js, nullptr); - if(utf == nullptr) { - // exception thrown: OutOfMemoryError - env->ExceptionCheck(); - *has_exception = JNI_TRUE; - return nullptr; - } else if(env->ExceptionCheck()) { - // exception thrown - env->ReleaseStringUTFChars(js, utf); - *has_exception = JNI_TRUE; - return nullptr; - } +// The portal class for org.rocksdb.DataBlockIndexType +class DataBlockIndexTypeJni { + public: + // Returns the equivalent org.rocksdb.DataBlockIndexType for the provided + // C++ rocksdb::DataBlockIndexType enum + static jbyte toJavaDataBlockIndexType( + const rocksdb::BlockBasedTableOptions::DataBlockIndexType& index_type) { + switch(index_type) { + case 
rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinarySearch: + return 0x0; + case rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinaryAndHash: + return 0x1; + default: + return 0x7F; // undefined + } + } - const jsize utf_len = env->GetStringUTFLength(js); - std::unique_ptr str(new char[utf_len + 1]); // Note: + 1 is needed for the c_str null terminator - std::strcpy(str.get(), utf); - env->ReleaseStringUTFChars(js, utf); - *has_exception = JNI_FALSE; - return str; - } + // Returns the equivalent C++ rocksdb::DataBlockIndexType enum for the + // provided Java org.rocksdb.DataBlockIndexType + static rocksdb::BlockBasedTableOptions::DataBlockIndexType toCppDataBlockIndexType( + jbyte jindex_type) { + switch(jindex_type) { + case 0x0: + return rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinarySearch; + case 0x1: + return rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinaryAndHash; + default: + // undefined/default + return rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinarySearch; + } + } +}; - /** - * Copies a jstring to a std::string - * and releases the original jstring - * - * If an exception occurs, then JNIEnv::ExceptionCheck() - * will have been called - * - * @param env (IN) A pointer to the java environment - * @param js (IN) The java string to copy - * @param has_exception (OUT) will be set to JNI_TRUE - * if an OutOfMemoryError exception occurs - * - * @return A std::string copy of the jstring, or an - * empty std::string if has_exception == JNI_TRUE - */ - static std::string copyStdString(JNIEnv* env, jstring js, - jboolean* has_exception) { - const char *utf = env->GetStringUTFChars(js, nullptr); - if(utf == nullptr) { - // exception thrown: OutOfMemoryError - env->ExceptionCheck(); - *has_exception = JNI_TRUE; - return std::string(); - } else if(env->ExceptionCheck()) { - // exception thrown - env->ReleaseStringUTFChars(js, utf); - *has_exception = JNI_TRUE; - return std::string(); - } +// The portal class for org.rocksdb.ChecksumType +class ChecksumTypeJni { + public: + // Returns the equivalent org.rocksdb.ChecksumType for the provided + // C++ rocksdb::ChecksumType enum + static jbyte toJavaChecksumType( + const rocksdb::ChecksumType& checksum_type) { + switch(checksum_type) { + case rocksdb::ChecksumType::kNoChecksum: + return 0x0; + case rocksdb::ChecksumType::kCRC32c: + return 0x1; + case rocksdb::ChecksumType::kxxHash: + return 0x2; + case rocksdb::ChecksumType::kxxHash64: + return 0x3; + default: + return 0x7F; // undefined + } + } - std::string name(utf); - env->ReleaseStringUTFChars(js, utf); - *has_exception = JNI_FALSE; - return name; - } + // Returns the equivalent C++ rocksdb::ChecksumType enum for the + // provided Java org.rocksdb.ChecksumType + static rocksdb::ChecksumType toCppChecksumType( + jbyte jchecksum_type) { + switch(jchecksum_type) { + case 0x0: + return rocksdb::ChecksumType::kNoChecksum; + case 0x1: + return rocksdb::ChecksumType::kCRC32c; + case 0x2: + return rocksdb::ChecksumType::kxxHash; + case 0x3: + return rocksdb::ChecksumType::kxxHash64; + default: + // undefined/default + return rocksdb::ChecksumType::kCRC32c; + } + } +}; - /** - * Copies bytes from a std::string to a jByteArray - * - * @param env A pointer to the java environment - * @param bytes The bytes to copy - * - * @return the Java byte[] or nullptr if an exception occurs - * - * @throws RocksDBException thrown - * if memory size to copy exceeds general java specific array size limitation. 
-   */
-    static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
-      return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
-    }

+// The portal class for org.rocksdb.Priority
+class PriorityJni {
+ public:
+  // Returns the equivalent org.rocksdb.Priority for the provided
+  // C++ rocksdb::Env::Priority enum
+  static jbyte toJavaPriority(
+      const rocksdb::Env::Priority& priority) {
+    switch(priority) {
+      case rocksdb::Env::Priority::BOTTOM:
+        return 0x0;
+      case rocksdb::Env::Priority::LOW:
+        return 0x1;
+      case rocksdb::Env::Priority::HIGH:
+        return 0x2;
+      case rocksdb::Env::Priority::TOTAL:
+        return 0x3;
+      default:
+        return 0x7F;  // undefined
+    }
+  }

-    /**
-     * Given a Java byte[][] which is an array of java.lang.Strings
-     * where each String is a byte[], the passed function `string_fn`
-     * will be called on each String, the result is then collected by
-     * calling the passed function `collector_fn`
-     *
-     * @param env (IN) A pointer to the java environment
-     * @param jbyte_strings (IN) A Java array of Strings expressed as bytes
-     * @param string_fn (IN) A transform function to call for each String
-     * @param collector_fn (IN) A collector which is called for the result
-     *     of each `string_fn`
-     * @param has_exception (OUT) will be set to JNI_TRUE
-     *     if an ArrayIndexOutOfBoundsException or OutOfMemoryError
-     *     exception occurs
-     */
-    template <typename T> static void byteStrings(JNIEnv* env,
-        jobjectArray jbyte_strings,
-        std::function<T(const char*, const size_t)> string_fn,
-        std::function<void(size_t, T)> collector_fn,
-        jboolean *has_exception) {
-      const jsize jlen = env->GetArrayLength(jbyte_strings);

+  // Returns the equivalent C++ rocksdb::Env::Priority enum for the
+  // provided Java org.rocksdb.Priority
+  static rocksdb::Env::Priority toCppPriority(
+      jbyte jpriority) {
+    switch(jpriority) {
+      case 0x0:
+        return rocksdb::Env::Priority::BOTTOM;
+      case 0x1:
+        return rocksdb::Env::Priority::LOW;
+      case 0x2:
+        return rocksdb::Env::Priority::HIGH;
+      case 0x3:
+        return rocksdb::Env::Priority::TOTAL;
+      default:
+        // undefined/default
+        return rocksdb::Env::Priority::LOW;
+    }
+  }
+};

-      for(jsize i = 0; i < jlen; i++) {
-        jobject jbyte_string_obj = env->GetObjectArrayElement(jbyte_strings, i);
-        if(env->ExceptionCheck()) {
-          // exception thrown: ArrayIndexOutOfBoundsException
-          *has_exception = JNI_TRUE;  // signal error
-          return;
-        }

+// The portal class for org.rocksdb.ThreadType
+class ThreadTypeJni {
+ public:
+  // Returns the equivalent org.rocksdb.ThreadType for the provided
+  // C++ rocksdb::ThreadStatus::ThreadType enum
+  static jbyte toJavaThreadType(
+      const rocksdb::ThreadStatus::ThreadType& thread_type) {
+    switch(thread_type) {
+      case rocksdb::ThreadStatus::ThreadType::HIGH_PRIORITY:
+        return 0x0;
+      case rocksdb::ThreadStatus::ThreadType::LOW_PRIORITY:
+        return 0x1;
+      case rocksdb::ThreadStatus::ThreadType::USER:
+        return 0x2;
+      case rocksdb::ThreadStatus::ThreadType::BOTTOM_PRIORITY:
+        return 0x3;
+      default:
+        return 0x7F;  // undefined
+    }
+  }

-        jbyteArray jbyte_string_ary =
-            reinterpret_cast<jbyteArray>(jbyte_string_obj);
-        T result = byteString(env, jbyte_string_ary, string_fn, has_exception);

+  // Returns the equivalent C++ rocksdb::ThreadStatus::ThreadType enum for the
+  // provided Java org.rocksdb.ThreadType
+  static rocksdb::ThreadStatus::ThreadType toCppThreadType(
+      jbyte jthread_type) {
+    switch(jthread_type) {
+      case 0x0:
+        return rocksdb::ThreadStatus::ThreadType::HIGH_PRIORITY;
+      case 0x1:
+        return rocksdb::ThreadStatus::ThreadType::LOW_PRIORITY;
+      case 0x2:
+        return rocksdb::ThreadStatus::ThreadType::USER;
+      case 0x3:
+
return rocksdb::ThreadStatus::ThreadType::BOTTOM_PRIORITY; + default: + // undefined/default + return rocksdb::ThreadStatus::ThreadType::LOW_PRIORITY; + } + } +}; - env->DeleteLocalRef(jbyte_string_obj); +// The portal class for org.rocksdb.OperationType +class OperationTypeJni { + public: + // Returns the equivalent org.rocksdb.OperationType for the provided + // C++ rocksdb::ThreadStatus::OperationType enum + static jbyte toJavaOperationType( + const rocksdb::ThreadStatus::OperationType& operation_type) { + switch(operation_type) { + case rocksdb::ThreadStatus::OperationType::OP_UNKNOWN: + return 0x0; + case rocksdb::ThreadStatus::OperationType::OP_COMPACTION: + return 0x1; + case rocksdb::ThreadStatus::OperationType::OP_FLUSH: + return 0x2; + default: + return 0x7F; // undefined + } + } - if(*has_exception == JNI_TRUE) { - // exception thrown: OutOfMemoryError - return; - } + // Returns the equivalent C++ rocksdb::ThreadStatus::OperationType enum for the + // provided Java org.rocksdb.OperationType + static rocksdb::ThreadStatus::OperationType toCppOperationType( + jbyte joperation_type) { + switch(joperation_type) { + case 0x0: + return rocksdb::ThreadStatus::OperationType::OP_UNKNOWN; + case 0x1: + return rocksdb::ThreadStatus::OperationType::OP_COMPACTION; + case 0x2: + return rocksdb::ThreadStatus::OperationType::OP_FLUSH; + default: + // undefined/default + return rocksdb::ThreadStatus::OperationType::OP_UNKNOWN; + } + } +}; - collector_fn(i, result); - } +// The portal class for org.rocksdb.OperationStage +class OperationStageJni { + public: + // Returns the equivalent org.rocksdb.OperationStage for the provided + // C++ rocksdb::ThreadStatus::OperationStage enum + static jbyte toJavaOperationStage( + const rocksdb::ThreadStatus::OperationStage& operation_stage) { + switch(operation_stage) { + case rocksdb::ThreadStatus::OperationStage::STAGE_UNKNOWN: + return 0x0; + case rocksdb::ThreadStatus::OperationStage::STAGE_FLUSH_RUN: + return 0x1; + case rocksdb::ThreadStatus::OperationStage::STAGE_FLUSH_WRITE_L0: + return 0x2; + case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_PREPARE: + return 0x3; + case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_RUN: + return 0x4; + case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_PROCESS_KV: + return 0x5; + case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_INSTALL: + return 0x6; + case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_SYNC_FILE: + return 0x7; + case rocksdb::ThreadStatus::OperationStage::STAGE_PICK_MEMTABLES_TO_FLUSH: + return 0x8; + case rocksdb::ThreadStatus::OperationStage::STAGE_MEMTABLE_ROLLBACK: + return 0x9; + case rocksdb::ThreadStatus::OperationStage::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS: + return 0xA; + default: + return 0x7F; // undefined + } + } - *has_exception = JNI_FALSE; - } + // Returns the equivalent C++ rocksdb::ThreadStatus::OperationStage enum for the + // provided Java org.rocksdb.OperationStage + static rocksdb::ThreadStatus::OperationStage toCppOperationStage( + jbyte joperation_stage) { + switch(joperation_stage) { + case 0x0: + return rocksdb::ThreadStatus::OperationStage::STAGE_UNKNOWN; + case 0x1: + return rocksdb::ThreadStatus::OperationStage::STAGE_FLUSH_RUN; + case 0x2: + return rocksdb::ThreadStatus::OperationStage::STAGE_FLUSH_WRITE_L0; + case 0x3: + return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_PREPARE; + case 0x4: + return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_RUN; + case 0x5: + return 
rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_PROCESS_KV; + case 0x6: + return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_INSTALL; + case 0x7: + return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_SYNC_FILE; + case 0x8: + return rocksdb::ThreadStatus::OperationStage::STAGE_PICK_MEMTABLES_TO_FLUSH; + case 0x9: + return rocksdb::ThreadStatus::OperationStage::STAGE_MEMTABLE_ROLLBACK; + case 0xA: + return rocksdb::ThreadStatus::OperationStage::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS; + default: + // undefined/default + return rocksdb::ThreadStatus::OperationStage::STAGE_UNKNOWN; + } + } +}; - /** - * Given a Java String which is expressed as a Java Byte Array byte[], - * the passed function `string_fn` will be called on the String - * and the result returned - * - * @param env (IN) A pointer to the java environment - * @param jbyte_string_ary (IN) A Java String expressed in bytes - * @param string_fn (IN) A transform function to call on the String - * @param has_exception (OUT) will be set to JNI_TRUE - * if an OutOfMemoryError exception occurs - */ - template static T byteString(JNIEnv* env, - jbyteArray jbyte_string_ary, - std::function string_fn, - jboolean* has_exception) { - const jsize jbyte_string_len = env->GetArrayLength(jbyte_string_ary); - return byteString(env, jbyte_string_ary, jbyte_string_len, string_fn, - has_exception); - } +// The portal class for org.rocksdb.StateType +class StateTypeJni { + public: + // Returns the equivalent org.rocksdb.StateType for the provided + // C++ rocksdb::ThreadStatus::StateType enum + static jbyte toJavaStateType( + const rocksdb::ThreadStatus::StateType& state_type) { + switch(state_type) { + case rocksdb::ThreadStatus::StateType::STATE_UNKNOWN: + return 0x0; + case rocksdb::ThreadStatus::StateType::STATE_MUTEX_WAIT: + return 0x1; + default: + return 0x7F; // undefined + } + } - /** - * Given a Java String which is expressed as a Java Byte Array byte[], - * the passed function `string_fn` will be called on the String - * and the result returned - * - * @param env (IN) A pointer to the java environment - * @param jbyte_string_ary (IN) A Java String expressed in bytes - * @param jbyte_string_len (IN) The length of the Java String - * expressed in bytes - * @param string_fn (IN) A transform function to call on the String - * @param has_exception (OUT) will be set to JNI_TRUE - * if an OutOfMemoryError exception occurs - */ - template static T byteString(JNIEnv* env, - jbyteArray jbyte_string_ary, const jsize jbyte_string_len, - std::function string_fn, - jboolean* has_exception) { - jbyte* jbyte_string = - env->GetByteArrayElements(jbyte_string_ary, nullptr); - if(jbyte_string == nullptr) { - // exception thrown: OutOfMemoryError - *has_exception = JNI_TRUE; - return nullptr; // signal error - } + // Returns the equivalent C++ rocksdb::ThreadStatus::StateType enum for the + // provided Java org.rocksdb.StateType + static rocksdb::ThreadStatus::StateType toCppStateType( + jbyte jstate_type) { + switch(jstate_type) { + case 0x0: + return rocksdb::ThreadStatus::StateType::STATE_UNKNOWN; + case 0x1: + return rocksdb::ThreadStatus::StateType::STATE_MUTEX_WAIT; + default: + // undefined/default + return rocksdb::ThreadStatus::StateType::STATE_UNKNOWN; + } + } +}; - T result = - string_fn(reinterpret_cast(jbyte_string), jbyte_string_len); +// The portal class for org.rocksdb.ThreadStatus +class ThreadStatusJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.ThreadStatus + * + * @param env A pointer to 
the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env,
+        "org/rocksdb/ThreadStatus");
+  }

-      env->ReleaseByteArrayElements(jbyte_string_ary, jbyte_string, JNI_ABORT);

+  /**
+   * Create a new Java org.rocksdb.ThreadStatus object with the same
+   * properties as the provided C++ rocksdb::ThreadStatus object
+   *
+   * @param env A pointer to the Java environment
+   * @param thread_status A pointer to rocksdb::ThreadStatus object
+   *
+   * @return A reference to a Java org.rocksdb.ThreadStatus object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject construct(JNIEnv* env,
+      const rocksdb::ThreadStatus* thread_status) {
+    jclass jclazz = getJClass(env);
+    if(jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }

-      *has_exception = JNI_FALSE;
-      return result;

+    jmethodID mid = env->GetMethodID(jclazz, "<init>",
+        "(JBLjava/lang/String;Ljava/lang/String;BJB[JB)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }

-    /**
-     * Converts a std::vector<std::string> to a Java byte[][] where each
-     * Java String is expressed as a Java Byte Array byte[].
-     *
-     * @param env A pointer to the java environment
-     * @param strings A vector of Strings
-     *
-     * @return A Java array of Strings expressed as bytes
-     */
-    static jobjectArray stringsBytes(JNIEnv* env,
-        std::vector<std::string> strings) {
-      jclass jcls_ba = ByteJni::getArrayJClass(env);
-      if(jcls_ba == nullptr) {
-        // exception occurred
-        return nullptr;
-      }

+    jstring jdb_name =
+        JniUtil::toJavaString(env, &(thread_status->db_name), true);
+    if (env->ExceptionCheck()) {
+      // an error occurred
+      return nullptr;
+    }

-      const jsize len = static_cast<jsize>(strings.size());

+    jstring jcf_name =
+        JniUtil::toJavaString(env, &(thread_status->cf_name), true);
+    if (env->ExceptionCheck()) {
+      // an error occurred
+      env->DeleteLocalRef(jdb_name);
+      return nullptr;
+    }

-      jobjectArray jbyte_strings = env->NewObjectArray(len, jcls_ba, nullptr);
-      if(jbyte_strings == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }

+    // long[]
+    const jsize len = static_cast<jsize>(
+        rocksdb::ThreadStatus::kNumOperationProperties);
+    jlongArray joperation_properties =
+        env->NewLongArray(len);
+    if (joperation_properties == nullptr) {
+      // an exception occurred
+      env->DeleteLocalRef(jdb_name);
+      env->DeleteLocalRef(jcf_name);
+      return nullptr;
+    }
+    jlong *body = env->GetLongArrayElements(joperation_properties, nullptr);
+    if (body == nullptr) {
+      // exception thrown: OutOfMemoryError
+      env->DeleteLocalRef(jdb_name);
+      env->DeleteLocalRef(jcf_name);
+      env->DeleteLocalRef(joperation_properties);
+      return nullptr;
+    }
+    for (size_t i = 0; i < static_cast<size_t>(len); ++i) {
+      body[i] = static_cast<jlong>(thread_status->op_properties[i]);
+    }
+    env->ReleaseLongArrayElements(joperation_properties, body, 0);
+
+    jobject jthread_status = env->NewObject(jclazz, mid,
+        static_cast<jlong>(thread_status->thread_id),
+        ThreadTypeJni::toJavaThreadType(thread_status->thread_type),
+        jdb_name,
+        jcf_name,
+        OperationTypeJni::toJavaOperationType(thread_status->operation_type),
+        static_cast<jlong>(thread_status->op_elapsed_micros),
+        OperationStageJni::toJavaOperationStage(thread_status->operation_stage),
+        joperation_properties,
+        StateTypeJni::toJavaStateType(thread_status->state_type));
+    if (env->ExceptionCheck()) {
+      // exception occurred
+      env->DeleteLocalRef(jdb_name);
+      env->DeleteLocalRef(jcf_name);
+      env->DeleteLocalRef(joperation_properties);
+      return nullptr;
+    }

-      for (jsize i = 0; i < len; i++) {
-        std::string *str = &strings[i];
-        const jsize str_len = static_cast<jsize>(str->size());
-
-        jbyteArray jbyte_string_ary = env->NewByteArray(str_len);
-        if(jbyte_string_ary == nullptr) {
-          // exception thrown: OutOfMemoryError
-          env->DeleteLocalRef(jbyte_strings);
-          return nullptr;
-        }

+    // cleanup
+    env->DeleteLocalRef(jdb_name);
+    env->DeleteLocalRef(jcf_name);
+    env->DeleteLocalRef(joperation_properties);
+
+    return jthread_status;
+  }
+};
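ThreadStatusJni::construct is the C++-to-Java half of the new org.rocksdb.ThreadStatus support. A hedged sketch of how a JNI entry point might drive it, assuming `env` (the JNIEnv) and a rocksdb::Env* named `env_ptr` are in scope (both names are placeholders, not part of this patch):

    // Sketch: converting Env::GetThreadList output into a Java object array.
    std::vector<rocksdb::ThreadStatus> thread_list;
    rocksdb::Status s = env_ptr->GetThreadList(&thread_list);
    if (s.ok()) {
      const jsize len = static_cast<jsize>(thread_list.size());
      jobjectArray jthread_statuses = env->NewObjectArray(
          len, rocksdb::ThreadStatusJni::getJClass(env), nullptr);
      if (jthread_statuses != nullptr) {
        for (jsize i = 0; i < len; i++) {
          jobject jts = rocksdb::ThreadStatusJni::construct(env, &thread_list[i]);
          if (jts == nullptr) {
            break;  // exception pending in the JVM
          }
          env->SetObjectArrayElement(jthread_statuses, i, jts);
          env->DeleteLocalRef(jts);  // the array now holds the reference
        }
      }
    }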
+
+// The portal class for org.rocksdb.CompactionStyle
+class CompactionStyleJni {
+ public:
+  // Returns the equivalent org.rocksdb.CompactionStyle for the provided
+  // C++ rocksdb::CompactionStyle enum
+  static jbyte toJavaCompactionStyle(
+      const rocksdb::CompactionStyle& compaction_style) {
+    switch(compaction_style) {
+      case rocksdb::CompactionStyle::kCompactionStyleLevel:
+        return 0x0;
+      case rocksdb::CompactionStyle::kCompactionStyleUniversal:
+        return 0x1;
+      case rocksdb::CompactionStyle::kCompactionStyleFIFO:
+        return 0x2;
+      case rocksdb::CompactionStyle::kCompactionStyleNone:
+        return 0x3;
+      default:
+        return 0x7F;  // undefined
+    }
+  }
+
+  // Returns the equivalent C++ rocksdb::CompactionStyle enum for the
+  // provided Java org.rocksdb.CompactionStyle
+  static rocksdb::CompactionStyle toCppCompactionStyle(
+      jbyte jcompaction_style) {
+    switch(jcompaction_style) {
+      case 0x0:
+        return rocksdb::CompactionStyle::kCompactionStyleLevel;
+      case 0x1:
+        return rocksdb::CompactionStyle::kCompactionStyleUniversal;
+      case 0x2:
+        return rocksdb::CompactionStyle::kCompactionStyleFIFO;
+      case 0x3:
+        return rocksdb::CompactionStyle::kCompactionStyleNone;
+      default:
+        // undefined/default
+        return rocksdb::CompactionStyle::kCompactionStyleLevel;
+    }
+  }
+};
+
+// The portal class for org.rocksdb.CompactionReason
+class CompactionReasonJni {
+ public:
+  // Returns the equivalent org.rocksdb.CompactionReason for the provided
+  // C++ rocksdb::CompactionReason enum
+  static jbyte toJavaCompactionReason(
+      const rocksdb::CompactionReason& compaction_reason) {
+    switch(compaction_reason) {
+      case rocksdb::CompactionReason::kUnknown:
+        return 0x0;
+      case rocksdb::CompactionReason::kLevelL0FilesNum:
+        return 0x1;
+      case rocksdb::CompactionReason::kLevelMaxLevelSize:
+        return 0x2;
+      case rocksdb::CompactionReason::kUniversalSizeAmplification:
+        return 0x3;
+      case rocksdb::CompactionReason::kUniversalSizeRatio:
+        return 0x4;
+      case rocksdb::CompactionReason::kUniversalSortedRunNum:
+        return 0x5;
+      case rocksdb::CompactionReason::kFIFOMaxSize:
+        return 0x6;
+      case rocksdb::CompactionReason::kFIFOReduceNumFiles:
+        return 0x7;
+      case rocksdb::CompactionReason::kFIFOTtl:
+        return 0x8;
+      case rocksdb::CompactionReason::kManualCompaction:
+        return 0x9;
+      case rocksdb::CompactionReason::kFilesMarkedForCompaction:
+        return 0x10;
+      case rocksdb::CompactionReason::kBottommostFiles:
+        return 0x0A;
+      case rocksdb::CompactionReason::kTtl:
+        return 0x0B;
+      case rocksdb::CompactionReason::kFlush:
+        return 0x0C;
+      case rocksdb::CompactionReason::kExternalSstIngestion:
+        return 0x0D;
+      default:
+        return 0x7F;  // undefined
+    }
+  }

-      env->SetByteArrayRegion(
-          jbyte_string_ary, 0, str_len,
-          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(str->c_str())));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jbyte_string_ary);
-        env->DeleteLocalRef(jbyte_strings);
-        return nullptr;
-      }

+  // Returns the equivalent C++ rocksdb::CompactionReason enum for the
+  // provided Java org.rocksdb.CompactionReason
+  static rocksdb::CompactionReason toCppCompactionReason(
+      jbyte jcompaction_reason) {
+    switch(jcompaction_reason) {
+      case 0x0:
+        return rocksdb::CompactionReason::kUnknown;
+      case 0x1:
+        return rocksdb::CompactionReason::kLevelL0FilesNum;
+      case 0x2:
+        return rocksdb::CompactionReason::kLevelMaxLevelSize;
+      case 0x3:
+        return rocksdb::CompactionReason::kUniversalSizeAmplification;
+      case 0x4:
+        return rocksdb::CompactionReason::kUniversalSizeRatio;
+      case 0x5:
+        return rocksdb::CompactionReason::kUniversalSortedRunNum;
+      case 0x6:
+        return rocksdb::CompactionReason::kFIFOMaxSize;
+      case 0x7:
+        return rocksdb::CompactionReason::kFIFOReduceNumFiles;
+      case 0x8:
+        return rocksdb::CompactionReason::kFIFOTtl;
+      case 0x9:
+        return rocksdb::CompactionReason::kManualCompaction;
+      case 0x10:
+        return rocksdb::CompactionReason::kFilesMarkedForCompaction;
+      case 0x0A:
+        return rocksdb::CompactionReason::kBottommostFiles;
+      case 0x0B:
+        return rocksdb::CompactionReason::kTtl;
+      case 0x0C:
+        return rocksdb::CompactionReason::kFlush;
+      case 0x0D:
+        return rocksdb::CompactionReason::kExternalSstIngestion;
+      default:
+        // undefined/default
+        return rocksdb::CompactionReason::kUnknown;
+    }
+  }
+};
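These jbyte-based enum portals are used in matched pairs on the JNI boundary, and unknown byte values deliberately degrade to a documented default instead of throwing. A round-trip sketch:

    // Round-trip sketch for the enum portals above.
    rocksdb::CompactionStyle style =
        rocksdb::CompactionStyle::kCompactionStyleUniversal;
    jbyte jstyle = rocksdb::CompactionStyleJni::toJavaCompactionStyle(style);  // 0x1
    assert(rocksdb::CompactionStyleJni::toCppCompactionStyle(jstyle) == style);
    // Unrecognised bytes fall back to the default rather than throwing:
    assert(rocksdb::CompactionStyleJni::toCppCompactionStyle(0x7F)
           == rocksdb::CompactionStyle::kCompactionStyleLevel);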
-      env->SetObjectArrayElement(jbyte_strings, i, jbyte_string_ary);
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        // or ArrayStoreException
-        env->DeleteLocalRef(jbyte_string_ary);
-        env->DeleteLocalRef(jbyte_strings);
-        return nullptr;
-      }

+// The portal class for org.rocksdb.WalFileType
+class WalFileTypeJni {
+ public:
+  // Returns the equivalent org.rocksdb.WalFileType for the provided
+  // C++ rocksdb::WalFileType enum
+  static jbyte toJavaWalFileType(
+      const rocksdb::WalFileType& wal_file_type) {
+    switch(wal_file_type) {
+      case rocksdb::WalFileType::kArchivedLogFile:
+        return 0x0;
+      case rocksdb::WalFileType::kAliveLogFile:
+        return 0x1;
+      default:
+        return 0x7F;  // undefined
+    }
+  }

-      env->DeleteLocalRef(jbyte_string_ary);
-    }

+  // Returns the equivalent C++ rocksdb::WalFileType enum for the
+  // provided Java org.rocksdb.WalFileType
+  static rocksdb::WalFileType toCppWalFileType(
+      jbyte jwal_file_type) {
+    switch(jwal_file_type) {
+      case 0x0:
+        return rocksdb::WalFileType::kArchivedLogFile;
+      case 0x1:
+        return rocksdb::WalFileType::kAliveLogFile;
+      default:
+        // undefined/default
+        return rocksdb::WalFileType::kAliveLogFile;
+    }
+  }
+};

-      return jbyte_strings;
-    }
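WalFileTypeJni feeds the LogFileJni helper that follows; a hedged sketch of the intended conversion path from DB::GetSortedWalFiles, assuming the surrounding JNI function with `db` and `env` in scope (neither is part of this patch):

    // Sketch: turning the WAL summary into Java org.rocksdb.LogFile objects.
    rocksdb::VectorLogPtr wal_files;
    rocksdb::Status s = db->GetSortedWalFiles(wal_files);
    if (s.ok()) {
      for (auto& wal_file : wal_files) {
        jobject jlog_file =
            rocksdb::LogFileJni::fromCppLogFile(env, wal_file.get());
        if (jlog_file == nullptr) {
          break;  // exception pending
        }
        // ... append jlog_file to a java.util.List, then DeleteLocalRef it ...
      }
    }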
+class LogFileJni : public JavaClass {
+ public:
+  /**
+   * Create a new Java org.rocksdb.LogFile object.
+   *
+   * @param env A pointer to the Java environment
+   * @param log_file A Cpp log file object
+   *
+   * @return A reference to a Java org.rocksdb.LogFile object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject fromCppLogFile(JNIEnv* env, rocksdb::LogFile* log_file) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }

-    /**
-     * Copies bytes to a new jByteArray with the check of java array size
-     * limitation.
-     *
-     * @param bytes pointer to memory to copy to a new jByteArray
-     * @param size number of bytes to copy
-     *
-     * @return the Java byte[] or nullptr if an exception occurs
-     *
-     * @throws RocksDBException thrown
-     *     if memory size to copy exceeds general java array size limitation
-     *     to avoid overflow.
-     */
-    static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env,
-        const char* bytes, const size_t size) {
-      // Limitation for java array size is vm specific
-      // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1)
-      // Current HotSpot VM limitation for array size is
-      // Integer.MAX_VALUE - 5 (2^31 - 1 - 5)
-      // It means that the next call to env->NewByteArray can still end with
-      // OutOfMemoryError("Requested array size exceeds VM limit") coming from VM
-      static const size_t MAX_JARRAY_SIZE = (static_cast<size_t>(1)) << 31;
-      if(size > MAX_JARRAY_SIZE) {
-        rocksdb::RocksDBExceptionJni::ThrowNew(env, "Requested array size exceeds VM limit");
-        return nullptr;
-      }

+    jmethodID mid = env->GetMethodID(jclazz, "<init>",
+        "(Ljava/lang/String;JBJJ)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }

-      const jsize jlen = static_cast<jsize>(size);
-      jbyteArray jbytes = env->NewByteArray(jlen);
-      if(jbytes == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }

+    std::string path_name = log_file->PathName();
+    jstring jpath_name = rocksdb::JniUtil::toJavaString(env, &path_name, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      return nullptr;
+    }

-      env->SetByteArrayRegion(jbytes, 0, jlen,
-          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes)));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jbytes);
-        return nullptr;
-      }

+    jobject jlog_file = env->NewObject(jclazz, mid,
+        jpath_name,
+        static_cast<jlong>(log_file->LogNumber()),
+        rocksdb::WalFileTypeJni::toJavaWalFileType(log_file->Type()),
+        static_cast<jlong>(log_file->StartSequence()),
+        static_cast<jlong>(log_file->SizeFileBytes())
+    );

-      return jbytes;
-    }

-    /**
-     * Copies bytes from a rocksdb::Slice to a jByteArray
-     *
-     * @param env A pointer to the java environment
-     * @param bytes The bytes to copy
-     *
-     * @return the Java byte[] or nullptr if an exception occurs
-     *
-     * @throws RocksDBException thrown
-     *     if memory size to copy exceeds general java specific array size
-     *     limitation.
-     */
-    static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) {
-      return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size());
-    }

+    if (env->ExceptionCheck()) {
+      env->DeleteLocalRef(jpath_name);
+      return nullptr;
+    }
+
+    // cleanup
+    env->DeleteLocalRef(jpath_name);
+
+    return jlog_file;
+  }

-    /*
-     * Helper for operations on a key and value
-     * for example WriteBatch->Put
-     *
-     * TODO(AR) could be used for RocksDB->Put etc.
-     */
-    static std::unique_ptr<rocksdb::Status> kv_op(
-        std::function<rocksdb::Status(rocksdb::Slice, rocksdb::Slice)> op,
-        JNIEnv* env, jobject /*jobj*/,
-        jbyteArray jkey, jint jkey_len,
-        jbyteArray jvalue, jint jvalue_len) {
-      jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-      if(env->ExceptionCheck()) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      jbyte* value = env->GetByteArrayElements(jvalue, nullptr);
-      if(env->ExceptionCheck()) {
-        // exception thrown: OutOfMemoryError
-        if(key != nullptr) {
-          env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-        }
-        return nullptr;
-      }
-
-      rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-      rocksdb::Slice value_slice(reinterpret_cast<char*>(value),
-          jvalue_len);

+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/LogFile");
+  }
+};
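LiveFileMetaDataJni (below) completes the same pattern for DB::GetLiveFilesMetaData; a hedged usage sketch, again assuming `db` and `env` in scope of a surrounding JNI function:

    // Sketch: converting live-file metadata into Java objects.
    std::vector<rocksdb::LiveFileMetaData> metadata;
    db->GetLiveFilesMetaData(&metadata);
    jclass jclazz = env->FindClass("org/rocksdb/LiveFileMetaData");
    if (jclazz != nullptr) {
      const jsize len = static_cast<jsize>(metadata.size());
      jobjectArray jmetadata = env->NewObjectArray(len, jclazz, nullptr);
      if (jmetadata != nullptr) {
        for (jsize i = 0; i < len; i++) {
          jobject jlfmd = rocksdb::LiveFileMetaDataJni::fromCppLiveFileMetaData(
              env, &metadata[i]);
          if (jlfmd == nullptr) {
            break;  // exception pending
          }
          env->SetObjectArrayElement(jmetadata, i, jlfmd);
          env->DeleteLocalRef(jlfmd);
        }
      }
    }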
-      rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-      rocksdb::Slice value_slice(reinterpret_cast<char*>(value),
-          jvalue_len);
+class LiveFileMetaDataJni : public JavaClass {
+ public:
+  /**
+   * Create a new Java org.rocksdb.LiveFileMetaData object.
+   *
+   * @param env A pointer to the Java environment
+   * @param live_file_meta_data A Cpp live file meta data object
+   *
+   * @return A reference to a Java org.rocksdb.LiveFileMetaData object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject fromCppLiveFileMetaData(JNIEnv* env,
+      rocksdb::LiveFileMetaData* live_file_meta_data) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }

-      auto status = op(key_slice, value_slice);
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "([BILjava/lang/String;Ljava/lang/String;JJJ[B[BJZJJ)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }

-      if(value != nullptr) {
-        env->ReleaseByteArrayElements(jvalue, value, JNI_ABORT);
-      }
-      if(key != nullptr) {
-        env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-      }
+    jbyteArray jcolumn_family_name = rocksdb::JniUtil::copyBytes(
+        env, live_file_meta_data->column_family_name);
+    if (jcolumn_family_name == nullptr) {
+      // exception occurred creating java byte array
+      return nullptr;
+    }

-      return std::unique_ptr<rocksdb::Status>(new rocksdb::Status(status));
+    jstring jfile_name = rocksdb::JniUtil::toJavaString(
+        env, &live_file_meta_data->name, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jcolumn_family_name);
+      return nullptr;
     }

-    /*
-     * Helper for operations on a key
-     * for example WriteBatch->Delete
-     *
-     * TODO(AR) could be used for RocksDB->Delete etc.
-     */
-    static std::unique_ptr<rocksdb::Status> k_op(
-        std::function<rocksdb::Status(rocksdb::Slice)> op,
-        JNIEnv* env, jobject /*jobj*/,
-        jbyteArray jkey, jint jkey_len) {
-      jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-      if(env->ExceptionCheck()) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
+    jstring jpath = rocksdb::JniUtil::toJavaString(
+        env, &live_file_meta_data->db_path, true);
+    if (env->ExceptionCheck()) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfile_name);
+      return nullptr;
+    }

-      rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+    jbyteArray jsmallest_key = rocksdb::JniUtil::copyBytes(
+        env, live_file_meta_data->smallestkey);
+    if (jsmallest_key == nullptr) {
+      // exception occurred creating java byte array
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfile_name);
+      env->DeleteLocalRef(jpath);
+      return nullptr;
+    }

-      auto status = op(key_slice);
+    jbyteArray jlargest_key = rocksdb::JniUtil::copyBytes(
+        env, live_file_meta_data->largestkey);
+    if (jlargest_key == nullptr) {
+      // exception occurred creating java byte array
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfile_name);
+      env->DeleteLocalRef(jpath);
+      env->DeleteLocalRef(jsmallest_key);
+      return nullptr;
+    }

-      if(key != nullptr) {
-        env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-      }
+    jobject jlive_file_meta_data = env->NewObject(jclazz, mid,
+        jcolumn_family_name,
+        static_cast<jint>(live_file_meta_data->level),
+        jfile_name,
+        jpath,
+        static_cast<jlong>(live_file_meta_data->size),
+        static_cast<jlong>(live_file_meta_data->smallest_seqno),
+        static_cast<jlong>(live_file_meta_data->largest_seqno),
+        jsmallest_key,
+        jlargest_key,
+        static_cast<jlong>(live_file_meta_data->num_reads_sampled),
+        static_cast<jboolean>(live_file_meta_data->being_compacted),
+        static_cast<jlong>(live_file_meta_data->num_entries),
+        static_cast<jlong>(live_file_meta_data->num_deletions)
+    );

-      return std::unique_ptr<rocksdb::Status>(new rocksdb::Status(status));
+    if (env->ExceptionCheck()) {
+      env->DeleteLocalRef(jcolumn_family_name);
+      env->DeleteLocalRef(jfile_name);
+      env->DeleteLocalRef(jpath);
+      env->DeleteLocalRef(jsmallest_key);
+      env->DeleteLocalRef(jlargest_key);
+      return nullptr;
     }

-    /*
-     * Helper for operations on a value
-     * for example WriteBatchWithIndex->GetFromBatch
-     */
-    static jbyteArray v_op(
-        std::function<rocksdb::Status(rocksdb::Slice, std::string*)> op,
-        JNIEnv* env, jbyteArray jkey, jint jkey_len) {
-      jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-      if(env->ExceptionCheck()) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
+    // cleanup
+    env->DeleteLocalRef(jcolumn_family_name);
+    env->DeleteLocalRef(jfile_name);
+    env->DeleteLocalRef(jpath);
+    env->DeleteLocalRef(jsmallest_key);
+    env->DeleteLocalRef(jlargest_key);

-      rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+    return jlive_file_meta_data;
+  }

-      std::string value;
-      rocksdb::Status s = op(key_slice, &value);
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/LiveFileMetaData");
+  }
+};
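A brief Java-side sketch of the call that ends up in fromCppLiveFileMetaData above (hedged: assumes RocksDB.getLiveFilesMetaData() and the LiveFileMetaData accessors added alongside this portal class; accessor names are inferred, not confirmed by the diff):

    // Hypothetical usage sketch.
    final List<LiveFileMetaData> liveFiles = db.getLiveFilesMetaData();
    for (final LiveFileMetaData meta : liveFiles) {
      System.out.println(new String(meta.columnFamilyName())
          + " level=" + meta.level()
          + " file=" + meta.fileName()
          + " entries=" + meta.numEntries());
    }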
-      if(key != nullptr) {
-        env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-      }
+class SstFileMetaDataJni : public JavaClass {
+ public:
+  /**
+   * Create a new Java org.rocksdb.SstFileMetaData object.
+   *
+   * @param env A pointer to the Java environment
+   * @param sst_file_meta_data A Cpp sst file meta data object
+   *
+   * @return A reference to a Java org.rocksdb.SstFileMetaData object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject fromCppSstFileMetaData(JNIEnv* env,
+      const rocksdb::SstFileMetaData* sst_file_meta_data) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }

-      if (s.IsNotFound()) {
-        return nullptr;
-      }
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(Ljava/lang/String;Ljava/lang/String;JJJ[B[BJZJJ)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }

-      if (s.ok()) {
-        jbyteArray jret_value =
-            env->NewByteArray(static_cast<jsize>(value.size()));
-        if(jret_value == nullptr) {
-          // exception thrown: OutOfMemoryError
-          return nullptr;
-        }
+    jstring jfile_name = rocksdb::JniUtil::toJavaString(
+        env, &sst_file_meta_data->name, true);
+    if (jfile_name == nullptr) {
+      // exception occurred creating java string
+      return nullptr;
+    }

-        env->SetByteArrayRegion(jret_value, 0, static_cast<jsize>(value.size()),
-            const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value.c_str())));
-        if(env->ExceptionCheck()) {
-          // exception thrown: ArrayIndexOutOfBoundsException
-          if(jret_value != nullptr) {
-            env->DeleteLocalRef(jret_value);
-          }
-          return nullptr;
-        }
+    jstring jpath = rocksdb::JniUtil::toJavaString(
+        env, &sst_file_meta_data->db_path, true);
+    if (jpath == nullptr) {
+      // exception occurred creating java string
+      env->DeleteLocalRef(jfile_name);
+      return nullptr;
+    }

-        return jret_value;
-      }
+    jbyteArray jsmallest_key = rocksdb::JniUtil::copyBytes(
+        env, sst_file_meta_data->smallestkey);
+    if (jsmallest_key == nullptr) {
+      // exception occurred creating java byte array
+      env->DeleteLocalRef(jfile_name);
+      env->DeleteLocalRef(jpath);
+      return nullptr;
+    }

-      rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    jbyteArray jlargest_key = rocksdb::JniUtil::copyBytes(
+        env, sst_file_meta_data->largestkey);
+    if (jlargest_key == nullptr) {
+      // exception occurred creating java byte array
+      env->DeleteLocalRef(jfile_name);
+      env->DeleteLocalRef(jpath);
+      env->DeleteLocalRef(jsmallest_key);
+      return nullptr;
+    }
+
+    jobject jsst_file_meta_data = env->NewObject(jclazz, mid,
+        jfile_name,
+        jpath,
+        static_cast<jlong>(sst_file_meta_data->size),
+        static_cast<jlong>(sst_file_meta_data->smallest_seqno),
+        static_cast<jlong>(sst_file_meta_data->largest_seqno),
+        jsmallest_key,
+        jlargest_key,
+        static_cast<jlong>(sst_file_meta_data->num_reads_sampled),
+        static_cast<jboolean>(sst_file_meta_data->being_compacted),
+        static_cast<jlong>(sst_file_meta_data->num_entries),
+        static_cast<jlong>(sst_file_meta_data->num_deletions)
+    );
+
+    if (env->ExceptionCheck()) {
+      env->DeleteLocalRef(jfile_name);
+      env->DeleteLocalRef(jpath);
+      env->DeleteLocalRef(jsmallest_key);
+      env->DeleteLocalRef(jlargest_key);
       return nullptr;
     }
+
+    // cleanup
+    env->DeleteLocalRef(jfile_name);
+    env->DeleteLocalRef(jpath);
+    env->DeleteLocalRef(jsmallest_key);
+    env->DeleteLocalRef(jlargest_key);
+
+    return jsst_file_meta_data;
+  }
+
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/SstFileMetaData");
+  }
 };

-class ColumnFamilyDescriptorJni : public JavaClass {
+class LevelMetaDataJni : public JavaClass {
  public:
  /**
-   * Get the Java Class org.rocksdb.ColumnFamilyDescriptor
+   * Create a new Java org.rocksdb.LevelMetaData object.
   *
   * @param env A pointer to the Java environment
+   * @param level_meta_data A Cpp level meta data object
   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   * @return A reference to a Java org.rocksdb.LevelMetaData object, or
+   *     nullptr if an exception occurs
   */
+  static jobject fromCppLevelMetaData(JNIEnv* env,
+      const rocksdb::LevelMetaData* level_meta_data) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(IJ[Lorg/rocksdb/SstFileMetaData;)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }
+
+    const jsize jlen =
+        static_cast<jsize>(level_meta_data->files.size());
+    jobjectArray jfiles = env->NewObjectArray(jlen, SstFileMetaDataJni::getJClass(env), nullptr);
+    if (jfiles == nullptr) {
+      // exception thrown: OutOfMemoryError
+      return nullptr;
+    }
+
+    jsize i = 0;
+    for (auto it = level_meta_data->files.begin();
+        it != level_meta_data->files.end(); ++it) {
+      jobject jfile = SstFileMetaDataJni::fromCppSstFileMetaData(env, &(*it));
+      if (jfile == nullptr) {
+        // exception occurred
+        env->DeleteLocalRef(jfiles);
+        return nullptr;
+      }
+      env->SetObjectArrayElement(jfiles, i++, jfile);
+    }
+
+    jobject jlevel_meta_data = env->NewObject(jclazz, mid,
+        static_cast<jint>(level_meta_data->level),
+        static_cast<jlong>(level_meta_data->size),
+        jfiles
+    );
+
+    if (env->ExceptionCheck()) {
+      env->DeleteLocalRef(jfiles);
+      return nullptr;
+    }
+
+    // cleanup
+    env->DeleteLocalRef(jfiles);
+
+    return jlevel_meta_data;
+  }
+
  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyDescriptor");
+    return JavaClass::getJClass(env, "org/rocksdb/LevelMetaData");
  }
+};

+class ColumnFamilyMetaDataJni : public JavaClass {
+ public:
  /**
-   * Create a new Java org.rocksdb.ColumnFamilyDescriptor object with the same
-   * properties as the provided C++ rocksdb::ColumnFamilyDescriptor object
+   * Create a new Java org.rocksdb.ColumnFamilyMetaData object.
   *
   * @param env A pointer to the Java environment
-   * @param cfd A pointer to rocksdb::ColumnFamilyDescriptor object
+   * @param column_family_meta_data A Cpp column family meta data object
   *
-   * @return A reference to a Java org.rocksdb.ColumnFamilyDescriptor object, or
+   * @return A reference to a Java org.rocksdb.ColumnFamilyMetaData object, or
   *     nullptr if an exception occurs
   */
-  static jobject construct(JNIEnv* env, ColumnFamilyDescriptor* cfd) {
-    jbyteArray jcf_name = JniUtil::copyBytes(env, cfd->name);
-    jobject cfopts = ColumnFamilyOptionsJni::construct(env, &(cfd->options));
-
+  static jobject fromCppColumnFamilyMetaData(JNIEnv* env,
+      const rocksdb::ColumnFamilyMetaData* column_family_meta_data) {
    jclass jclazz = getJClass(env);
    if (jclazz == nullptr) {
      // exception occurred accessing class
      return nullptr;
    }

-    jmethodID mid = env->GetMethodID(jclazz, "<init>",
-        "([BLorg/rocksdb/ColumnFamilyOptions;)V");
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJ[B[Lorg/rocksdb/LevelMetaData;)V");
    if (mid == nullptr) {
      // exception thrown: NoSuchMethodException or OutOfMemoryError
-      env->DeleteLocalRef(jcf_name);
      return nullptr;
    }

-    jobject jcfd = env->NewObject(jclazz, mid, jcf_name, cfopts);
+    jbyteArray jname = rocksdb::JniUtil::copyBytes(
+        env, column_family_meta_data->name);
+    if (jname == nullptr) {
+      // exception occurred creating java byte array
+      return nullptr;
+    }
+
+    const jsize jlen =
+        static_cast<jsize>(column_family_meta_data->levels.size());
+    jobjectArray jlevels = env->NewObjectArray(jlen, LevelMetaDataJni::getJClass(env), nullptr);
+    if(jlevels == nullptr) {
+      // exception thrown: OutOfMemoryError
+      env->DeleteLocalRef(jname);
+      return nullptr;
+    }
+
+    jsize i = 0;
+    for (auto it = column_family_meta_data->levels.begin();
+        it != column_family_meta_data->levels.end(); ++it) {
+      jobject jlevel = LevelMetaDataJni::fromCppLevelMetaData(env, &(*it));
+      if (jlevel == nullptr) {
+        // exception occurred
+        env->DeleteLocalRef(jname);
+        env->DeleteLocalRef(jlevels);
+        return nullptr;
+      }
+      env->SetObjectArrayElement(jlevels, i++, jlevel);
+    }
+
+    jobject jcolumn_family_meta_data = env->NewObject(jclazz, mid,
+        static_cast<jlong>(column_family_meta_data->size),
+        static_cast<jlong>(column_family_meta_data->file_count),
+        jname,
+        jlevels
+    );
+
    if (env->ExceptionCheck()) {
-      env->DeleteLocalRef(jcf_name);
+      env->DeleteLocalRef(jname);
+      env->DeleteLocalRef(jlevels);
      return nullptr;
    }

-    return jcfd;
+    // cleanup
+    env->DeleteLocalRef(jname);
+    env->DeleteLocalRef(jlevels);
+
+    return jcolumn_family_meta_data;
+  }
+
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyMetaData");
  }
+};
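To make the nesting concrete, a hedged Java sketch of walking the metadata tree the three portal classes above build (assumes RocksDB.getColumnFamilyMetaData() from this change set; accessor names are inferred from the constructor signatures, not confirmed by the diff):

    // Hypothetical usage sketch.
    final ColumnFamilyMetaData cfMeta = db.getColumnFamilyMetaData(columnFamilyHandle);
    System.out.println("cf=" + new String(cfMeta.name())
        + " size=" + cfMeta.size() + " files=" + cfMeta.fileCount());
    for (final LevelMetaData level : cfMeta.levels()) {
      for (final SstFileMetaData file : level.files()) {
        System.out.println("  L" + level.level() + " " + file.fileName()
            + " (" + file.size() + " bytes)");
      }
    }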
+// The portal class for org.rocksdb.AbstractTraceWriter
+class AbstractTraceWriterJni : public RocksDBNativeClass<
+    const rocksdb::TraceWriterJniCallback*,
+    AbstractTraceWriterJni> {
+ public:
  /**
-   * Get the Java Method: ColumnFamilyDescriptor#columnFamilyName
+   * Get the Java Class org.rocksdb.AbstractTraceWriter
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env,
+        "org/rocksdb/AbstractTraceWriter");
+  }
+
+  /**
+   * Get the Java Method: AbstractTraceWriter#write
   *
   * @param env A pointer to the Java environment
   *
   * @return The Java Method ID or nullptr if the class or method id could not
   *     be retrieved
   */
-  static jmethodID getColumnFamilyNameMethod(JNIEnv* env) {
+  static jmethodID getWriteProxyMethodId(JNIEnv* env) {
    jclass jclazz = getJClass(env);
-    if (jclazz == nullptr) {
+    if(jclazz == nullptr) {
      // exception occurred accessing class
      return nullptr;
    }

-    static jmethodID mid = env->GetMethodID(jclazz, "columnFamilyName", "()[B");
+    static jmethodID mid = env->GetMethodID(
+        jclazz, "writeProxy", "(J)S");
    assert(mid != nullptr);
    return mid;
  }

  /**
-   * Get the Java Method: ColumnFamilyDescriptor#columnFamilyOptions
+   * Get the Java Method: AbstractTraceWriter#closeWriter
   *
   * @param env A pointer to the Java environment
   *
   * @return The Java Method ID or nullptr if the class or method id could not
   *     be retrieved
   */
-  static jmethodID getColumnFamilyOptionsMethod(JNIEnv* env) {
+  static jmethodID getCloseWriterProxyMethodId(JNIEnv* env) {
    jclass jclazz = getJClass(env);
-    if (jclazz == nullptr) {
+    if(jclazz == nullptr) {
      // exception occurred accessing class
      return nullptr;
    }

    static jmethodID mid = env->GetMethodID(
-        jclazz, "columnFamilyOptions", "()Lorg/rocksdb/ColumnFamilyOptions;");
+        jclazz, "closeWriterProxy", "()S");
    assert(mid != nullptr);
    return mid;
  }
-};
-
-class MapJni : public JavaClass {
- public:
-  /**
-   * Get the Java Class java.util.Map
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "java/util/Map");
-  }

  /**
-   * Get the Java Method: Map#put
+   * Get the Java Method: AbstractTraceWriter#getFileSize
   *
   * @param env A pointer to the Java environment
   *
   * @return The Java Method ID or nullptr if the class or method id could not
   *     be retrieved
   */
-  static jmethodID getMapPutMethodId(JNIEnv* env) {
-    jclass jlist_clazz = getClass(env);
-    if(jlist_clazz == nullptr) {
+  static jmethodID getGetFileSizeMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if(jclazz == nullptr) {
      // exception occurred accessing class
      return nullptr;
    }

-    static jmethodID mid =
-        env->GetMethodID(jlist_clazz, "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
+    static jmethodID mid = env->GetMethodID(
+        jclazz, "getFileSize", "()J");
    assert(mid != nullptr);
    return mid;
  }
};
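The three method IDs above (writeProxy, closeWriterProxy, getFileSize) back the Java callback class. A minimal subclass might look like the following sketch; the abstract method names and shapes are assumptions inferred from the JNI proxy signatures, not shown in this diff:

    // Hypothetical sketch of a user-supplied trace writer.
    public class CountingTraceWriter extends AbstractTraceWriter {
      private long bytesWritten = 0;

      @Override
      public void write(final Slice data) {
        // persist the trace record to a durable sink
        bytesWritten += data.size();
      }

      @Override
      public void closeWriter() {
        // flush/close the underlying sink
      }

      @Override
      public long getFileSize() {
        return bytesWritten;
      }
    }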
-class HashMapJni : public JavaClass {
+// The portal class for org.rocksdb.AbstractWalFilter
+class AbstractWalFilterJni : public RocksDBNativeClass<
+    const rocksdb::WalFilterJniCallback*,
+    AbstractWalFilterJni> {
  public:
  /**
-   * Get the Java Class java.util.HashMap
+   * Get the Java Class org.rocksdb.AbstractWalFilter
   *
   * @param env A pointer to the Java environment
   *
@@ -5144,120 +6979,114 @@ class HashMapJni : public JavaClass {
   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
   */
  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "java/util/HashMap");
+    return RocksDBNativeClass::getJClass(env,
+        "org/rocksdb/AbstractWalFilter");
  }

  /**
-   * Create a new Java java.util.HashMap object.
+   * Get the Java Method: AbstractWalFilter#columnFamilyLogNumberMap
   *
   * @param env A pointer to the Java environment
   *
-   * @return A reference to a Java java.util.HashMap object, or
-   *     nullptr if an exception occurs
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
   */
-  static jobject construct(JNIEnv* env, const uint32_t initial_capacity = 16) {
+  static jmethodID getColumnFamilyLogNumberMapMethodId(JNIEnv* env) {
    jclass jclazz = getJClass(env);
-    if (jclazz == nullptr) {
+    if(jclazz == nullptr) {
      // exception occurred accessing class
      return nullptr;
    }

-    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(I)V");
-    if (mid == nullptr) {
-      // exception thrown: NoSuchMethodException or OutOfMemoryError
-      return nullptr;
-    }
-
-    jobject jhash_map = env->NewObject(jclazz, mid, static_cast<jint>(initial_capacity));
-    if (env->ExceptionCheck()) {
-      return nullptr;
-    }
-
-    return jhash_map;
+    static jmethodID mid = env->GetMethodID(
+        jclazz, "columnFamilyLogNumberMap",
+        "(Ljava/util/Map;Ljava/util/Map;)V");
+    assert(mid != nullptr);
+    return mid;
  }

  /**
-   * A function which maps a std::pair<K,V> to a std::pair<JK, JV>
+   * Get the Java Method: AbstractWalFilter#logRecordFoundProxy
   *
-   * @return Either a pointer to a std::pair<jobject, jobject>, or nullptr
-   *     if an error occurs during the mapping
-   */
-  template <typename K, typename V, typename JK, typename JV>
-  using FnMapKV = std::function<std::unique_ptr<std::pair<JK, JV>> (const std::pair<K, V>&)>;
-
-  // template ::value_type, std::pair>::value, int32_t>::type = 0>
-  // static void putAll(JNIEnv* env, const jobject jhash_map, I iterator, const FnMapKV &fn_map_kv) {
-  /**
-   * Returns true if it succeeds, false if an error occurs
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
   */
-  template <class iterator_type, typename K, typename V>
-  static bool putAll(JNIEnv* env, const jobject jhash_map, iterator_type iterator, iterator_type end, const FnMapKV<K, V> &fn_map_kv) {
-    const jmethodID jmid_put = rocksdb::MapJni::getMapPutMethodId(env);
-    if (jmid_put == nullptr) {
-      return false;
-    }
-
-    for (auto it = iterator; it != end; ++it) {
-      const std::unique_ptr<std::pair<jobject, jobject>> result = fn_map_kv(*it);
-      if (result == nullptr) {
-        // an error occurred during fn_map_kv
-        return false;
-      }
-      env->CallObjectMethod(jhash_map, jmid_put, result->first, result->second);
-      if (env->ExceptionCheck()) {
-        // exception occurred
-        env->DeleteLocalRef(result->second);
-        env->DeleteLocalRef(result->first);
-        return false;
-      }
-
-      // release local references
-      env->DeleteLocalRef(result->second);
-      env->DeleteLocalRef(result->first);
+  static jmethodID getLogRecordFoundProxyMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if(jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
    }

-    return true;
+    static jmethodID mid = env->GetMethodID(
+        jclazz, "logRecordFoundProxy", "(JLjava/lang/String;JJ)S");
+    assert(mid != nullptr);
+    return mid;
  }
-};

-class LongJni : public JavaClass {
- public:
  /**
-   * Get the Java Class java.lang.Long
+   * Get the Java Method: AbstractWalFilter#name
   *
   * @param env A pointer to the Java environment
   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "java/lang/Long");
-  }
-
-  static jobject valueOf(JNIEnv* env, jlong jprimitive_long) {
+  static jmethodID getNameMethodId(JNIEnv* env) {
    jclass jclazz = getJClass(env);
-    if (jclazz == nullptr) {
+    if(jclazz == nullptr) {
      // exception occurred accessing class
      return nullptr;
    }

-    jmethodID mid =
-        env->GetStaticMethodID(jclazz, "valueOf", "(J)Ljava/lang/Long;");
-    if (mid == nullptr) {
-      // exception thrown: NoSuchMethodException or OutOfMemoryError
-      return nullptr;
-    }
+    static jmethodID mid = env->GetMethodID(
+        jclazz, "name", "()Ljava/lang/String;");
+    assert(mid != nullptr);
+    return mid;
+  }
+};

-    const jobject jlong_obj =
-        env->CallStaticObjectMethod(jclazz, mid, jprimitive_long);
-    if (env->ExceptionCheck()) {
-      // exception occurred
-      return nullptr;
-    }
+// The portal class for org.rocksdb.WalProcessingOption
+class WalProcessingOptionJni {
+ public:
+  // Returns the equivalent org.rocksdb.WalProcessingOption for the provided
+  // C++ rocksdb::WalFilter::WalProcessingOption enum
+  static jbyte toJavaWalProcessingOption(
+      const rocksdb::WalFilter::WalProcessingOption& wal_processing_option) {
+    switch(wal_processing_option) {
+      case rocksdb::WalFilter::WalProcessingOption::kContinueProcessing:
+        return 0x0;
+      case rocksdb::WalFilter::WalProcessingOption::kIgnoreCurrentRecord:
+        return 0x1;
+      case rocksdb::WalFilter::WalProcessingOption::kStopReplay:
+        return 0x2;
+      case rocksdb::WalFilter::WalProcessingOption::kCorruptedRecord:
+        return 0x3;
+      default:
+        return 0x7F;  // undefined
+    }
+  }

-    return jlong_obj;
-  }
+  // Returns the equivalent C++ rocksdb::WalFilter::WalProcessingOption enum for
+  // the provided Java org.rocksdb.WalProcessingOption
+  static rocksdb::WalFilter::WalProcessingOption toCppWalProcessingOption(
+      jbyte jwal_processing_option) {
+    switch(jwal_processing_option) {
+      case 0x0:
+        return rocksdb::WalFilter::WalProcessingOption::kContinueProcessing;
+      case 0x1:
+        return rocksdb::WalFilter::WalProcessingOption::kIgnoreCurrentRecord;
+      case 0x2:
+        return rocksdb::WalFilter::WalProcessingOption::kStopReplay;
+      case 0x3:
+        return rocksdb::WalFilter::WalProcessingOption::kCorruptedRecord;
+      default:
+        // undefined/default
+        return rocksdb::WalFilter::WalProcessingOption::kCorruptedRecord;
+    }
+  }
};
}  // namespace rocksdb

#endif  // JAVA_ROCKSJNI_PORTAL_H_
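Tying WalProcessingOptionJni and AbstractWalFilterJni together, a hedged sketch of the Java-side filter they proxy for; the exact Java signatures, the LogRecordFoundResult constructor, and the WalProcessingOption enum constant names are assumptions inferred from the JNI signatures above:

    // Hypothetical sketch of a user-supplied WAL filter.
    public class PassThroughWalFilter extends AbstractWalFilter {
      @Override
      public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
          final Map<String, Integer> cfNameId) {
        // capture the column-family to log-number mapping if needed
      }

      @Override
      public LogRecordFoundResult logRecordFound(final long logNumber,
          final String logFileName, final WriteBatch batch,
          final WriteBatch newBatch) {
        // keep replaying the WAL, leaving the batch unchanged
        return new LogRecordFoundResult(
            WalProcessingOption.CONTINUE_PROCESSING, false);
      }

      @Override
      public String name() {
        return "PassThroughWalFilter";
      }
    }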
diff --git a/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc
index 242ce1f8d..53224232c 100644
--- a/java/rocksjni/rocksjni.cc
+++ b/java/rocksjni/rocksjni.cc
@@ -27,8 +27,6 @@
 #undef min
 #endif

-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Open
 jlong rocksdb_open_helper( JNIEnv* env, jlong jopt_handle, jstring jdb_path, std::function(jopt_handle);
-  std::vector<rocksdb::ColumnFamilyHandle*> handles;
+  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
   rocksdb::DB* db = nullptr;
-  rocksdb::Status s = open_fn(*opt, db_path, column_families, &handles, &db);
+  rocksdb::Status s = open_fn(*opt, db_path, column_families, &cf_handles, &db);

   // we have now finished with db_path
   env->ReleaseStringUTFChars(jdb_path, db_path);

   // check if open operation was successful
-  if (s.ok()) {
-    const jsize resultsLen = 1 + len_cols;  // db handle + column family handles
-    std::unique_ptr<jlong[]> results =
-        std::unique_ptr<jlong[]>(new jlong[resultsLen]);
-    results[0] = reinterpret_cast<jlong>(db);
-    for (int i = 1; i <= len_cols; i++) {
-      results[i] = reinterpret_cast<jlong>(handles[i - 1]);
-    }
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }

-    jlongArray jresults = env->NewLongArray(resultsLen);
-    if (jresults == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
+  const jsize resultsLen = 1 + len_cols;  // db handle + column family handles
+  std::unique_ptr<jlong[]> results =
+      std::unique_ptr<jlong[]>(new jlong[resultsLen]);
+  results[0] = reinterpret_cast<jlong>(db);
+  for (int i = 1; i <= len_cols; i++) {
+    results[i] = reinterpret_cast<jlong>(cf_handles[i - 1]);
+  }

-    env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
-    if (env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jresults);
-      return nullptr;
-    }
+  jlongArray jresults = env->NewLongArray(resultsLen);
+  if (jresults == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }

-    return jresults;
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    env->DeleteLocalRef(jresults);
     return nullptr;
   }
+
+  return jresults;
 }

 /*
@@ -174,7 +170,7 @@ jlongArray rocksdb_open_helper(
  * Signature: (JLjava/lang/String;[[B[J)[J
  */
 jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J(
-    JNIEnv* env, jclass /*jcls*/, jlong jopt_handle, jstring jdb_path,
+    JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
     jobjectArray jcolumn_names, jlongArray jcolumn_options) {
   return rocksdb_open_helper(
       env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options,
@@ -192,7 +188,7 @@ jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J(
  * Signature: (JLjava/lang/String;[[B[J)[J
  */
 jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J(
-    JNIEnv* env, jclass /*jcls*/, jlong jopt_handle, jstring jdb_path,
+    JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
     jobjectArray jcolumn_names, jlongArray jcolumn_options) {
   return rocksdb_open_helper(
       env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options,
@@ -203,18 +199,38 @@ jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J(
       rocksdb::DB::Open);
 }

-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::ListColumnFamilies
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+  assert(db != nullptr);
+  delete db;
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_closeDatabase(
+    JNIEnv* env, jclass, jlong jhandle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+  assert(db != nullptr);
+  rocksdb::Status s = db->Close();
+  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+}

 /*
  * Class:     org_rocksdb_RocksDB
  * Method:    listColumnFamilies
  * Signature: (JLjava/lang/String;)[[B
  */
-jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(JNIEnv* env,
-                                                         jclass /*jclazz*/,
-                                                         jlong jopt_handle,
-                                                         jstring jdb_path) {
+jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(
+    JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path) {
   std::vector<std::string> column_family_names;
   const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
   if (db_path == nullptr) {
@@ -234,17 +250,211 @@ jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(JNIEnv* env,
   return jcolumn_family_names;
 }

+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    createColumnFamily
+ * Signature: (J[BIJ)J
+ */
+jlong Java_org_rocksdb_RocksDB_createColumnFamily(
+    JNIEnv* env, jobject, jlong jhandle, jbyteArray jcf_name,
+    jint jcf_name_len, jlong jcf_options_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+  jboolean has_exception = JNI_FALSE;
+  const std::string cf_name =
+      rocksdb::JniUtil::byteString<std::string>(env, jcf_name, jcf_name_len,
+          [](const char* str, const size_t len) {
+            return std::string(str, len);
+          }, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return 0;
+  }
+  auto* cf_options =
+      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcf_options_handle);
+  rocksdb::ColumnFamilyHandle *cf_handle;
+  rocksdb::Status s = db->CreateColumnFamily(*cf_options, cf_name, &cf_handle);
+  if (!s.ok()) {
+    // error occurred
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return 0;
+  }
+  return reinterpret_cast<jlong>(cf_handle);
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    createColumnFamilies
+ * Signature: (JJ[[B)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_createColumnFamilies__JJ_3_3B(
+    JNIEnv* env, jobject, jlong jhandle, jlong jcf_options_handle,
+    jobjectArray jcf_names) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+  auto* cf_options =
+      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcf_options_handle);
+  jboolean has_exception = JNI_FALSE;
+  std::vector<std::string> cf_names;
+  rocksdb::JniUtil::byteStrings<std::string>(env, jcf_names,
+      [](const char* str, const size_t len) {
+        return std::string(str, len);
+      },
+      [&cf_names](const size_t, std::string str) {
+        cf_names.push_back(str);
+      },
+      &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return nullptr;
+  }
+
+  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+  rocksdb::Status s = db->CreateColumnFamilies(*cf_options, cf_names, &cf_handles);
+  if (!s.ok()) {
+    // error occurred
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  jlongArray jcf_handles = rocksdb::JniUtil::toJPointers<rocksdb::ColumnFamilyHandle>(
+      env, cf_handles, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return nullptr;
+  }
+  return jcf_handles;
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    createColumnFamilies
+ * Signature: (J[J[[B)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_createColumnFamilies__J_3J_3_3B(
+    JNIEnv* env, jobject, jlong jhandle, jlongArray jcf_options_handles,
+    jobjectArray jcf_names) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+  const jsize jlen = env->GetArrayLength(jcf_options_handles);
+  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descriptors;
+  cf_descriptors.reserve(jlen);
+
+  jboolean jcf_options_handles_is_copy = JNI_FALSE;
+  jlong *jcf_options_handles_elems = env->GetLongArrayElements(jcf_options_handles, &jcf_options_handles_is_copy);
+  if(jcf_options_handles_elems == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+
+  // extract the column family descriptors
+  jboolean has_exception = JNI_FALSE;
+  for (jsize i = 0; i < jlen; i++) {
+    auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+        jcf_options_handles_elems[i]);
+    jbyteArray jcf_name = static_cast<jbyteArray>(
+        env->GetObjectArrayElement(jcf_names, i));
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+      return nullptr;
+    }
+    const std::string cf_name =
+        rocksdb::JniUtil::byteString<std::string>(env, jcf_name,
+            [](const char* str, const size_t len) {
+              return std::string(str, len);
+            },
+            &has_exception);
+    if (has_exception == JNI_TRUE) {
+      // exception occurred
+      env->DeleteLocalRef(jcf_name);
+      env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+      return nullptr;
+    }
+
+    cf_descriptors.push_back(rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));
+
+    env->DeleteLocalRef(jcf_name);
+  }
+
+  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+  rocksdb::Status s = db->CreateColumnFamilies(cf_descriptors, &cf_handles);
+
+  env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+
+  if (!s.ok()) {
+    // error occurred
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  jlongArray jcf_handles = rocksdb::JniUtil::toJPointers<rocksdb::ColumnFamilyHandle>(
+      env, cf_handles, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return nullptr;
+  }
+  return jcf_handles;
+}
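From the Java side, the two native methods above surface roughly as follows. A hedged usage sketch; the RocksDB.createColumnFamilies overloads are part of this change set, but the argument shapes shown are assumptions:

    // Hypothetical usage sketch.
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      // one shared ColumnFamilyOptions for every new column family
      final List<ColumnFamilyHandle> handles = db.createColumnFamilies(cfOpts,
          Arrays.asList("users".getBytes(), "orders".getBytes()));

      // or one descriptor (name + options) per column family
      final List<ColumnFamilyHandle> more = db.createColumnFamilies(
          Arrays.asList(new ColumnFamilyDescriptor("events".getBytes(), cfOpts)));
    }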
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    dropColumnFamily
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_RocksDB_dropColumnFamily(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jlong jcf_handle) {
+  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  rocksdb::Status s = db_handle->DropColumnFamily(cf_handle);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    dropColumnFamilies
+ * Signature: (J[J)V
+ */
+void Java_org_rocksdb_RocksDB_dropColumnFamilies(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jlongArray jcolumn_family_handles) {
+  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+
+  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+  if (jcolumn_family_handles != nullptr) {
+    const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
+
+    jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
+    if (jcfh == nullptr) {
+      // exception thrown: OutOfMemoryError
+      return;
+    }
+
+    for (jsize i = 0; i < len_cols; i++) {
+      auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
+      cf_handles.push_back(cf_handle);
+    }
+    env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
+  }
+
+  rocksdb::Status s = db_handle->DropColumnFamilies(cf_handles);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
 //////////////////////////////////////////////////////////////////////////////
 // rocksdb::DB::Put

 /**
  * @return true if the put succeeded, false if a Java Exception was thrown
  */
-bool rocksdb_put_helper(JNIEnv* env, rocksdb::DB* db,
-                        const rocksdb::WriteOptions& write_options,
-                        rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey,
-                        jint jkey_off, jint jkey_len, jbyteArray jval,
-                        jint jval_off, jint jval_len) {
+bool rocksdb_put_helper(
+    JNIEnv* env, rocksdb::DB* db,
+    const rocksdb::WriteOptions& write_options,
+    rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey,
+    jint jkey_off, jint jkey_len, jbyteArray jval,
+    jint jval_off, jint jval_len) {
   jbyte* key = new jbyte[jkey_len];
   env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
   if (env->ExceptionCheck()) {
@@ -290,17 +500,15 @@ bool rocksdb_put_helper(JNIEnv* env, rocksdb::DB* db,
  * Method:    put
  * Signature: (J[BII[BII)V
  */
-void Java_org_rocksdb_RocksDB_put__J_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
-                                               jlong jdb_handle,
-                                               jbyteArray jkey, jint jkey_off,
-                                               jint jkey_len, jbyteArray jval,
-                                               jint jval_off, jint jval_len) {
+void Java_org_rocksdb_RocksDB_put__J_3BII_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
   static const rocksdb::WriteOptions default_write_options =
       rocksdb::WriteOptions();
   rocksdb_put_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
-                     jkey_len, jval, jval_off, jval_len);
+      jkey_len, jval, jval_off, jval_len);
 }

 /*
  * Class:     org_rocksdb_RocksDB
  * Method:    put
  * Signature: (J[BII[BIIJ)V
 */
-void Java_org_rocksdb_RocksDB_put__J_3BII_3BIIJ(JNIEnv* env, jobject /*jdb*/,
-                                                jlong jdb_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len, jbyteArray jval,
-                                                jint jval_off, jint jval_len,
-                                                jlong jcf_handle) {
+void Java_org_rocksdb_RocksDB_put__J_3BII_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len,
+    jlong jcf_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
   static const rocksdb::WriteOptions default_write_options =
       rocksdb::WriteOptions();
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
     rocksdb_put_helper(env, db, default_write_options, cf_handle, jkey,
-                       jkey_off, jkey_len, jval, jval_off, jval_len);
+        jkey_off, jkey_len, jval, jval_off, jval_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
@@ -332,18 +539,16 @@ void Java_org_rocksdb_RocksDB_put__J_3BII_3BIIJ(JNIEnv* env, jobject /*jdb*/,
  * Method:    put
  * Signature: (JJ[BII[BII)V
  */
-void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
-                                                jlong jdb_handle,
-                                                jlong jwrite_options_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len, jbyteArray jval,
-                                                jint jval_off, jint jval_len) {
+void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jlong jwrite_options_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
   auto* write_options =
       reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
   rocksdb_put_helper(env, db, *write_options, nullptr, jkey, jkey_off, jkey_len,
-                     jval, jval_off, jval_len);
+      jval, jval_off, jval_len);
 }

 /*
@@ -352,16 +557,17 @@ void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
  * Signature: (JJ[BII[BIIJ)V
  */
 void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BIIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
-    jint jval_off, jint jval_len, jlong jcf_handle) {
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len,
+    jlong jcf_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
   auto* write_options =
       reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
     rocksdb_put_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
-                       jkey_len, jval, jval_off, jval_len);
+        jkey_len, jval, jval_off, jval_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
@@ -369,1174 +575,1148 @@ void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BIIJ(
 }

 //////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Write
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    write0
- * Signature: (JJJ)V
- */
-void Java_org_rocksdb_RocksDB_write0(JNIEnv* env, jobject /*jdb*/,
-                                     jlong jdb_handle,
-                                     jlong jwrite_options_handle,
-                                     jlong jwb_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-
-  rocksdb::Status s = db->Write(*write_options, wb);
-
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
+// rocksdb::DB::Delete()

-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    write1
- * Signature: (JJJ)V
+/**
+ * @return true if the delete succeeded, false if a Java Exception was thrown
 */
-void Java_org_rocksdb_RocksDB_write1(JNIEnv* env, jobject /*jdb*/,
-                                     jlong jdb_handle,
-                                     jlong jwrite_options_handle,
-                                     jlong jwbwi_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* wb = wbwi->GetWriteBatch();
-
-  rocksdb::Status s = db->Write(*write_options, wb);
-
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::KeyMayExist
-jboolean key_may_exist_helper(JNIEnv* env, rocksdb::DB* db,
-                              const rocksdb::ReadOptions& read_opt,
-                              rocksdb::ColumnFamilyHandle* cf_handle,
-                              jbyteArray jkey, jint jkey_off, jint jkey_len,
-                              jobject jstring_builder, bool* has_exception) {
+bool rocksdb_delete_helper(
+    JNIEnv* env, rocksdb::DB* db, const rocksdb::WriteOptions& write_options,
+    rocksdb::ColumnFamilyHandle* cf_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len) {
   jbyte* key = new jbyte[jkey_len];
   env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
   if (env->ExceptionCheck()) {
     // exception thrown: ArrayIndexOutOfBoundsException
     delete[] key;
-    *has_exception = true;
     return false;
   }
   rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);

-  std::string value;
-  bool value_found = false;
-  bool keyMayExist;
+  rocksdb::Status s;
   if (cf_handle != nullptr) {
-    keyMayExist =
-        db->KeyMayExist(read_opt, cf_handle, key_slice, &value, &value_found);
+    s = db->Delete(write_options, cf_handle, key_slice);
   } else {
-    keyMayExist = db->KeyMayExist(read_opt, key_slice, &value, &value_found);
+    // backwards compatibility
+    s = db->Delete(write_options, key_slice);
   }

   // cleanup
   delete[] key;

-  // extract the value
-  if (value_found && !value.empty()) {
-    jobject jresult_string_builder =
-        rocksdb::StringBuilderJni::append(env, jstring_builder, value.c_str());
-    if (jresult_string_builder == nullptr) {
-      *has_exception = true;
-      return false;
-    }
+  if (s.ok()) {
+    return true;
   }

-  *has_exception = false;
-  return static_cast<jboolean>(keyMayExist);
+  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  return false;
 }

 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    keyMayExist
- * Signature: (J[BIILjava/lang/StringBuilder;)Z
+ * Method:    delete
+ * Signature: (J[BII)V
  */
-jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIILjava_lang_StringBuilder_2(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jkey,
-    jint jkey_off, jint jkey_len, jobject jstring_builder) {
+void Java_org_rocksdb_RocksDB_delete__J_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  bool has_exception = false;
-  return key_may_exist_helper(env, db, rocksdb::ReadOptions(), nullptr, jkey,
-                              jkey_off, jkey_len, jstring_builder,
-                              &has_exception);
+  static const rocksdb::WriteOptions default_write_options =
+      rocksdb::WriteOptions();
+  rocksdb_delete_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
+      jkey_len);
 }

 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    keyMayExist
- * Signature: (J[BIIJLjava/lang/StringBuilder;)Z
+ * Method:    delete
+ * Signature: (J[BIIJ)V
  */
-jboolean
-Java_org_rocksdb_RocksDB_keyMayExist__J_3BIIJLjava_lang_StringBuilder_2(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jkey,
-    jint jkey_off, jint jkey_len, jlong jcf_handle, jobject jstring_builder) {
+void Java_org_rocksdb_RocksDB_delete__J_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jlong jcf_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  static const rocksdb::WriteOptions default_write_options =
+      rocksdb::WriteOptions();
+  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    bool has_exception = false;
-    return key_may_exist_helper(env, db, rocksdb::ReadOptions(), cf_handle,
-                                jkey, jkey_off, jkey_len, jstring_builder,
-                                &has_exception);
+    rocksdb_delete_helper(env, db, default_write_options, cf_handle, jkey,
+        jkey_off, jkey_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    return true;
   }
 }

 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    keyMayExist
- * Signature: (JJ[BIILjava/lang/StringBuilder;)Z
+ * Method:    delete
+ * Signature: (JJ[BII)V
  */
-jboolean
-Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIILjava_lang_StringBuilder_2(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jread_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jobject jstring_builder) {
+void Java_org_rocksdb_RocksDB_delete__JJ_3BII(
+    JNIEnv* env, jobject,
+    jlong jdb_handle,
+    jlong jwrite_options,
+    jbyteArray jkey, jint jkey_off, jint jkey_len) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto& read_options =
-      *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
-  bool has_exception = false;
-  return key_may_exist_helper(env, db, read_options, nullptr, jkey, jkey_off,
-                              jkey_len, jstring_builder, &has_exception);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+  rocksdb_delete_helper(env, db, *write_options, nullptr, jkey, jkey_off,
+      jkey_len);
 }

 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    keyMayExist
- * Signature: (JJ[BIIJLjava/lang/StringBuilder;)Z
+ * Method:    delete
+ * Signature: (JJ[BIIJ)V
  */
-jboolean
-Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIIJLjava_lang_StringBuilder_2(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jread_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle,
-    jobject jstring_builder) {
+void Java_org_rocksdb_RocksDB_delete__JJ_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto& read_options =
-      *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    bool has_exception = false;
-    return key_may_exist_helper(env, db, read_options, cf_handle, jkey,
-                                jkey_off, jkey_len, jstring_builder,
-                                &has_exception);
+    rocksdb_delete_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
+        jkey_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    return true;
   }
 }

 //////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Get
-
-jbyteArray rocksdb_get_helper(JNIEnv* env, rocksdb::DB* db,
-                              const rocksdb::ReadOptions& read_opt,
-                              rocksdb::ColumnFamilyHandle* column_family_handle,
-                              jbyteArray jkey, jint jkey_off, jint jkey_len) {
-  jbyte* key = new jbyte[jkey_len];
-  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
-  if (env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete[] key;
-    return nullptr;
+// rocksdb::DB::SingleDelete()
+/**
+ * @return true if the single delete succeeded, false if a Java Exception
+ *     was thrown
+ */
+bool rocksdb_single_delete_helper(
+    JNIEnv* env, rocksdb::DB* db,
+    const rocksdb::WriteOptions& write_options,
+    rocksdb::ColumnFamilyHandle* cf_handle,
+    jbyteArray jkey, jint jkey_len) {
+  jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+  if (key == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return false;
   }
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-  std::string value;
+  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
   rocksdb::Status s;
-  if (column_family_handle != nullptr) {
-    s = db->Get(read_opt, column_family_handle, key_slice, &value);
+  if (cf_handle != nullptr) {
+    s = db->SingleDelete(write_options, cf_handle, key_slice);
   } else {
     // backwards compatibility
-    s = db->Get(read_opt, key_slice, &value);
+    s = db->SingleDelete(write_options, key_slice);
   }

-  // cleanup
-  delete[] key;
-
-  if (s.IsNotFound()) {
-    return nullptr;
-  }
+  // trigger java unref on key and value.
+  // by passing JNI_ABORT, it will simply release the reference without
+  // copying the result back to the java byte array.
+  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);

   if (s.ok()) {
-    jbyteArray jret_value = rocksdb::JniUtil::copyBytes(env, value);
-    if (jret_value == nullptr) {
-      // exception occurred
-      return nullptr;
-    }
-    return jret_value;
+    return true;
   }

   rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return nullptr;
+  return false;
 }

 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (J[BII)[B
+ * Method:    singleDelete
+ * Signature: (J[BI)V
  */
-jbyteArray Java_org_rocksdb_RocksDB_get__J_3BII(JNIEnv* env, jobject /*jdb*/,
-                                                jlong jdb_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len) {
-  return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-                            rocksdb::ReadOptions(), nullptr, jkey, jkey_off,
-                            jkey_len);
+void Java_org_rocksdb_RocksDB_singleDelete__J_3BI(
+    JNIEnv* env, jobject,
+    jlong jdb_handle,
+    jbyteArray jkey,
+    jint jkey_len) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  static const rocksdb::WriteOptions default_write_options =
+      rocksdb::WriteOptions();
+  rocksdb_single_delete_helper(env, db, default_write_options, nullptr,
+      jkey, jkey_len);
 }

 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (J[BIIJ)[B
+ * Method:    singleDelete
+ * Signature: (J[BIJ)V
  */
-jbyteArray Java_org_rocksdb_RocksDB_get__J_3BIIJ(JNIEnv* env, jobject /*jdb*/,
-                                                 jlong jdb_handle,
-                                                 jbyteArray jkey, jint jkey_off,
-                                                 jint jkey_len,
-                                                 jlong jcf_handle) {
-  auto db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+void Java_org_rocksdb_RocksDB_singleDelete__J_3BIJ(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  static const rocksdb::WriteOptions default_write_options =
+      rocksdb::WriteOptions();
+  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
-                              jkey, jkey_off, jkey_len);
+    rocksdb_single_delete_helper(env, db, default_write_options, cf_handle,
+        jkey, jkey_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    return nullptr;
   }
 }

 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (JJ[BII)[B
+ * Method:    singleDelete
+ * Signature: (JJ[BI)V
  */
-jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BII(JNIEnv* env, jobject /*jdb*/,
-                                                 jlong jdb_handle,
-                                                 jlong jropt_handle,
-                                                 jbyteArray jkey, jint jkey_off,
-                                                 jint jkey_len) {
-  return rocksdb_get_helper(
-      env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
-      jkey_off, jkey_len);
+void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BI(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jlong jwrite_options,
+    jbyteArray jkey,
+    jint jkey_len) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+  rocksdb_single_delete_helper(env, db, *write_options, nullptr, jkey,
+      jkey_len);
 }

 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (JJ[BIIJ)[B
+ * Method:    singleDelete
+ * Signature: (JJ[BIJ)V
  */
-jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BIIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jropt_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
+void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BIJ(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+    jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, jkey, jkey_off,
-                              jkey_len);
+    rocksdb_single_delete_helper(env, db, *write_options, cf_handle, jkey,
+        jkey_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    return nullptr;
   }
 }
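A note on semantics worth keeping in mind here: SingleDelete is only well-defined when the key has been Put at most once since the last delete and never merged or overwritten (the C++ SingleDelete contract). A hedged Java usage sketch; RocksDB.singleDelete is assumed to be the Java entry point for the natives above:

    // Hypothetical usage sketch.
    db.put("session:42".getBytes(), token);
    // ... later: remove the single version with a lighter-weight tombstone
    // than a regular delete, per the SingleDelete contract.
    db.singleDelete("session:42".getBytes());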
-jint rocksdb_get_helper(JNIEnv* env, rocksdb::DB* db,
-                        const rocksdb::ReadOptions& read_options,
-                        rocksdb::ColumnFamilyHandle* column_family_handle,
-                        jbyteArray jkey, jint jkey_off, jint jkey_len,
-                        jbyteArray jval, jint jval_off, jint jval_len,
-                        bool* has_exception) {
-  static const int kNotFound = -1;
-  static const int kStatusError = -2;
-
-  jbyte* key = new jbyte[jkey_len];
-  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::DB::DeleteRange()
+/**
+ * @return true if the delete range succeeded, false if a Java Exception
+ *     was thrown
+ */
+bool rocksdb_delete_range_helper(
+    JNIEnv* env, rocksdb::DB* db,
+    const rocksdb::WriteOptions& write_options,
+    rocksdb::ColumnFamilyHandle* cf_handle,
+    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+    jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
+  jbyte* begin_key = new jbyte[jbegin_key_len];
+  env->GetByteArrayRegion(jbegin_key, jbegin_key_off, jbegin_key_len,
+      begin_key);
   if (env->ExceptionCheck()) {
-    // exception thrown: OutOfMemoryError
-    delete[] key;
-    *has_exception = true;
-    return kStatusError;
+    // exception thrown: ArrayIndexOutOfBoundsException
+    delete[] begin_key;
+    return false;
   }
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+  rocksdb::Slice begin_key_slice(reinterpret_cast<char*>(begin_key),
+      jbegin_key_len);

-  // TODO(yhchiang): we might save one memory allocation here by adding
-  // a DB::Get() function which takes preallocated jbyte* as input.
-  std::string cvalue;
-  rocksdb::Status s;
-  if (column_family_handle != nullptr) {
-    s = db->Get(read_options, column_family_handle, key_slice, &cvalue);
-  } else {
-    // backwards compatibility
-    s = db->Get(read_options, key_slice, &cvalue);
+  jbyte* end_key = new jbyte[jend_key_len];
+  env->GetByteArrayRegion(jend_key, jend_key_off, jend_key_len, end_key);
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    delete[] begin_key;
+    delete[] end_key;
+    return false;
   }
+  rocksdb::Slice end_key_slice(reinterpret_cast<char*>(end_key), jend_key_len);

-  // cleanup
-  delete[] key;
+  rocksdb::Status s =
+      db->DeleteRange(write_options, cf_handle, begin_key_slice, end_key_slice);

-  if (s.IsNotFound()) {
-    *has_exception = false;
-    return kNotFound;
-  } else if (!s.ok()) {
-    *has_exception = true;
-    // Here since we are throwing a Java exception from c++ side.
-    // As a result, c++ does not know calling this function will in fact
-    // throwing an exception.  As a result, the execution flow will
-    // not stop here, and codes after this throw will still be
-    // executed.
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  // cleanup
+  delete[] begin_key;
+  delete[] end_key;

-    // Return a dummy const value to avoid compilation error, although
-    // java side might not have a chance to get the return value :)
-    return kStatusError;
+  if (s.ok()) {
+    return true;
   }

-  const jint cvalue_len = static_cast<jint>(cvalue.size());
-  const jint length = std::min(jval_len, cvalue_len);
-
-  env->SetByteArrayRegion(
-      jval, jval_off, length,
-      const_cast<jbyte*>(reinterpret_cast<const jbyte*>(cvalue.c_str())));
-  if (env->ExceptionCheck()) {
-    // exception thrown: OutOfMemoryError
-    *has_exception = true;
-    return kStatusError;
-  }
+  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  return false;
+}

-  *has_exception = false;
-  return cvalue_len;
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    deleteRange
+ * Signature: (J[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+    jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  static const rocksdb::WriteOptions default_write_options =
+      rocksdb::WriteOptions();
+  rocksdb_delete_range_helper(env, db, default_write_options, nullptr,
+      jbegin_key, jbegin_key_off, jbegin_key_len,
+      jend_key, jend_key_off, jend_key_len);
 }

-inline void multi_get_helper_release_keys(
-    JNIEnv* env, std::vector<std::pair<jbyte*, jobject>>& keys_to_free) {
-  auto end = keys_to_free.end();
-  for (auto it = keys_to_free.begin(); it != end; ++it) {
-    delete[] it->first;
-    env->DeleteLocalRef(it->second);
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    deleteRange
+ * Signature: (J[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+    jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
+    jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  static const rocksdb::WriteOptions default_write_options =
+      rocksdb::WriteOptions();
+  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  if (cf_handle != nullptr) {
+    rocksdb_delete_range_helper(env, db, default_write_options, cf_handle,
+        jbegin_key, jbegin_key_off, jbegin_key_len,
+        jend_key, jend_key_off, jend_key_len);
+  } else {
+    rocksdb::RocksDBExceptionJni::ThrowNew(
+        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
   }
-
-  keys_to_free.clear();
 }

-/**
- * cf multi get
- *
- * @return byte[][] of values or nullptr if an exception occurs
- */
-jobjectArray multi_get_helper(JNIEnv* env, jobject /*jdb*/, rocksdb::DB* db,
-                              const rocksdb::ReadOptions& rOpt,
-                              jobjectArray jkeys, jintArray jkey_offs,
-                              jintArray jkey_lens,
-                              jlongArray jcolumn_family_handles) {
-  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
-  if (jcolumn_family_handles != nullptr) {
-    const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
-
-    jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
-    if (jcfh == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    for (jsize i = 0; i < len_cols; i++) {
-      auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
-      cf_handles.push_back(cf_handle);
-    }
-    env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
-  }
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    deleteRange
+ * Signature: (JJ[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+    jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+  rocksdb_delete_range_helper(env, db, *write_options, nullptr, jbegin_key,
+      jbegin_key_off, jbegin_key_len, jend_key,
+      jend_key_off, jend_key_len);
+}

-  const jsize len_keys = env->GetArrayLength(jkeys);
-  if (env->EnsureLocalCapacity(len_keys) != 0) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    deleteRange
+ * Signature: (JJ[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+    jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
+    jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  if (cf_handle != nullptr) {
+    rocksdb_delete_range_helper(env, db, *write_options, cf_handle,
+        jbegin_key, jbegin_key_off, jbegin_key_len,
+        jend_key, jend_key_off, jend_key_len);
+  } else {
+    rocksdb::RocksDBExceptionJni::ThrowNew(
+        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
   }
+}
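For completeness, a short hedged sketch of the Java entry point that reaches rocksdb_delete_range_helper above (RocksDB.deleteRange(byte[], byte[]); the end key is exclusive, matching the C++ DeleteRange contract):

    // Hypothetical usage sketch: deletes all keys in ["user:0000", "user:9999").
    db.deleteRange("user:0000".getBytes(), "user:9999".getBytes());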
-  for (jsize i = 0; i < len_keys; i++) {
-    jobject jkey = env->GetObjectArrayElement(jkeys, i);
-    if (env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
-      env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
-      multi_get_helper_release_keys(env, keys_to_free);
-      return nullptr;
-    }
-
-    jbyteArray jkey_ba = reinterpret_cast<jbyteArray>(jkey);
-
-    const jint len_key = jkey_len[i];
-    jbyte* key = new jbyte[len_key];
-    env->GetByteArrayRegion(jkey_ba, jkey_off[i], len_key, key);
-    if (env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      delete[] key;
-      env->DeleteLocalRef(jkey);
-      env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
-      env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
-      multi_get_helper_release_keys(env, keys_to_free);
-      return nullptr;
-    }
-
-    rocksdb::Slice key_slice(reinterpret_cast<char*>(key), len_key);
-    keys.push_back(key_slice);
-
-    keys_to_free.push_back(std::pair<jbyte*, jobject>(key, jkey));
+  jbyte* value = new jbyte[jval_len];
+  env->GetByteArrayRegion(jval, jval_off, jval_len, value);
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    delete[] value;
+    delete[] key;
+    return false;
   }
+  rocksdb::Slice value_slice(reinterpret_cast<char*>(value), jval_len);
 
-  // cleanup jkey_off and jkey_len
-  env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
-  env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
-
-  std::vector<std::string> values;
-  std::vector<rocksdb::Status> s;
-  if (cf_handles.size() == 0) {
-    s = db->MultiGet(rOpt, keys, &values);
+  rocksdb::Status s;
+  if (cf_handle != nullptr) {
+    s = db->Merge(write_options, cf_handle, key_slice, value_slice);
   } else {
-    s = db->MultiGet(rOpt, cf_handles, keys, &values);
-  }
-
-  // free up allocated byte arrays
-  multi_get_helper_release_keys(env, keys_to_free);
-
-  // prepare the results
-  jobjectArray jresults =
-      rocksdb::ByteJni::new2dByteArray(env, static_cast<jsize>(s.size()));
-  if (jresults == nullptr) {
-    // exception occurred
-    return nullptr;
+    s = db->Merge(write_options, key_slice, value_slice);
   }
 
-  // TODO(AR) it is not clear to me why EnsureLocalCapacity is needed for the
-  //     loop as we cleanup references with env->DeleteLocalRef(jentry_value);
-  if (env->EnsureLocalCapacity(static_cast<jint>(s.size())) != 0) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-  // add to the jresults
-  for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) {
-    if (s[i].ok()) {
-      std::string* value = &values[i];
-      const jsize jvalue_len = static_cast<jsize>(value->size());
-      jbyteArray jentry_value = env->NewByteArray(jvalue_len);
-      if (jentry_value == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      env->SetByteArrayRegion(
-          jentry_value, 0, static_cast<jsize>(jvalue_len),
-          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value->c_str())));
-      if (env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jentry_value);
-        return nullptr;
-      }
-
-      env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jentry_value);
-      if (env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jentry_value);
-        return nullptr;
-      }
+  // cleanup
+  delete[] value;
+  delete[] key;
 
-      env->DeleteLocalRef(jentry_value);
-    }
+  if (s.ok()) {
+    return true;
   }
 
-  return jresults;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    multiGet
- * Signature: (J[[B[I[I)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
-    jintArray jkey_offs, jintArray jkey_lens) {
-  return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-                          rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
-                          nullptr);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    multiGet
- * Signature: (J[[B[I[I[J)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I_3J(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
-    jintArray jkey_offs, jintArray jkey_lens,
-    jlongArray jcolumn_family_handles) {
-  return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-                          rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
-                          jcolumn_family_handles);
+  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  return false;
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    multiGet
- * Signature: (JJ[[B[I[I)[[B
+ * Method:    merge
+ * Signature: (J[BII[BII)V
  */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
-    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
-  return multi_get_helper(
-      env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
-      jkey_lens, nullptr);
+void Java_org_rocksdb_RocksDB_merge__J_3BII_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  static const rocksdb::WriteOptions default_write_options =
+      rocksdb::WriteOptions();
+  rocksdb_merge_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
+                       jkey_len, jval, jval_off, jval_len);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    multiGet
- * Signature: (JJ[[B[I[I[J)[[B
+ * Method:    merge
+ * Signature: (J[BII[BIIJ)V
  */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I_3J(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
-    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
-    jlongArray jcolumn_family_handles) {
-  return multi_get_helper(
-      env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
-      jkey_lens, jcolumn_family_handles);
+void Java_org_rocksdb_RocksDB_merge__J_3BII_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len,
+    jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  static const rocksdb::WriteOptions default_write_options =
+      rocksdb::WriteOptions();
+  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  if (cf_handle != nullptr) {
+    rocksdb_merge_helper(env, db, default_write_options, cf_handle, jkey,
+                         jkey_off, jkey_len, jval, jval_off, jval_len);
+  } else {
+    rocksdb::RocksDBExceptionJni::ThrowNew(
+        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+  }
 }
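[Usage note, not part of the patch] rocksdb_merge_helper mirrors the put path but routes through DB::Merge, so it is only meaningful on a database opened with a merge operator. A sketch using the bundled StringAppendOperator (path is illustrative):

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.StringAppendOperator;

    public class MergeExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final StringAppendOperator append = new StringAppendOperator();
             final Options options = new Options()
                 .setCreateIfMissing(true)
                 .setMergeOperator(append);
             final RocksDB db = RocksDB.open(options, "/tmp/rocksdb-merge")) {
          db.put("key".getBytes(), "a".getBytes());
          // With StringAppendOperator the stored value becomes "a,b".
          db.merge("key".getBytes(), "b".getBytes());
        }
      }
    }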
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (J[BII[BII)I
+ * Method:    merge
+ * Signature: (JJ[BII[BII)V
  */
-jint Java_org_rocksdb_RocksDB_get__J_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
-                                               jlong jdb_handle,
-                                               jbyteArray jkey, jint jkey_off,
-                                               jint jkey_len, jbyteArray jval,
-                                               jint jval_off, jint jval_len) {
-  bool has_exception = false;
-  return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-                            rocksdb::ReadOptions(), nullptr, jkey, jkey_off,
-                            jkey_len, jval, jval_off, jval_len, &has_exception);
+void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
+  rocksdb_merge_helper(env, db, *write_options, nullptr, jkey, jkey_off,
+                       jkey_len, jval, jval_off, jval_len);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (J[BII[BIIJ)I
+ * Method:    merge
+ * Signature: (JJ[BII[BIIJ)V
  */
-jint Java_org_rocksdb_RocksDB_get__J_3BII_3BIIJ(JNIEnv* env, jobject /*jdb*/,
-                                                jlong jdb_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len, jbyteArray jval,
-                                                jint jval_off, jint jval_len,
-                                                jlong jcf_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len, jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    bool has_exception = false;
-    return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
-                              jkey, jkey_off, jkey_len, jval, jval_off,
-                              jval_len, &has_exception);
+    rocksdb_merge_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
+                         jkey_len, jval, jval_off, jval_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    // will never be evaluated
-    return 0;
   }
 }
 
+jlong rocksdb_iterator_helper(rocksdb::DB* db,
+                              rocksdb::ReadOptions read_options,
+                              rocksdb::ColumnFamilyHandle* cf_handle) {
+  rocksdb::Iterator* iterator = nullptr;
+  if (cf_handle != nullptr) {
+    iterator = db->NewIterator(read_options, cf_handle);
+  } else {
+    iterator = db->NewIterator(read_options);
+  }
+  return reinterpret_cast<jlong>(iterator);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::DB::Write
+
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (JJ[BII[BII)I
+ * Method:    write0
+ * Signature: (JJJ)V
  */
-jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
-                                                jlong jdb_handle,
-                                                jlong jropt_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len, jbyteArray jval,
-                                                jint jval_off, jint jval_len) {
-  bool has_exception = false;
-  return rocksdb_get_helper(
-      env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
-      jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception);
+void Java_org_rocksdb_RocksDB_write0(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jlong jwrite_options_handle, jlong jwb_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
+  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
+
+  rocksdb::Status s = db->Write(*write_options, wb);
+
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
 }
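[Usage note, not part of the patch] write0 above unwraps a WriteBatch handle and hands it to DB::Write; write1 in the next hunk does the same for a WriteBatchWithIndex via GetWriteBatch(). Java-side sketch (path is illustrative):

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteBatch;
    import org.rocksdb.WriteOptions;

    public class WriteBatchExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/rocksdb-batch");
             final WriteBatch batch = new WriteBatch();
             final WriteOptions writeOpts = new WriteOptions()) {
          batch.put("k1".getBytes(), "v1".getBytes());
          batch.put("k2".getBytes(), "v2".getBytes());
          batch.delete("k3".getBytes());
          db.write(writeOpts, batch);  // applied atomically via write0
        }
      }
    }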
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (JJ[BII[BIIJ)I
+ * Method:    write1
+ * Signature: (JJJ)V
  */
-jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BIIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jropt_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
-    jint jval_off, jint jval_len, jlong jcf_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    bool has_exception = false;
-    return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, jkey, jkey_off,
-                              jkey_len, jval, jval_off, jval_len,
-                              &has_exception);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(
-        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    // will never be evaluated
-    return 0;
+void Java_org_rocksdb_RocksDB_write1(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jlong jwrite_options_handle, jlong jwbwi_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* write_options =
+      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
+  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
+  auto* wb = wbwi->GetWriteBatch();
+
+  rocksdb::Status s = db->Write(*write_options, wb);
+
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
   }
 }
 
 //////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Delete()
+// rocksdb::DB::Get
 
-/**
- * @return true if the delete succeeded, false if a Java Exception was thrown
- */
-bool rocksdb_delete_helper(JNIEnv* env, rocksdb::DB* db,
-                           const rocksdb::WriteOptions& write_options,
-                           rocksdb::ColumnFamilyHandle* cf_handle,
-                           jbyteArray jkey, jint jkey_off, jint jkey_len) {
+jbyteArray rocksdb_get_helper(
+    JNIEnv* env, rocksdb::DB* db,
+    const rocksdb::ReadOptions& read_opt,
+    rocksdb::ColumnFamilyHandle* column_family_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len) {
   jbyte* key = new jbyte[jkey_len];
   env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
   if (env->ExceptionCheck()) {
     // exception thrown: ArrayIndexOutOfBoundsException
     delete[] key;
-    return false;
+    return nullptr;
   }
+
   rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
 
+  std::string value;
   rocksdb::Status s;
-  if (cf_handle != nullptr) {
-    s = db->Delete(write_options, cf_handle, key_slice);
+  if (column_family_handle != nullptr) {
+    s = db->Get(read_opt, column_family_handle, key_slice, &value);
   } else {
     // backwards compatibility
-    s = db->Delete(write_options, key_slice);
+    s = db->Get(read_opt, key_slice, &value);
   }
 
   // cleanup
   delete[] key;
 
+  if (s.IsNotFound()) {
+    return nullptr;
+  }
+
   if (s.ok()) {
-    return true;
+    jbyteArray jret_value = rocksdb::JniUtil::copyBytes(env, value);
+    if (jret_value == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+    return jret_value;
   }
 
   rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
+  return nullptr;
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    delete
- * Signature: (J[BII)V
+ * Method:    get
+ * Signature: (J[BII)[B
  */
-void Java_org_rocksdb_RocksDB_delete__J_3BII(JNIEnv* env, jobject /*jdb*/,
-                                             jlong jdb_handle, jbyteArray jkey,
-                                             jint jkey_off, jint jkey_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  rocksdb_delete_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
-                        jkey_len);
+jbyteArray Java_org_rocksdb_RocksDB_get__J_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len) {
+  return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+      rocksdb::ReadOptions(), nullptr, jkey, jkey_off, jkey_len);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    delete
- * Signature: (J[BIIJ)V
+ * Method:    get
+ * Signature: (J[BIIJ)[B
  */
-void Java_org_rocksdb_RocksDB_delete__J_3BIIJ(JNIEnv* env, jobject /*jdb*/,
-                                              jlong jdb_handle, jbyteArray jkey,
-                                              jint jkey_off, jint jkey_len,
-                                              jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+jbyteArray Java_org_rocksdb_RocksDB_get__J_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
+  auto db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    rocksdb_delete_helper(env, db, default_write_options, cf_handle, jkey,
-                          jkey_off, jkey_len);
+    return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
+        jkey, jkey_off, jkey_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+    return nullptr;
   }
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    delete
- * Signature: (JJ[BII)V
+ * Method:    get
+ * Signature: (JJ[BII)[B
  */
-void Java_org_rocksdb_RocksDB_delete__JJ_3BII(JNIEnv* env, jobject /*jdb*/,
-                                              jlong jdb_handle,
-                                              jlong jwrite_options,
-                                              jbyteArray jkey, jint jkey_off,
-                                              jint jkey_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  rocksdb_delete_helper(env, db, *write_options, nullptr, jkey, jkey_off,
-                        jkey_len);
+jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BII(
+    JNIEnv* env, jobject,
+    jlong jdb_handle, jlong jropt_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len) {
+  return rocksdb_get_helper(
+      env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
+      jkey_off, jkey_len);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    delete
- * Signature: (JJ[BIIJ)V
+ * Method:    get
+ * Signature: (JJ[BIIJ)[B
  */
-void Java_org_rocksdb_RocksDB_delete__JJ_3BIIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options,
+jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
     jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    rocksdb_delete_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
-                          jkey_len);
+    return rocksdb_get_helper(
+        env, db_handle, ro_opt, cf_handle, jkey, jkey_off, jkey_len);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+    return nullptr;
   }
 }
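[Usage note, not part of the patch] The jbyteArray-returning rocksdb_get_helper replaces the old not-found sentinel with a plain nullptr, which surfaces in Java as a null byte[]. Sketch (path is illustrative):

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class GetExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/rocksdb-get")) {
          db.put("key".getBytes(), "value".getBytes());
          // null corresponds to the nullptr returned by the helper above.
          final byte[] value = db.get("key".getBytes());
          System.out.println(value == null ? "miss" : new String(value));
        }
      }
    }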
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::SingleDelete()
-
-/**
- * @return true if the single delete succeeded, false if a Java Exception
- *     was thrown
- */
-bool rocksdb_single_delete_helper(JNIEnv* env, rocksdb::DB* db,
-                                  const rocksdb::WriteOptions& write_options,
-                                  rocksdb::ColumnFamilyHandle* cf_handle,
-                                  jbyteArray jkey, jint jkey_len) {
-  jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-  if (key == nullptr) {
+jint rocksdb_get_helper(
+    JNIEnv* env, rocksdb::DB* db, const rocksdb::ReadOptions& read_options,
+    rocksdb::ColumnFamilyHandle* column_family_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len,
+    bool* has_exception) {
+  static const int kNotFound = -1;
+  static const int kStatusError = -2;
+
+  jbyte* key = new jbyte[jkey_len];
+  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+  if (env->ExceptionCheck()) {
     // exception thrown: OutOfMemoryError
-    return false;
+    delete[] key;
+    *has_exception = true;
+    return kStatusError;
   }
   rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
 
+  // TODO(yhchiang): we might save one memory allocation here by adding
+  // a DB::Get() function which takes preallocated jbyte* as input.
+  std::string cvalue;
   rocksdb::Status s;
-  if (cf_handle != nullptr) {
-    s = db->SingleDelete(write_options, cf_handle, key_slice);
+  if (column_family_handle != nullptr) {
+    s = db->Get(read_options, column_family_handle, key_slice, &cvalue);
   } else {
     // backwards compatibility
-    s = db->SingleDelete(write_options, key_slice);
+    s = db->Get(read_options, key_slice, &cvalue);
   }
 
-  // trigger java unref on key and value.
-  // by passing JNI_ABORT, it will simply release the reference without
-  // copying the result back to the java byte array.
-  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+  // cleanup
+  delete[] key;
 
-  if (s.ok()) {
-    return true;
+  if (s.IsNotFound()) {
+    *has_exception = false;
+    return kNotFound;
+  } else if (!s.ok()) {
+    *has_exception = true;
+    // Here we are throwing a Java exception from the C++ side. C++ does
+    // not know that this call will raise a Java exception, so execution
+    // does not stop here and the code after this throw still runs.
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+
+    // Return a dummy const value to avoid compilation error, although
+    // the Java side might not have a chance to get the return value :)
+    return kStatusError;
   }
 
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
+  const jint cvalue_len = static_cast<jint>(cvalue.size());
+  const jint length = std::min(jval_len, cvalue_len);
+
+  env->SetByteArrayRegion(
+      jval, jval_off, length,
+      const_cast<jbyte*>(reinterpret_cast<const jbyte*>(cvalue.c_str())));
+  if (env->ExceptionCheck()) {
+    // exception thrown: OutOfMemoryError
+    *has_exception = true;
+    return kStatusError;
+  }
+
+  *has_exception = false;
+  return cvalue_len;
 }
 
+
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    singleDelete
- * Signature: (J[BI)V
+ * Method:    get
+ * Signature: (J[BII[BII)I
  */
-void Java_org_rocksdb_RocksDB_singleDelete__J_3BI(JNIEnv* env, jobject /*jdb*/,
-                                                  jlong jdb_handle,
-                                                  jbyteArray jkey,
-                                                  jint jkey_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  rocksdb_single_delete_helper(env, db, default_write_options, nullptr, jkey,
-                               jkey_len);
+jint Java_org_rocksdb_RocksDB_get__J_3BII_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len) {
+  bool has_exception = false;
+  return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+                            rocksdb::ReadOptions(), nullptr, jkey, jkey_off,
+                            jkey_len, jval, jval_off, jval_len, &has_exception);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    singleDelete
- * Signature: (J[BIJ)V
- */
-void Java_org_rocksdb_RocksDB_singleDelete__J_3BIJ(JNIEnv* env, jobject /*jdb*/,
-                                                   jlong jdb_handle,
-                                                   jbyteArray jkey,
-                                                   jint jkey_len,
-                                                   jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
+ * Method:    get
+ * Signature: (J[BII[BIIJ)I
+ */
+jint Java_org_rocksdb_RocksDB_get__J_3BII_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len,
+    jlong jcf_handle) {
+  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    rocksdb_single_delete_helper(env, db, default_write_options, cf_handle,
-                                 jkey, jkey_len);
+    bool has_exception = false;
+    return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
+                              jkey, jkey_off, jkey_len, jval, jval_off,
+                              jval_len, &has_exception);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+    // will never be evaluated
+    return 0;
   }
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    singleDelete
- * Signature: (JJ[BIJ)V
+ * Method:    get
+ * Signature: (JJ[BII[BII)I
  */
-void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BI(JNIEnv* env, jobject /*jdb*/,
-                                                   jlong jdb_handle,
-                                                   jlong jwrite_options,
-                                                   jbyteArray jkey,
-                                                   jint jkey_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  rocksdb_single_delete_helper(env, db, *write_options, nullptr, jkey,
-                               jkey_len);
+jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BII(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len) {
+  bool has_exception = false;
+  return rocksdb_get_helper(
+      env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
+      jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    singleDelete
- * Signature: (JJ[BIJ)V
+ * Method:    get
+ * Signature: (JJ[BII[BIIJ)I
  */
-void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options,
-    jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BIIJ(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jbyteArray jval, jint jval_off, jint jval_len,
+    jlong jcf_handle) {
+  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    rocksdb_single_delete_helper(env, db, *write_options, cf_handle, jkey,
-                                 jkey_len);
+    bool has_exception = false;
+    return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle,
+                              jkey, jkey_off, jkey_len,
+                              jval, jval_off, jval_len,
+                              &has_exception);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+    // will never be evaluated
+    return 0;
   }
 }
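[Usage note, not part of the patch] The jint-returning helper keeps the buffer-filling contract: kNotFound (-1, surfaced as RocksDB.NOT_FOUND) for a miss, otherwise the full value length, even though only std::min(jval_len, cvalue_len) bytes are copied, so a return larger than the buffer signals truncation. Sketch (path is illustrative):

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class GetWithBufferExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/rocksdb-get-buffer")) {
          db.put("key".getBytes(), "a longer value".getBytes());
          final byte[] buffer = new byte[8];
          final int len = db.get("key".getBytes(), buffer);
          if (len == RocksDB.NOT_FOUND) {
            System.out.println("miss");
          } else if (len > buffer.length) {
            System.out.println("truncated: full value is " + len + " bytes");
          }
        }
      }
    }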
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::DeleteRange()
+inline void multi_get_helper_release_keys(
+    JNIEnv* env, std::vector<std::pair<jbyte*, jobject>>& keys_to_free) {
+  auto end = keys_to_free.end();
+  for (auto it = keys_to_free.begin(); it != end; ++it) {
+    delete[] it->first;
+    env->DeleteLocalRef(it->second);
+  }
+  keys_to_free.clear();
+}
+
 /**
- * @return true if the delete range succeeded, false if a Java Exception
- *     was thrown
+ * cf multi get
+ *
+ * @return byte[][] of values or nullptr if an exception occurs
  */
-bool rocksdb_delete_range_helper(JNIEnv* env, rocksdb::DB* db,
-                                 const rocksdb::WriteOptions& write_options,
-                                 rocksdb::ColumnFamilyHandle* cf_handle,
-                                 jbyteArray jbegin_key, jint jbegin_key_off,
-                                 jint jbegin_key_len, jbyteArray jend_key,
-                                 jint jend_key_off, jint jend_key_len) {
-  jbyte* begin_key = new jbyte[jbegin_key_len];
-  env->GetByteArrayRegion(jbegin_key, jbegin_key_off, jbegin_key_len,
-                          begin_key);
-  if (env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete[] begin_key;
-    return false;
+jobjectArray multi_get_helper(
+    JNIEnv* env, jobject, rocksdb::DB* db, const rocksdb::ReadOptions& rOpt,
+    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+    jlongArray jcolumn_family_handles) {
+  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+  if (jcolumn_family_handles != nullptr) {
+    const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
+
+    jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
+    if (jcfh == nullptr) {
+      // exception thrown: OutOfMemoryError
+      return nullptr;
+    }
+
+    for (jsize i = 0; i < len_cols; i++) {
+      auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
+      cf_handles.push_back(cf_handle);
+    }
+    env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
   }
-  rocksdb::Slice begin_key_slice(reinterpret_cast<char*>(begin_key),
-                                 jbegin_key_len);
 
-  jbyte* end_key = new jbyte[jend_key_len];
-  env->GetByteArrayRegion(jend_key, jend_key_off, jend_key_len, end_key);
-  if (env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete[] begin_key;
-    delete[] end_key;
-    return false;
+  const jsize len_keys = env->GetArrayLength(jkeys);
+  if (env->EnsureLocalCapacity(len_keys) != 0) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
   }
-  rocksdb::Slice end_key_slice(reinterpret_cast<char*>(end_key), jend_key_len);
 
-  rocksdb::Status s =
-      db->DeleteRange(write_options, cf_handle, begin_key_slice, end_key_slice);
+  jint* jkey_off = env->GetIntArrayElements(jkey_offs, nullptr);
+  if (jkey_off == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
 
-  // cleanup
-  delete[] begin_key;
-  delete[] end_key;
+  jint* jkey_len = env->GetIntArrayElements(jkey_lens, nullptr);
+  if (jkey_len == nullptr) {
+    // exception thrown: OutOfMemoryError
+    env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+    return nullptr;
+  }
 
-  if (s.ok()) {
-    return true;
+  std::vector<rocksdb::Slice> keys;
+  std::vector<std::pair<jbyte*, jobject>> keys_to_free;
+  for (jsize i = 0; i < len_keys; i++) {
+    jobject jkey = env->GetObjectArrayElement(jkeys, i);
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+      env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+      multi_get_helper_release_keys(env, keys_to_free);
+      return nullptr;
+    }
+
+    jbyteArray jkey_ba = reinterpret_cast<jbyteArray>(jkey);
+
+    const jint len_key = jkey_len[i];
+    jbyte* key = new jbyte[len_key];
+    env->GetByteArrayRegion(jkey_ba, jkey_off[i], len_key, key);
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      delete[] key;
+      env->DeleteLocalRef(jkey);
+      env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+      env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+      multi_get_helper_release_keys(env, keys_to_free);
+      return nullptr;
+    }
+
+    rocksdb::Slice key_slice(reinterpret_cast<char*>(key), len_key);
+    keys.push_back(key_slice);
+
+    keys_to_free.push_back(std::pair<jbyte*, jobject>(key, jkey));
   }
 
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
+  // cleanup jkey_off and jkey_len
+  env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+  env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+
+  std::vector<std::string> values;
+  std::vector<rocksdb::Status> s;
+  if (cf_handles.size() == 0) {
+    s = db->MultiGet(rOpt, keys, &values);
+  } else {
+    s = db->MultiGet(rOpt, cf_handles, keys, &values);
+  }
+
+  // free up allocated byte arrays
+  multi_get_helper_release_keys(env, keys_to_free);
+
+  // prepare the results
+  jobjectArray jresults =
+      rocksdb::ByteJni::new2dByteArray(env, static_cast<jsize>(s.size()));
+  if (jresults == nullptr) {
+    // exception occurred
+    return nullptr;
+  }
+
+  // TODO(AR) it is not clear to me why EnsureLocalCapacity is needed for the
+  //     loop as we cleanup references with env->DeleteLocalRef(jentry_value);
+  if (env->EnsureLocalCapacity(static_cast<jint>(s.size())) != 0) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+  // add to the jresults
+  for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) {
+    if (s[i].ok()) {
+      std::string* value = &values[i];
+      const jsize jvalue_len = static_cast<jsize>(value->size());
+      jbyteArray jentry_value = env->NewByteArray(jvalue_len);
+      if (jentry_value == nullptr) {
+        // exception thrown: OutOfMemoryError
+        return nullptr;
+      }
+
+      env->SetByteArrayRegion(
+          jentry_value, 0, static_cast<jsize>(jvalue_len),
+          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value->c_str())));
+      if (env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        env->DeleteLocalRef(jentry_value);
+        return nullptr;
+      }
+
+      env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jentry_value);
+      if (env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        env->DeleteLocalRef(jentry_value);
+        return nullptr;
+      }
+
+      env->DeleteLocalRef(jentry_value);
+    }
+  }
+
+  return jresults;
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    deleteRange
- * Signature: (J[BII[BII)V
+ * Method:    multiGet
+ * Signature: (J[[B[I[I)[[B
  */
-void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BII(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin_key,
-    jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key,
-    jint jend_key_off, jint jend_key_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  rocksdb_delete_range_helper(env, db, default_write_options, nullptr,
-                              jbegin_key, jbegin_key_off, jbegin_key_len,
-                              jend_key, jend_key_off, jend_key_len);
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I(
+    JNIEnv* env, jobject jdb, jlong jdb_handle,
+    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
+  return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+                          rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
+                          nullptr);
 }
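[Usage note, not part of the patch] multi_get_helper pins every key region once, issues a single DB::MultiGet, and only copies values whose per-key Status is ok(); missing keys come back as null elements on the Java side. Sketch using the multiGetAsList wrapper (the exact overload name is my assumption about the RocksJava surface, not taken from this hunk):

    import java.util.Arrays;
    import java.util.List;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class MultiGetExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/rocksdb-multiget")) {
          db.put("k1".getBytes(), "v1".getBytes());
          final List<byte[]> keys =
              Arrays.asList("k1".getBytes(), "k2".getBytes());
          // Each position holds the value bytes, or null for a missing key.
          final List<byte[]> values = db.multiGetAsList(keys);
          System.out.println(values.get(1) == null ? "k2 missing" : "k2 found");
        }
      }
    }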
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    deleteRange
- * Signature: (J[BII[BIIJ)V
+ * Method:    multiGet
+ * Signature: (J[[B[I[I[J)[[B
  */
-void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BIIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin_key,
-    jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key,
-    jint jend_key_off, jint jend_key_len, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_delete_range_helper(env, db, default_write_options, cf_handle,
-                                jbegin_key, jbegin_key_off, jbegin_key_len,
-                                jend_key, jend_key_off, jend_key_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(
-        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I_3J(
+    JNIEnv* env, jobject jdb, jlong jdb_handle,
+    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+    jlongArray jcolumn_family_handles) {
+  return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+                          rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
+                          jcolumn_family_handles);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    deleteRange
- * Signature: (JJ[BII[BII)V
+ * Method:    multiGet
+ * Signature: (JJ[[B[I[I)[[B
  */
-void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BII(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options,
-    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
-    jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  rocksdb_delete_range_helper(env, db, *write_options, nullptr, jbegin_key,
-                              jbegin_key_off, jbegin_key_len, jend_key,
-                              jend_key_off, jend_key_len);
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I(
+    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
+  return multi_get_helper(
+      env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
+      jkey_lens, nullptr);
 }
 
 /*
- * Class:     org_rocksdb_RocksDB
- * Method:    deleteRange
- * Signature: (JJ[BII[BIIJ)V
+ * Class:     org_rocksdb_RocksDB
+ * Method:    multiGet
+ * Signature: (JJ[[B[I[I[J)[[B
  */
-void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BIIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options,
-    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
-    jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
-    jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_delete_range_helper(env, db, *write_options, cf_handle, jbegin_key,
-                                jbegin_key_off, jbegin_key_len, jend_key,
-                                jend_key_off, jend_key_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(
-        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I_3J(
+    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+    jlongArray jcolumn_family_handles) {
+  return multi_get_helper(
+      env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
+      jkey_lens, jcolumn_family_handles);
 }
 
 //////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Merge
-
-/**
- * @return true if the merge succeeded, false if a Java Exception was thrown
- */
-bool rocksdb_merge_helper(JNIEnv* env, rocksdb::DB* db,
-                          const rocksdb::WriteOptions& write_options,
-                          rocksdb::ColumnFamilyHandle* cf_handle,
-                          jbyteArray jkey, jint jkey_off, jint jkey_len,
-                          jbyteArray jval, jint jval_off, jint jval_len) {
+// rocksdb::DB::KeyMayExist
+jboolean key_may_exist_helper(JNIEnv* env, rocksdb::DB* db,
+                              const rocksdb::ReadOptions& read_opt,
+                              rocksdb::ColumnFamilyHandle* cf_handle,
+                              jbyteArray jkey, jint jkey_off, jint jkey_len,
+                              jobject jstring_builder, bool* has_exception) {
   jbyte* key = new jbyte[jkey_len];
   env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
   if (env->ExceptionCheck()) {
     // exception thrown: ArrayIndexOutOfBoundsException
     delete[] key;
+    *has_exception = true;
     return false;
   }
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-  jbyte* value = new jbyte[jval_len];
-  env->GetByteArrayRegion(jval, jval_off, jval_len, value);
-  if (env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete[] value;
-    delete[] key;
-    return false;
-  }
-  rocksdb::Slice value_slice(reinterpret_cast<char*>(value), jval_len);
+  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
 
-  rocksdb::Status s;
+  std::string value;
+  bool value_found = false;
+  bool keyMayExist;
   if (cf_handle != nullptr) {
-    s = db->Merge(write_options, cf_handle, key_slice, value_slice);
+    keyMayExist =
+        db->KeyMayExist(read_opt, cf_handle, key_slice, &value, &value_found);
   } else {
-    s = db->Merge(write_options, key_slice, value_slice);
+    keyMayExist = db->KeyMayExist(read_opt, key_slice, &value, &value_found);
   }
 
   // cleanup
-  delete[] value;
   delete[] key;
 
-  if (s.ok()) {
-    return true;
+  // extract the value
+  if (value_found && !value.empty()) {
+    jobject jresult_string_builder =
+        rocksdb::StringBuilderJni::append(env, jstring_builder, value.c_str());
+    if (jresult_string_builder == nullptr) {
+      *has_exception = true;
+      return false;
+    }
   }
 
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
+  *has_exception = false;
+  return static_cast<jboolean>(keyMayExist);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    merge
- * Signature: (J[BII[BII)V
+ * Method:    keyMayExist
+ * Signature: (J[BIILjava/lang/StringBuilder;)Z
  */
-void Java_org_rocksdb_RocksDB_merge__J_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
-                                                 jlong jdb_handle,
-                                                 jbyteArray jkey, jint jkey_off,
-                                                 jint jkey_len, jbyteArray jval,
-                                                 jint jval_off, jint jval_len) {
+jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIILjava_lang_StringBuilder_2(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len, jobject jstring_builder) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-
-  rocksdb_merge_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
-                       jkey_len, jval, jval_off, jval_len);
+  bool has_exception = false;
+  return key_may_exist_helper(env, db, rocksdb::ReadOptions(), nullptr, jkey,
+      jkey_off, jkey_len, jstring_builder, &has_exception);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    merge
- * Signature: (J[BII[BIIJ)V
+ * Method:    keyMayExist
+ * Signature: (J[BIIJLjava/lang/StringBuilder;)Z
  */
-void Java_org_rocksdb_RocksDB_merge__J_3BII_3BIIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jkey,
-    jint jkey_off, jint jkey_len, jbyteArray jval, jint jval_off, jint jval_len,
-    jlong jcf_handle) {
+jboolean
+Java_org_rocksdb_RocksDB_keyMayExist__J_3BIIJLjava_lang_StringBuilder_2(
+    JNIEnv* env, jobject, jlong jdb_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len,
+    jlong jcf_handle, jobject jstring_builder) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    rocksdb_merge_helper(env, db, default_write_options, cf_handle, jkey,
-                         jkey_off, jkey_len, jval, jval_off, jval_len);
+    bool has_exception = false;
+    return key_may_exist_helper(env, db, rocksdb::ReadOptions(), cf_handle,
+                                jkey, jkey_off, jkey_len, jstring_builder,
+                                &has_exception);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+    return true;
   }
 }
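[Usage note, not part of the patch] key_may_exist_helper returns DB::KeyMayExist's verdict and, when the value was already in memory (value_found), appends it to the caller's StringBuilder. false means definitely absent; true only means the key may exist. Sketch (path is illustrative):

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class KeyMayExistExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/rocksdb-kme")) {
          db.put("key".getBytes(), "value".getBytes());
          final StringBuilder value = new StringBuilder();
          // The inline value, if readily available, lands in the builder.
          if (db.keyMayExist("key".getBytes(), value)) {
            System.out.println("may exist, inline value: " + value);
          }
        }
      }
    }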
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    merge
- * Signature: (JJ[BII[BII)V
+ * Method:    keyMayExist
+ * Signature: (JJ[BIILjava/lang/StringBuilder;)Z
  */
-void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BII(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
-    jint jval_off, jint jval_len) {
+jboolean
+Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIILjava_lang_StringBuilder_2(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jread_options_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len, jobject jstring_builder) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
-
-  rocksdb_merge_helper(env, db, *write_options, nullptr, jkey, jkey_off,
-                       jkey_len, jval, jval_off, jval_len);
+  auto& read_options =
+      *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
+  bool has_exception = false;
+  return key_may_exist_helper(env, db, read_options, nullptr, jkey, jkey_off,
+      jkey_len, jstring_builder, &has_exception);
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    merge
- * Signature: (JJ[BII[BIIJ)V
+ * Method:    keyMayExist
+ * Signature: (JJ[BIIJLjava/lang/StringBuilder;)Z
  */
-void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BIIJ(
-    JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
-    jint jval_off, jint jval_len, jlong jcf_handle) {
+jboolean
+Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIIJLjava_lang_StringBuilder_2(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jread_options_handle,
+    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle,
+    jobject jstring_builder) {
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
+  auto& read_options =
+      *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   if (cf_handle != nullptr) {
-    rocksdb_merge_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
-                         jkey_len, jval, jval_off, jval_len);
+    bool has_exception = false;
+    return key_may_exist_helper(env, db, read_options, cf_handle, jkey,
+        jkey_off, jkey_len, jstring_builder, &has_exception);
   } else {
     rocksdb::RocksDBExceptionJni::ThrowNew(
         env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+    return true;
   }
 }
 
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::~DB()
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksDB_disposeInternal(JNIEnv* /*env*/,
-                                              jobject /*java_db*/,
-                                              jlong jhandle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
-  assert(db != nullptr);
-  delete db;
-}
-
-jlong rocksdb_iterator_helper(rocksdb::DB* db,
-                              rocksdb::ReadOptions read_options,
-                              rocksdb::ColumnFamilyHandle* cf_handle) {
-  rocksdb::Iterator* iterator = nullptr;
-  if (cf_handle != nullptr) {
-    iterator = db->NewIterator(read_options, cf_handle);
-  } else {
-    iterator = db->NewIterator(read_options);
-  }
-  return reinterpret_cast<jlong>(iterator);
-}
-
 /*
  * Class:     org_rocksdb_RocksDB
  * Method:    iterator
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_RocksDB_iterator__J(JNIEnv* /*env*/, jobject /*jdb*/,
-                                           jlong db_handle) {
+jlong Java_org_rocksdb_RocksDB_iterator__J(
+    JNIEnv*, jobject, jlong db_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   return rocksdb_iterator_helper(db, rocksdb::ReadOptions(), nullptr);
 }
@@ -1546,9 +1726,8 @@ jlong Java_org_rocksdb_RocksDB_iterator__J(JNIEnv* /*env*/, jobject /*jdb*/,
  * Method:    iterator
  * Signature: (JJ)J
  */
-jlong Java_org_rocksdb_RocksDB_iterator__JJ(JNIEnv* /*env*/, jobject /*jdb*/,
-                                            jlong db_handle,
-                                            jlong jread_options_handle) {
+jlong Java_org_rocksdb_RocksDB_iterator__JJ(
+    JNIEnv*, jobject, jlong db_handle, jlong jread_options_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   auto& read_options =
       *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
@@ -1560,9 +1739,8 @@ jlong Java_org_rocksdb_RocksDB_iterator__JJ(JNIEnv* /*env*/, jobject /*jdb*/,
  * Method:    iteratorCF
  * Signature: (JJ)J
  */
-jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ(JNIEnv* /*env*/, jobject /*jdb*/,
-                                              jlong db_handle,
-                                              jlong jcf_handle) {
+jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ(
+    JNIEnv*, jobject, jlong db_handle, jlong jcf_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   return rocksdb_iterator_helper(db, rocksdb::ReadOptions(), cf_handle);
@@ -1573,10 +1751,9 @@ jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ(JNIEnv* /*env*/, jobject /*jdb*/,
  * Method:    iteratorCF
  * Signature: (JJJ)J
  */
-jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(JNIEnv* /*env*/, jobject /*jdb*/,
-                                               jlong db_handle,
-                                               jlong jcf_handle,
-                                               jlong jread_options_handle) {
+jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(
+    JNIEnv*, jobject,
+    jlong db_handle, jlong jcf_handle, jlong jread_options_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   auto& read_options =
@@ -1589,10 +1766,10 @@ jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(JNIEnv* /*env*/, jobject /*jdb*/,
  * Method:    iterators
  * Signature: (J[JJ)[J
  */
-jlongArray Java_org_rocksdb_RocksDB_iterators(JNIEnv* env, jobject /*jdb*/,
-                                              jlong db_handle,
-                                              jlongArray jcolumn_family_handles,
-                                              jlong jread_options_handle) {
+jlongArray Java_org_rocksdb_RocksDB_iterators(
+    JNIEnv* env, jobject, jlong db_handle,
+    jlongArray jcolumn_family_handles,
+    jlong jread_options_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   auto& read_options =
       *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
@@ -1643,108 +1820,12 @@ jlongArray Java_org_rocksdb_RocksDB_iterators(JNIEnv* env, jobject /*jdb*/,
   }
 }
 
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getDefaultColumnFamily
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily(JNIEnv* /*env*/,
-                                                      jobject /*jobj*/,
-                                                      jlong jdb_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = db_handle->DefaultColumnFamily();
-  return reinterpret_cast<jlong>(cf_handle);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    createColumnFamily
- * Signature: (J[BJ)J
- */
-jlong Java_org_rocksdb_RocksDB_createColumnFamily(JNIEnv* env, jobject /*jdb*/,
-                                                  jlong jdb_handle,
-                                                  jbyteArray jcolumn_name,
-                                                  jlong jcolumn_options) {
-  rocksdb::ColumnFamilyHandle* handle;
-  jboolean has_exception = JNI_FALSE;
-  std::string column_name = rocksdb::JniUtil::byteString<std::string>(
-      env, jcolumn_name,
-      [](const char* str, const size_t len) { return std::string(str, len); },
-      &has_exception);
-  if (has_exception == JNI_TRUE) {
-    // exception occurred
-    return 0;
-  }
-
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cfOptions =
-      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options);
-
-  rocksdb::Status s =
-      db_handle->CreateColumnFamily(*cfOptions, column_name, &handle);
-
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(handle);
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return 0;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    dropColumnFamily
- * Signature: (JJ)V;
- */
-void Java_org_rocksdb_RocksDB_dropColumnFamily(JNIEnv* env, jobject /*jdb*/,
-                                               jlong jdb_handle,
-                                               jlong jcf_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  rocksdb::Status s = db_handle->DropColumnFamily(cf_handle);
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    dropColumnFamilies
- * Signature: (J[J)V
- */
-void Java_org_rocksdb_RocksDB_dropColumnFamilies(
-    JNIEnv* env, jobject, jlong jdb_handle, jlongArray jcolumn_family_handles) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-
-  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
-  if (jcolumn_family_handles != nullptr) {
-    const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
-
-    jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
-    if (jcfh == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return;
-    }
-
-    for (jsize i = 0; i < len_cols; i++) {
-      auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
-      cf_handles.push_back(cf_handle);
-    }
-    env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
-  }
-
-  rocksdb::Status s = db_handle->DropColumnFamilies(cf_handles);
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
 /*
  * Method:    getSnapshot
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_RocksDB_getSnapshot(JNIEnv* /*env*/, jobject /*jdb*/,
-                                           jlong db_handle) {
+jlong Java_org_rocksdb_RocksDB_getSnapshot(
+    JNIEnv*, jobject, jlong db_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   const rocksdb::Snapshot* snapshot = db->GetSnapshot();
   return reinterpret_cast<jlong>(snapshot);
@@ -1754,9 +1835,9 @@ jlong Java_org_rocksdb_RocksDB_getSnapshot(JNIEnv* /*env*/, jobject /*jdb*/,
  * Method:    releaseSnapshot
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_RocksDB_releaseSnapshot(JNIEnv* /*env*/, jobject /*jdb*/,
-                                              jlong db_handle,
-                                              jlong snapshot_handle) {
+void Java_org_rocksdb_RocksDB_releaseSnapshot(
+    JNIEnv*, jobject, jlong db_handle,
+    jlong snapshot_handle) {
   auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   auto* snapshot = reinterpret_cast<rocksdb::Snapshot*>(snapshot_handle);
   db->ReleaseSnapshot(snapshot);
@@ -1764,22 +1845,30 @@ void Java_org_rocksdb_RocksDB_releaseSnapshot(JNIEnv* /*env*/, jobject /*jdb*/,
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    getProperty0
- * Signature: (JLjava/lang/String;I)Ljava/lang/String;
+ * Method:    getProperty
+ * Signature: (JJLjava/lang/String;I)Ljava/lang/String;
  */
-jstring Java_org_rocksdb_RocksDB_getProperty0__JLjava_lang_String_2I(
-    JNIEnv* env, jobject /*jdb*/, jlong db_handle, jstring jproperty,
-    jint jproperty_len) {
+jstring Java_org_rocksdb_RocksDB_getProperty(
    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+    jstring jproperty, jint jproperty_len) {
   const char* property = env->GetStringUTFChars(jproperty, nullptr);
   if (property == nullptr) {
     // exception thrown: OutOfMemoryError
     return nullptr;
   }
-  rocksdb::Slice property_slice(property, jproperty_len);
+  rocksdb::Slice property_name(property, jproperty_len);
+
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
 
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   std::string property_value;
-  bool retCode = db->GetProperty(property_slice, &property_value);
+  bool retCode = db->GetProperty(cf_handle, property_name, &property_value);
   env->ReleaseStringUTFChars(jproperty, property);
 
   if (retCode) {
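[Usage note, not part of the patch] getProperty now takes a column family handle and falls back to DefaultColumnFamily() when the handle is 0, so a single native method serves both Java overloads. Sketch using a documented "rocksdb.*" property (path is illustrative):

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class GetPropertyExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/rocksdb-props")) {
          // Served by the getProperty native method above (default CF).
          System.out.println(db.getProperty("rocksdb.stats"));
        }
      }
    }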
@@ -1792,80 +1881,66 @@ jstring Java_org_rocksdb_RocksDB_getProperty0__JLjava_lang_String_2I(
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    getProperty0
- * Signature: (JJLjava/lang/String;I)Ljava/lang/String;
+ * Method:    getMapProperty
+ * Signature: (JJLjava/lang/String;I)Ljava/util/Map;
  */
-jstring Java_org_rocksdb_RocksDB_getProperty0__JJLjava_lang_String_2I(
-    JNIEnv* env, jobject /*jdb*/, jlong db_handle, jlong jcf_handle,
+jobject Java_org_rocksdb_RocksDB_getMapProperty(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
     jstring jproperty, jint jproperty_len) {
-  const char* property = env->GetStringUTFChars(jproperty, nullptr);
+  const char* property = env->GetStringUTFChars(jproperty, nullptr);
   if (property == nullptr) {
     // exception thrown: OutOfMemoryError
     return nullptr;
   }
-  rocksdb::Slice property_slice(property, jproperty_len);
-
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  std::string property_value;
-  bool retCode = db->GetProperty(cf_handle, property_slice, &property_value);
-  env->ReleaseStringUTFChars(jproperty, property);
-
-  if (retCode) {
-    return env->NewStringUTF(property_value.c_str());
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
-  return nullptr;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getLongProperty
- * Signature: (JLjava/lang/String;I)L;
- */
-jlong Java_org_rocksdb_RocksDB_getLongProperty__JLjava_lang_String_2I(
-    JNIEnv* env, jobject /*jdb*/, jlong db_handle, jstring jproperty,
-    jint jproperty_len) {
-  const char* property = env->GetStringUTFChars(jproperty, nullptr);
-  if (property == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
+  rocksdb::Slice property_name(property, jproperty_len);
+
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   }
-  rocksdb::Slice property_slice(property, jproperty_len);
 
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  uint64_t property_value = 0;
-  bool retCode = db->GetIntProperty(property_slice, &property_value);
+  std::map<std::string, std::string> property_value;
+  bool retCode = db->GetMapProperty(cf_handle, property_name, &property_value);
   env->ReleaseStringUTFChars(jproperty, property);
 
   if (retCode) {
-    return property_value;
+    return rocksdb::HashMapJni::fromCppMap(env, &property_value);
   }
 
   rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
-  return 0;
+  return nullptr;
 }
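[Usage note, not part of the patch] getMapProperty is new surface area: it returns the map form of a property (e.g. rocksdb.cfstats) converted through HashMapJni::fromCppMap, while getLongProperty in the next hunk keeps the GetIntProperty contract. Sketch (method names as exposed by RocksJava; path is illustrative):

    import java.util.Map;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class MapPropertyExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/rocksdb-mapprops")) {
          final Map<String, String> cfStats =
              db.getMapProperty("rocksdb.cfstats");
          final long numKeys = db.getLongProperty("rocksdb.estimate-num-keys");
          System.out.println(cfStats.size() + " stats, ~" + numKeys + " keys");
        }
      }
    }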
 /*
  * Class:     org_rocksdb_RocksDB
  * Method:    getLongProperty
- * Signature: (JJLjava/lang/String;I)L;
+ * Signature: (JJLjava/lang/String;I)J
  */
-jlong Java_org_rocksdb_RocksDB_getLongProperty__JJLjava_lang_String_2I(
-    JNIEnv* env, jobject /*jdb*/, jlong db_handle, jlong jcf_handle,
+jlong Java_org_rocksdb_RocksDB_getLongProperty(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
     jstring jproperty, jint jproperty_len) {
   const char* property = env->GetStringUTFChars(jproperty, nullptr);
   if (property == nullptr) {
     // exception thrown: OutOfMemoryError
     return 0;
   }
-  rocksdb::Slice property_slice(property, jproperty_len);
+  rocksdb::Slice property_name(property, jproperty_len);
+
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
 
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   uint64_t property_value;
-  bool retCode = db->GetIntProperty(cf_handle, property_slice, &property_value);
+  bool retCode =
+      db->GetIntProperty(cf_handle, property_name, &property_value);
   env->ReleaseStringUTFChars(jproperty, property);
 
   if (retCode) {
@@ -1876,21 +1951,33 @@ jlong Java_org_rocksdb_RocksDB_getLongProperty__JJLjava_lang_String_2I(
   return 0;
 }
 
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    resetStats
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_resetStats(
+    JNIEnv *, jobject, jlong jdb_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  db->ResetStats();
+}
+
 /*
  * Class:     org_rocksdb_RocksDB
  * Method:    getAggregatedLongProperty
  * Signature: (JLjava/lang/String;I)J
  */
 jlong Java_org_rocksdb_RocksDB_getAggregatedLongProperty(
-    JNIEnv* env, jobject, jlong db_handle, jstring jproperty, jint jproperty_len) {
+    JNIEnv* env, jobject, jlong db_handle,
+    jstring jproperty, jint jproperty_len) {
   const char* property = env->GetStringUTFChars(jproperty, nullptr);
   if (property == nullptr) {
     return 0;
   }
-  rocksdb::Slice property_slice(property, jproperty_len);
+  rocksdb::Slice property_name(property, jproperty_len);
   auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
   uint64_t property_value = 0;
-  bool retCode = db->GetAggregatedIntProperty(property_slice, &property_value);
+  bool retCode = db->GetAggregatedIntProperty(property_name, &property_value);
   env->ReleaseStringUTFChars(jproperty, property);
 
   if (retCode) {
@@ -1901,278 +1988,576 @@ jlong Java_org_rocksdb_RocksDB_getAggregatedLongProperty(
   return 0;
 }
 
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    getApproximateSizes
+ * Signature: (JJ[JB)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_getApproximateSizes(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+    jlongArray jrange_slice_handles, jbyte jinclude_flags) {
+  const jsize jlen = env->GetArrayLength(jrange_slice_handles);
+  const size_t range_count = jlen / 2;
 
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Flush
+  jboolean jranges_is_copy = JNI_FALSE;
+  jlong* jranges = env->GetLongArrayElements(jrange_slice_handles,
+                                             &jranges_is_copy);
+  if (jranges == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
 
-void rocksdb_flush_helper(JNIEnv* env, rocksdb::DB* db,
-                          const rocksdb::FlushOptions& flush_options,
-                          rocksdb::ColumnFamilyHandle* column_family_handle) {
-  rocksdb::Status s;
-  if (column_family_handle != nullptr) {
-    s = db->Flush(flush_options, column_family_handle);
+  auto ranges = std::unique_ptr<rocksdb::Range[]>(
+      new rocksdb::Range[range_count]);
+  for (jsize i = 0; i < jlen; ++i) {
+    auto* start = reinterpret_cast<rocksdb::Slice*>(jranges[i]);
+    auto* limit = reinterpret_cast<rocksdb::Slice*>(jranges[++i]);
+    // i is odd at this point: jranges holds (start, limit) pairs, so the
+    // destination slot is the pair index i / 2.
+    ranges.get()[i / 2] = rocksdb::Range(*start, *limit);
+  }
+
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
   } else {
-    s = db->Flush(flush_options);
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
   }
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+
+  auto sizes = std::unique_ptr<uint64_t[]>(new uint64_t[range_count]);
+  db->GetApproximateSizes(cf_handle, ranges.get(),
+                          static_cast<int>(range_count), sizes.get(),
+                          static_cast<uint8_t>(jinclude_flags));
+
+  // release LongArrayElements
+  env->ReleaseLongArrayElements(jrange_slice_handles, jranges, JNI_ABORT);
+
+  // prepare results
+  auto results = std::unique_ptr<jlong[]>(new jlong[range_count]);
+  for (size_t i = 0; i < range_count; ++i) {
+    results.get()[i] = static_cast<jlong>(sizes.get()[i]);
+  }
+
+  const jsize jrange_count = jlen / 2;
+  jlongArray jresults = env->NewLongArray(jrange_count);
+  if (jresults == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+
+  env->SetLongArrayRegion(jresults, 0, jrange_count, results.get());
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    env->DeleteLocalRef(jresults);
+    return nullptr;
   }
+
+  return jresults;
 }
 
 /*
  * Class:     org_rocksdb_RocksDB
- * Method:    flush
- * Signature: (JJ)V
+ * Method:    getApproximateMemTableStats
+ * Signature: (JJJJ)[J
  */
-void Java_org_rocksdb_RocksDB_flush__JJ(JNIEnv* env, jobject /*jdb*/,
-                                        jlong jdb_handle,
-                                        jlong jflush_options) {
+jlongArray Java_org_rocksdb_RocksDB_getApproximateMemTableStats(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+    jlong jstartHandle, jlong jlimitHandle) {
+  auto* start = reinterpret_cast<rocksdb::Slice*>(jstartHandle);
+  auto* limit = reinterpret_cast<rocksdb::Slice*>(jlimitHandle);
+  const rocksdb::Range range(*start, *limit);
+
   auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* flush_options =
-      reinterpret_cast<rocksdb::FlushOptions*>(jflush_options);
-  rocksdb_flush_helper(env, db, *flush_options, nullptr);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
+
+  uint64_t count = 0;
+  uint64_t sizes = 0;
+  db->GetApproximateMemTableStats(cf_handle, range, &count, &sizes);
+
+  // prepare results: always exactly two entries, {count, size}
+  jlong results[2] = {
+      static_cast<jlong>(count),
+      static_cast<jlong>(sizes)};
+
+  jlongArray jsizes = env->NewLongArray(2);
+  if (jsizes == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+
+  env->SetLongArrayRegion(jsizes, 0, 2, results);
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    env->DeleteLocalRef(jsizes);
+    return nullptr;
+  }
+
+  return jsizes;
 }
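[Usage note, not part of the patch] getApproximateSizes decodes the flat jlong array as (start, limit) slice-handle pairs; getApproximateMemTableStats returns a two-element {count, size} array. A sketch of the corresponding Java calls; the Range/SizeApproximationFlag spelling is my assumption about the Java wrappers, not taken from this hunk:

    import java.util.Arrays;
    import org.rocksdb.Range;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SizeApproximationFlag;
    import org.rocksdb.Slice;

    public class ApproximateSizesExample {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/rocksdb-sizes");
             final Slice start = new Slice("a".getBytes());
             final Slice limit = new Slice("z".getBytes())) {
          final Range range = new Range(start, limit);
          final long[] sizes = db.getApproximateSizes(Arrays.asList(range),
              SizeApproximationFlag.INCLUDE_FILES,
              SizeApproximationFlag.INCLUDE_MEMTABLES);
          System.out.println("~" + sizes[0] + " bytes in [a, z)");
        }
      }
    }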
+ compact_range_opts = + reinterpret_cast(jcompact_range_opts_handle); + } + auto* db = reinterpret_cast(jdb_handle); - auto* flush_options = - reinterpret_cast(jflush_options); - auto* cf_handle = reinterpret_cast(jcf_handle); - rocksdb_flush_helper(env, db, *flush_options, cf_handle); -} -////////////////////////////////////////////////////////////////////////////// -// rocksdb::DB::CompactRange - Full + rocksdb::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } -void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db, - rocksdb::ColumnFamilyHandle* cf_handle, - jboolean jreduce_level, jint jtarget_level, - jint jtarget_path_id) { rocksdb::Status s; - rocksdb::CompactRangeOptions compact_options; - compact_options.change_level = jreduce_level; - compact_options.target_level = jtarget_level; - compact_options.target_path_id = static_cast(jtarget_path_id); - if (cf_handle != nullptr) { - s = db->CompactRange(compact_options, cf_handle, nullptr, nullptr); + if (jbegin_len > 0 || jend_len > 0) { + const rocksdb::Slice begin(str_begin); + const rocksdb::Slice end(str_end); + s = db->CompactRange(*compact_range_opts, cf_handle, &begin, &end); } else { - // backwards compatibility - s = db->CompactRange(compact_options, nullptr, nullptr); + s = db->CompactRange(*compact_range_opts, cf_handle, nullptr, nullptr); } - if (s.ok()) { - return; + if (jcompact_range_opts_handle == 0) { + delete compact_range_opts; } rocksdb::RocksDBExceptionJni::ThrowNew(env, s); } /* * Class: org_rocksdb_RocksDB - * Method: compactRange0 - * Signature: (JZII)V + * Method: setOptions + * Signature: (JJ[Ljava/lang/String;[Ljava/lang/String;)V */ -void Java_org_rocksdb_RocksDB_compactRange0__JZII(JNIEnv* env, jobject /*jdb*/, - jlong jdb_handle, - jboolean jreduce_level, - jint jtarget_level, - jint jtarget_path_id) { +void Java_org_rocksdb_RocksDB_setOptions( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jobjectArray jkeys, jobjectArray jvalues) { + const jsize len = env->GetArrayLength(jkeys); + assert(len == env->GetArrayLength(jvalues)); + + std::unordered_map options_map; + for (jsize i = 0; i < len; i++) { + jobject jobj_key = env->GetObjectArrayElement(jkeys, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return; + } + + jobject jobj_value = env->GetObjectArrayElement(jvalues, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jobj_key); + return; + } + + jboolean has_exception = JNI_FALSE; + std::string s_key = + rocksdb::JniUtil::copyStdString( + env, reinterpret_cast(jobj_key), &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jobj_key); + return; + } + + std::string s_value = + rocksdb::JniUtil::copyStdString( + env, reinterpret_cast(jobj_value), &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jobj_key); + return; + } + + options_map[s_key] = s_value; + + env->DeleteLocalRef(jobj_key); + env->DeleteLocalRef(jobj_value); + } + auto* db = reinterpret_cast(jdb_handle); - rocksdb_compactrange_helper(env, db, nullptr, jreduce_level, jtarget_level, - jtarget_path_id); + auto* cf_handle = reinterpret_cast(jcf_handle); + auto s = db->SetOptions(cf_handle, options_map); + if (!s.ok()) { + 
rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } } /* * Class: org_rocksdb_RocksDB - * Method: compactRange - * Signature: (JZIIJ)V + * Method: setDBOptions + * Signature: (J[Ljava/lang/String;[Ljava/lang/String;)V */ -void Java_org_rocksdb_RocksDB_compactRange__JZIIJ( - JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jboolean jreduce_level, - jint jtarget_level, jint jtarget_path_id, jlong jcf_handle) { +void Java_org_rocksdb_RocksDB_setDBOptions( + JNIEnv* env, jobject, jlong jdb_handle, + jobjectArray jkeys, jobjectArray jvalues) { + const jsize len = env->GetArrayLength(jkeys); + assert(len == env->GetArrayLength(jvalues)); + + std::unordered_map options_map; + for (jsize i = 0; i < len; i++) { + jobject jobj_key = env->GetObjectArrayElement(jkeys, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return; + } + + jobject jobj_value = env->GetObjectArrayElement(jvalues, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jobj_key); + return; + } + + jboolean has_exception = JNI_FALSE; + std::string s_key = + rocksdb::JniUtil::copyStdString( + env, reinterpret_cast(jobj_key), &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jobj_key); + return; + } + + std::string s_value = + rocksdb::JniUtil::copyStdString( + env, reinterpret_cast(jobj_value), &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jobj_key); + return; + } + + options_map[s_key] = s_value; + + env->DeleteLocalRef(jobj_key); + env->DeleteLocalRef(jobj_value); + } + auto* db = reinterpret_cast(jdb_handle); - auto* cf_handle = reinterpret_cast(jcf_handle); - rocksdb_compactrange_helper(env, db, cf_handle, jreduce_level, jtarget_level, - jtarget_path_id); + auto s = db->SetDBOptions(options_map); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } } -////////////////////////////////////////////////////////////////////////////// -// rocksdb::DB::CompactRange - Range - -/** - * @return true if the compact range succeeded, false if a Java Exception - * was thrown +/* + * Class: org_rocksdb_RocksDB + * Method: compactFiles + * Signature: (JJJ[Ljava/lang/String;IIJ)[Ljava/lang/String; */ -bool rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db, - rocksdb::ColumnFamilyHandle* cf_handle, - jbyteArray jbegin, jint jbegin_len, - jbyteArray jend, jint jend_len, - const rocksdb::CompactRangeOptions& compact_options) { - jbyte* begin = env->GetByteArrayElements(jbegin, nullptr); - if (begin == nullptr) { - // exception thrown: OutOfMemoryError - return false; +jobjectArray Java_org_rocksdb_RocksDB_compactFiles( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcompaction_opts_handle, + jlong jcf_handle, jobjectArray jinput_file_names, jint joutput_level, + jint joutput_path_id, jlong jcompaction_job_info_handle) { + jboolean has_exception = JNI_FALSE; + const std::vector input_file_names = + rocksdb::JniUtil::copyStrings(env, jinput_file_names, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return nullptr; } - jbyte* end = env->GetByteArrayElements(jend, nullptr); - if (end == nullptr) { - // exception thrown: OutOfMemoryError - env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT); - return false; + auto* compaction_opts = + reinterpret_cast(jcompaction_opts_handle); + auto* db = reinterpret_cast(jdb_handle); + 
rocksdb::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); } - const rocksdb::Slice begin_slice(reinterpret_cast(begin), jbegin_len); - const rocksdb::Slice end_slice(reinterpret_cast(end), jend_len); + rocksdb::CompactionJobInfo* compaction_job_info = nullptr; + if (jcompaction_job_info_handle != 0) { + compaction_job_info = + reinterpret_cast(jcompaction_job_info_handle); + } - rocksdb::Status s; - if (cf_handle != nullptr) { - s = db->CompactRange(compact_options, cf_handle, &begin_slice, &end_slice); - } else { - // backwards compatibility - s = db->CompactRange(compact_options, &begin_slice, &end_slice); + std::vector output_file_names; + auto s = db->CompactFiles(*compaction_opts, cf_handle, input_file_names, + static_cast(joutput_level), static_cast(joutput_path_id), + &output_file_names, compaction_job_info); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; } - env->ReleaseByteArrayElements(jend, end, JNI_ABORT); - env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT); + return rocksdb::JniUtil::toJavaStrings(env, &output_file_names); +} - if (s.ok()) { - return true; +/* + * Class: org_rocksdb_RocksDB + * Method: pauseBackgroundWork + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_pauseBackgroundWork( + JNIEnv* env, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->PauseBackgroundWork(); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); } +} - rocksdb::RocksDBExceptionJni::ThrowNew(env, s); - return false; +/* + * Class: org_rocksdb_RocksDB + * Method: continueBackgroundWork + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_continueBackgroundWork( + JNIEnv* env, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->ContinueBackgroundWork(); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } } -/** - * @return true if the compact range succeeded, false if a Java Exception - * was thrown +/* + * Class: org_rocksdb_RocksDB + * Method: enableAutoCompaction + * Signature: (J[J)V + */ +void Java_org_rocksdb_RocksDB_enableAutoCompaction( + JNIEnv* env, jobject, jlong jdb_handle, jlongArray jcf_handles) { + auto* db = reinterpret_cast(jdb_handle); + jboolean has_exception = JNI_FALSE; + const std::vector cf_handles = + rocksdb::JniUtil::fromJPointers(env, jcf_handles, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + db->EnableAutoCompaction(cf_handles); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: numberLevels + * Signature: (JJ)I + */ +jint Java_org_rocksdb_RocksDB_numberLevels( + JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + rocksdb::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + return static_cast(db->NumberLevels(cf_handle)); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: maxMemCompactionLevel + * Signature: (JJ)I */ -bool rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db, - rocksdb::ColumnFamilyHandle* cf_handle, - jbyteArray jbegin, jint jbegin_len, - jbyteArray jend, jint jend_len, - jboolean jreduce_level, jint jtarget_level, - jint jtarget_path_id) { - rocksdb::CompactRangeOptions compact_options; - compact_options.change_level = jreduce_level; - compact_options.target_level = jtarget_level; - 
compact_options.target_path_id = static_cast(jtarget_path_id); +jint Java_org_rocksdb_RocksDB_maxMemCompactionLevel( + JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + rocksdb::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + return static_cast(db->MaxMemCompactionLevel(cf_handle)); +} - return rocksdb_compactrange_helper(env, db, cf_handle, jbegin, jbegin_len, - jend, jend_len, compact_options); +/* + * Class: org_rocksdb_RocksDB + * Method: level0StopWriteTrigger + * Signature: (JJ)I + */ +jint Java_org_rocksdb_RocksDB_level0StopWriteTrigger( + JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + rocksdb::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + return static_cast(db->Level0StopWriteTrigger(cf_handle)); } /* * Class: org_rocksdb_RocksDB - * Method: compactRange0 - * Signature: (J[BI[BIZII)V + * Method: getName + * Signature: (J)Ljava/lang/String; */ -void Java_org_rocksdb_RocksDB_compactRange0__J_3BI_3BIZII( - JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin, - jint jbegin_len, jbyteArray jend, jint jend_len, jboolean jreduce_level, - jint jtarget_level, jint jtarget_path_id) { +jstring Java_org_rocksdb_RocksDB_getName( + JNIEnv* env, jobject, jlong jdb_handle) { auto* db = reinterpret_cast(jdb_handle); - rocksdb_compactrange_helper(env, db, nullptr, jbegin, jbegin_len, jend, - jend_len, jreduce_level, jtarget_level, - jtarget_path_id); + std::string name = db->GetName(); + return rocksdb::JniUtil::toJavaString(env, &name, false); } /* * Class: org_rocksdb_RocksDB - * Method: compactRange - * Signature: (JJ[BI[BIZII)V + * Method: getEnv + * Signature: (J)J */ -void Java_org_rocksdb_RocksDB_compactRange__J_3BI_3BIZIIJ( - JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin, - jint jbegin_len, jbyteArray jend, jint jend_len, jboolean jreduce_level, - jint jtarget_level, jint jtarget_path_id, jlong jcf_handle) { +jlong Java_org_rocksdb_RocksDB_getEnv( + JNIEnv*, jobject, jlong jdb_handle) { auto* db = reinterpret_cast(jdb_handle); - auto* cf_handle = reinterpret_cast(jcf_handle); - rocksdb_compactrange_helper(env, db, cf_handle, jbegin, jbegin_len, jend, - jend_len, jreduce_level, jtarget_level, - jtarget_path_id); + return reinterpret_cast(db->GetEnv()); } - -void Java_org_rocksdb_RocksDB_compactRange__J_3BI_3BIJJ( - JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin, - jint jbegin_len, jbyteArray jend, jint jend_len, - jlong jcompact_options_handle, jlong jcf_handle) { +/* + * Class: org_rocksdb_RocksDB + * Method: flush + * Signature: (JJ[J)V + */ +void Java_org_rocksdb_RocksDB_flush( + JNIEnv* env, jobject, jlong jdb_handle, jlong jflush_opts_handle, + jlongArray jcf_handles) { auto* db = reinterpret_cast(jdb_handle); - auto* cf_handle = reinterpret_cast(jcf_handle); - auto* compact_options = reinterpret_cast(jcompact_options_handle); - - rocksdb_compactrange_helper(env, db, cf_handle, jbegin, jbegin_len, jend, - jend_len, *compact_options); + auto* flush_opts = + reinterpret_cast(jflush_opts_handle); + std::vector cf_handles; + if (jcf_handles == nullptr) { + cf_handles.push_back(db->DefaultColumnFamily()); + } else { + jboolean has_exception = JNI_FALSE; + cf_handles = + rocksdb::JniUtil::fromJPointers( + env, 
jcf_handles, &has_exception); + if (has_exception) { + // exception occurred + return; + } + } + auto s = db->Flush(*flush_opts, cf_handles); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } } - -////////////////////////////////////////////////////////////////////////////// -// rocksdb::DB::PauseBackgroundWork - /* * Class: org_rocksdb_RocksDB - * Method: pauseBackgroundWork - * Signature: (J)V + * Method: flushWal + * Signature: (JZ)V */ -void Java_org_rocksdb_RocksDB_pauseBackgroundWork(JNIEnv* env, jobject /*jobj*/, - jlong jdb_handle) { +void Java_org_rocksdb_RocksDB_flushWal( + JNIEnv* env, jobject, jlong jdb_handle, jboolean jsync) { auto* db = reinterpret_cast(jdb_handle); - auto s = db->PauseBackgroundWork(); + auto s = db->FlushWAL(jsync == JNI_TRUE); if (!s.ok()) { rocksdb::RocksDBExceptionJni::ThrowNew(env, s); } } -////////////////////////////////////////////////////////////////////////////// -// rocksdb::DB::ContinueBackgroundWork - /* * Class: org_rocksdb_RocksDB - * Method: continueBackgroundWork + * Method: syncWal * Signature: (J)V */ -void Java_org_rocksdb_RocksDB_continueBackgroundWork(JNIEnv* env, - jobject /*jobj*/, - jlong jdb_handle) { +void Java_org_rocksdb_RocksDB_syncWal( + JNIEnv* env, jobject, jlong jdb_handle) { auto* db = reinterpret_cast(jdb_handle); - auto s = db->ContinueBackgroundWork(); + auto s = db->SyncWAL(); if (!s.ok()) { rocksdb::RocksDBExceptionJni::ThrowNew(env, s); } } -////////////////////////////////////////////////////////////////////////////// -// rocksdb::DB::GetLatestSequenceNumber - /* * Class: org_rocksdb_RocksDB * Method: getLatestSequenceNumber * Signature: (J)V */ -jlong Java_org_rocksdb_RocksDB_getLatestSequenceNumber(JNIEnv* /*env*/, - jobject /*jdb*/, - jlong jdb_handle) { +jlong Java_org_rocksdb_RocksDB_getLatestSequenceNumber( + JNIEnv*, jobject, jlong jdb_handle) { auto* db = reinterpret_cast(jdb_handle); return db->GetLatestSequenceNumber(); } -////////////////////////////////////////////////////////////////////////////// -// rocksdb::DB enable/disable file deletions +/* + * Class: org_rocksdb_RocksDB + * Method: setPreserveDeletesSequenceNumber + * Signature: (JJ)Z + */ +jboolean JNICALL Java_org_rocksdb_RocksDB_setPreserveDeletesSequenceNumber( + JNIEnv*, jobject, jlong jdb_handle, jlong jseq_number) { + auto* db = reinterpret_cast(jdb_handle); + if (db->SetPreserveDeletesSequenceNumber( + static_cast(jseq_number))) { + return JNI_TRUE; + } else { + return JNI_FALSE; + } +} /* * Class: org_rocksdb_RocksDB - * Method: enableFileDeletions + * Method: disableFileDeletions * Signature: (J)V */ -void Java_org_rocksdb_RocksDB_disableFileDeletions(JNIEnv* env, jobject /*jdb*/, - jlong jdb_handle) { +void Java_org_rocksdb_RocksDB_disableFileDeletions( + JNIEnv* env, jobject, jlong jdb_handle) { auto* db = reinterpret_cast(jdb_handle); rocksdb::Status s = db->DisableFileDeletions(); if (!s.ok()) { @@ -2185,9 +2570,8 @@ void Java_org_rocksdb_RocksDB_disableFileDeletions(JNIEnv* env, jobject /*jdb*/, * Method: enableFileDeletions * Signature: (JZ)V */ -void Java_org_rocksdb_RocksDB_enableFileDeletions(JNIEnv* env, jobject /*jdb*/, - jlong jdb_handle, - jboolean jforce) { +void Java_org_rocksdb_RocksDB_enableFileDeletions( + JNIEnv* env, jobject, jlong jdb_handle, jboolean jforce) { auto* db = reinterpret_cast(jdb_handle); rocksdb::Status s = db->EnableFileDeletions(jforce); if (!s.ok()) { @@ -2195,17 +2579,84 @@ void Java_org_rocksdb_RocksDB_enableFileDeletions(JNIEnv* env, jobject /*jdb*/, } } 
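Because disableFileDeletions and enableFileDeletions above map directly onto the reference-counted C++ calls, Java callers are expected to pair them around the work that needs stable on-disk files. A hedged usage sketch; the copy step is a placeholder, not a real RocksJava API:

  import org.rocksdb.RocksDB;
  import org.rocksdb.RocksDBException;

  public final class FileDeletionGuard {
    // With force == false, deletions resume only after every
    // disableFileDeletions() call has been matched by an enable.
    public static void withFileDeletionsDisabled(final RocksDB db,
        final Runnable copyStep) throws RocksDBException {
      db.disableFileDeletions();
      try {
        copyStep.run();  // e.g. copy the files reported by getLiveFiles()
      } finally {
        db.enableFileDeletions(false);
      }
    }
  }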
-////////////////////////////////////////////////////////////////////////////// -// rocksdb::DB::GetUpdatesSince +/* + * Class: org_rocksdb_RocksDB + * Method: getLiveFiles + * Signature: (JZ)[Ljava/lang/String; + */ +jobjectArray Java_org_rocksdb_RocksDB_getLiveFiles( + JNIEnv* env, jobject, jlong jdb_handle, jboolean jflush_memtable) { + auto* db = reinterpret_cast(jdb_handle); + std::vector live_files; + uint64_t manifest_file_size = 0; + auto s = db->GetLiveFiles( + live_files, &manifest_file_size, jflush_memtable == JNI_TRUE); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + // append the manifest_file_size to the vector + // for passing back to java + live_files.push_back(std::to_string(manifest_file_size)); + + return rocksdb::JniUtil::toJavaStrings(env, &live_files); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getSortedWalFiles + * Signature: (J)[Lorg/rocksdb/LogFile; + */ +jobjectArray Java_org_rocksdb_RocksDB_getSortedWalFiles( + JNIEnv* env, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + std::vector> sorted_wal_files; + auto s = db->GetSortedWalFiles(sorted_wal_files); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + // convert to Java type + const jsize jlen = static_cast(sorted_wal_files.size()); + jobjectArray jsorted_wal_files = env->NewObjectArray( + jlen, rocksdb::LogFileJni::getJClass(env), nullptr); + if(jsorted_wal_files == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + jsize i = 0; + for (auto it = sorted_wal_files.begin(); it != sorted_wal_files.end(); ++it) { + jobject jlog_file = rocksdb::LogFileJni::fromCppLogFile(env, it->get()); + if (jlog_file == nullptr) { + // exception occurred + env->DeleteLocalRef(jsorted_wal_files); + return nullptr; + } + + env->SetObjectArrayElement(jsorted_wal_files, i++, jlog_file); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(jlog_file); + env->DeleteLocalRef(jsorted_wal_files); + return nullptr; + } + + env->DeleteLocalRef(jlog_file); + } + + return jsorted_wal_files; +} /* * Class: org_rocksdb_RocksDB * Method: getUpdatesSince * Signature: (JJ)J */ -jlong Java_org_rocksdb_RocksDB_getUpdatesSince(JNIEnv* env, jobject /*jdb*/, - jlong jdb_handle, - jlong jsequence_number) { +jlong Java_org_rocksdb_RocksDB_getUpdatesSince( + JNIEnv* env, jobject, jlong jdb_handle, jlong jsequence_number) { auto* db = reinterpret_cast(jdb_handle); rocksdb::SequenceNumber sequence_number = static_cast(jsequence_number); @@ -2221,68 +2672,86 @@ jlong Java_org_rocksdb_RocksDB_getUpdatesSince(JNIEnv* env, jobject /*jdb*/, /* * Class: org_rocksdb_RocksDB - * Method: setOptions - * Signature: (JJ[Ljava/lang/String;[Ljava/lang/String;)V + * Method: deleteFile + * Signature: (JLjava/lang/String;)V */ -void Java_org_rocksdb_RocksDB_setOptions(JNIEnv* env, jobject /*jdb*/, - jlong jdb_handle, jlong jcf_handle, - jobjectArray jkeys, - jobjectArray jvalues) { - const jsize len = env->GetArrayLength(jkeys); - assert(len == env->GetArrayLength(jvalues)); - - std::unordered_map options_map; - for (jsize i = 0; i < len; i++) { - jobject jobj_key = env->GetObjectArrayElement(jkeys, i); - if (env->ExceptionCheck()) { - // exception thrown: ArrayIndexOutOfBoundsException - return; - } - - jobject jobj_value = env->GetObjectArrayElement(jvalues, i); - if (env->ExceptionCheck()) { - // exception thrown: ArrayIndexOutOfBoundsException - env->DeleteLocalRef(jobj_key); - return; - } +void 
Java_org_rocksdb_RocksDB_deleteFile( + JNIEnv* env, jobject, jlong jdb_handle, jstring jname) { + auto* db = reinterpret_cast(jdb_handle); + jboolean has_exception = JNI_FALSE; + std::string name = + rocksdb::JniUtil::copyStdString(env, jname, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + db->DeleteFile(name); +} - jstring jkey = reinterpret_cast(jobj_key); - jstring jval = reinterpret_cast(jobj_value); +/* + * Class: org_rocksdb_RocksDB + * Method: getLiveFilesMetaData + * Signature: (J)[Lorg/rocksdb/LiveFileMetaData; + */ +jobjectArray Java_org_rocksdb_RocksDB_getLiveFilesMetaData( + JNIEnv* env, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + std::vector live_files_meta_data; + db->GetLiveFilesMetaData(&live_files_meta_data); + + // convert to Java type + const jsize jlen = static_cast(live_files_meta_data.size()); + jobjectArray jlive_files_meta_data = env->NewObjectArray( + jlen, rocksdb::LiveFileMetaDataJni::getJClass(env), nullptr); + if(jlive_files_meta_data == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } - const char* key = env->GetStringUTFChars(jkey, nullptr); - if (key == nullptr) { - // exception thrown: OutOfMemoryError - env->DeleteLocalRef(jobj_value); - env->DeleteLocalRef(jobj_key); - return; + jsize i = 0; + for (auto it = live_files_meta_data.begin(); it != live_files_meta_data.end(); ++it) { + jobject jlive_file_meta_data = + rocksdb::LiveFileMetaDataJni::fromCppLiveFileMetaData(env, &(*it)); + if (jlive_file_meta_data == nullptr) { + // exception occurred + env->DeleteLocalRef(jlive_files_meta_data); + return nullptr; } - const char* value = env->GetStringUTFChars(jval, nullptr); - if (value == nullptr) { - // exception thrown: OutOfMemoryError - env->ReleaseStringUTFChars(jkey, key); - env->DeleteLocalRef(jobj_value); - env->DeleteLocalRef(jobj_key); - return; + env->SetObjectArrayElement(jlive_files_meta_data, i++, jlive_file_meta_data); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(jlive_file_meta_data); + env->DeleteLocalRef(jlive_files_meta_data); + return nullptr; } - std::string s_key(key); - std::string s_value(value); - options_map[s_key] = s_value; - - env->ReleaseStringUTFChars(jkey, key); - env->ReleaseStringUTFChars(jval, value); - env->DeleteLocalRef(jobj_key); - env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jlive_file_meta_data); } - auto* db = reinterpret_cast(jdb_handle); - auto* cf_handle = reinterpret_cast(jcf_handle); - db->SetOptions(cf_handle, options_map); + return jlive_files_meta_data; } -////////////////////////////////////////////////////////////////////////////// -// rocksdb::DB::IngestExternalFile +/* + * Class: org_rocksdb_RocksDB + * Method: getColumnFamilyMetaData + * Signature: (JJ)Lorg/rocksdb/ColumnFamilyMetaData; + */ +jobject Java_org_rocksdb_RocksDB_getColumnFamilyMetaData( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + rocksdb::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + rocksdb::ColumnFamilyMetaData cf_metadata; + db->GetColumnFamilyMetaData(cf_handle, &cf_metadata); + return rocksdb::ColumnFamilyMetaDataJni::fromCppColumnFamilyMetaData( + env, &cf_metadata); +} /* * Class: org_rocksdb_RocksDB @@ -2290,7 +2759,7 @@ void Java_org_rocksdb_RocksDB_setOptions(JNIEnv* env, jobject /*jdb*/, * Signature: 
(JJ[Ljava/lang/String;IJ)V */ void Java_org_rocksdb_RocksDB_ingestExternalFile( - JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jcf_handle, + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, jobjectArray jfile_path_list, jint jfile_path_list_len, jlong jingest_external_file_options_handle) { jboolean has_exception = JNI_FALSE; @@ -2313,14 +2782,249 @@ void Java_org_rocksdb_RocksDB_ingestExternalFile( } } +/* + * Class: org_rocksdb_RocksDB + * Method: verifyChecksum + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_verifyChecksum( + JNIEnv* env, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->VerifyChecksum(); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getDefaultColumnFamily + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily( + JNIEnv*, jobject, jlong jdb_handle) { + auto* db_handle = reinterpret_cast(jdb_handle); + auto* cf_handle = db_handle->DefaultColumnFamily(); + return reinterpret_cast(cf_handle); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getPropertiesOfAllTables + * Signature: (JJ)Ljava/util/Map; + */ +jobject Java_org_rocksdb_RocksDB_getPropertiesOfAllTables( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + rocksdb::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + rocksdb::TablePropertiesCollection table_properties_collection; + auto s = db->GetPropertiesOfAllTables(cf_handle, + &table_properties_collection); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } + + // convert to Java type + jobject jhash_map = rocksdb::HashMapJni::construct( + env, static_cast(table_properties_collection.size())); + if (jhash_map == nullptr) { + // exception occurred + return nullptr; + } + + const rocksdb::HashMapJni::FnMapKV, jobject, jobject> fn_map_kv = + [env](const std::pair>& kv) { + jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false); + if (env->ExceptionCheck()) { + // an error occurred + return std::unique_ptr>(nullptr); + } + + jobject jtable_properties = rocksdb::TablePropertiesJni::fromCppTableProperties(env, *(kv.second.get())); + if (jtable_properties == nullptr) { + // an error occurred + env->DeleteLocalRef(jkey); + return std::unique_ptr>(nullptr); + } + + return std::unique_ptr>(new std::pair(static_cast(jkey), static_cast(jtable_properties))); + }; + + if (!rocksdb::HashMapJni::putAll(env, jhash_map, table_properties_collection.begin(), table_properties_collection.end(), fn_map_kv)) { + // exception occurred + return nullptr; + } + + return jhash_map; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getPropertiesOfTablesInRange + * Signature: (JJ[J)Ljava/util/Map; + */ +jobject Java_org_rocksdb_RocksDB_getPropertiesOfTablesInRange( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jlongArray jrange_slice_handles) { + auto* db = reinterpret_cast(jdb_handle); + rocksdb::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + const jsize jlen = env->GetArrayLength(jrange_slice_handles); + jboolean jrange_slice_handles_is_copy = JNI_FALSE; + jlong *jrange_slice_handle = env->GetLongArrayElements( + jrange_slice_handles, &jrange_slice_handles_is_copy); + if (jrange_slice_handle == 
nullptr) {
+    // exception occurred
+    return nullptr;
+  }
+
+  const size_t ranges_len = static_cast<size_t>(jlen / 2);
+  auto ranges = std::unique_ptr<rocksdb::Range[]>(
+      new rocksdb::Range[ranges_len]);
+  for (jsize i = 0, j = 0; i < jlen; ++i) {
+    auto* start = reinterpret_cast<rocksdb::Slice*>(
+        jrange_slice_handle[i]);
+    auto* limit = reinterpret_cast<rocksdb::Slice*>(
+        jrange_slice_handle[++i]);
+    ranges[j++] = rocksdb::Range(*start, *limit);
+  }
+
+  rocksdb::TablePropertiesCollection table_properties_collection;
+  auto s = db->GetPropertiesOfTablesInRange(
+      cf_handle, ranges.get(), ranges_len, &table_properties_collection);
+  if (!s.ok()) {
+    // error occurred
+    env->ReleaseLongArrayElements(jrange_slice_handles, jrange_slice_handle, JNI_ABORT);
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  // cleanup
+  env->ReleaseLongArrayElements(jrange_slice_handles, jrange_slice_handle, JNI_ABORT);
+
+  // convert to Java type: the JNI signature declares a Ljava/util/Map;
+  // return value, so build the same String -> TableProperties map as
+  // getPropertiesOfAllTables above, rather than handing back the input
+  // handle array
+  jobject jhash_map = rocksdb::HashMapJni::construct(
+      env, static_cast<uint32_t>(table_properties_collection.size()));
+  if (jhash_map == nullptr) {
+    // exception occurred
+    return nullptr;
+  }
+
+  const rocksdb::HashMapJni::FnMapKV<const std::string, const std::shared_ptr<const rocksdb::TableProperties>, jobject, jobject> fn_map_kv =
+      [env](const std::pair<const std::string, const std::shared_ptr<const rocksdb::TableProperties>>& kv) {
+    jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+    if (env->ExceptionCheck()) {
+      // an error occurred
+      return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+    }
+
+    jobject jtable_properties = rocksdb::TablePropertiesJni::fromCppTableProperties(env, *(kv.second.get()));
+    if (jtable_properties == nullptr) {
+      // an error occurred
+      env->DeleteLocalRef(jkey);
+      return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+    }
+
+    return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), static_cast<jobject>(jtable_properties)));
+  };
+
+  if (!rocksdb::HashMapJni::putAll(env, jhash_map, table_properties_collection.begin(), table_properties_collection.end(), fn_map_kv)) {
+    // exception occurred
+    return nullptr;
+  }
+
+  return jhash_map;
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    suggestCompactRange
+ * Signature: (JJ)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_suggestCompactRange(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
+  auto* begin = new rocksdb::Slice();
+  auto* end = new rocksdb::Slice();
+  auto s = db->SuggestCompactRange(cf_handle, begin, end);
+  if (!s.ok()) {
+    // error occurred
+    delete begin;
+    delete end;
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  jlongArray jslice_handles = env->NewLongArray(2);
+  if (jslice_handles == nullptr) {
+    // exception thrown: OutOfMemoryError
+    delete begin;
+    delete end;
+    return nullptr;
+  }
+
+  jlong slice_handles[2];
+  slice_handles[0] = reinterpret_cast<jlong>(begin);
+  slice_handles[1] = reinterpret_cast<jlong>(end);
+  env->SetLongArrayRegion(jslice_handles, 0, 2, slice_handles);
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    delete begin;
+    delete end;
+    env->DeleteLocalRef(jslice_handles);
+    return nullptr;
+  }
+
+  return jslice_handles;
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    promoteL0
+ * Signature: (JJI)V
+ */
+void Java_org_rocksdb_RocksDB_promoteL0(
+    JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle, jint jtarget_level) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
+  db->PromoteL0(cf_handle, static_cast<int>(jtarget_level));
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    startTrace
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_RocksDB_startTrace(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jmax_trace_file_size,
+    jlong jtrace_writer_jnicallback_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::TraceOptions trace_options;
+  trace_options.max_trace_file_size =
+      static_cast<uint64_t>(jmax_trace_file_size);
+  // transfer ownership of trace writer from Java to C++
+  auto trace_writer = std::unique_ptr<rocksdb::TraceWriter>(
+      reinterpret_cast<rocksdb::TraceWriterJniCallback*>(
+          jtrace_writer_jnicallback_handle));
+  auto s = db->StartTrace(trace_options, std::move(trace_writer));
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    endTrace
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_org_rocksdb_RocksDB_endTrace(
+    JNIEnv* env, jobject, jlong jdb_handle) {
+  auto* db =
reinterpret_cast(jdb_handle); + auto s = db->EndTrace(); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + /* * Class: org_rocksdb_RocksDB * Method: destroyDB * Signature: (Ljava/lang/String;J)V */ -void Java_org_rocksdb_RocksDB_destroyDB(JNIEnv* env, jclass /*jcls*/, - jstring jdb_path, - jlong joptions_handle) { +void Java_org_rocksdb_RocksDB_destroyDB( + JNIEnv* env, jclass, jstring jdb_path, jlong joptions_handle) { const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); if (db_path == nullptr) { // exception thrown: OutOfMemoryError diff --git a/java/rocksjni/sst_file_manager.cc b/java/rocksjni/sst_file_manager.cc index c83ea00ef..3df3c9966 100644 --- a/java/rocksjni/sst_file_manager.cc +++ b/java/rocksjni/sst_file_manager.cc @@ -129,6 +129,8 @@ jobject Java_org_rocksdb_SstFileManager_getTrackedFiles(JNIEnv* env, reinterpret_cast*>(jhandle); auto tracked_files = sptr_sst_file_manager->get()->GetTrackedFiles(); + //TODO(AR) could refactor to share code with rocksdb::HashMapJni::fromCppMap(env, tracked_files); + const jobject jtracked_files = rocksdb::HashMapJni::construct( env, static_cast(tracked_files.size())); if (jtracked_files == nullptr) { @@ -136,7 +138,7 @@ jobject Java_org_rocksdb_SstFileManager_getTrackedFiles(JNIEnv* env, return nullptr; } - const rocksdb::HashMapJni::FnMapKV + const rocksdb::HashMapJni::FnMapKV fn_map_kv = [env](const std::pair& pair) { const jstring jtracked_file_path = diff --git a/java/rocksjni/statistics.cc b/java/rocksjni/statistics.cc index df137dc76..355b90bbf 100644 --- a/java/rocksjni/statistics.cc +++ b/java/rocksjni/statistics.cc @@ -20,8 +20,10 @@ * Method: newStatistics * Signature: ()J */ -jlong Java_org_rocksdb_Statistics_newStatistics__(JNIEnv* env, jclass jcls) { - return Java_org_rocksdb_Statistics_newStatistics___3BJ(env, jcls, nullptr, 0); +jlong Java_org_rocksdb_Statistics_newStatistics__( + JNIEnv* env, jclass jcls) { + return Java_org_rocksdb_Statistics_newStatistics___3BJ( + env, jcls, nullptr, 0); } /* @@ -40,10 +42,10 @@ jlong Java_org_rocksdb_Statistics_newStatistics__J( * Method: newStatistics * Signature: ([B)J */ -jlong Java_org_rocksdb_Statistics_newStatistics___3B(JNIEnv* env, jclass jcls, - jbyteArray jhistograms) { - return Java_org_rocksdb_Statistics_newStatistics___3BJ(env, jcls, jhistograms, - 0); +jlong Java_org_rocksdb_Statistics_newStatistics___3B( + JNIEnv* env, jclass jcls, jbyteArray jhistograms) { + return Java_org_rocksdb_Statistics_newStatistics___3BJ( + env, jcls, jhistograms, 0); } /* @@ -52,8 +54,7 @@ jlong Java_org_rocksdb_Statistics_newStatistics___3B(JNIEnv* env, jclass jcls, * Signature: ([BJ)J */ jlong Java_org_rocksdb_Statistics_newStatistics___3BJ( - JNIEnv* env, jclass /*jcls*/, jbyteArray jhistograms, - jlong jother_statistics_handle) { + JNIEnv* env, jclass, jbyteArray jhistograms, jlong jother_statistics_handle) { std::shared_ptr* pSptr_other_statistics = nullptr; if (jother_statistics_handle > 0) { pSptr_other_statistics = @@ -97,9 +98,8 @@ jlong Java_org_rocksdb_Statistics_newStatistics___3BJ( * Method: disposeInternal * Signature: (J)V */ -void Java_org_rocksdb_Statistics_disposeInternal(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { +void Java_org_rocksdb_Statistics_disposeInternal( + JNIEnv*, jobject, jlong jhandle) { if (jhandle > 0) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); @@ -112,8 +112,8 @@ void Java_org_rocksdb_Statistics_disposeInternal(JNIEnv* /*env*/, * Method: statsLevel * Signature: (J)B */ -jbyte 
Java_org_rocksdb_Statistics_statsLevel(JNIEnv* /*env*/, jobject /*jobj*/, - jlong jhandle) { +jbyte Java_org_rocksdb_Statistics_statsLevel( + JNIEnv*, jobject, jlong jhandle) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); assert(pSptr_statistics != nullptr); @@ -126,9 +126,8 @@ jbyte Java_org_rocksdb_Statistics_statsLevel(JNIEnv* /*env*/, jobject /*jobj*/, * Method: setStatsLevel * Signature: (JB)V */ -void Java_org_rocksdb_Statistics_setStatsLevel(JNIEnv* /*env*/, - jobject /*jobj*/, jlong jhandle, - jbyte jstats_level) { +void Java_org_rocksdb_Statistics_setStatsLevel( + JNIEnv*, jobject, jlong jhandle, jbyte jstats_level) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); assert(pSptr_statistics != nullptr); @@ -141,15 +140,14 @@ void Java_org_rocksdb_Statistics_setStatsLevel(JNIEnv* /*env*/, * Method: getTickerCount * Signature: (JB)J */ -jlong Java_org_rocksdb_Statistics_getTickerCount(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jbyte jticker_type) { +jlong Java_org_rocksdb_Statistics_getTickerCount( + JNIEnv*, jobject, jlong jhandle, jbyte jticker_type) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); assert(pSptr_statistics != nullptr); auto ticker = rocksdb::TickerTypeJni::toCppTickers(jticker_type); - return pSptr_statistics->get()->getTickerCount(ticker); + uint64_t count = pSptr_statistics->get()->getTickerCount(ticker); + return static_cast(count); } /* @@ -157,10 +155,8 @@ jlong Java_org_rocksdb_Statistics_getTickerCount(JNIEnv* /*env*/, * Method: getAndResetTickerCount * Signature: (JB)J */ -jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle, - jbyte jticker_type) { +jlong Java_org_rocksdb_Statistics_getAndResetTickerCount( + JNIEnv*, jobject, jlong jhandle, jbyte jticker_type) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); assert(pSptr_statistics != nullptr); @@ -173,17 +169,16 @@ jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(JNIEnv* /*env*/, * Method: getHistogramData * Signature: (JB)Lorg/rocksdb/HistogramData; */ -jobject Java_org_rocksdb_Statistics_getHistogramData(JNIEnv* env, - jobject /*jobj*/, - jlong jhandle, - jbyte jhistogram_type) { +jobject Java_org_rocksdb_Statistics_getHistogramData( + JNIEnv* env, jobject, jlong jhandle, jbyte jhistogram_type) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); assert(pSptr_statistics != nullptr); - rocksdb::HistogramData - data; // TODO(AR) perhaps better to construct a Java Object Wrapper that - // uses ptr to C++ `new HistogramData` + // TODO(AR) perhaps better to construct a Java Object Wrapper that + // uses ptr to C++ `new HistogramData` + rocksdb::HistogramData data; + auto histogram = rocksdb::HistogramTypeJni::toCppHistograms(jhistogram_type); pSptr_statistics->get()->histogramData( static_cast(histogram), &data); @@ -211,10 +206,8 @@ jobject Java_org_rocksdb_Statistics_getHistogramData(JNIEnv* env, * Method: getHistogramString * Signature: (JB)Ljava/lang/String; */ -jstring Java_org_rocksdb_Statistics_getHistogramString(JNIEnv* env, - jobject /*jobj*/, - jlong jhandle, - jbyte jhistogram_type) { +jstring Java_org_rocksdb_Statistics_getHistogramString( + JNIEnv* env, jobject, jlong jhandle, jbyte jhistogram_type) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); assert(pSptr_statistics != nullptr); @@ -228,8 +221,8 @@ jstring Java_org_rocksdb_Statistics_getHistogramString(JNIEnv* env, * Method: reset * Signature: (J)V */ -void Java_org_rocksdb_Statistics_reset(JNIEnv* env, jobject /*jobj*/, - 
jlong jhandle) { +void Java_org_rocksdb_Statistics_reset( + JNIEnv* env, jobject, jlong jhandle) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); assert(pSptr_statistics != nullptr); @@ -244,8 +237,8 @@ void Java_org_rocksdb_Statistics_reset(JNIEnv* env, jobject /*jobj*/, * Method: toString * Signature: (J)Ljava/lang/String; */ -jstring Java_org_rocksdb_Statistics_toString(JNIEnv* env, jobject /*jobj*/, - jlong jhandle) { +jstring Java_org_rocksdb_Statistics_toString( + JNIEnv* env, jobject, jlong jhandle) { auto* pSptr_statistics = reinterpret_cast*>(jhandle); assert(pSptr_statistics != nullptr); diff --git a/java/rocksjni/table.cc b/java/rocksjni/table.cc index 7497317d9..5e6db28fc 100644 --- a/java/rocksjni/table.cc +++ b/java/rocksjni/table.cc @@ -9,6 +9,7 @@ #include #include "include/org_rocksdb_BlockBasedTableConfig.h" #include "include/org_rocksdb_PlainTableConfig.h" +#include "portal.h" #include "rocksdb/cache.h" #include "rocksdb/filter_policy.h" @@ -35,69 +36,102 @@ jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle( /* * Class: org_rocksdb_BlockBasedTableConfig * Method: newTableFactoryHandle - * Signature: (ZJIJJIIZJZZZZJZZJIBBI)J + * Signature: (ZZZZBBDBZJJJJIIIJZZJZZIIZZJIJI)J */ jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle( - JNIEnv * /*env*/, jobject /*jobj*/, jboolean no_block_cache, - jlong block_cache_size, jint block_cache_num_shardbits, jlong jblock_cache, - jlong block_size, jint block_size_deviation, jint block_restart_interval, - jboolean whole_key_filtering, jlong jfilter_policy, - jboolean cache_index_and_filter_blocks, - jboolean cache_index_and_filter_blocks_with_high_priority, - jboolean pin_l0_filter_and_index_blocks_in_cache, - jboolean partition_filters, jlong metadata_block_size, - jboolean pin_top_level_index_and_filter, - jboolean hash_index_allow_collision, jlong block_cache_compressed_size, - jint block_cache_compressd_num_shard_bits, jbyte jchecksum_type, - jbyte jindex_type, jint jformat_version) { + JNIEnv*, jobject, jboolean jcache_index_and_filter_blocks, + jboolean jcache_index_and_filter_blocks_with_high_priority, + jboolean jpin_l0_filter_and_index_blocks_in_cache, + jboolean jpin_top_level_index_and_filter, jbyte jindex_type_value, + jbyte jdata_block_index_type_value, + jdouble jdata_block_hash_table_util_ratio, jbyte jchecksum_type_value, + jboolean jno_block_cache, jlong jblock_cache_handle, + jlong jpersistent_cache_handle, + jlong jblock_cache_compressed_handle, jlong jblock_size, + jint jblock_size_deviation, jint jblock_restart_interval, + jint jindex_block_restart_interval, jlong jmetadata_block_size, + jboolean jpartition_filters, jboolean juse_delta_encoding, + jlong jfilter_policy_handle, jboolean jwhole_key_filtering, + jboolean jverify_compression, jint jread_amp_bytes_per_bit, + jint jformat_version, jboolean jenable_index_compression, + jboolean jblock_align, jlong jblock_cache_size, + jint jblock_cache_num_shard_bits, jlong jblock_cache_compressed_size, + jint jblock_cache_compressed_num_shard_bits) { rocksdb::BlockBasedTableOptions options; - options.no_block_cache = no_block_cache; - - if (!no_block_cache) { - if (jblock_cache > 0) { + options.cache_index_and_filter_blocks = + static_cast(jcache_index_and_filter_blocks); + options.cache_index_and_filter_blocks_with_high_priority = + static_cast(jcache_index_and_filter_blocks_with_high_priority); + options.pin_l0_filter_and_index_blocks_in_cache = + static_cast(jpin_l0_filter_and_index_blocks_in_cache); + 
options.pin_top_level_index_and_filter = + static_cast(jpin_top_level_index_and_filter); + options.index_type = + rocksdb::IndexTypeJni::toCppIndexType(jindex_type_value); + options.data_block_index_type = + rocksdb::DataBlockIndexTypeJni::toCppDataBlockIndexType( + jdata_block_index_type_value); + options.data_block_hash_table_util_ratio = + static_cast(jdata_block_hash_table_util_ratio); + options.checksum = + rocksdb::ChecksumTypeJni::toCppChecksumType(jchecksum_type_value); + options.no_block_cache = static_cast(jno_block_cache); + if (options.no_block_cache) { + options.block_cache = nullptr; + } else { + if (jblock_cache_handle > 0) { std::shared_ptr *pCache = - reinterpret_cast *>(jblock_cache); + reinterpret_cast *>(jblock_cache_handle); options.block_cache = *pCache; - } else if (block_cache_size > 0) { - if (block_cache_num_shardbits > 0) { - options.block_cache = - rocksdb::NewLRUCache(block_cache_size, block_cache_num_shardbits); + } else if (jblock_cache_size > 0) { + if (jblock_cache_num_shard_bits > 0) { + options.block_cache = rocksdb::NewLRUCache( + static_cast(jblock_cache_size), + static_cast(jblock_cache_num_shard_bits)); } else { - options.block_cache = rocksdb::NewLRUCache(block_cache_size); + options.block_cache = rocksdb::NewLRUCache( + static_cast(jblock_cache_size)); } } } - options.block_size = block_size; - options.block_size_deviation = block_size_deviation; - options.block_restart_interval = block_restart_interval; - options.whole_key_filtering = whole_key_filtering; - if (jfilter_policy > 0) { - std::shared_ptr *pFilterPolicy = - reinterpret_cast *>( - jfilter_policy); - options.filter_policy = *pFilterPolicy; + if (jpersistent_cache_handle > 0) { + std::shared_ptr *pCache = + reinterpret_cast *>(jpersistent_cache_handle); + options.persistent_cache = *pCache; } - options.cache_index_and_filter_blocks = cache_index_and_filter_blocks; - options.cache_index_and_filter_blocks_with_high_priority = - cache_index_and_filter_blocks_with_high_priority; - options.pin_l0_filter_and_index_blocks_in_cache = - pin_l0_filter_and_index_blocks_in_cache; - options.partition_filters = partition_filters; - options.metadata_block_size = metadata_block_size; - options.pin_top_level_index_and_filter = pin_top_level_index_and_filter; - options.hash_index_allow_collision = hash_index_allow_collision; - if (block_cache_compressed_size > 0) { - if (block_cache_compressd_num_shard_bits > 0) { - options.block_cache = rocksdb::NewLRUCache( - block_cache_compressed_size, block_cache_compressd_num_shard_bits); + if (jblock_cache_compressed_handle > 0) { + std::shared_ptr *pCache = + reinterpret_cast *>(jblock_cache_compressed_handle); + options.block_cache_compressed = *pCache; + } else if (jblock_cache_compressed_size > 0) { + if (jblock_cache_compressed_num_shard_bits > 0) { + options.block_cache_compressed = rocksdb::NewLRUCache( + static_cast(jblock_cache_compressed_size), + static_cast(jblock_cache_compressed_num_shard_bits)); } else { - options.block_cache = rocksdb::NewLRUCache(block_cache_compressed_size); + options.block_cache_compressed = rocksdb::NewLRUCache( + static_cast(jblock_cache_compressed_size)); } } - options.checksum = static_cast(jchecksum_type); - options.index_type = - static_cast(jindex_type); - options.format_version = jformat_version; + options.block_size = static_cast(jblock_size); + options.block_size_deviation = static_cast(jblock_size_deviation); + options.block_restart_interval = static_cast(jblock_restart_interval); + options.index_block_restart_interval = 
static_cast(jindex_block_restart_interval); + options.metadata_block_size = static_cast(jmetadata_block_size); + options.partition_filters = static_cast(jpartition_filters); + options.use_delta_encoding = static_cast(juse_delta_encoding); + if (jfilter_policy_handle > 0) { + std::shared_ptr *pFilterPolicy = + reinterpret_cast *>( + jfilter_policy_handle); + options.filter_policy = *pFilterPolicy; + } + options.whole_key_filtering = static_cast(jwhole_key_filtering); + options.verify_compression = static_cast(jverify_compression); + options.read_amp_bytes_per_bit = static_cast(jread_amp_bytes_per_bit); + options.format_version = static_cast(jformat_version); + options.enable_index_compression = static_cast(jenable_index_compression); + options.block_align = static_cast(jblock_align); return reinterpret_cast(rocksdb::NewBlockBasedTableFactory(options)); } diff --git a/java/rocksjni/table_filter.cc b/java/rocksjni/table_filter.cc new file mode 100644 index 000000000..e5b355621 --- /dev/null +++ b/java/rocksjni/table_filter.cc @@ -0,0 +1,25 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// org.rocksdb.AbstractTableFilter. + +#include +#include + +#include "include/org_rocksdb_AbstractTableFilter.h" +#include "rocksjni/table_filter_jnicallback.h" + +/* + * Class: org_rocksdb_AbstractTableFilter + * Method: createNewTableFilter + * Signature: ()J + */ +jlong Java_org_rocksdb_AbstractTableFilter_createNewTableFilter( + JNIEnv* env, jobject jtable_filter) { + auto* table_filter_jnicallback = + new rocksdb::TableFilterJniCallback(env, jtable_filter); + return reinterpret_cast(table_filter_jnicallback); +} \ No newline at end of file diff --git a/java/rocksjni/table_filter_jnicallback.cc b/java/rocksjni/table_filter_jnicallback.cc new file mode 100644 index 000000000..680c01445 --- /dev/null +++ b/java/rocksjni/table_filter_jnicallback.cc @@ -0,0 +1,62 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::TableFilter. 
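To make the direction of this bridge concrete, the std::function built below ends up calling a user-supplied org.rocksdb.AbstractTableFilter. A plausible Java counterpart is sketched here; the getNumEntries() check is illustrative only:

  import org.rocksdb.AbstractTableFilter;
  import org.rocksdb.TableProperties;

  public class NonEmptyTableFilter extends AbstractTableFilter {
    @Override
    public boolean filter(final TableProperties tableProperties) {
      // true means "scan this table", false skips it; this boolean is
      // what m_table_filter_function below returns to the C++ side
      return tableProperties.getNumEntries() > 0;
    }
  }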
+ +#include "rocksjni/table_filter_jnicallback.h" +#include "rocksjni/portal.h" + +namespace rocksdb { +TableFilterJniCallback::TableFilterJniCallback( + JNIEnv* env, jobject jtable_filter) + : JniCallback(env, jtable_filter) { + m_jfilter_methodid = + AbstractTableFilterJni::getFilterMethod(env); + if(m_jfilter_methodid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + + // create the function reference + /* + Note the JNI ENV must be obtained/release + on each call to the function itself as + it may be called from multiple threads + */ + m_table_filter_function = [this](const rocksdb::TableProperties& table_properties) { + jboolean attached_thread = JNI_FALSE; + JNIEnv* thread_env = getJniEnv(&attached_thread); + assert(thread_env != nullptr); + + // create a Java TableProperties object + jobject jtable_properties = TablePropertiesJni::fromCppTableProperties(thread_env, table_properties); + if (jtable_properties == nullptr) { + // exception thrown from fromCppTableProperties + thread_env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return false; + } + + jboolean result = thread_env->CallBooleanMethod(m_jcallback_obj, m_jfilter_methodid, jtable_properties); + if (thread_env->ExceptionCheck()) { + // exception thrown from CallBooleanMethod + thread_env->DeleteLocalRef(jtable_properties); + thread_env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return false; + } + + // ok... cleanup and then return + releaseJniEnv(attached_thread); + return static_cast(result); + }; +} + +std::function TableFilterJniCallback::GetTableFilterFunction() { + return m_table_filter_function; +} + +} // namespace rocksdb diff --git a/java/rocksjni/table_filter_jnicallback.h b/java/rocksjni/table_filter_jnicallback.h new file mode 100644 index 000000000..39a0c90e0 --- /dev/null +++ b/java/rocksjni/table_filter_jnicallback.h @@ -0,0 +1,34 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::TableFilter. + +#ifndef JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_ +#define JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_ + +#include +#include +#include + +#include "rocksdb/table_properties.h" +#include "rocksjni/jnicallback.h" + +namespace rocksdb { + +class TableFilterJniCallback : public JniCallback { + public: + TableFilterJniCallback( + JNIEnv* env, jobject jtable_filter); + std::function GetTableFilterFunction(); + + private: + jmethodID m_jfilter_methodid; + std::function m_table_filter_function; +}; + +} //namespace rocksdb + +#endif // JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_ diff --git a/java/rocksjni/thread_status.cc b/java/rocksjni/thread_status.cc new file mode 100644 index 000000000..f70d515a5 --- /dev/null +++ b/java/rocksjni/thread_status.cc @@ -0,0 +1,121 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::ThreadStatus methods from Java side. 
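For orientation, the natives in this file are thin, stateless wrappers over the C++ ThreadStatus helpers. A hedged sketch of how they might be reached from Java; the wrapper method names and enum constants are assumed, not confirmed by this diff:

  import org.rocksdb.OperationType;
  import org.rocksdb.ThreadStatus;
  import org.rocksdb.ThreadType;

  public class ThreadStatusNames {
    public static void printExamples() {
      // each call lands in one of the JNI functions defined below
      System.out.println(ThreadStatus.getThreadTypeName(ThreadType.LOW_PRIORITY));
      System.out.println(ThreadStatus.getOperationName(OperationType.OP_COMPACTION));
      System.out.println(ThreadStatus.microsToString(1_500_000L));  // human-readable duration
    }
  }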
+ +#include + +#include "portal.h" +#include "include/org_rocksdb_ThreadStatus.h" +#include "rocksdb/thread_status.h" + +/* + * Class: org_rocksdb_ThreadStatus + * Method: getThreadTypeName + * Signature: (B)Ljava/lang/String; + */ +jstring Java_org_rocksdb_ThreadStatus_getThreadTypeName( + JNIEnv* env, jclass, jbyte jthread_type_value) { + auto name = rocksdb::ThreadStatus::GetThreadTypeName( + rocksdb::ThreadTypeJni::toCppThreadType(jthread_type_value)); + return rocksdb::JniUtil::toJavaString(env, &name, true); +} + +/* + * Class: org_rocksdb_ThreadStatus + * Method: getOperationName + * Signature: (B)Ljava/lang/String; + */ +jstring Java_org_rocksdb_ThreadStatus_getOperationName( + JNIEnv* env, jclass, jbyte joperation_type_value) { + auto name = rocksdb::ThreadStatus::GetOperationName( + rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value)); + return rocksdb::JniUtil::toJavaString(env, &name, true); +} + +/* + * Class: org_rocksdb_ThreadStatus + * Method: microsToStringNative + * Signature: (J)Ljava/lang/String; + */ +jstring Java_org_rocksdb_ThreadStatus_microsToStringNative( + JNIEnv* env, jclass, jlong jmicros) { + auto str = + rocksdb::ThreadStatus::MicrosToString(static_cast(jmicros)); + return rocksdb::JniUtil::toJavaString(env, &str, true); +} + +/* + * Class: org_rocksdb_ThreadStatus + * Method: getOperationStageName + * Signature: (B)Ljava/lang/String; + */ +jstring Java_org_rocksdb_ThreadStatus_getOperationStageName( + JNIEnv* env, jclass, jbyte joperation_stage_value) { + auto name = rocksdb::ThreadStatus::GetOperationStageName( + rocksdb::OperationStageJni::toCppOperationStage(joperation_stage_value)); + return rocksdb::JniUtil::toJavaString(env, &name, true); +} + +/* + * Class: org_rocksdb_ThreadStatus + * Method: getOperationPropertyName + * Signature: (BI)Ljava/lang/String; + */ +jstring Java_org_rocksdb_ThreadStatus_getOperationPropertyName( + JNIEnv* env, jclass, jbyte joperation_type_value, jint jindex) { + auto name = rocksdb::ThreadStatus::GetOperationPropertyName( + rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value), + static_cast(jindex)); + return rocksdb::JniUtil::toJavaString(env, &name, true); +} + +/* + * Class: org_rocksdb_ThreadStatus + * Method: interpretOperationProperties + * Signature: (B[J)Ljava/util/Map; + */ +jobject Java_org_rocksdb_ThreadStatus_interpretOperationProperties( + JNIEnv* env, jclass, jbyte joperation_type_value, + jlongArray joperation_properties) { + + //convert joperation_properties + const jsize len = env->GetArrayLength(joperation_properties); + const std::unique_ptr op_properties(new uint64_t[len]); + jlong* jop = env->GetLongArrayElements(joperation_properties, nullptr); + if (jop == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + for (jsize i = 0; i < len; i++) { + op_properties[i] = static_cast(jop[i]); + } + env->ReleaseLongArrayElements(joperation_properties, jop, JNI_ABORT); + + // call the function + auto result = rocksdb::ThreadStatus::InterpretOperationProperties( + rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value), + op_properties.get()); + jobject jresult = rocksdb::HashMapJni::fromCppMap(env, &result); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + + return jresult; +} + +/* + * Class: org_rocksdb_ThreadStatus + * Method: getStateName + * Signature: (B)Ljava/lang/String; + */ +jstring Java_org_rocksdb_ThreadStatus_getStateName( + JNIEnv* env, jclass, jbyte jstate_type_value) { + auto name = 
rocksdb::ThreadStatus::GetStateName(
+      rocksdb::StateTypeJni::toCppStateType(jstate_type_value));
+  return rocksdb::JniUtil::toJavaString(env, &name, true);
+}
\ No newline at end of file
diff --git a/java/rocksjni/trace_writer.cc b/java/rocksjni/trace_writer.cc
new file mode 100644
index 000000000..5d47cfcb3
--- /dev/null
+++ b/java/rocksjni/trace_writer.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::TraceWriter.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractTraceWriter.h"
+#include "rocksjni/trace_writer_jnicallback.h"
+
+/*
+ * Class:     org_rocksdb_AbstractTraceWriter
+ * Method:    createNewTraceWriter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTraceWriter_createNewTraceWriter(
+    JNIEnv* env, jobject jobj) {
+  auto* trace_writer = new rocksdb::TraceWriterJniCallback(env, jobj);
+  return reinterpret_cast<jlong>(trace_writer);
+}
diff --git a/java/rocksjni/trace_writer_jnicallback.cc b/java/rocksjni/trace_writer_jnicallback.cc
new file mode 100644
index 000000000..d547fb3f8
--- /dev/null
+++ b/java/rocksjni/trace_writer_jnicallback.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::TraceWriter.
+
+#include "rocksjni/trace_writer_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace rocksdb {
+TraceWriterJniCallback::TraceWriterJniCallback(
+    JNIEnv* env, jobject jtrace_writer)
+    : JniCallback(env, jtrace_writer) {
+  m_jwrite_proxy_methodid =
+      AbstractTraceWriterJni::getWriteProxyMethodId(env);
+  if(m_jwrite_proxy_methodid == nullptr) {
+    // exception thrown: NoSuchMethodException or OutOfMemoryError
+    return;
+  }
+
+  m_jclose_writer_proxy_methodid =
+      AbstractTraceWriterJni::getCloseWriterProxyMethodId(env);
+  if(m_jclose_writer_proxy_methodid == nullptr) {
+    // exception thrown: NoSuchMethodException or OutOfMemoryError
+    return;
+  }
+
+  m_jget_file_size_methodid =
+      AbstractTraceWriterJni::getGetFileSizeMethodId(env);
+  if(m_jget_file_size_methodid == nullptr) {
+    // exception thrown: NoSuchMethodException or OutOfMemoryError
+    return;
+  }
+}
+
+Status TraceWriterJniCallback::Write(const Slice& data) {
+  jboolean attached_thread = JNI_FALSE;
+  JNIEnv* env = getJniEnv(&attached_thread);
+  if (env == nullptr) {
+    return Status::IOError("Unable to attach JNI Environment");
+  }
+
+  jshort jstatus = env->CallShortMethod(m_jcallback_obj,
+      m_jwrite_proxy_methodid,
+      &data);
+
+  if(env->ExceptionCheck()) {
+    // exception thrown from CallShortMethod
+    env->ExceptionDescribe();  // print out exception to stderr
+    releaseJniEnv(attached_thread);
+    return Status::IOError("Unable to call AbstractTraceWriter#writeProxy(long)");
+  }
+
+  // unpack status code and status sub-code from jstatus:
+  // the high byte carries the Status code, the low byte the sub-code
+  jbyte jcode_value = (jstatus >> 8) & 0xFF;
+  jbyte jsub_code_value = jstatus & 0xFF;
+  std::unique_ptr<Status> s = StatusJni::toCppStatus(jcode_value, jsub_code_value);
+
+  releaseJniEnv(attached_thread);
+
+  return Status(*s);
+}
+
+Status TraceWriterJniCallback::Close() {
+ jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + if (env == nullptr) { + return Status::IOError("Unable to attach JNI Environment"); + } + + jshort jstatus = env->CallShortMethod(m_jcallback_obj, + m_jclose_writer_proxy_methodid); + + if(env->ExceptionCheck()) { + // exception thrown from CallShortMethod + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return Status::IOError("Unable to call AbstractTraceWriter#closeWriterProxy()"); + } + + // unpack status code and status sub-code from jstatus + jbyte code_value = (jstatus >> 8) & 0xFF; + jbyte sub_code_value = jstatus & 0xFF; + std::unique_ptr s = StatusJni::toCppStatus(code_value, sub_code_value); + + releaseJniEnv(attached_thread); + + return Status(*s); +} + +uint64_t TraceWriterJniCallback::GetFileSize() { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + if (env == nullptr) { + return 0; + } + + jlong jfile_size = env->CallLongMethod(m_jcallback_obj, + m_jget_file_size_methodid); + + if(env->ExceptionCheck()) { + // exception thrown from CallLongMethod + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return 0; + } + + releaseJniEnv(attached_thread); + + return static_cast(jfile_size); +} + +} // namespace rocksdb \ No newline at end of file diff --git a/java/rocksjni/trace_writer_jnicallback.h b/java/rocksjni/trace_writer_jnicallback.h new file mode 100644 index 000000000..610b6c465 --- /dev/null +++ b/java/rocksjni/trace_writer_jnicallback.h @@ -0,0 +1,36 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::TraceWriter. 
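Review note: the writeProxy/closeWriterProxy/getFileSize calls above land in a user-defined subclass of AbstractTraceWriter on the Java side. A minimal sketch of such a subclass follows; FileTraceWriter is a hypothetical example class, and the public Status(Code, SubCode, String) constructor it uses to report I/O failures is an assumption, so treat this as illustrative rather than part of the change:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;
    import org.rocksdb.AbstractTraceWriter;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.Slice;
    import org.rocksdb.Status;

    // Hypothetical TraceWriter that appends trace records to a local file.
    public class FileTraceWriter extends AbstractTraceWriter {
      private final FileChannel channel;
      private long fileSize;

      public FileTraceWriter(final String path) throws IOException {
        this.channel = FileChannel.open(Paths.get(path),
            StandardOpenOption.CREATE, StandardOpenOption.WRITE);
      }

      @Override
      public void write(final Slice data) throws RocksDBException {
        try {
          // Slice#data() copies the natively-owned bytes into a Java byte[];
          // the native Slice is only valid for the duration of this call.
          final byte[] bytes = data.data();
          channel.write(ByteBuffer.wrap(bytes));
          fileSize += bytes.length;
        } catch (final IOException e) {
          // Surfaces to C++ as a Status via writeProxy's (code << 8) | subCode encoding.
          throw new RocksDBException(
              new Status(Status.Code.IOError, Status.SubCode.None, e.getMessage()));
        }
      }

      @Override
      public void closeWriter() throws RocksDBException {
        try {
          channel.close();
        } catch (final IOException e) {
          throw new RocksDBException(
              new Status(Status.Code.IOError, Status.SubCode.None, e.getMessage()));
        }
      }

      @Override
      public long getFileSize() {
        return fileSize;
      }
    }

The packed short those proxies return is exactly what Write() and Close() above unpack into a status code and sub-code.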
+ +#ifndef JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_ +#define JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_ + +#include +#include + +#include "rocksdb/trace_reader_writer.h" +#include "rocksjni/jnicallback.h" + +namespace rocksdb { + +class TraceWriterJniCallback : public JniCallback, public TraceWriter { + public: + TraceWriterJniCallback( + JNIEnv* env, jobject jtrace_writer); + virtual Status Write(const Slice& data); + virtual Status Close(); + virtual uint64_t GetFileSize(); + + private: + jmethodID m_jwrite_proxy_methodid; + jmethodID m_jclose_writer_proxy_methodid; + jmethodID m_jget_file_size_methodid; +}; + +} //namespace rocksdb + +#endif // JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_ diff --git a/java/rocksjni/transaction_db.cc b/java/rocksjni/transaction_db.cc index 1914a2422..c2c40bf10 100644 --- a/java/rocksjni/transaction_db.cc +++ b/java/rocksjni/transaction_db.cc @@ -25,7 +25,7 @@ * Signature: (JJLjava/lang/String;)J */ jlong Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2( - JNIEnv* env, jclass /*jcls*/, jlong joptions_handle, + JNIEnv* env, jclass, jlong joptions_handle, jlong jtxn_db_options_handle, jstring jdb_path) { auto* options = reinterpret_cast(joptions_handle); auto* txn_db_options = @@ -54,7 +54,7 @@ jlong Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2( * Signature: (JJLjava/lang/String;[[B[J)[J */ jlongArray Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2_3_3B_3J( - JNIEnv* env, jclass /*jcls*/, jlong jdb_options_handle, + JNIEnv* env, jclass, jlong jdb_options_handle, jlong jtxn_db_options_handle, jstring jdb_path, jobjectArray jcolumn_names, jlongArray jcolumn_options_handles) { const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); @@ -151,14 +151,38 @@ jlongArray Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2_3_3B_3J( } } +/* + * Class: org_rocksdb_TransactionDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionDB_disposeInternal( + JNIEnv*, jobject, jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + assert(txn_db != nullptr); + delete txn_db; +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: closeDatabase + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionDB_closeDatabase( + JNIEnv* env, jclass, jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + assert(txn_db != nullptr); + rocksdb::Status s = txn_db->Close(); + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + /* * Class: org_rocksdb_TransactionDB * Method: beginTransaction * Signature: (JJ)J */ jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJ( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jwrite_options_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) { auto* txn_db = reinterpret_cast(jhandle); auto* write_options = reinterpret_cast(jwrite_options_handle); @@ -172,8 +196,8 @@ jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJ( * Signature: (JJJ)J */ jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJJ( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jwrite_options_handle, jlong jtxn_options_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle, + jlong jtxn_options_handle) { auto* txn_db = reinterpret_cast(jhandle); auto* write_options = reinterpret_cast(jwrite_options_handle); @@ -190,8 +214,8 @@ jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJJ( * Signature: (JJJ)J */ jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJ( - JNIEnv* /*env*/, jobject /*jobj*/, 
jlong jhandle, - jlong jwrite_options_handle, jlong jold_txn_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle, + jlong jold_txn_handle) { auto* txn_db = reinterpret_cast(jhandle); auto* write_options = reinterpret_cast(jwrite_options_handle); @@ -214,9 +238,8 @@ jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJ( * Signature: (JJJJ)J */ jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJJ( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jlong jwrite_options_handle, jlong jtxn_options_handle, - jlong jold_txn_handle) { + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle, + jlong jtxn_options_handle, jlong jold_txn_handle) { auto* txn_db = reinterpret_cast(jhandle); auto* write_options = reinterpret_cast(jwrite_options_handle); @@ -239,10 +262,8 @@ jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJJ( * Method: getTransactionByName * Signature: (JLjava/lang/String;)J */ -jlong Java_org_rocksdb_TransactionDB_getTransactionByName(JNIEnv* env, - jobject /*jobj*/, - jlong jhandle, - jstring jname) { +jlong Java_org_rocksdb_TransactionDB_getTransactionByName( + JNIEnv* env, jobject, jlong jhandle, jstring jname) { auto* txn_db = reinterpret_cast(jhandle); const char* name = env->GetStringUTFChars(jname, nullptr); if (name == nullptr) { @@ -260,7 +281,7 @@ jlong Java_org_rocksdb_TransactionDB_getTransactionByName(JNIEnv* env, * Signature: (J)[J */ jlongArray Java_org_rocksdb_TransactionDB_getAllPreparedTransactions( - JNIEnv* env, jobject /*jobj*/, jlong jhandle) { + JNIEnv* env, jobject, jlong jhandle) { auto* txn_db = reinterpret_cast(jhandle); std::vector txns; txn_db->GetAllPreparedTransactions(&txns); @@ -294,9 +315,8 @@ jlongArray Java_org_rocksdb_TransactionDB_getAllPreparedTransactions( * Method: getLockStatusData * Signature: (J)Ljava/util/Map; */ -jobject Java_org_rocksdb_TransactionDB_getLockStatusData(JNIEnv* env, - jobject /*jobj*/, - jlong jhandle) { +jobject Java_org_rocksdb_TransactionDB_getLockStatusData( + JNIEnv* env, jobject, jlong jhandle) { auto* txn_db = reinterpret_cast(jhandle); const std::unordered_multimap lock_status_data = txn_db->GetLockStatusData(); @@ -307,7 +327,7 @@ jobject Java_org_rocksdb_TransactionDB_getLockStatusData(JNIEnv* env, return nullptr; } - const rocksdb::HashMapJni::FnMapKV + const rocksdb::HashMapJni::FnMapKV fn_map_kv = [env]( const std::pair& @@ -427,19 +447,7 @@ jobjectArray Java_org_rocksdb_TransactionDB_getDeadlockInfoBuffer( * Signature: (JI)V */ void Java_org_rocksdb_TransactionDB_setDeadlockInfoBufferSize( - JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, - jint jdeadlock_info_buffer_size) { + JNIEnv*, jobject, jlong jhandle, jint jdeadlock_info_buffer_size) { auto* txn_db = reinterpret_cast(jhandle); txn_db->SetDeadlockInfoBufferSize(jdeadlock_info_buffer_size); } - -/* - * Class: org_rocksdb_TransactionDB - * Method: disposeInternal - * Signature: (J)V - */ -void Java_org_rocksdb_TransactionDB_disposeInternal(JNIEnv* /*env*/, - jobject /*jobj*/, - jlong jhandle) { - delete reinterpret_cast(jhandle); -} diff --git a/java/rocksjni/ttl.cc b/java/rocksjni/ttl.cc index 597332e52..4b071e7b3 100644 --- a/java/rocksjni/ttl.cc +++ b/java/rocksjni/ttl.cc @@ -23,9 +23,9 @@ * Method: open * Signature: (JLjava/lang/String;IZ)J */ -jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, jclass /*jcls*/, - jlong joptions_handle, jstring jdb_path, - jint jttl, jboolean jread_only) { +jlong Java_org_rocksdb_TtlDB_open( + JNIEnv* env, jclass, jlong joptions_handle, jstring 
jdb_path, jint jttl, + jboolean jread_only) { const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); if (db_path == nullptr) { // exception thrown: OutOfMemoryError @@ -53,11 +53,10 @@ jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, jclass /*jcls*/, * Method: openCF * Signature: (JLjava/lang/String;[[B[J[IZ)[J */ -jlongArray Java_org_rocksdb_TtlDB_openCF(JNIEnv* env, jclass /*jcls*/, - jlong jopt_handle, jstring jdb_path, - jobjectArray jcolumn_names, - jlongArray jcolumn_options, - jintArray jttls, jboolean jread_only) { +jlongArray Java_org_rocksdb_TtlDB_openCF( + JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options, + jintArray jttls, jboolean jread_only) { const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); if (db_path == nullptr) { // exception thrown: OutOfMemoryError @@ -147,13 +146,40 @@ jlongArray Java_org_rocksdb_TtlDB_openCF(JNIEnv* env, jclass /*jcls*/, } } +/* + * Class: org_rocksdb_TtlDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TtlDB_disposeInternal( + JNIEnv*, jobject, jlong jhandle) { + auto* ttl_db = reinterpret_cast(jhandle); + assert(ttl_db != nullptr); + delete ttl_db; +} + +/* + * Class: org_rocksdb_TtlDB + * Method: closeDatabase + * Signature: (J)V + */ +void Java_org_rocksdb_TtlDB_closeDatabase( + JNIEnv* /* env */, jclass, jlong /* jhandle */) { + //auto* ttl_db = reinterpret_cast(jhandle); + //assert(ttl_db != nullptr); + //rocksdb::Status s = ttl_db->Close(); + //rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + + //TODO(AR) this is disabled until https://github.com/facebook/rocksdb/issues/4818 is resolved! +} + /* * Class: org_rocksdb_TtlDB * Method: createColumnFamilyWithTtl * Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;[BJI)J; */ jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl( - JNIEnv* env, jobject /*jobj*/, jlong jdb_handle, jbyteArray jcolumn_name, + JNIEnv* env, jobject, jlong jdb_handle, jbyteArray jcolumn_name, jlong jcolumn_options, jint jttl) { jbyte* cfname = env->GetByteArrayElements(jcolumn_name, nullptr); if (cfname == nullptr) { diff --git a/java/rocksjni/wal_filter.cc b/java/rocksjni/wal_filter.cc new file mode 100644 index 000000000..c74e54252 --- /dev/null +++ b/java/rocksjni/wal_filter.cc @@ -0,0 +1,23 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// rocksdb::WalFilter. + +#include + +#include "include/org_rocksdb_AbstractWalFilter.h" +#include "rocksjni/wal_filter_jnicallback.h" + +/* + * Class: org_rocksdb_AbstractWalFilter + * Method: createNewWalFilter + * Signature: ()J + */ +jlong Java_org_rocksdb_AbstractWalFilter_createNewWalFilter( + JNIEnv* env, jobject jobj) { + auto* wal_filter = new rocksdb::WalFilterJniCallback(env, jobj); + return reinterpret_cast(wal_filter); +} \ No newline at end of file diff --git a/java/rocksjni/wal_filter_jnicallback.cc b/java/rocksjni/wal_filter_jnicallback.cc new file mode 100644 index 000000000..8fd909258 --- /dev/null +++ b/java/rocksjni/wal_filter_jnicallback.cc @@ -0,0 +1,144 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
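Review note: before the callback plumbing below, it may help to see what a Java-side WAL filter looks like. The sketch is hypothetical: SkipOldLogsWalFilter is an invented name, and the WalProcessingOption constants and the WalFilter.LogRecordFoundResult constructor arguments are assumed from the interface shapes that AbstractWalFilter uses later in this patch:

    import java.util.Map;
    import org.rocksdb.AbstractWalFilter;
    import org.rocksdb.WalFilter;
    import org.rocksdb.WalProcessingOption;
    import org.rocksdb.WriteBatch;

    // Hypothetical WAL filter: ignore records from log files that no
    // column family still references, continue processing everything else.
    public class SkipOldLogsWalFilter extends AbstractWalFilter {
      private long minLogNumberToKeep = 0;

      @Override
      public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumberMap,
          final Map<String, Integer> cfNameIdMap) {
        // Remember the oldest log number any column family still references.
        for (final long logNumber : cfLognumberMap.values()) {
          if (minLogNumberToKeep == 0 || logNumber < minLogNumberToKeep) {
            minLogNumberToKeep = logNumber;
          }
        }
      }

      @Override
      public WalFilter.LogRecordFoundResult logRecordFound(final long logNumber,
          final String logFileName, final WriteBatch batch,
          final WriteBatch newBatch) {
        final WalProcessingOption option = logNumber < minLogNumberToKeep
            ? WalProcessingOption.IGNORE_CURRENT_RECORD
            : WalProcessingOption.CONTINUE_PROCESSING;
        // batchChanged = false: we never rewrite records into newBatch here.
        return new WalFilter.LogRecordFoundResult(option, false);
      }

      @Override
      public String name() {
        return "SkipOldLogsWalFilter";
      }
    }

On the native side, logRecordFoundProxy packs the returned option and the batchChanged flag into a single short, mirroring the status packing used by the trace writer bridge.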
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::WalFilter. + +#include "rocksjni/wal_filter_jnicallback.h" +#include "rocksjni/portal.h" + +namespace rocksdb { +WalFilterJniCallback::WalFilterJniCallback( + JNIEnv* env, jobject jwal_filter) + : JniCallback(env, jwal_filter) { + // Note: The name of a WalFilter will not change during its lifetime, + // so we cache it in a global var + jmethodID jname_mid = AbstractWalFilterJni::getNameMethodId(env); + if(jname_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + jstring jname = (jstring)env->CallObjectMethod(m_jcallback_obj, jname_mid); + if(env->ExceptionCheck()) { + // exception thrown + return; + } + jboolean has_exception = JNI_FALSE; + m_name = JniUtil::copyString(env, jname, + &has_exception); // also releases jname + if (has_exception == JNI_TRUE) { + // exception thrown + return; + } + + m_column_family_log_number_map_mid = + AbstractWalFilterJni::getColumnFamilyLogNumberMapMethodId(env); + if(m_column_family_log_number_map_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + + m_log_record_found_proxy_mid = + AbstractWalFilterJni::getLogRecordFoundProxyMethodId(env); + if(m_log_record_found_proxy_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } +} + +void WalFilterJniCallback::ColumnFamilyLogNumberMap( + const std::map<uint32_t, uint64_t>& cf_lognumber_map, + const std::map<std::string, uint32_t>& cf_name_id_map) { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + if (env == nullptr) { + return; + } + + jobject jcf_lognumber_map = + rocksdb::HashMapJni::fromCppMap(env, &cf_lognumber_map); + if (jcf_lognumber_map == nullptr) { + // exception occurred + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + + jobject jcf_name_id_map = + rocksdb::HashMapJni::fromCppMap(env, &cf_name_id_map); + if (jcf_name_id_map == nullptr) { + // exception occurred + env->ExceptionDescribe(); // print out exception to stderr + env->DeleteLocalRef(jcf_lognumber_map); + releaseJniEnv(attached_thread); + return; + } + + env->CallVoidMethod(m_jcallback_obj, + m_column_family_log_number_map_mid, + jcf_lognumber_map, + jcf_name_id_map); + + env->DeleteLocalRef(jcf_lognumber_map); + env->DeleteLocalRef(jcf_name_id_map); + + if(env->ExceptionCheck()) { + // exception thrown from CallVoidMethod + env->ExceptionDescribe(); // print out exception to stderr + } + + releaseJniEnv(attached_thread); +} + + WalFilter::WalProcessingOption WalFilterJniCallback::LogRecordFound( + unsigned long long log_number, const std::string& log_file_name, + const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + if (env == nullptr) { + return WalFilter::WalProcessingOption::kCorruptedRecord; + } + + jstring jlog_file_name = JniUtil::toJavaString(env, &log_file_name); + if (jlog_file_name == nullptr) { + // exception occurred + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return WalFilter::WalProcessingOption::kCorruptedRecord; + } + + jshort jlog_record_found_result = env->CallShortMethod(m_jcallback_obj, +
m_log_record_found_proxy_mid, + static_cast<jlong>(log_number), + jlog_file_name, + reinterpret_cast<jlong>(&batch), + reinterpret_cast<jlong>(new_batch)); + + env->DeleteLocalRef(jlog_file_name); + + if (env->ExceptionCheck()) { + // exception thrown from CallShortMethod + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return WalFilter::WalProcessingOption::kCorruptedRecord; + } + + // unpack WalProcessingOption and batch_changed from jlog_record_found_result + jbyte jwal_processing_option_value = (jlog_record_found_result >> 8) & 0xFF; + jbyte jbatch_changed_value = jlog_record_found_result & 0xFF; + + releaseJniEnv(attached_thread); + + *batch_changed = jbatch_changed_value == JNI_TRUE; + + return WalProcessingOptionJni::toCppWalProcessingOption( + jwal_processing_option_value); +} + +const char* WalFilterJniCallback::Name() const { + return m_name.get(); +} + +} // namespace rocksdb \ No newline at end of file diff --git a/java/rocksjni/wal_filter_jnicallback.h b/java/rocksjni/wal_filter_jnicallback.h new file mode 100644 index 000000000..df6394cef --- /dev/null +++ b/java/rocksjni/wal_filter_jnicallback.h @@ -0,0 +1,42 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::WalFilter. + +#ifndef JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_ +#define JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_ + +#include <jni.h> +#include <map> +#include <memory> +#include <string> + +#include "rocksdb/wal_filter.h" +#include "rocksjni/jnicallback.h" + +namespace rocksdb { + +class WalFilterJniCallback : public JniCallback, public WalFilter { + public: + WalFilterJniCallback( + JNIEnv* env, jobject jwal_filter); + virtual void ColumnFamilyLogNumberMap( + const std::map<uint32_t, uint64_t>& cf_lognumber_map, + const std::map<std::string, uint32_t>& cf_name_id_map); + virtual WalFilter::WalProcessingOption LogRecordFound( + unsigned long long log_number, const std::string& log_file_name, + const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed); + virtual const char* Name() const; + + private: + std::unique_ptr<const char[]> m_name; + jmethodID m_column_family_log_number_map_mid; + jmethodID m_log_record_found_proxy_mid; +}; + +} //namespace rocksdb + +#endif // JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_ diff --git a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java index 44e75c3cf..8532debf8 100644 --- a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java +++ b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java @@ -20,7 +20,7 @@ public abstract class AbstractImmutableNativeReference * A flag indicating whether the current {@code AbstractNativeReference} is * responsible to free the underlying C++ object */ - private final AtomicBoolean owningHandle_; + protected final AtomicBoolean owningHandle_; protected AbstractImmutableNativeReference(final boolean owningHandle) { this.owningHandle_ = new AtomicBoolean(owningHandle); diff --git a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java new file mode 100644 index 000000000..63015c39a --- /dev/null +++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java @@ -0,0 +1,254 @@ +package org.rocksdb; + +import java.util.*; + +public
abstract class AbstractMutableOptions { + + protected static final String KEY_VALUE_PAIR_SEPARATOR = ";"; + protected static final char KEY_VALUE_SEPARATOR = '='; + static final String INT_ARRAY_INT_SEPARATOR = ","; + + protected final String[] keys; + private final String[] values; + + /** + * User must use builder pattern, or parser. + * + * @param keys the keys + * @param values the values + */ + protected AbstractMutableOptions(final String[] keys, final String[] values) { + this.keys = keys; + this.values = values; + } + + String[] getKeys() { + return keys; + } + + String[] getValues() { + return values; + } + + /** + * Returns a string representation of MutableOptions which + * is suitable for consumption by {@code #parse(String)}. + * + * @return String representation of MutableOptions + */ + @Override + public String toString() { + final StringBuilder buffer = new StringBuilder(); + for(int i = 0; i < keys.length; i++) { + buffer + .append(keys[i]) + .append(KEY_VALUE_SEPARATOR) + .append(values[i]); + + if(i + 1 < keys.length) { + buffer.append(KEY_VALUE_PAIR_SEPARATOR); + } + } + return buffer.toString(); + } + + public static abstract class AbstractMutableOptionsBuilder< + T extends AbstractMutableOptions, + U extends AbstractMutableOptionsBuilder, + K extends MutableOptionKey> { + + private final Map> options = new LinkedHashMap<>(); + + protected abstract U self(); + + /** + * Get all of the possible keys + * + * @return A map of all keys, indexed by name. + */ + protected abstract Map allKeys(); + + /** + * Construct a sub-class instance of {@link AbstractMutableOptions}. + * + * @param keys the keys + * @param values the values + * + * @return an instance of the options. + */ + protected abstract T build(final String[] keys, final String[] values); + + public T build() { + final String keys[] = new String[options.size()]; + final String values[] = new String[options.size()]; + + int i = 0; + for (final Map.Entry> option : options.entrySet()) { + keys[i] = option.getKey().name(); + values[i] = option.getValue().asString(); + i++; + } + + return build(keys, values); + } + + protected U setDouble( + final K key, final double value) { + if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) { + throw new IllegalArgumentException( + key + " does not accept a double value"); + } + options.put(key, MutableOptionValue.fromDouble(value)); + return self(); + } + + protected double getDouble(final K key) + throws NoSuchElementException, NumberFormatException { + final MutableOptionValue value = options.get(key); + if(value == null) { + throw new NoSuchElementException(key.name() + " has not been set"); + } + return value.asDouble(); + } + + protected U setLong( + final K key, final long value) { + if(key.getValueType() != MutableOptionKey.ValueType.LONG) { + throw new IllegalArgumentException( + key + " does not accept a long value"); + } + options.put(key, MutableOptionValue.fromLong(value)); + return self(); + } + + protected long getLong(final K key) + throws NoSuchElementException, NumberFormatException { + final MutableOptionValue value = options.get(key); + if(value == null) { + throw new NoSuchElementException(key.name() + " has not been set"); + } + return value.asLong(); + } + + protected U setInt( + final K key, final int value) { + if(key.getValueType() != MutableOptionKey.ValueType.INT) { + throw new IllegalArgumentException( + key + " does not accept an integer value"); + } + options.put(key, MutableOptionValue.fromInt(value)); + return self(); + } + + protected int 
getInt(final K key) + throws NoSuchElementException, NumberFormatException { + final MutableOptionValue value = options.get(key); + if(value == null) { + throw new NoSuchElementException(key.name() + " has not been set"); + } + return value.asInt(); + } + + protected U setBoolean( + final K key, final boolean value) { + if(key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) { + throw new IllegalArgumentException( + key + " does not accept a boolean value"); + } + options.put(key, MutableOptionValue.fromBoolean(value)); + return self(); + } + + protected boolean getBoolean(final K key) + throws NoSuchElementException, NumberFormatException { + final MutableOptionValue value = options.get(key); + if(value == null) { + throw new NoSuchElementException(key.name() + " has not been set"); + } + return value.asBoolean(); + } + + protected U setIntArray( + final K key, final int[] value) { + if(key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) { + throw new IllegalArgumentException( + key + " does not accept an int array value"); + } + options.put(key, MutableOptionValue.fromIntArray(value)); + return self(); + } + + protected int[] getIntArray(final K key) + throws NoSuchElementException, NumberFormatException { + final MutableOptionValue value = options.get(key); + if(value == null) { + throw new NoSuchElementException(key.name() + " has not been set"); + } + return value.asIntArray(); + } + + protected > U setEnum( + final K key, final N value) { + if(key.getValueType() != MutableOptionKey.ValueType.ENUM) { + throw new IllegalArgumentException( + key + " does not accept a Enum value"); + } + options.put(key, MutableOptionValue.fromEnum(value)); + return self(); + } + + protected > N getEnum(final K key) + throws NoSuchElementException, NumberFormatException { + final MutableOptionValue value = options.get(key); + if(value == null) { + throw new NoSuchElementException(key.name() + " has not been set"); + } + + if(!(value instanceof MutableOptionValue.MutableOptionEnumValue)) { + throw new NoSuchElementException(key.name() + " is not of Enum type"); + } + + return ((MutableOptionValue.MutableOptionEnumValue)value).asObject(); + } + + public U fromString( + final String keyStr, final String valueStr) + throws IllegalArgumentException { + Objects.requireNonNull(keyStr); + Objects.requireNonNull(valueStr); + + final K key = allKeys().get(keyStr); + switch(key.getValueType()) { + case DOUBLE: + return setDouble(key, Double.parseDouble(valueStr)); + + case LONG: + return setLong(key, Long.parseLong(valueStr)); + + case INT: + return setInt(key, Integer.parseInt(valueStr)); + + case BOOLEAN: + return setBoolean(key, Boolean.parseBoolean(valueStr)); + + case INT_ARRAY: + final String[] strInts = valueStr + .trim().split(INT_ARRAY_INT_SEPARATOR); + if(strInts == null || strInts.length == 0) { + throw new IllegalArgumentException( + "int array value is not correctly formatted"); + } + + final int value[] = new int[strInts.length]; + int i = 0; + for(final String strInt : strInts) { + value[i++] = Integer.parseInt(strInt); + } + return setIntArray(key, value); + } + + throw new IllegalStateException( + key + " has unknown value type: " + key.getValueType()); + } + } +} diff --git a/java/src/main/java/org/rocksdb/AbstractTableFilter.java b/java/src/main/java/org/rocksdb/AbstractTableFilter.java new file mode 100644 index 000000000..627e1ae1f --- /dev/null +++ b/java/src/main/java/org/rocksdb/AbstractTableFilter.java @@ -0,0 +1,19 @@ +package org.rocksdb; + +/** + * Base class for Table 
Filters. + */ +public abstract class AbstractTableFilter + extends RocksCallbackObject implements TableFilter { + + protected AbstractTableFilter() { + super(); + } + + @Override + protected long initializeNative(final long... nativeParameterHandles) { + return createNewTableFilter(); + } + + private native long createNewTableFilter(); +} diff --git a/java/src/main/java/org/rocksdb/AbstractTraceWriter.java b/java/src/main/java/org/rocksdb/AbstractTraceWriter.java new file mode 100644 index 000000000..806709b1f --- /dev/null +++ b/java/src/main/java/org/rocksdb/AbstractTraceWriter.java @@ -0,0 +1,70 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Base class for TraceWriters. + */ +public abstract class AbstractTraceWriter + extends RocksCallbackObject implements TraceWriter { + + @Override + protected long initializeNative(final long... nativeParameterHandles) { + return createNewTraceWriter(); + } + + /** + * Called from JNI, proxy for {@link TraceWriter#write(Slice)}. + * + * @param sliceHandle the native handle of the slice (which we do not own) + * + * @return short (2 bytes) where the first byte is the + * {@link Status.Code#getValue()} and the second byte is the + * {@link Status.SubCode#getValue()}. + */ + private short writeProxy(final long sliceHandle) { + try { + write(new Slice(sliceHandle)); + return statusToShort(Status.Code.Ok, Status.SubCode.None); + } catch (final RocksDBException e) { + return statusToShort(e.getStatus()); + } + } + + /** + * Called from JNI, proxy for {@link TraceWriter#closeWriter()}. + * + * @return short (2 bytes) where the first byte is the + * {@link Status.Code#getValue()} and the second byte is the + * {@link Status.SubCode#getValue()}. + */ + private short closeWriterProxy() { + try { + closeWriter(); + return statusToShort(Status.Code.Ok, Status.SubCode.None); + } catch (final RocksDBException e) { + return statusToShort(e.getStatus()); + } + } + + private static short statusToShort(/*@Nullable*/ final Status status) { + final Status.Code code = status != null && status.getCode() != null + ? status.getCode() + : Status.Code.IOError; + final Status.SubCode subCode = status != null && status.getSubCode() != null + ? status.getSubCode() + : Status.SubCode.None; + return statusToShort(code, subCode); + } + + private static short statusToShort(final Status.Code code, + final Status.SubCode subCode) { + short result = (short)(code.getValue() << 8); + return (short)(result | subCode.getValue()); + } + + private native long createNewTraceWriter(); +} diff --git a/java/src/main/java/org/rocksdb/AbstractWalFilter.java b/java/src/main/java/org/rocksdb/AbstractWalFilter.java new file mode 100644 index 000000000..d525045c6 --- /dev/null +++ b/java/src/main/java/org/rocksdb/AbstractWalFilter.java @@ -0,0 +1,49 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Base class for WAL Filters. + */ +public abstract class AbstractWalFilter + extends RocksCallbackObject implements WalFilter { + + @Override + protected long initializeNative(final long... 
nativeParameterHandles) { + return createNewWalFilter(); + } + + /** + * Called from JNI, proxy for + * {@link WalFilter#logRecordFound(long, String, WriteBatch, WriteBatch)}. + * + * @param logNumber the log handle. + * @param logFileName the log file name + * @param batchHandle the native handle of a WriteBatch (which we do not own) + * @param newBatchHandle the native handle of a + * new WriteBatch (which we do not own) + * + * @return short (2 bytes) where the first byte is the + * {@link WalFilter.LogRecordFoundResult#walProcessingOption} + * {@link WalFilter.LogRecordFoundResult#batchChanged}. + */ + private short logRecordFoundProxy(final long logNumber, + final String logFileName, final long batchHandle, + final long newBatchHandle) { + final LogRecordFoundResult logRecordFoundResult = logRecordFound( + logNumber, logFileName, new WriteBatch(batchHandle), + new WriteBatch(newBatchHandle)); + return logRecordFoundResultToShort(logRecordFoundResult); + } + + private static short logRecordFoundResultToShort( + final LogRecordFoundResult logRecordFoundResult) { + short result = (short)(logRecordFoundResult.walProcessingOption.getValue() << 8); + return (short)(result | (logRecordFoundResult.batchChanged ? 1 : 0)); + } + + private native long createNewWalFilter(); +} diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java index 092fe3784..3ec467123 100644 --- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java @@ -434,4 +434,32 @@ public interface AdvancedMutableColumnFamilyOptionsInterface * @return true if reporting is enabled */ boolean reportBgIoStats(); + + /** + * Non-bottom-level files older than TTL will go through the compaction + * process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be + * set to -1. + * + * Enabled only for level compaction for now. + * + * Default: 0 (disabled) + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param ttl the time-to-live. + * + * @return the reference to the current options. + */ + T setTtl(final long ttl); + + /** + * Get the TTL for Non-bottom-level files that will go through the compaction + * process. + * + * See {@link #setTtl(long)}. + * + * @return the time-to-live. + */ + long ttl(); } diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java index 1032be6e7..7a4ff14bf 100644 --- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java +++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -9,71 +9,252 @@ package org.rocksdb; * * BlockBasedTable is a RocksDB's default SST file format. 
*/ +//TODO(AR) should be renamed BlockBasedTableOptions public class BlockBasedTableConfig extends TableFormatConfig { public BlockBasedTableConfig() { - noBlockCache_ = false; - blockCacheSize_ = 8 * 1024 * 1024; - blockCacheNumShardBits_ = 0; - blockCache_ = null; - blockSize_ = 4 * 1024; - blockSizeDeviation_ = 10; - blockRestartInterval_ = 16; - wholeKeyFiltering_ = true; - filter_ = null; - cacheIndexAndFilterBlocks_ = false; - cacheIndexAndFilterBlocksWithHighPriority_ = false; - pinL0FilterAndIndexBlocksInCache_ = false; - partitionFilters_ = false; - metadataBlockSize_ = 4096; - pinTopLevelIndexAndFilter_ = true; - hashIndexAllowCollision_ = true; - blockCacheCompressedSize_ = 0; - blockCacheCompressedNumShardBits_ = 0; - checksumType_ = ChecksumType.kCRC32c; - indexType_ = IndexType.kBinarySearch; - formatVersion_ = 0; + //TODO(AR) flushBlockPolicyFactory + cacheIndexAndFilterBlocks = false; + cacheIndexAndFilterBlocksWithHighPriority = false; + pinL0FilterAndIndexBlocksInCache = false; + pinTopLevelIndexAndFilter = true; + indexType = IndexType.kBinarySearch; + dataBlockIndexType = DataBlockIndexType.kDataBlockBinarySearch; + dataBlockHashTableUtilRatio = 0.75; + checksumType = ChecksumType.kCRC32c; + noBlockCache = false; + blockCache = null; + persistentCache = null; + blockCacheCompressed = null; + blockSize = 4 * 1024; + blockSizeDeviation = 10; + blockRestartInterval = 16; + indexBlockRestartInterval = 1; + metadataBlockSize = 4096; + partitionFilters = false; + useDeltaEncoding = true; + filterPolicy = null; + wholeKeyFiltering = true; + verifyCompression = true; + readAmpBytesPerBit = 0; + formatVersion = 2; + enableIndexCompression = true; + blockAlign = false; + + // NOTE: ONLY used if blockCache == null + blockCacheSize = 8 * 1024 * 1024; + blockCacheNumShardBits = 0; + + // NOTE: ONLY used if blockCacheCompressed == null + blockCacheCompressedSize = 0; + blockCacheCompressedNumShardBits = 0; } /** - * Disable block cache. If this is set to true, - * then no block cache should be used, and the block_cache should - * point to a {@code nullptr} object. - * Default: false + * Indicating if we'd put index/filter blocks to the block cache. + * If not specified, each "table reader" object will pre-load index/filter + * block during table initialization. * - * @param noBlockCache if use block cache + * @return if index and filter blocks should be put in block cache. + */ + public boolean cacheIndexAndFilterBlocks() { + return cacheIndexAndFilterBlocks; + } + + /** + * Indicating if we'd put index/filter blocks to the block cache. + * If not specified, each "table reader" object will pre-load index/filter + * block during table initialization. + * + * @param cacheIndexAndFilterBlocks and filter blocks should be put in block cache. * @return the reference to the current config. */ - public BlockBasedTableConfig setNoBlockCache(final boolean noBlockCache) { - noBlockCache_ = noBlockCache; + public BlockBasedTableConfig setCacheIndexAndFilterBlocks( + final boolean cacheIndexAndFilterBlocks) { + this.cacheIndexAndFilterBlocks = cacheIndexAndFilterBlocks; return this; } /** - * @return if block cache is disabled + * Indicates if index and filter blocks will be treated as high-priority in the block cache. + * See note below about applicability. If not specified, defaults to false. + * + * @return if index and filter blocks will be treated as high-priority. 
*/ - public boolean noBlockCache() { - return noBlockCache_; + public boolean cacheIndexAndFilterBlocksWithHighPriority() { + return cacheIndexAndFilterBlocksWithHighPriority; } /** - * Set the amount of cache in bytes that will be used by RocksDB. - * If cacheSize is non-positive, then cache will not be used. - * DEFAULT: 8M + * If true, cache index and filter blocks with high priority. If set to true, + * depending on implementation of block cache, index and filter blocks may be + * less likely to be evicted than data blocks. * - * @param blockCacheSize block cache size in bytes + * @param cacheIndexAndFilterBlocksWithHighPriority if index and filter blocks + * will be treated as high-priority. * @return the reference to the current config. */ - public BlockBasedTableConfig setBlockCacheSize(final long blockCacheSize) { - blockCacheSize_ = blockCacheSize; + public BlockBasedTableConfig setCacheIndexAndFilterBlocksWithHighPriority( + final boolean cacheIndexAndFilterBlocksWithHighPriority) { + this.cacheIndexAndFilterBlocksWithHighPriority = cacheIndexAndFilterBlocksWithHighPriority; return this; } /** - * @return block cache size in bytes + * Indicating if we'd like to pin L0 index/filter blocks to the block cache. + If not specified, defaults to false. + * + * @return if L0 index and filter blocks should be pinned to the block cache. */ - public long blockCacheSize() { - return blockCacheSize_; + public boolean pinL0FilterAndIndexBlocksInCache() { + return pinL0FilterAndIndexBlocksInCache; + } + + /** + * Indicating if we'd like to pin L0 index/filter blocks to the block cache. + If not specified, defaults to false. + * + * @param pinL0FilterAndIndexBlocksInCache pin blocks in block cache + * @return the reference to the current config. + */ + public BlockBasedTableConfig setPinL0FilterAndIndexBlocksInCache( + final boolean pinL0FilterAndIndexBlocksInCache) { + this.pinL0FilterAndIndexBlocksInCache = pinL0FilterAndIndexBlocksInCache; + return this; + } + + /** + * Indicates if top-level index and filter blocks should be pinned. + * + * @return if top-level index and filter blocks should be pinned. + */ + public boolean pinTopLevelIndexAndFilter() { + return pinTopLevelIndexAndFilter; + } + + /** + * If cacheIndexAndFilterBlocks is true and the below is true, then + * the top-level index of partitioned filter and index blocks are stored in + * the cache, but a reference is held in the "table reader" object so the + * blocks are pinned and only evicted from cache when the table reader is + * freed. This is not limited to l0 in LSM tree. + * + * @param pinTopLevelIndexAndFilter if top-level index and filter blocks should be pinned. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setPinTopLevelIndexAndFilter(final boolean pinTopLevelIndexAndFilter) { + this.pinTopLevelIndexAndFilter = pinTopLevelIndexAndFilter; + return this; + } + + /** + * Get the index type. + * + * @return the currently set index type + */ + public IndexType indexType() { + return indexType; + } + + /** + * Sets the index type to used with this table. + * + * @param indexType {@link org.rocksdb.IndexType} value + * @return the reference to the current option. + */ + public BlockBasedTableConfig setIndexType( + final IndexType indexType) { + this.indexType = indexType; + return this; + } + + /** + * Get the data block index type. 
+ * + * @return the currently set data block index type + */ + public DataBlockIndexType dataBlockIndexType() { + return dataBlockIndexType; + } + + /** + * Sets the data block index type to used with this table. + * + * @param dataBlockIndexType {@link org.rocksdb.DataBlockIndexType} value + * @return the reference to the current option. + */ + public BlockBasedTableConfig setDataBlockIndexType( + final DataBlockIndexType dataBlockIndexType) { + this.dataBlockIndexType = dataBlockIndexType; + return this; + } + + /** + * Get the #entries/#buckets. It is valid only when {@link #dataBlockIndexType()} is + * {@link DataBlockIndexType#kDataBlockBinaryAndHash}. + * + * @return the #entries/#buckets. + */ + public double dataBlockHashTableUtilRatio() { + return dataBlockHashTableUtilRatio; + } + + /** + * Set the #entries/#buckets. It is valid only when {@link #dataBlockIndexType()} is + * {@link DataBlockIndexType#kDataBlockBinaryAndHash}. + * + * @param dataBlockHashTableUtilRatio #entries/#buckets + * @return the reference to the current option. + */ + public BlockBasedTableConfig setDataBlockHashTableUtilRatio( + final double dataBlockHashTableUtilRatio) { + this.dataBlockHashTableUtilRatio = dataBlockHashTableUtilRatio; + return this; + } + + /** + * Get the checksum type to be used with this table. + * + * @return the currently set checksum type + */ + public ChecksumType checksumType() { + return checksumType; + } + + /** + * Sets + * + * @param checksumType {@link org.rocksdb.ChecksumType} value. + * @return the reference to the current option. + */ + public BlockBasedTableConfig setChecksumType( + final ChecksumType checksumType) { + this.checksumType = checksumType; + return this; + } + + /** + * Determine if the block cache is disabled. + * + * @return if block cache is disabled + */ + public boolean noBlockCache() { + return noBlockCache; + } + + /** + * Disable block cache. If this is set to true, + * then no block cache should be used, and the {@link #setBlockCache(Cache)} + * should point to a {@code null} object. + * + * Default: false + * + * @param noBlockCache if use block cache + * @return the reference to the current config. + */ + public BlockBasedTableConfig setNoBlockCache(final boolean noBlockCache) { + this.noBlockCache = noBlockCache; + return this; } /** @@ -86,42 +267,68 @@ public class BlockBasedTableConfig extends TableFormatConfig { * {@link org.rocksdb.Cache} instance can be re-used in multiple options * instances. * - * @param cache {@link org.rocksdb.Cache} Cache java instance (e.g. LRUCache). + * @param blockCache {@link org.rocksdb.Cache} Cache java instance + * (e.g. LRUCache). + * * @return the reference to the current config. */ - public BlockBasedTableConfig setBlockCache(final Cache cache) { - blockCache_ = cache; + public BlockBasedTableConfig setBlockCache(final Cache blockCache) { + this.blockCache = blockCache; return this; } /** - * Controls the number of shards for the block cache. - * This is applied only if cacheSize is set to non-negative. + * Use the specified persistent cache. * - * @param blockCacheNumShardBits the number of shard bits. The resulting - * number of shards would be 2 ^ numShardBits. Any negative - * number means use default settings." - * @return the reference to the current option. + * If {@code !null} use the specified cache for pages read from device, + * otherwise no page cache is used. + * + * @param persistentCache the persistent cache + * + * @return the reference to the current config. 
*/ - public BlockBasedTableConfig setCacheNumShardBits( - final int blockCacheNumShardBits) { - blockCacheNumShardBits_ = blockCacheNumShardBits; + public BlockBasedTableConfig setPersistentCache( + final PersistentCache persistentCache) { + this.persistentCache = persistentCache; return this; } /** - * Returns the number of shard bits used in the block cache. - * The resulting number of shards would be 2 ^ (returned value). - * Any negative number means use default settings. + * Use the specified cache for compressed blocks. * - * @return the number of shard bits used in the block cache. + * If {@code null}, RocksDB will not use a compressed block cache. + * + * Note: though it looks similar to {@link #setBlockCache(Cache)}, RocksDB + * doesn't put the same type of object there. + * + * {@link org.rocksdb.Cache} should not be disposed before options instances + * using this cache is disposed. + * + * {@link org.rocksdb.Cache} instance can be re-used in multiple options + * instances. + * + * @param blockCacheCompressed {@link org.rocksdb.Cache} Cache java instance + * (e.g. LRUCache). + * + * @return the reference to the current config. */ - public int cacheNumShardBits() { - return blockCacheNumShardBits_; + public BlockBasedTableConfig setBlockCacheCompressed( + final Cache blockCacheCompressed) { + this.blockCacheCompressed = blockCacheCompressed; + return this; } /** - * Approximate size of user data packed per block. Note that the + * Get the approximate size of user data packed per block. + * + * @return block size in bytes + */ + public long blockSize() { + return blockSize; + } + + /** + * Approximate size of user data packed per block. Note that the * block size specified here corresponds to uncompressed data. The * actual size of the unit read from disk may be smaller if * compression is enabled. This parameter can be changed dynamically. @@ -131,23 +338,24 @@ public class BlockBasedTableConfig extends TableFormatConfig { * @return the reference to the current config. */ public BlockBasedTableConfig setBlockSize(final long blockSize) { - blockSize_ = blockSize; + this.blockSize = blockSize; return this; } /** - * @return block size in bytes + * @return the hash table ratio. */ - public long blockSize() { - return blockSize_; + public int blockSizeDeviation() { + return blockSizeDeviation; } /** * This is used to close a block before it reaches the configured - * 'block_size'. If the percentage of free space in the current block is less - * than this specified number and adding a new record to the block will - * exceed the configured block size, then this block will be closed and the - * new record will be written to the next block. + * {@link #blockSize()}. If the percentage of free space in the current block + * is less than this specified number and adding a new record to the block + * will exceed the configured block size, then this block will be closed and + * the new record will be written to the next block. + * * Default is 10. * * @param blockSizeDeviation the deviation to block size allowed @@ -155,55 +363,120 @@ public class BlockBasedTableConfig extends TableFormatConfig { */ public BlockBasedTableConfig setBlockSizeDeviation( final int blockSizeDeviation) { - blockSizeDeviation_ = blockSizeDeviation; + this.blockSizeDeviation = blockSizeDeviation; return this; } /** - * @return the hash table ratio. + * Get the block restart interval. 
+ * + * @return block restart interval */ - public int blockSizeDeviation() { - return blockSizeDeviation_; + public int blockRestartInterval() { + return blockRestartInterval; } /** - * Set block restart interval + * Set the block restart interval. * * @param restartInterval block restart interval. * @return the reference to the current config. */ public BlockBasedTableConfig setBlockRestartInterval( final int restartInterval) { - blockRestartInterval_ = restartInterval; + blockRestartInterval = restartInterval; return this; } /** - * @return block restart interval + * Get the index block restart interval. + * + * @return index block restart interval */ - public int blockRestartInterval() { - return blockRestartInterval_; + public int indexBlockRestartInterval() { + return indexBlockRestartInterval; } /** - * If true, place whole keys in the filter (not just prefixes). - * This must generally be true for gets to be efficient. - * Default: true + * Set the index block restart interval * - * @param wholeKeyFiltering if enable whole key filtering + * @param restartInterval index block restart interval. * @return the reference to the current config. */ - public BlockBasedTableConfig setWholeKeyFiltering( - final boolean wholeKeyFiltering) { - wholeKeyFiltering_ = wholeKeyFiltering; + public BlockBasedTableConfig setIndexBlockRestartInterval( + final int restartInterval) { + indexBlockRestartInterval = restartInterval; return this; } /** - * @return if whole key filtering is enabled + * Get the block size for partitioned metadata. + * + * @return block size for partitioned metadata. */ - public boolean wholeKeyFiltering() { - return wholeKeyFiltering_; + public long metadataBlockSize() { + return metadataBlockSize; + } + + /** + * Set block size for partitioned metadata. + * + * @param metadataBlockSize Partitioned metadata block size. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setMetadataBlockSize( + final long metadataBlockSize) { + this.metadataBlockSize = metadataBlockSize; + return this; + } + + /** + * Indicates if we're using partitioned filters. + * + * @return if we're using partition filters. + */ + public boolean partitionFilters() { + return partitionFilters; + } + + /** + * Use partitioned full filters for each SST file. This option is incompatible + * with block-based filters. + * + * Defaults to false. + * + * @param partitionFilters use partition filters. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setPartitionFilters(final boolean partitionFilters) { + this.partitionFilters = partitionFilters; + return this; + } + + /** + * Determine if delta encoding is being used to compress block keys. + * + * @return true if delta encoding is enabled, false otherwise. + */ + public boolean useDeltaEncoding() { + return useDeltaEncoding; + } + + /** + * Use delta encoding to compress keys in blocks. + * + * NOTE: {@link ReadOptions#pinData()} requires this option to be disabled. + * + * Default: true + * + * @param useDeltaEncoding true to enable delta encoding + * + * @return the reference to the current config. + */ + public BlockBasedTableConfig setUseDeltaEncoding( + final boolean useDeltaEncoding) { + this.useDeltaEncoding = useDeltaEncoding; + return this; } /** @@ -216,176 +489,274 @@ public class BlockBasedTableConfig extends TableFormatConfig { * {@link org.rocksdb.Filter} instance can be re-used in multiple options * instances. 
* - * @param filter {@link org.rocksdb.Filter} Filter Policy java instance. + * @param filterPolicy {@link org.rocksdb.Filter} Filter Policy java instance. * @return the reference to the current config. */ + public BlockBasedTableConfig setFilterPolicy( + final Filter filterPolicy) { + this.filterPolicy = filterPolicy; + return this; + } + + /* + * @deprecated Use {@link #setFilterPolicy(Filter)} + */ + @Deprecated public BlockBasedTableConfig setFilter( final Filter filter) { - filter_ = filter; - return this; + return setFilterPolicy(filter); } /** - * Indicating if we'd put index/filter blocks to the block cache. - If not specified, each "table reader" object will pre-load index/filter - block during table initialization. + * Determine if whole keys as opposed to prefixes are placed in the filter. * - * @return if index and filter blocks should be put in block cache. + * @return if whole key filtering is enabled */ - public boolean cacheIndexAndFilterBlocks() { - return cacheIndexAndFilterBlocks_; + public boolean wholeKeyFiltering() { + return wholeKeyFiltering; } /** - * Indicating if we'd put index/filter blocks to the block cache. - If not specified, each "table reader" object will pre-load index/filter - block during table initialization. + * If true, place whole keys in the filter (not just prefixes). + * This must generally be true for gets to be efficient. + * Default: true * - * @param cacheIndexAndFilterBlocks and filter blocks should be put in block cache. + * @param wholeKeyFiltering if enable whole key filtering * @return the reference to the current config. */ - public BlockBasedTableConfig setCacheIndexAndFilterBlocks( - final boolean cacheIndexAndFilterBlocks) { - cacheIndexAndFilterBlocks_ = cacheIndexAndFilterBlocks; + public BlockBasedTableConfig setWholeKeyFiltering( + final boolean wholeKeyFiltering) { + this.wholeKeyFiltering = wholeKeyFiltering; return this; } /** - * Indicates if index and filter blocks will be treated as high-priority in the block cache. - * See note below about applicability. If not specified, defaults to false. + * Returns true when compression verification is enabled. * - * @return if index and filter blocks will be treated as high-priority. + * See {@link #setVerifyCompression(boolean)}. + * + * @return true if compression verification is enabled. */ - public boolean cacheIndexAndFilterBlocksWithHighPriority() { - return cacheIndexAndFilterBlocksWithHighPriority_; + public boolean verifyCompression() { + return verifyCompression; } /** - * If true, cache index and filter blocks with high priority. If set to true, - * depending on implementation of block cache, index and filter blocks may be - * less likely to be evicted than data blocks. + * Verify that decompressing the compressed block gives back the input. This + * is a verification mode that we use to detect bugs in compression + * algorithms. + * + * @param verifyCompression true to enable compression verification. * - * @param cacheIndexAndFilterBlocksWithHighPriority if index and filter blocks - * will be treated as high-priority. * @return the reference to the current config. 
*/ - public BlockBasedTableConfig setCacheIndexAndFilterBlocksWithHighPriority( - final boolean cacheIndexAndFilterBlocksWithHighPriority) { - cacheIndexAndFilterBlocksWithHighPriority_ = cacheIndexAndFilterBlocksWithHighPriority; + public BlockBasedTableConfig setVerifyCompression( + final boolean verifyCompression) { + this.verifyCompression = verifyCompression; return this; } /** - * Indicating if we'd like to pin L0 index/filter blocks to the block cache. - If not specified, defaults to false. + * Get the Read amplification bytes per-bit. * - * @return if L0 index and filter blocks should be pinned to the block cache. + * See {@link #setReadAmpBytesPerBit(int)}. + * + * @return the bytes per-bit. */ - public boolean pinL0FilterAndIndexBlocksInCache() { - return pinL0FilterAndIndexBlocksInCache_; + public int readAmpBytesPerBit() { + return readAmpBytesPerBit; } /** - * Indicating if we'd like to pin L0 index/filter blocks to the block cache. - If not specified, defaults to false. + * Set the Read amplification bytes per-bit. + * + * If used, For every data block we load into memory, we will create a bitmap + * of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap + * will be used to figure out the percentage we actually read of the blocks. + * + * When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and + * Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the + * read amplification using this formula + * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) + * + * value => memory usage (percentage of loaded blocks memory) + * 1 => 12.50 % + * 2 => 06.25 % + * 4 => 03.12 % + * 8 => 01.56 % + * 16 => 00.78 % + * + * Note: This number must be a power of 2, if not it will be sanitized + * to be the next lowest power of 2, for example a value of 7 will be + * treated as 4, a value of 19 will be treated as 16. + * + * Default: 0 (disabled) + * + * @param readAmpBytesPerBit the bytes per-bit * - * @param pinL0FilterAndIndexBlocksInCache pin blocks in block cache * @return the reference to the current config. */ - public BlockBasedTableConfig setPinL0FilterAndIndexBlocksInCache( - final boolean pinL0FilterAndIndexBlocksInCache) { - pinL0FilterAndIndexBlocksInCache_ = pinL0FilterAndIndexBlocksInCache; + public BlockBasedTableConfig setReadAmpBytesPerBit(final int readAmpBytesPerBit) { + this.readAmpBytesPerBit = readAmpBytesPerBit; return this; } /** - * Indicating if we're using partitioned filters. Defaults to false. + * Get the format version. + * See {@link #setFormatVersion(int)}. * - * @return if we're using partition filters. + * @return the currently configured format version. */ - public boolean partitionFilters() { - return partitionFilters_; + public int formatVersion() { + return formatVersion; } /** - * Use partitioned full filters for each SST file. This option is incompatible with - * block-based filters. + *

<p>We currently have five versions:</p> + * + * <ul> + * <li><strong>0</strong> - This version is currently written + * out by all RocksDB's versions by default. Can be read by really old + * RocksDB's. Doesn't support changing checksum (default is CRC32).</li> + * <li><strong>1</strong> - Can be read by RocksDB's versions since 3.0. + * Supports non-default checksum, like xxHash. It is written by RocksDB when + * BlockBasedTableOptions::checksum is something other than kCRC32c. (version + * 0 is silently upconverted)</li> + * <li><strong>2</strong> - Can be read by RocksDB's versions since 3.10. + * Changes the way we encode compressed blocks with LZ4, BZip2 and Zlib + * compression. If you don't plan to run RocksDB before version 3.10, + * you should probably use this.</li> + * <li><strong>3</strong> - Can be read by RocksDB's versions since 5.15. Changes the way we + * encode the keys in index blocks. If you don't plan to run RocksDB before + * version 5.15, you should probably use this. + * This option only affects newly written tables. When reading existing + * tables, the information about version is read from the footer.</li> + * <li><strong>4</strong> - Can be read by RocksDB's versions since 5.16. Changes the way we + * encode the values in index blocks. If you don't plan to run RocksDB before + * version 5.16 and you are using index_block_restart_interval > 1, you should + * probably use this as it would reduce the index size.</li> + * </ul> + * + * <p>This option only affects newly written tables. When reading existing + * tables, the information about version is read from the footer.</p>
+ * + * @param formatVersion integer representing the version to be used. + * + * @return the reference to the current option. */ - public BlockBasedTableConfig setPartitionFilters(final boolean partitionFilters) { - partitionFilters_ = partitionFilters; + public BlockBasedTableConfig setFormatVersion( + final int formatVersion) { + assert(formatVersion >= 0 && formatVersion <= 4); + this.formatVersion = formatVersion; return this; } /** - * @return block size for partitioned metadata. + * Determine if index compression is enabled. + * + * See {@link #setEnableIndexCompression(boolean)}. + * + * @return true if index compression is enabled, false otherwise */ - public long metadataBlockSize() { - return metadataBlockSize_; + public boolean enableIndexCompression() { + return enableIndexCompression; } /** - * Set block size for partitioned metadata. + * Store index blocks on disk in compressed format. * - * @param metadataBlockSize Partitioned metadata block size. - * @return the reference to the current config. + * Changing this option to false will avoid the overhead of decompression + * if index blocks are evicted and read back. + * + * @param enableIndexCompression true to enable index compression, + * false to disable + * + * @return the reference to the current option. */ - public BlockBasedTableConfig setMetadataBlockSize( - final long metadataBlockSize) { - metadataBlockSize_ = metadataBlockSize; + public BlockBasedTableConfig setEnableIndexCompression( + final boolean enableIndexCompression) { + this.enableIndexCompression = enableIndexCompression; return this; } /** - * Indicates if top-level index and filter blocks should be pinned. + * Determines whether data blocks are aligned on the lesser of page size + * and block size. * - * @return if top-level index and filter blocks should be pinned. + * @return true if data blocks are aligned on the lesser of page size + * and block size. */ - public boolean pinTopLevelIndexAndFilter() { - return pinTopLevelIndexAndFilter_; + public boolean blockAlign() { + return blockAlign; } /** - * If cacheIndexAndFilterBlocks is true and the below is true, then - * the top-level index of partitioned filter and index blocks are stored in - * the cache, but a reference is held in the "table reader" object so the - * blocks are pinned and only evicted from cache when the table reader is - * freed. This is not limited to l0 in LSM tree. + * Set whether data blocks should be aligned on the lesser of page size + * and block size. * - * @param pinTopLevelIndexAndFilter if top-level index and filter blocks should be pinned. - * @return the reference to the current config. + * @param blockAlign true to align data blocks on the lesser of page size + * and block size. + * + * @return the reference to the current option. */ - public BlockBasedTableConfig setPinTopLevelIndexAndFilter(final boolean pinTopLevelIndexAndFilter) { - pinTopLevelIndexAndFilter_ = pinTopLevelIndexAndFilter; + public BlockBasedTableConfig setBlockAlign(final boolean blockAlign) { + this.blockAlign = blockAlign; return this; } + /** - * Influence the behavior when kHashSearch is used. - if false, stores a precise prefix to block range mapping - if true, does not store prefix and allows prefix hash collision - (less memory consumption) + * Get the size of the cache in bytes that will be used by RocksDB. * - * @return if hash collisions should be allowed. 
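The format versions and the read-amplification bitmap documented above compose through the new fluent setters. A minimal sketch, assuming the setters from this hunk plus the pre-existing RocksJava Options#setTableFormatConfig, Statistics and LRUCache APIs; all sizes and values are illustrative:

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.LRUCache;
import org.rocksdb.Options;
import org.rocksdb.Statistics;
import org.rocksdb.TickerType;

public class TableConfigSketch {
  public static void main(final String[] args) {
    final Statistics stats = new Statistics();

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        .setFormatVersion(4)              // readable by RocksDB 5.16 and later
        .setEnableIndexCompression(true)  // keep index blocks compressed on disk
        .setBlockAlign(false)
        .setWholeKeyFiltering(true)
        .setReadAmpBytesPerBit(8)         // bitmap overhead ~1.56% of loaded block memory
        // preferred over the deprecated setBlockCacheSize(long)
        .setBlockCache(new LRUCache(64 * 1024 * 1024));

    final Options options = new Options()
        .setCreateIfMissing(true)
        .setStatistics(stats)
        .setTableFormatConfig(tableConfig);

    // With readAmpBytesPerBit > 0, read amplification can be estimated from
    // the two tickers named in the javadoc above:
    final double readAmp =
        (double) stats.getTickerCount(TickerType.READ_AMP_TOTAL_READ_BYTES)
            / Math.max(1, stats.getTickerCount(TickerType.READ_AMP_ESTIMATE_USEFUL_BYTES));
    System.out.println("estimated read amplification: " + readAmp);
  }
}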
+ * @return block cache size in bytes */ - public boolean hashIndexAllowCollision() { - return hashIndexAllowCollision_; + @Deprecated + public long blockCacheSize() { + return blockCacheSize; } /** - * Influence the behavior when kHashSearch is used. - if false, stores a precise prefix to block range mapping - if true, does not store prefix and allows prefix hash collision - (less memory consumption) + * Set the size of the cache in bytes that will be used by RocksDB. + * If cacheSize is non-positive, then cache will not be used. + * DEFAULT: 8M * - * @param hashIndexAllowCollision points out if hash collisions should be allowed. + * @param blockCacheSize block cache size in bytes * @return the reference to the current config. + * + * @deprecated Use {@link #setBlockCache(Cache)}. */ - public BlockBasedTableConfig setHashIndexAllowCollision( - final boolean hashIndexAllowCollision) { - hashIndexAllowCollision_ = hashIndexAllowCollision; + @Deprecated + public BlockBasedTableConfig setBlockCacheSize(final long blockCacheSize) { + this.blockCacheSize = blockCacheSize; + return this; + } + + /** + * Returns the number of shard bits used in the block cache. + * The resulting number of shards would be 2 ^ (returned value). + * Any negative number means use default settings. + * + * @return the number of shard bits used in the block cache. + */ + @Deprecated + public int cacheNumShardBits() { + return blockCacheNumShardBits; + } + + /** + * Controls the number of shards for the block cache. + * This is applied only if cacheSize is set to non-negative. + * + * @param blockCacheNumShardBits the number of shard bits. The resulting + * number of shards would be 2 ^ numShardBits. Any negative + * number means use default settings. + * @return the reference to the current option. + * + * @deprecated Use {@link #setBlockCache(Cache)}. + */ + @Deprecated + public BlockBasedTableConfig setCacheNumShardBits( + final int blockCacheNumShardBits) { + this.blockCacheNumShardBits = blockCacheNumShardBits; + return this; + } @@ -395,8 +766,9 @@ public class BlockBasedTableConfig extends TableFormatConfig { * * @return size of compressed block cache. */ + @Deprecated public long blockCacheCompressedSize() { - return blockCacheCompressedSize_; + return blockCacheCompressedSize; } /** @@ -405,10 +777,13 @@ public class BlockBasedTableConfig extends TableFormatConfig { * * @param blockCacheCompressedSize of compressed block cache. * @return the reference to the current config. + * + * @deprecated Use {@link #setBlockCacheCompressed(Cache)}. */ + @Deprecated public BlockBasedTableConfig setBlockCacheCompressedSize( final long blockCacheCompressedSize) { - blockCacheCompressedSize_ = blockCacheCompressedSize; + this.blockCacheCompressedSize = blockCacheCompressedSize; return this; } @@ -420,8 +795,9 @@ public class BlockBasedTableConfig extends TableFormatConfig { * number of shards would be 2 ^ numShardBits. Any negative * number means use default settings. */ + @Deprecated public int blockCacheCompressedNumShardBits() { - return blockCacheCompressedNumShardBits_; + return blockCacheCompressedNumShardBits; } /** @@ -432,141 +808,166 @@ public class BlockBasedTableConfig extends TableFormatConfig { * number of shards would be 2 ^ numShardBits. Any negative * number means use default settings." * @return the reference to the current option. + * + * @deprecated Use {@link #setBlockCacheCompressed(Cache)}.
*/ + @Deprecated public BlockBasedTableConfig setBlockCacheCompressedNumShardBits( final int blockCacheCompressedNumShardBits) { - blockCacheCompressedNumShardBits_ = blockCacheCompressedNumShardBits; - return this; - } - - /** - * Sets the checksum type to be used with this table. - * - * @param checksumType {@link org.rocksdb.ChecksumType} value. - * @return the reference to the current option. - */ - public BlockBasedTableConfig setChecksumType( - final ChecksumType checksumType) { - checksumType_ = checksumType; + this.blockCacheCompressedNumShardBits = blockCacheCompressedNumShardBits; return this; } /** + * Influence the behavior when kHashSearch is used. + * if false, stores a precise prefix to block range mapping + * if true, does not store prefix and allows prefix hash collision + * (less memory consumption) * - * @return the currently set checksum type - */ - public ChecksumType checksumType() { - return checksumType_; - } - - /** - * Sets the index type to used with this table. + * @return if hash collisions should be allowed. * - * @param indexType {@link org.rocksdb.IndexType} value - * @return the reference to the current option. + * @deprecated This option is now deprecated. No matter what value it + * is set to, it will behave as + * if {@link #hashIndexAllowCollision()} == true. */ - public BlockBasedTableConfig setIndexType( - final IndexType indexType) { - indexType_ = indexType; - return this; + @Deprecated + public boolean hashIndexAllowCollision() { + return true; } /** + * Influence the behavior when kHashSearch is used. + * if false, stores a precise prefix to block range mapping + * if true, does not store prefix and allows prefix hash collision + * (less memory consumption) * - * @return the currently set index type - */ - public IndexType indexType() { - return indexType_; - } - - /** - *

We currently have three versions:

+ * @param hashIndexAllowCollision points out if hash collisions should be allowed. * - *
- *
- *   • 0 - This version is currently written out by all RocksDB's versions by
- *     default. Can be read by really old RocksDB's. Doesn't support changing
- *     checksum (default is CRC32).
- *   • 1 - Can be read by RocksDB's versions since 3.0. Supports non-default
- *     checksum, like xxHash. It is written by RocksDB when
- *     BlockBasedTableOptions::checksum is something other than kCRC32c.
- *     (version 0 is silently upconverted)
- *   • 2 - Can be read by RocksDB's versions since 3.10. Changes the way we
- *     encode compressed blocks with LZ4, BZip2 and Zlib compression. If you
- *     don't plan to run RocksDB before version 3.10, you should probably use
- *     this.
- *

This option only affects newly written tables. When reading existing - * tables, the information about version is read from the footer.

+ * @return the reference to the current config. * - * @param formatVersion integer representing the version to be used. - * @return the reference to the current option. + * @deprecated This option is now deprecated. No matter what value it + * is set to, it will behave as + * if {@link #hashIndexAllowCollision()} == true. */ - public BlockBasedTableConfig setFormatVersion( - final int formatVersion) { - assert(formatVersion >= 0 && formatVersion <= 2); - formatVersion_ = formatVersion; + @Deprecated + public BlockBasedTableConfig setHashIndexAllowCollision( + final boolean hashIndexAllowCollision) { + // no-op return this; } - /** - * - * @return the currently configured format version. - * See also: {@link #setFormatVersion(int)}. - */ - public int formatVersion() { - return formatVersion_; - } - + @Override protected long newTableFactoryHandle() { + final long filterPolicyHandle; + if (filterPolicy != null) { + filterPolicyHandle = filterPolicy.nativeHandle_; + } else { + filterPolicyHandle = 0; + } + final long blockCacheHandle; + if (blockCache != null) { + blockCacheHandle = blockCache.nativeHandle_; + } else { + blockCacheHandle = 0; + } - @Override protected long newTableFactoryHandle() { - long filterHandle = 0; - if (filter_ != null) { - filterHandle = filter_.nativeHandle_; + final long persistentCacheHandle; + if (persistentCache != null) { + persistentCacheHandle = persistentCache.nativeHandle_; + } else { + persistentCacheHandle = 0; } - long blockCacheHandle = 0; - if (blockCache_ != null) { - blockCacheHandle = blockCache_.nativeHandle_; + final long blockCacheCompressedHandle; + if (blockCacheCompressed != null) { + blockCacheCompressedHandle = blockCacheCompressed.nativeHandle_; + } else { + blockCacheCompressedHandle = 0; } - return newTableFactoryHandle(noBlockCache_, blockCacheSize_, blockCacheNumShardBits_, - blockCacheHandle, blockSize_, blockSizeDeviation_, blockRestartInterval_, - wholeKeyFiltering_, filterHandle, cacheIndexAndFilterBlocks_, - cacheIndexAndFilterBlocksWithHighPriority_, pinL0FilterAndIndexBlocksInCache_, - partitionFilters_, metadataBlockSize_, pinTopLevelIndexAndFilter_, - hashIndexAllowCollision_, blockCacheCompressedSize_, blockCacheCompressedNumShardBits_, - checksumType_.getValue(), indexType_.getValue(), formatVersion_); - } - - private native long newTableFactoryHandle(boolean noBlockCache, long blockCacheSize, - int blockCacheNumShardBits, long blockCacheHandle, long blockSize, int blockSizeDeviation, - int blockRestartInterval, boolean wholeKeyFiltering, long filterPolicyHandle, - boolean cacheIndexAndFilterBlocks, boolean cacheIndexAndFilterBlocksWithHighPriority, - boolean pinL0FilterAndIndexBlocksInCache, boolean partitionFilters, long metadataBlockSize, - boolean pinTopLevelIndexAndFilter, boolean hashIndexAllowCollision, - long blockCacheCompressedSize, int blockCacheCompressedNumShardBits, - byte checkSumType, byte indexType, int formatVersion); - - private boolean cacheIndexAndFilterBlocks_; - private boolean cacheIndexAndFilterBlocksWithHighPriority_; - private boolean pinL0FilterAndIndexBlocksInCache_; - private boolean partitionFilters_; - private long metadataBlockSize_; - private boolean pinTopLevelIndexAndFilter_; - private IndexType indexType_; - private boolean hashIndexAllowCollision_; - private ChecksumType checksumType_; - private boolean noBlockCache_; - private long blockSize_; - private long blockCacheSize_; - private int blockCacheNumShardBits_; - private Cache blockCache_; - private long blockCacheCompressedSize_; - 
private int blockCacheCompressedNumShardBits_; - private int blockSizeDeviation_; - private int blockRestartInterval_; - private Filter filter_; - private boolean wholeKeyFiltering_; - private int formatVersion_; + return newTableFactoryHandle(cacheIndexAndFilterBlocks, + cacheIndexAndFilterBlocksWithHighPriority, + pinL0FilterAndIndexBlocksInCache, pinTopLevelIndexAndFilter, + indexType.getValue(), dataBlockIndexType.getValue(), + dataBlockHashTableUtilRatio, checksumType.getValue(), noBlockCache, + blockCacheHandle, persistentCacheHandle, blockCacheCompressedHandle, + blockSize, blockSizeDeviation, blockRestartInterval, + indexBlockRestartInterval, metadataBlockSize, partitionFilters, + useDeltaEncoding, filterPolicyHandle, wholeKeyFiltering, + verifyCompression, readAmpBytesPerBit, formatVersion, + enableIndexCompression, blockAlign, + blockCacheSize, blockCacheNumShardBits, + blockCacheCompressedSize, blockCacheCompressedNumShardBits); + } + + private native long newTableFactoryHandle( + final boolean cacheIndexAndFilterBlocks, + final boolean cacheIndexAndFilterBlocksWithHighPriority, + final boolean pinL0FilterAndIndexBlocksInCache, + final boolean pinTopLevelIndexAndFilter, + final byte indexTypeValue, + final byte dataBlockIndexTypeValue, + final double dataBlockHashTableUtilRatio, + final byte checksumTypeValue, + final boolean noBlockCache, + final long blockCacheHandle, + final long persistentCacheHandle, + final long blockCacheCompressedHandle, + final long blockSize, + final int blockSizeDeviation, + final int blockRestartInterval, + final int indexBlockRestartInterval, + final long metadataBlockSize, + final boolean partitionFilters, + final boolean useDeltaEncoding, + final long filterPolicyHandle, + final boolean wholeKeyFiltering, + final boolean verifyCompression, + final int readAmpBytesPerBit, + final int formatVersion, + final boolean enableIndexCompression, + final boolean blockAlign, + + @Deprecated final long blockCacheSize, + @Deprecated final int blockCacheNumShardBits, + + @Deprecated final long blockCacheCompressedSize, + @Deprecated final int blockCacheCompressedNumShardBits + ); + + //TODO(AR) flushBlockPolicyFactory + private boolean cacheIndexAndFilterBlocks; + private boolean cacheIndexAndFilterBlocksWithHighPriority; + private boolean pinL0FilterAndIndexBlocksInCache; + private boolean pinTopLevelIndexAndFilter; + private IndexType indexType; + private DataBlockIndexType dataBlockIndexType; + private double dataBlockHashTableUtilRatio; + private ChecksumType checksumType; + private boolean noBlockCache; + private Cache blockCache; + private PersistentCache persistentCache; + private Cache blockCacheCompressed; + private long blockSize; + private int blockSizeDeviation; + private int blockRestartInterval; + private int indexBlockRestartInterval; + private long metadataBlockSize; + private boolean partitionFilters; + private boolean useDeltaEncoding; + private Filter filterPolicy; + private boolean wholeKeyFiltering; + private boolean verifyCompression; + private int readAmpBytesPerBit; + private int formatVersion; + private boolean enableIndexCompression; + private boolean blockAlign; + + // NOTE: ONLY used if blockCache == null + @Deprecated private long blockCacheSize; + @Deprecated private int blockCacheNumShardBits; + + // NOTE: ONLY used if blockCacheCompressed == null + @Deprecated private long blockCacheCompressedSize; + @Deprecated private int blockCacheCompressedNumShardBits; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java 
b/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java new file mode 100644 index 000000000..191904017 --- /dev/null +++ b/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java @@ -0,0 +1,70 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Arrays; +import java.util.List; + +/** + * The metadata that describes a column family. + */ +public class ColumnFamilyMetaData { + private final long size; + private final long fileCount; + private final byte[] name; + private final LevelMetaData[] levels; + + /** + * Called from JNI C++ + */ + private ColumnFamilyMetaData( + final long size, + final long fileCount, + final byte[] name, + final LevelMetaData[] levels) { + this.size = size; + this.fileCount = fileCount; + this.name = name; + this.levels = levels; + } + + /** + * The size of this column family in bytes, which is equal to the sum of + * the file size of its {@link #levels()}. + * + * @return the size of this column family + */ + public long size() { + return size; + } + + /** + * The number of files in this column family. + * + * @return the number of files + */ + public long fileCount() { + return fileCount; + } + + /** + * The name of the column family. + * + * @return the name + */ + public byte[] name() { + return name; + } + + /** + * The metadata of all levels in this column family. + * + * @return the levels metadata + */ + public List levels() { + return Arrays.asList(levels); + } +} diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index df52a74cf..e57752463 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -50,9 +50,19 @@ public class ColumnFamilyOptions extends RocksObject this.compactionFilterFactory_ = other.compactionFilterFactory_; this.compactionOptionsUniversal_ = other.compactionOptionsUniversal_; this.compactionOptionsFIFO_ = other.compactionOptionsFIFO_; + this.bottommostCompressionOptions_ = other.bottommostCompressionOptions_; this.compressionOptions_ = other.compressionOptions_; } + /** + * Constructor from Options + * + * @param options The options. + */ + public ColumnFamilyOptions(final Options options) { + super(newColumnFamilyOptionsFromOptions(options.nativeHandle_)); + } + /** *
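ColumnFamilyMetaData is a plain read-only holder filled in from JNI, and the new ColumnFamilyOptions(Options) constructor copies the column-family subset of a full Options. A short sketch under those assumptions; the describe helper and its output format are invented for illustration:

import java.nio.charset.StandardCharsets;
import org.rocksdb.ColumnFamilyMetaData;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.Options;

public class MetaDataSketch {
  // Prints the top-level fields of the metadata object; all accessors are
  // simple getters over values filled in by the JNI-side constructor.
  static void describe(final ColumnFamilyMetaData meta) {
    System.out.println(new String(meta.name(), StandardCharsets.UTF_8)
        + ": " + meta.fileCount() + " files, " + meta.size() + " bytes, "
        + meta.levels().size() + " levels");
  }

  // New in this patch: derives a ColumnFamilyOptions holding only the
  // column-family-relevant subset of a full Options instance.
  static ColumnFamilyOptions cfOptionsFrom(final Options options) {
    return new ColumnFamilyOptions(options);
  }
}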

Constructor to be used by * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}, @@ -318,6 +328,20 @@ public class ColumnFamilyOptions extends RocksObject bottommostCompressionType(nativeHandle_)); } + @Override + public ColumnFamilyOptions setBottommostCompressionOptions( + final CompressionOptions bottommostCompressionOptions) { + setBottommostCompressionOptions(nativeHandle_, + bottommostCompressionOptions.nativeHandle_); + this.bottommostCompressionOptions_ = bottommostCompressionOptions; + return this; + } + + @Override + public CompressionOptions bottommostCompressionOptions() { + return this.bottommostCompressionOptions_; + } + @Override public ColumnFamilyOptions setCompressionOptions( final CompressionOptions compressionOptions) { @@ -482,7 +506,7 @@ public class ColumnFamilyOptions extends RocksObject @Override public CompactionStyle compactionStyle() { - return CompactionStyle.values()[compactionStyle(nativeHandle_)]; + return CompactionStyle.fromValue(compactionStyle(nativeHandle_)); } @Override @@ -751,6 +775,17 @@ public class ColumnFamilyOptions extends RocksObject return reportBgIoStats(nativeHandle_); } + @Override + public ColumnFamilyOptions setTtl(final long ttl) { + setTtl(nativeHandle_, ttl); + return this; + } + + @Override + public long ttl() { + return ttl(nativeHandle_); + } + @Override public ColumnFamilyOptions setCompactionOptionsUniversal( final CompactionOptionsUniversal compactionOptionsUniversal) { @@ -793,7 +828,9 @@ public class ColumnFamilyOptions extends RocksObject String optString); private static native long newColumnFamilyOptions(); - private static native long copyColumnFamilyOptions(long handle); + private static native long copyColumnFamilyOptions(final long handle); + private static native long newColumnFamilyOptionsFromOptions( + final long optionsHandle); @Override protected final native void disposeInternal(final long handle); private native void optimizeForSmallDb(final long handle); @@ -829,6 +866,8 @@ public class ColumnFamilyOptions extends RocksObject private native void setBottommostCompressionType(long handle, byte bottommostCompressionType); private native byte bottommostCompressionType(long handle); + private native void setBottommostCompressionOptions(final long handle, + final long bottommostCompressionOptionsHandle); private native void setCompressionOptions(long handle, long compressionOptionsHandle); private native void useFixedLengthPrefixExtractor( @@ -936,6 +975,8 @@ public class ColumnFamilyOptions extends RocksObject private native void setReportBgIoStats(final long handle, final boolean reportBgIoStats); private native boolean reportBgIoStats(final long handle); + private native void setTtl(final long handle, final long ttl); + private native long ttl(final long handle); private native void setCompactionOptionsUniversal(final long handle, final long compactionOptionsUniversalHandle); private native void setCompactionOptionsFIFO(final long handle, @@ -954,6 +995,7 @@ public class ColumnFamilyOptions extends RocksObject compactionFilterFactory_; private CompactionOptionsUniversal compactionOptionsUniversal_; private CompactionOptionsFIFO compactionOptionsFIFO_; + private CompressionOptions bottommostCompressionOptions_; private CompressionOptions compressionOptions_; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java index 7eee754f4..f88a21af2 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java 
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -399,6 +399,28 @@ public interface ColumnFamilyOptionsInterface */ CompressionType bottommostCompressionType(); + /** + * Set the options for compression algorithms used by + * {@link #bottommostCompressionType()} if it is enabled. + * + * To enable it, please see the definition of + * {@link CompressionOptions}. + * + * @param compressionOptions the bottom most compression options. + * + * @return the reference of the current options. + */ + T setBottommostCompressionOptions( + final CompressionOptions compressionOptions); + + /** + * Get the bottom most compression options. + * + * See {@link #setBottommostCompressionOptions(CompressionOptions)}. + * + * @return the bottom most compression options. + */ + CompressionOptions bottommostCompressionOptions(); /** * Set the different options for compression algorithms diff --git a/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/java/src/main/java/org/rocksdb/CompactRangeOptions.java index e8c892110..c07bd96a5 100644 --- a/java/src/main/java/org/rocksdb/CompactRangeOptions.java +++ b/java/src/main/java/org/rocksdb/CompactRangeOptions.java @@ -88,26 +88,6 @@ public class CompactRangeOptions extends RocksObject { return this; } - - /** - * Returns the policy for compacting the bottommost level - * @return The BottommostLevelCompaction policy - */ - public BottommostLevelCompaction bottommostLevelCompaction() { - return BottommostLevelCompaction.fromRocksId(bottommostLevelCompaction(nativeHandle_)); - } - - /** - * Sets the policy for compacting the bottommost level - * - * @param bottommostLevelCompaction The policy for compacting the bottommost level - * @return This CompactRangeOptions - */ - public CompactRangeOptions setBottommostLevelCompaction(final BottommostLevelCompaction bottommostLevelCompaction) { - setBottommostLevelCompaction(nativeHandle_, bottommostLevelCompaction.getValue()); - return this; - } - /** * Returns whether compacted files will be moved to the minimum level capable of holding the data or given level * (specified non-negative target_level). @@ -170,6 +150,25 @@ public class CompactRangeOptions extends RocksObject { return this; } + /** + * Returns the policy for compacting the bottommost level + * @return The BottommostLevelCompaction policy + */ + public BottommostLevelCompaction bottommostLevelCompaction() { + return BottommostLevelCompaction.fromRocksId(bottommostLevelCompaction(nativeHandle_)); + } + + /** + * Sets the policy for compacting the bottommost level + * + * @param bottommostLevelCompaction The policy for compacting the bottommost level + * @return This CompactRangeOptions + */ + public CompactRangeOptions setBottommostLevelCompaction(final BottommostLevelCompaction bottommostLevelCompaction) { + setBottommostLevelCompaction(nativeHandle_, bottommostLevelCompaction.getValue()); + return this; + } + /** * If true, compaction will execute immediately even if doing so would cause the DB to * enter write stall mode. Otherwise, it'll sleep until load is low enough. 
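For reference, a hedged sketch of how the CompactRangeOptions accessors reordered above are typically driven; it assumes the existing RocksDB#compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions) overload, and the level targets are illustrative:

import org.rocksdb.CompactRangeOptions;
import org.rocksdb.CompactRangeOptions.BottommostLevelCompaction;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CompactRangeSketch {
  public static void compactAll(final RocksDB db) throws RocksDBException {
    try (final CompactRangeOptions cro = new CompactRangeOptions()
             .setBottommostLevelCompaction(BottommostLevelCompaction.kForce)
             .setChangeLevel(true)       // move output to the lowest level that fits
             .setTargetLevel(1)
             .setAllowWriteStall(false)  // sleep until load drops rather than stalling writes
             .setMaxSubcompactions(4)) {
      // null begin/end keys compact the whole key space of the column family
      db.compactRange(db.getDefaultColumnFamily(), null, null, cro);
    }
  }
}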
@@ -212,22 +211,27 @@ public class CompactRangeOptions extends RocksObject { } private native static long newCompactRangeOptions(); + @Override protected final native void disposeInternal(final long handle); + private native boolean exclusiveManualCompaction(final long handle); - private native void setExclusiveManualCompaction(final long handle, final boolean exclusive_manual_compaction); - private native int bottommostLevelCompaction(final long handle); - private native void setBottommostLevelCompaction(final long handle, final int bottommostLevelCompaction); + private native void setExclusiveManualCompaction(final long handle, + final boolean exclusive_manual_compaction); private native boolean changeLevel(final long handle); - private native void setChangeLevel(final long handle, final boolean changeLevel); + private native void setChangeLevel(final long handle, + final boolean changeLevel); private native int targetLevel(final long handle); - private native void setTargetLevel(final long handle, final int targetLevel); + private native void setTargetLevel(final long handle, + final int targetLevel); private native int targetPathId(final long handle); - private native void setTargetPathId(final long handle, final int /* uint32_t */ targetPathId); + private native void setTargetPathId(final long handle, + final int targetPathId); + private native int bottommostLevelCompaction(final long handle); + private native void setBottommostLevelCompaction(final long handle, + final int bottommostLevelCompaction); private native boolean allowWriteStall(final long handle); - private native void setAllowWriteStall(final long handle, final boolean allowWriteStall); - private native void setMaxSubcompactions(final long handle, final int /* uint32_t */ maxSubcompactions); + private native void setAllowWriteStall(final long handle, + final boolean allowWriteStall); + private native void setMaxSubcompactions(final long handle, + final int maxSubcompactions); private native int maxSubcompactions(final long handle); - - @Override - protected final native void disposeInternal(final long handle); - } diff --git a/java/src/main/java/org/rocksdb/CompactionJobInfo.java b/java/src/main/java/org/rocksdb/CompactionJobInfo.java new file mode 100644 index 000000000..8b59edc91 --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompactionJobInfo.java @@ -0,0 +1,159 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +public class CompactionJobInfo extends RocksObject { + + public CompactionJobInfo() { + super(newCompactionJobInfo()); + } + + /** + * Private as called from JNI C++ + */ + private CompactionJobInfo(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Get the name of the column family where the compaction happened. + * + * @return the name of the column family + */ + public byte[] columnFamilyName() { + return columnFamilyName(nativeHandle_); + } + + /** + * Get the status indicating whether the compaction was successful or not. + * + * @return the status + */ + public Status status() { + return status(nativeHandle_); + } + + /** + * Get the id of the thread that completed this compaction job. 
+ * + * @return the id of the thread + */ + public long threadId() { + return threadId(nativeHandle_); + } + + /** + * Get the job id, which is unique in the same thread. + * + * @return the job id + */ + public int jobId() { + return jobId(nativeHandle_); + } + + /** + * Get the smallest input level of the compaction. + * + * @return the input level + */ + public int baseInputLevel() { + return baseInputLevel(nativeHandle_); + } + + /** + * Get the output level of the compaction. + * + * @return the output level + */ + public int outputLevel() { + return outputLevel(nativeHandle_); + } + + /** + * Get the names of the compaction input files. + * + * @return the names of the input files. + */ + public List<String> inputFiles() { + return Arrays.asList(inputFiles(nativeHandle_)); + } + + /** + * Get the names of the compaction output files. + * + * @return the names of the output files. + */ + public List<String> outputFiles() { + return Arrays.asList(outputFiles(nativeHandle_)); + } + + /** + * Get the table properties for the input and output tables. + * + * The map is keyed by values from {@link #inputFiles()} and + * {@link #outputFiles()}. + * + * @return the table properties + */ + public Map<String, TableProperties> tableProperties() { + return tableProperties(nativeHandle_); + } + + /** + * Get the reason for running the compaction. + * + * @return the reason. + */ + public CompactionReason compactionReason() { + return CompactionReason.fromValue(compactionReason(nativeHandle_)); + } + + // + /** + * Get the compression algorithm used for output files. + * + * @return the compression algorithm + */ + public CompressionType compression() { + return CompressionType.getCompressionType(compression(nativeHandle_)); + } + + /** + * Get detailed information about this compaction. + * + * @return the detailed information, or null if not available. + */ + public /* @Nullable */ CompactionJobStats stats() { + final long statsHandle = stats(nativeHandle_); + if (statsHandle == 0) { + return null; + } + + return new CompactionJobStats(statsHandle); + } + + + private static native long newCompactionJobInfo(); + @Override protected native void disposeInternal(final long handle); + + private static native byte[] columnFamilyName(final long handle); + private static native Status status(final long handle); + private static native long threadId(final long handle); + private static native int jobId(final long handle); + private static native int baseInputLevel(final long handle); + private static native int outputLevel(final long handle); + private static native String[] inputFiles(final long handle); + private static native String[] outputFiles(final long handle); + private static native Map<String, TableProperties> tableProperties( + final long handle); + private static native byte compactionReason(final long handle); + private static native byte compression(final long handle); + private static native long stats(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/CompactionJobStats.java b/java/src/main/java/org/rocksdb/CompactionJobStats.java new file mode 100644 index 000000000..3d53b5565 --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompactionJobStats.java @@ -0,0 +1,295 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +public class CompactionJobStats extends RocksObject { + + public CompactionJobStats() { + super(newCompactionJobStats()); + } + + /** + * Private as called from JNI C++ + */ + CompactionJobStats(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Reset the stats. + */ + public void reset() { + reset(nativeHandle_); + } + + /** + * Aggregate the CompactionJobStats from another instance with this one. + * + * @param compactionJobStats another instance of stats. + */ + public void add(final CompactionJobStats compactionJobStats) { + add(nativeHandle_, compactionJobStats.nativeHandle_); + } + + /** + * Get the elapsed time in microseconds of this compaction. + * + * @return the elapsed time in microseconds of this compaction. + */ + public long elapsedMicros() { + return elapsedMicros(nativeHandle_); + } + + /** + * Get the number of compaction input records. + * + * @return the number of compaction input records. + */ + public long numInputRecords() { + return numInputRecords(nativeHandle_); + } + + /** + * Get the number of compaction input files. + * + * @return the number of compaction input files. + */ + public long numInputFiles() { + return numInputFiles(nativeHandle_); + } + + /** + * Get the number of compaction input files at the output level. + * + * @return the number of compaction input files at the output level. + */ + public long numInputFilesAtOutputLevel() { + return numInputFilesAtOutputLevel(nativeHandle_); + } + + /** + * Get the number of compaction output records. + * + * @return the number of compaction output records. + */ + public long numOutputRecords() { + return numOutputRecords(nativeHandle_); + } + + /** + * Get the number of compaction output files. + * + * @return the number of compaction output files. + */ + public long numOutputFiles() { + return numOutputFiles(nativeHandle_); + } + + /** + * Determine if the compaction is a manual compaction. + * + * @return true if the compaction is a manual compaction, false otherwise. + */ + public boolean isManualCompaction() { + return isManualCompaction(nativeHandle_); + } + + /** + * Get the size of the compaction input in bytes. + * + * @return the size of the compaction input in bytes. + */ + public long totalInputBytes() { + return totalInputBytes(nativeHandle_); + } + + /** + * Get the size of the compaction output in bytes. + * + * @return the size of the compaction output in bytes. + */ + public long totalOutputBytes() { + return totalOutputBytes(nativeHandle_); + } + + /** + * Get the number of records being replaced by a newer record associated + * with the same key. + * + * This could be a new value or a deletion entry for that key so this field + * sums up all updated and deleted keys. + * + * @return the number of records being replaced by a newer record associated + * with the same key. + */ + public long numRecordsReplaced() { + return numRecordsReplaced(nativeHandle_); + } + + /** + * Get the sum of the uncompressed input keys in bytes. + * + * @return the sum of the uncompressed input keys in bytes. + */ + public long totalInputRawKeyBytes() { + return totalInputRawKeyBytes(nativeHandle_); + } + + /** + * Get the sum of the uncompressed input values in bytes. + * + * @return the sum of the uncompressed input values in bytes. + */ + public long totalInputRawValueBytes() { + return totalInputRawValueBytes(nativeHandle_); + } + + /** + * Get the number of deletion entries before compaction. + * + * Deletion entries can disappear after compaction because they expired.
+ * + * @return the number of deletion entries before compaction. + */ + public long numInputDeletionRecords() { + return numInputDeletionRecords(nativeHandle_); + } + + /** + * Get the number of deletion records that were found obsolete and discarded + * because it is not possible to delete any more keys with this entry. + * (i.e. all possible deletions resulting from it have been completed) + * + * @return the number of deletion records that were found obsolete and + * discarded. + */ + public long numExpiredDeletionRecords() { + return numExpiredDeletionRecords(nativeHandle_); + } + + /** + * Get the number of corrupt keys (ParseInternalKey returned false when + * applied to the key) encountered and written out. + * + * @return the number of corrupt keys. + */ + public long numCorruptKeys() { + return numCorruptKeys(nativeHandle_); + } + + /** + * Get the time spent on file's Append() call. + * + * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. + * + * @return the time spent on file's Append() call. + */ + public long fileWriteNanos() { + return fileWriteNanos(nativeHandle_); + } + + /** + * Get the time spent on sync file range. + * + * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. + * + * @return the time spent on sync file range. + */ + public long fileRangeSyncNanos() { + return fileRangeSyncNanos(nativeHandle_); + } + + /** + * Get the time spent on file fsync. + * + * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. + * + * @return the time spent on file fsync. + */ + public long fileFsyncNanos() { + return fileFsyncNanos(nativeHandle_); + } + + /** + * Get the time spent on preparing file write (fallocate, etc) + * + * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. + * + * @return the time spent on preparing file write (fallocate, etc). + */ + public long filePrepareWriteNanos() { + return filePrepareWriteNanos(nativeHandle_); + } + + /** + * Get the smallest output key prefix. + * + * @return the smallest output key prefix. + */ + public byte[] smallestOutputKeyPrefix() { + return smallestOutputKeyPrefix(nativeHandle_); + } + + /** + * Get the largest output key prefix. + * + * @return the largest output key prefix. + */ + public byte[] largestOutputKeyPrefix() { + return largestOutputKeyPrefix(nativeHandle_); + } + + /** + * Get the number of single-deletes which do not meet a put. + * + * @return number of single-deletes which do not meet a put. + */ + @Experimental("Performance optimization for a very specific workload") + public long numSingleDelFallthru() { + return numSingleDelFallthru(nativeHandle_); + } + + /** + * Get the number of single-deletes which meet something other than a put. + * + * @return the number of single-deletes which meet something other than a put.
+ */ + @Experimental("Performance optimization for a very specific workload") + public long numSingleDelMismatch() { + return numSingleDelMismatch(nativeHandle_); + } + + private static native long newCompactionJobStats(); + @Override protected native void disposeInternal(final long handle); + + + private static native void reset(final long handle); + private static native void add(final long handle, + final long compactionJobStatsHandle); + private static native long elapsedMicros(final long handle); + private static native long numInputRecords(final long handle); + private static native long numInputFiles(final long handle); + private static native long numInputFilesAtOutputLevel(final long handle); + private static native long numOutputRecords(final long handle); + private static native long numOutputFiles(final long handle); + private static native boolean isManualCompaction(final long handle); + private static native long totalInputBytes(final long handle); + private static native long totalOutputBytes(final long handle); + private static native long numRecordsReplaced(final long handle); + private static native long totalInputRawKeyBytes(final long handle); + private static native long totalInputRawValueBytes(final long handle); + private static native long numInputDeletionRecords(final long handle); + private static native long numExpiredDeletionRecords(final long handle); + private static native long numCorruptKeys(final long handle); + private static native long fileWriteNanos(final long handle); + private static native long fileRangeSyncNanos(final long handle); + private static native long fileFsyncNanos(final long handle); + private static native long filePrepareWriteNanos(final long handle); + private static native byte[] smallestOutputKeyPrefix(final long handle); + private static native byte[] largestOutputKeyPrefix(final long handle); + private static native long numSingleDelFallthru(final long handle); + private static native long numSingleDelMismatch(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/CompactionOptions.java b/java/src/main/java/org/rocksdb/CompactionOptions.java new file mode 100644 index 000000000..2c7e391fb --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompactionOptions.java @@ -0,0 +1,121 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * CompactionOptions are used in + * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)} + * calls. + */ +public class CompactionOptions extends RocksObject { + + public CompactionOptions() { + super(newCompactionOptions()); + } + + /** + * Get the compaction output compression type. + * + * See {@link #setCompression(CompressionType)}. + * + * @return the compression type. + */ + public CompressionType compression() { + return CompressionType.getCompressionType( + compression(nativeHandle_)); + } + + /** + * Set the compaction output compression type. + * + * Default: snappy + * + * If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION}, + * RocksDB will choose compression type according to the + * {@link ColumnFamilyOptions#compressionType()}, taking into account + * the output level if {@link ColumnFamilyOptions#compressionPerLevel()} + * is specified. 
+ * + * @param compression the compression type to use for compaction output. + * + * @return the instance of the current Options. + */ + public CompactionOptions setCompression(final CompressionType compression) { + setCompression(nativeHandle_, compression.getValue()); + return this; + } + + /** + * Get the compaction output file size limit. + * + * See {@link #setOutputFileSizeLimit(long)}. + * + * @return the file size limit. + */ + public long outputFileSizeLimit() { + return outputFileSizeLimit(nativeHandle_); + } + + /** + * Compaction will create files of size {@link #outputFileSizeLimit()}. + * + * Default: 2^64-1, which means that compaction will create a single file + * + * @param outputFileSizeLimit the size limit + * + * @return the instance of the current Options. + */ + public CompactionOptions setOutputFileSizeLimit( + final long outputFileSizeLimit) { + setOutputFileSizeLimit(nativeHandle_, outputFileSizeLimit); + return this; + } + + /** + * Get the maximum number of threads that will concurrently perform a + * compaction job. + * + * @return the maximum number of threads. + */ + public int maxSubcompactions() { + return maxSubcompactions(nativeHandle_); + } + + /** + * This value represents the maximum number of threads that will + * concurrently perform a compaction job by breaking it into multiple, + * smaller ones that are run simultaneously. + * + * Default: 0 (i.e. no subcompactions) + * + * If > 0, it will replace the option in + * {@link DBOptions#maxSubcompactions()} for this compaction. + * + * @param maxSubcompactions The maximum number of threads that will + * concurrently perform a compaction job + * + * @return the instance of the current Options. + */ + public CompactionOptions setMaxSubcompactions(final int maxSubcompactions) { + setMaxSubcompactions(nativeHandle_, maxSubcompactions); + return this; + } + + private static native long newCompactionOptions(); + @Override protected final native void disposeInternal(final long handle); + + private static native byte compression(final long handle); + private static native void setCompression(final long handle, + final byte compressionTypeValue); + private static native long outputFileSizeLimit(final long handle); + private static native void setOutputFileSizeLimit(final long handle, + final long outputFileSizeLimit); + private static native int maxSubcompactions(final long handle); + private static native void setMaxSubcompactions(final long handle, + final int maxSubcompactions); +} diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java index f0f28f849..4c8d6545c 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -51,36 +51,39 @@ public class CompactionOptionsFIFO extends RocksObject { * * Default: false * - * @param allowCompaction should allow intra-L0 compaction? + * @param allowCompaction true to allow intra-L0 compaction * * @return the reference to the current options. */ - public CompactionOptionsFIFO setAllowCompaction(final boolean allowCompaction) { + public CompactionOptionsFIFO setAllowCompaction( + final boolean allowCompaction) { setAllowCompaction(nativeHandle_, allowCompaction); return this; } + /** * Check if intra-L0 compaction is enabled. - * If true, try to do compaction to compact smaller files into larger ones. 
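Putting CompactionOptions together with the CompactionJobInfo/CompactionJobStats classes above: a sketch of the compactFiles(...) call named in their javadoc. The SST file names and output level are invented; a real caller would take them from live file metadata:

import java.util.Arrays;
import java.util.List;
import org.rocksdb.CompactionJobInfo;
import org.rocksdb.CompactionJobStats;
import org.rocksdb.CompactionOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CompactFilesSketch {
  public static void compactTwo(final RocksDB db) throws RocksDBException {
    // hypothetical SST file names; obtain real ones from file metadata
    final List<String> inputFiles = Arrays.asList("000012.sst", "000019.sst");
    try (final CompactionOptions compactionOptions = new CompactionOptions()
             .setMaxSubcompactions(2);
         final CompactionJobInfo jobInfo = new CompactionJobInfo()) {
      db.compactFiles(compactionOptions, db.getDefaultColumnFamily(),
          inputFiles, /* outputLevel */ 1, /* outputPathId */ -1, jobInfo);

      System.out.println("reason=" + jobInfo.compactionReason()
          + " outputs=" + jobInfo.outputFiles());
      final CompactionJobStats stats = jobInfo.stats(); // may be null
      if (stats != null) {
        System.out.println("elapsed micros: " + stats.elapsedMicros());
      }
    }
  }
}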
- * Minimum files to compact follows options.level0_file_num_compaction_trigger - * and compaction won't trigger if average compact bytes per del file is - * larger than options.write_buffer_size. This is to protect large files - * from being compacted again. + * When enabled, we try to compact smaller files into larger ones. + * + * See {@link #setAllowCompaction(boolean)}. * * Default: false * - * @return a boolean value indicating whether intra-L0 compaction is enabled + * @return true if intra-L0 compaction is enabled, false otherwise. */ public boolean allowCompaction() { return allowCompaction(nativeHandle_); } - private native void setMaxTableFilesSize(long handle, long maxTableFilesSize); - private native long maxTableFilesSize(long handle); - private native void setAllowCompaction(long handle, boolean allowCompaction); - private native boolean allowCompaction(long handle); private native static long newCompactionOptionsFIFO(); @Override protected final native void disposeInternal(final long handle); + + private native void setMaxTableFilesSize(final long handle, + final long maxTableFilesSize); + private native long maxTableFilesSize(final long handle); + private native void setAllowCompaction(final long handle, + final boolean allowCompaction); + private native boolean allowCompaction(final long handle); } diff --git a/java/src/main/java/org/rocksdb/CompactionReason.java b/java/src/main/java/org/rocksdb/CompactionReason.java new file mode 100644 index 000000000..f18c48122 --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompactionReason.java @@ -0,0 +1,115 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum CompactionReason { + kUnknown((byte)0x0), + + /** + * [Level] number of L0 files > level0_file_num_compaction_trigger + */ + kLevelL0FilesNum((byte)0x1), + + /** + * [Level] total size of level > MaxBytesForLevel() + */ + kLevelMaxLevelSize((byte)0x2), + + /** + * [Universal] Compacting for size amplification + */ + kUniversalSizeAmplification((byte)0x3), + + /** + * [Universal] Compacting for size ratio + */ + kUniversalSizeRatio((byte)0x4), + + /** + * [Universal] number of sorted runs > level0_file_num_compaction_trigger + */ + kUniversalSortedRunNum((byte)0x5), + + /** + * [FIFO] total size > max_table_files_size + */ + kFIFOMaxSize((byte)0x6), + + /** + * [FIFO] reduce number of files. + */ + kFIFOReduceNumFiles((byte)0x7), + + /** + * [FIFO] files with creation time < (current_time - interval) + */ + kFIFOTtl((byte)0x8), + + /** + * Manual compaction + */ + kManualCompaction((byte)0x9), + + /** + * DB::SuggestCompactRange() marked files for compaction + */ + kFilesMarkedForCompaction((byte)0x10), + + /** + * [Level] Automatic compaction within bottommost level to cleanup duplicate + * versions of same user key, usually due to a released snapshot. + */ + kBottommostFiles((byte)0x0A), + + /** + * Compaction based on TTL + */ + kTtl((byte)0x0B), + + /** + * According to the comments in flush_job.cc, RocksDB treats flush as + * a level 0 compaction in internal stats. 
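Stepping back to the FIFO options touched above, a minimal configuration sketch; the 1 GiB cap is an arbitrary illustrative value and Options#setCompactionOptionsFIFO comes from the existing API:

import org.rocksdb.CompactionOptionsFIFO;
import org.rocksdb.CompactionStyle;
import org.rocksdb.Options;

public class FifoSketch {
  public static Options fifoOptions() {
    final CompactionOptionsFIFO fifo = new CompactionOptionsFIFO()
        .setMaxTableFilesSize(1024L * 1024L * 1024L) // drop oldest files past ~1 GiB
        .setAllowCompaction(true);                   // opt in to intra-L0 compaction

    return new Options()
        .setCreateIfMissing(true)
        .setCompactionStyle(CompactionStyle.FIFO)
        .setCompactionOptionsFIFO(fifo);
  }
}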
+ */ + kFlush((byte)0x0C), + + /** + * Compaction caused by external sst file ingestion + */ + kExternalSstIngestion((byte)0x0D); + + private final byte value; + + CompactionReason(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value + */ + byte getValue() { + return value; + } + + /** + * Get the CompactionReason from the internal representation value. + * + * @return the compaction reason. + * + * @throws IllegalArgumentException if the value is unknown. + */ + static CompactionReason fromValue(final byte value) { + for (final CompactionReason compactionReason : CompactionReason.values()) { + if(compactionReason.value == value) { + return compactionReason; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for CompactionReason: " + value); + } +} diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java index 5e13363c4..b24bbf850 100644 --- a/java/src/main/java/org/rocksdb/CompactionStyle.java +++ b/java/src/main/java/org/rocksdb/CompactionStyle.java @@ -5,6 +5,8 @@ package org.rocksdb; +import java.util.List; + /** * Enum CompactionStyle * @@ -21,6 +23,9 @@ package org.rocksdb; * compaction strategy. It is suited for keeping event log data with * very low overhead (query log for example). It periodically deletes * the old data, so it's basically a TTL compaction style. + *

  • NONE - Disable background compaction. + * Compaction jobs are submitted via + * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}.
  • * * * @see */ public enum CompactionStyle { - LEVEL((byte) 0), - UNIVERSAL((byte) 1), - FIFO((byte) 2); + LEVEL((byte) 0x0), + UNIVERSAL((byte) 0x1), + FIFO((byte) 0x2), + NONE((byte) 0x3); - private final byte value_; + private final byte value; - private CompactionStyle(byte value) { - value_ = value; + CompactionStyle(final byte value) { + this.value = value; } /** - * Returns the byte value of the enumerations value + * Get the internal representation value. * - * @return byte representation + * @return the internal representation value. */ + //TODO(AR) should be made package-private public byte getValue() { - return value_; + return value; + } + + /** + * Get the Compaction style from the internal representation value. + * + * @param value the internal representation value. + * + * @return the Compaction style + * + * @throws IllegalArgumentException if the value does not match a + * CompactionStyle + */ + static CompactionStyle fromValue(final byte value) + throws IllegalArgumentException { + for (final CompactionStyle compactionStyle : CompactionStyle.values()) { + if (compactionStyle.value == value) { + return compactionStyle; + } + } + throw new IllegalArgumentException("Unknown value for CompactionStyle: " + + value); } } diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java index 4927770e5..a9072bbb9 100644 --- a/java/src/main/java/org/rocksdb/CompressionOptions.java +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -71,6 +71,67 @@ public class CompressionOptions extends RocksObject { return maxDictBytes(nativeHandle_); } + /** + * Maximum size of training data passed to zstd's dictionary trainer. Using + * zstd's dictionary trainer can achieve even better compression ratio + * improvements than using {@link #setMaxDictBytes(int)} alone. + * + * The training data will be used to generate a dictionary + * of {@link #maxDictBytes()}. + * + * Default: 0. + * + * @param zstdMaxTrainBytes Maximum bytes to use for training ZStd. + * + * @return the reference to the current options + */ + public CompressionOptions setZStdMaxTrainBytes(final int zstdMaxTrainBytes) { + setZstdMaxTrainBytes(nativeHandle_, zstdMaxTrainBytes); + return this; + } + + /** + * Maximum size of training data passed to zstd's dictionary trainer. + * + * @return Maximum bytes to use for training ZStd + */ + public int zstdMaxTrainBytes() { + return zstdMaxTrainBytes(nativeHandle_); + } + + /** + * When the compression options are set by the user, it will be set to "true". + * For bottommost_compression_opts, to enable it, user must set enabled=true. + * Otherwise, bottommost compression will use compression_opts as default + * compression options. + * + * For compression_opts, if compression_opts.enabled=false, it is still + * used as compression options for compression process. + * + * Default: false. + * + * @param enabled true to use these compression options + * for the bottommost_compression_opts, false otherwise + * + * @return the reference to the current options + */ + public CompressionOptions setEnabled(final boolean enabled) { + setEnabled(nativeHandle_, enabled); + return this; + } + + /** + * Determine whether these compression options + * are used for the bottommost_compression_opts. 
+ * + * @return true if these compression options are used + * for the bottommost_compression_opts, false otherwise + */ + public boolean enabled() { + return enabled(nativeHandle_); + } + + private native static long newCompressionOptions(); @Override protected final native void disposeInternal(final long handle); @@ -82,4 +143,9 @@ public class CompressionOptions extends RocksObject { private native int strategy(final long handle); private native void setMaxDictBytes(final long handle, final int maxDictBytes); private native int maxDictBytes(final long handle); + private native void setZstdMaxTrainBytes(final long handle, + final int zstdMaxTrainBytes); + private native int zstdMaxTrainBytes(final long handle); + private native void setEnabled(final long handle, final boolean enabled); + private native boolean enabled(final long handle); } diff --git a/java/src/main/java/org/rocksdb/DBOptions.java b/java/src/main/java/org/rocksdb/DBOptions.java index 280623a20..e2c4c02b3 100644 --- a/java/src/main/java/org/rocksdb/DBOptions.java +++ b/java/src/main/java/org/rocksdb/DBOptions.java @@ -15,8 +15,9 @@ import java.util.*; * If {@link #dispose()} function is not called, then it will be GC'd * automatically and native resources will be released as part of the process. */ -public class DBOptions - extends RocksObject implements DBOptionsInterface { +public class DBOptions extends RocksObject + implements DBOptionsInterface, + MutableDBOptionsInterface { static { RocksDB.loadLibrary(); } @@ -46,9 +47,19 @@ public class DBOptions this.numShardBits_ = other.numShardBits_; this.rateLimiter_ = other.rateLimiter_; this.rowCache_ = other.rowCache_; + this.walFilter_ = other.walFilter_; this.writeBufferManager_ = other.writeBufferManager_; } + /** + * Constructor from Options + * + * @param options The options. + */ + public DBOptions(final Options options) { + super(newDBOptionsFromOptions(options.nativeHandle_)); + } + /** *
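The enabled flag is what activates these options as bottommost_compression_opts. A hedged sketch combining the new CompressionOptions fields with the setBottommostCompressionOptions(...) hook added to ColumnFamilyOptions earlier in this patch; the sizes are illustrative:

import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.CompressionOptions;
import org.rocksdb.CompressionType;

public class BottommostCompressionSketch {
  public static ColumnFamilyOptions build() {
    final CompressionOptions bottommost = new CompressionOptions()
        .setMaxDictBytes(16 * 1024)             // dictionary size
        .setZStdMaxTrainBytes(100 * 16 * 1024)  // training sample budget
        .setEnabled(true);                      // required for bottommost_compression_opts

    return new ColumnFamilyOptions()
        .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION)
        .setBottommostCompressionOptions(bottommost);
  }
}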

    Method to get a options instance by using pre-configured * property values. If one or many values are undefined in @@ -131,18 +142,6 @@ public class DBOptions return createMissingColumnFamilies(nativeHandle_); } - @Override - public DBOptions setEnv(final Env env) { - setEnv(nativeHandle_, env.nativeHandle_); - this.env_ = env; - return this; - } - - @Override - public Env getEnv() { - return env_; - } - @Override public DBOptions setErrorIfExists( final boolean errorIfExists) { @@ -171,6 +170,18 @@ public class DBOptions return paranoidChecks(nativeHandle_); } + @Override + public DBOptions setEnv(final Env env) { + setEnv(nativeHandle_, env.nativeHandle_); + this.env_ = env; + return this; + } + + @Override + public Env getEnv() { + return env_; + } + @Override public DBOptions setRateLimiter(final RateLimiter rateLimiter) { assert(isOwningHandle()); @@ -286,8 +297,8 @@ public class DBOptions assert(isOwningHandle()); final int len = dbPaths.size(); - final String paths[] = new String[len]; - final long targetSizes[] = new long[len]; + final String[] paths = new String[len]; + final long[] targetSizes = new long[len]; int i = 0; for(final DbPath dbPath : dbPaths) { @@ -305,8 +316,8 @@ public class DBOptions if(len == 0) { return Collections.emptyList(); } else { - final String paths[] = new String[len]; - final long targetSizes[] = new long[len]; + final String[] paths = new String[len]; + final long[] targetSizes = new long[len]; dbPaths(nativeHandle_, paths, targetSizes); @@ -360,6 +371,19 @@ public class DBOptions return deleteObsoleteFilesPeriodMicros(nativeHandle_); } + @Override + public DBOptions setMaxBackgroundJobs(final int maxBackgroundJobs) { + assert(isOwningHandle()); + setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs); + return this; + } + + @Override + public int maxBackgroundJobs() { + assert(isOwningHandle()); + return maxBackgroundJobs(nativeHandle_); + } + @Override public void setBaseBackgroundCompactions( final int baseBackgroundCompactions) { @@ -388,9 +412,10 @@ public class DBOptions } @Override - public void setMaxSubcompactions(final int maxSubcompactions) { + public DBOptions setMaxSubcompactions(final int maxSubcompactions) { assert(isOwningHandle()); setMaxSubcompactions(nativeHandle_, maxSubcompactions); + return this; } @Override @@ -413,19 +438,6 @@ public class DBOptions return maxBackgroundFlushes(nativeHandle_); } - @Override - public DBOptions setMaxBackgroundJobs(final int maxBackgroundJobs) { - assert(isOwningHandle()); - setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs); - return this; - } - - @Override - public int maxBackgroundJobs() { - assert(isOwningHandle()); - return maxBackgroundJobs(nativeHandle_); - } - @Override public DBOptions setMaxLogFileSize(final long maxLogFileSize) { assert(isOwningHandle()); @@ -551,73 +563,73 @@ public class DBOptions } @Override - public DBOptions setUseDirectReads( - final boolean useDirectReads) { + public DBOptions setAllowMmapReads( + final boolean allowMmapReads) { assert(isOwningHandle()); - setUseDirectReads(nativeHandle_, useDirectReads); + setAllowMmapReads(nativeHandle_, allowMmapReads); return this; } @Override - public boolean useDirectReads() { + public boolean allowMmapReads() { assert(isOwningHandle()); - return useDirectReads(nativeHandle_); + return allowMmapReads(nativeHandle_); } @Override - public DBOptions setUseDirectIoForFlushAndCompaction( - final boolean useDirectIoForFlushAndCompaction) { + public DBOptions setAllowMmapWrites( + final boolean allowMmapWrites) { 
assert(isOwningHandle()); - setUseDirectIoForFlushAndCompaction(nativeHandle_, - useDirectIoForFlushAndCompaction); + setAllowMmapWrites(nativeHandle_, allowMmapWrites); return this; } @Override - public boolean useDirectIoForFlushAndCompaction() { + public boolean allowMmapWrites() { assert(isOwningHandle()); - return useDirectIoForFlushAndCompaction(nativeHandle_); + return allowMmapWrites(nativeHandle_); } @Override - public DBOptions setAllowFAllocate(final boolean allowFAllocate) { + public DBOptions setUseDirectReads( + final boolean useDirectReads) { assert(isOwningHandle()); - setAllowFAllocate(nativeHandle_, allowFAllocate); + setUseDirectReads(nativeHandle_, useDirectReads); return this; } @Override - public boolean allowFAllocate() { + public boolean useDirectReads() { assert(isOwningHandle()); - return allowFAllocate(nativeHandle_); + return useDirectReads(nativeHandle_); } @Override - public DBOptions setAllowMmapReads( - final boolean allowMmapReads) { + public DBOptions setUseDirectIoForFlushAndCompaction( + final boolean useDirectIoForFlushAndCompaction) { assert(isOwningHandle()); - setAllowMmapReads(nativeHandle_, allowMmapReads); + setUseDirectIoForFlushAndCompaction(nativeHandle_, + useDirectIoForFlushAndCompaction); return this; } @Override - public boolean allowMmapReads() { + public boolean useDirectIoForFlushAndCompaction() { assert(isOwningHandle()); - return allowMmapReads(nativeHandle_); + return useDirectIoForFlushAndCompaction(nativeHandle_); } @Override - public DBOptions setAllowMmapWrites( - final boolean allowMmapWrites) { + public DBOptions setAllowFAllocate(final boolean allowFAllocate) { assert(isOwningHandle()); - setAllowMmapWrites(nativeHandle_, allowMmapWrites); + setAllowFAllocate(nativeHandle_, allowFAllocate); return this; } @Override - public boolean allowMmapWrites() { + public boolean allowFAllocate() { assert(isOwningHandle()); - return allowMmapWrites(nativeHandle_); + return allowFAllocate(nativeHandle_); } @Override @@ -682,7 +694,7 @@ public class DBOptions return this.writeBufferManager_; } - @Override + @Override public long dbWriteBufferSize() { assert(isOwningHandle()); return dbWriteBufferSize(nativeHandle_); @@ -795,6 +807,33 @@ public class DBOptions return walBytesPerSync(nativeHandle_); } + //TODO(AR) NOW +// @Override +// public DBOptions setListeners(final List listeners) { +// assert(isOwningHandle()); +// final long[] eventListenerHandlers = new long[listeners.size()]; +// for (int i = 0; i < eventListenerHandlers.length; i++) { +// eventListenerHandlers[i] = listeners.get(i).nativeHandle_; +// } +// setEventListeners(nativeHandle_, eventListenerHandlers); +// return this; +// } +// +// @Override +// public Collection listeners() { +// assert(isOwningHandle()); +// final long[] eventListenerHandlers = listeners(nativeHandle_); +// if (eventListenerHandlers == null || eventListenerHandlers.length == 0) { +// return Collections.emptyList(); +// } +// +// final List eventListeners = new ArrayList<>(); +// for (final long eventListenerHandle : eventListenerHandlers) { +// eventListeners.add(new EventListener(eventListenerHandle)); //TODO(AR) check ownership is set to false! 
+// } +// return eventListeners; +// } + @Override public DBOptions setEnableThreadTracking(final boolean enableThreadTracking) { assert(isOwningHandle()); @@ -820,6 +859,19 @@ public class DBOptions return delayedWriteRate(nativeHandle_); } + @Override + public DBOptions setEnablePipelinedWrite(final boolean enablePipelinedWrite) { + assert(isOwningHandle()); + setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite); + return this; + } + + @Override + public boolean enablePipelinedWrite() { + assert(isOwningHandle()); + return enablePipelinedWrite(nativeHandle_); + } + @Override public DBOptions setAllowConcurrentMemtableWrite( final boolean allowConcurrentMemtableWrite) { @@ -921,6 +973,20 @@ public class DBOptions return this.rowCache_; } + @Override + public DBOptions setWalFilter(final AbstractWalFilter walFilter) { + assert(isOwningHandle()); + setWalFilter(nativeHandle_, walFilter.nativeHandle_); + this.walFilter_ = walFilter; + return this; + } + + @Override + public WalFilter walFilter() { + assert(isOwningHandle()); + return this.walFilter_; + } + @Override public DBOptions setFailIfOptionsFileError(final boolean failIfOptionsFileError) { assert(isOwningHandle()); @@ -973,6 +1039,69 @@ public class DBOptions return avoidFlushDuringShutdown(nativeHandle_); } + @Override + public DBOptions setAllowIngestBehind(final boolean allowIngestBehind) { + assert(isOwningHandle()); + setAllowIngestBehind(nativeHandle_, allowIngestBehind); + return this; + } + + @Override + public boolean allowIngestBehind() { + assert(isOwningHandle()); + return allowIngestBehind(nativeHandle_); + } + + @Override + public DBOptions setPreserveDeletes(final boolean preserveDeletes) { + assert(isOwningHandle()); + setPreserveDeletes(nativeHandle_, preserveDeletes); + return this; + } + + @Override + public boolean preserveDeletes() { + assert(isOwningHandle()); + return preserveDeletes(nativeHandle_); + } + + @Override + public DBOptions setTwoWriteQueues(final boolean twoWriteQueues) { + assert(isOwningHandle()); + setTwoWriteQueues(nativeHandle_, twoWriteQueues); + return this; + } + + @Override + public boolean twoWriteQueues() { + assert(isOwningHandle()); + return twoWriteQueues(nativeHandle_); + } + + @Override + public DBOptions setManualWalFlush(final boolean manualWalFlush) { + assert(isOwningHandle()); + setManualWalFlush(nativeHandle_, manualWalFlush); + return this; + } + + @Override + public boolean manualWalFlush() { + assert(isOwningHandle()); + return manualWalFlush(nativeHandle_); + } + + @Override + public DBOptions setAtomicFlush(final boolean atomicFlush) { + setAtomicFlush(nativeHandle_, atomicFlush); + return this; + } + + @Override + public boolean atomicFlush() { + return atomicFlush(nativeHandle_); + } + static final int DEFAULT_NUM_SHARD_BITS = -1; @@ -991,8 +1120,9 @@ public class DBOptions private static native long getDBOptionsFromProps( String optString); - private native static long newDBOptions(); - private native static long copyDBOptions(long handle); + private static native long newDBOptions(); + private static native long copyDBOptions(final long handle); + private static native long newDBOptionsFromOptions(final long optionsHandle); @Override protected final native void disposeInternal(final long handle); private native void optimizeForSmallDb(final long handle); @@ -1133,6 +1263,9 @@ public class DBOptions private native boolean enableThreadTracking(long handle); private native void setDelayedWriteRate(long handle, long delayedWriteRate); private native long 
delayedWriteRate(long handle); + private native void setEnablePipelinedWrite(final long handle, + final boolean enablePipelinedWrite); + private native boolean enablePipelinedWrite(final long handle); private native void setAllowConcurrentMemtableWrite(long handle, boolean allowConcurrentMemtableWrite); private native boolean allowConcurrentMemtableWrite(long handle); @@ -1155,7 +1288,9 @@ public class DBOptions final boolean allow2pc); private native boolean allow2pc(final long handle); private native void setRowCache(final long handle, - final long row_cache_handle); + final long rowCacheHandle); + private native void setWalFilter(final long handle, + final long walFilterHandle); private native void setFailIfOptionsFileError(final long handle, final boolean failIfOptionsFileError); private native boolean failIfOptionsFileError(final long handle); @@ -1168,6 +1303,21 @@ public class DBOptions private native void setAvoidFlushDuringShutdown(final long handle, final boolean avoidFlushDuringShutdown); private native boolean avoidFlushDuringShutdown(final long handle); + private native void setAllowIngestBehind(final long handle, + final boolean allowIngestBehind); + private native boolean allowIngestBehind(final long handle); + private native void setPreserveDeletes(final long handle, + final boolean preserveDeletes); + private native boolean preserveDeletes(final long handle); + private native void setTwoWriteQueues(final long handle, + final boolean twoWriteQueues); + private native boolean twoWriteQueues(final long handle); + private native void setManualWalFlush(final long handle, + final boolean manualWalFlush); + private native boolean manualWalFlush(final long handle); + private native void setAtomicFlush(final long handle, + final boolean atomicFlush); + private native boolean atomicFlush(final long handle); // instance variables // NOTE: If you add new member variables, please update the copy constructor above! @@ -1175,5 +1325,6 @@ public class DBOptions private int numShardBits_; private RateLimiter rateLimiter_; private Cache rowCache_; + private WalFilter walFilter_; private WriteBufferManager writeBufferManager_; } diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java index 4ab2fd15a..af9aa179b 100644 --- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -206,35 +206,9 @@ public interface DBOptionsInterface { InfoLogLevel infoLogLevel(); /** - * Number of open files that can be used by the DB. You may need to - * increase this if your database has a large working set. Value -1 means - * files opened are always kept open. You can estimate number of files based - * on {@code target_file_size_base} and {@code target_file_size_multiplier} - * for level-based compaction. For universal-style compaction, you can usually - * set it to -1. - * Default: 5000 - * - * @param maxOpenFiles the maximum number of open files. - * @return the instance of the current object. - */ - T setMaxOpenFiles(int maxOpenFiles); - - /** - * Number of open files that can be used by the DB. You may need to - * increase this if your database has a large working set. Value -1 means - * files opened are always kept open. You can estimate number of files based - * on {@code target_file_size_base} and {@code target_file_size_multiplier} - * for level-based compaction. For universal-style compaction, you can usually - * set it to -1. 
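The new DBOptions(Options) constructor above, backed by newDBOptionsFromOptions, gives a cheap way to derive the DB-wide half of a monolithic Options; a minimal sketch:

    import org.rocksdb.DBOptions;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;

    public class SplitOptionsSketch {
      public static void main(final String[] args) {
        RocksDB.loadLibrary();
        try (final Options options = new Options()
                 .setCreateIfMissing(true)
                 .setMaxBackgroundJobs(4)) {
          // Copy only the DB-wide settings; column-family settings are not carried over.
          try (final DBOptions dbOptions = new DBOptions(options)) {
            assert dbOptions.maxBackgroundJobs() == 4;
          }
        }
      }
    }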
- * - * @return the maximum number of open files. - */ - int maxOpenFiles(); - - /** - * If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You - * can use this option to increase the number of threads used to open the - * files. + * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open + * all files on DB::Open(). You can use this option to increase the number + * of threads used to open the files. * * Default: 16 * @@ -246,9 +220,9 @@ T setMaxFileOpeningThreads(int maxFileOpeningThreads); /** - * If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You - * can use this option to increase the number of threads used to open the - * files. + * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open all + * files on DB::Open(). You can use this option to increase the number of + * threads used to open the files. * * Default: 16 * @@ -256,36 +230,6 @@ */ int maxFileOpeningThreads(); - /** - * Once write-ahead logs exceed this size, we will start forcing the - * flush of column families whose memtables are backed by the oldest live - * WAL file (i.e. the ones that are causing all the space amplification). - * - * If set to 0 (default), we will dynamically choose the WAL size limit to - * be [sum of all write_buffer_size * max_write_buffer_number] * 2 - * - * This option takes effect only when there are more than one column family as - * otherwise the wal size is dictated by the write_buffer_size. - * - * Default: 0 - * - * @param maxTotalWalSize max total wal size. - * @return the instance of the current object. - */ - T setMaxTotalWalSize(long maxTotalWalSize); - - /** - * Returns the max total wal size. Once write-ahead logs exceed this size, - * we will start forcing the flush of column families whose memtables are - * backed by the oldest live WAL file (i.e. the ones that are causing all - * the space amplification). - * - * If set to 0 (default), we will dynamically choose the WAL size limit - * to be [sum of all write_buffer_size * max_write_buffer_number] * 2 - * - * @return max total wal size - */ - long maxTotalWalSize(); - /** *
    Sets the statistics object which collects metrics about database operations. * Statistics objects should not be shared between DB instances as @@ -466,59 +410,6 @@ public interface DBOptionsInterface { */ long deleteObsoleteFilesPeriodMicros(); - /** - * Suggested number of concurrent background compaction jobs, submitted to - * the default LOW priority thread pool. - * Default: 1 - * - * @param baseBackgroundCompactions Suggested number of background compaction - * jobs - * - * @deprecated Use {@link #setMaxBackgroundJobs(int)} - */ - void setBaseBackgroundCompactions(int baseBackgroundCompactions); - - /** - * Suggested number of concurrent background compaction jobs, submitted to - * the default LOW priority thread pool. - * Default: 1 - * - * @return Suggested number of background compaction jobs - */ - int baseBackgroundCompactions(); - - /** - * Specifies the maximum number of concurrent background compaction jobs, - * submitted to the default LOW priority thread pool. - * If you're increasing this, also consider increasing number of threads in - * LOW priority thread pool. For more information, see - * Default: 1 - * - * @param maxBackgroundCompactions the maximum number of background - * compaction jobs. - * @return the instance of the current object. - * - * @see RocksEnv#setBackgroundThreads(int) - * @see RocksEnv#setBackgroundThreads(int, int) - * @see #maxBackgroundFlushes() - */ - T setMaxBackgroundCompactions(int maxBackgroundCompactions); - - /** - * Returns the maximum number of concurrent background compaction jobs, - * submitted to the default LOW priority thread pool. - * When increasing this number, we may also want to consider increasing - * number of threads in LOW priority thread pool. - * Default: 1 - * - * @return the maximum number of concurrent background compaction jobs. - * @see RocksEnv#setBackgroundThreads(int) - * @see RocksEnv#setBackgroundThreads(int, int) - * - * @deprecated Use {@link #setMaxBackgroundJobs(int)} - */ - int maxBackgroundCompactions(); - /** * This value represents the maximum number of threads that will * concurrently perform a compaction job by breaking it into multiple, @@ -527,8 +418,10 @@ public interface DBOptionsInterface { * * @param maxSubcompactions The maximum number of threads that will * concurrently perform a compaction job + * + * @return the instance of the current object. */ - void setMaxSubcompactions(int maxSubcompactions); + T setMaxSubcompactions(int maxSubcompactions); /** * This value represents the maximum number of threads that will @@ -551,11 +444,12 @@ public interface DBOptionsInterface { * @return the instance of the current object. * * @see RocksEnv#setBackgroundThreads(int) - * @see RocksEnv#setBackgroundThreads(int, int) - * @see #maxBackgroundCompactions() + * @see RocksEnv#setBackgroundThreads(int, Priority) + * @see MutableDBOptionsInterface#maxBackgroundCompactions() * - * @deprecated Use {@link #setMaxBackgroundJobs(int)} + * @deprecated Use {@link MutableDBOptionsInterface#setMaxBackgroundJobs(int)} */ + @Deprecated T setMaxBackgroundFlushes(int maxBackgroundFlushes); /** @@ -566,29 +460,11 @@ public interface DBOptionsInterface { * * @return the maximum number of concurrent background flush jobs. * @see RocksEnv#setBackgroundThreads(int) - * @see RocksEnv#setBackgroundThreads(int, int) + * @see RocksEnv#setBackgroundThreads(int, Priority) */ + @Deprecated int maxBackgroundFlushes(); - /** - * Specifies the maximum number of concurrent background jobs (both flushes - * and compactions combined). 
- * Default: 2 - * - * @param maxBackgroundJobs number of max concurrent background jobs - * @return the instance of the current object. - */ - T setMaxBackgroundJobs(int maxBackgroundJobs); - - /** - * Returns the maximum number of concurrent background jobs (both flushes - * and compactions combined). - * Default: 2 - * - * @return the maximum number of concurrent background jobs. - */ - int maxBackgroundJobs(); - /** * Specifies the maximum size of a info log file. If the current log file * is larger than `max_log_file_size`, a new info log file will @@ -938,23 +814,6 @@ public interface DBOptionsInterface { */ boolean isFdCloseOnExec(); - /** - * if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec - * Default: 600 (10 minutes) - * - * @param statsDumpPeriodSec time interval in seconds. - * @return the instance of the current object. - */ - T setStatsDumpPeriodSec(int statsDumpPeriodSec); - - /** - * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec - * Default: 600 (10 minutes) - * - * @return time interval in seconds. - */ - int statsDumpPeriodSec(); - /** * If set true, will hint the underlying file system that the file * access pattern is random, when a sst file is opened. @@ -1089,36 +948,6 @@ public interface DBOptionsInterface { */ boolean newTableReaderForCompactionInputs(); - /** - * If non-zero, we perform bigger reads when doing compaction. If you're - * running RocksDB on spinning disks, you should set this to at least 2MB. - * - * That way RocksDB's compaction is doing sequential instead of random reads. - * When non-zero, we also force {@link #newTableReaderForCompactionInputs()} - * to true. - * - * Default: 0 - * - * @param compactionReadaheadSize The compaction read-ahead size - * - * @return the reference to the current options. - */ - T setCompactionReadaheadSize(final long compactionReadaheadSize); - - /** - * If non-zero, we perform bigger reads when doing compaction. If you're - * running RocksDB on spinning disks, you should set this to at least 2MB. - * - * That way RocksDB's compaction is doing sequential instead of random reads. - * When non-zero, we also force {@link #newTableReaderForCompactionInputs()} - * to true. - * - * Default: 0 - * - * @return The compaction read-ahead size - */ - long compactionReadaheadSize(); - /** * This is a maximum buffer size that is used by WinMmapReadableFile in * unbuffered disk I/O mode. We need to maintain an aligned buffer for @@ -1126,7 +955,8 @@ public interface DBOptionsInterface { * for bigger requests allocate one shot buffers. In unbuffered mode we * always bypass read-ahead buffer at ReadaheadRandomAccessFile * When read-ahead is required we then make use of - * {@link #compactionReadaheadSize()} value and always try to read ahead. + * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and + * always try to read ahead. * With read-ahead we always pre-allocate buffer to the size instead of * growing it up to a limit. * @@ -1151,9 +981,9 @@ public interface DBOptionsInterface { * for bigger requests allocate one shot buffers. In unbuffered mode we * always bypass read-ahead buffer at ReadaheadRandomAccessFile * When read-ahead is required we then make use of - * {@link #compactionReadaheadSize()} value and always try to read ahead. - * With read-ahead we always pre-allocate buffer to the size instead of - * growing it up to a limit. + * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and + * always try to read ahead. 
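These deletions are relocations rather than removals: setters such as setMaxBackgroundJobs, setBytesPerSync, setStatsDumpPeriodSec and setCompactionReadaheadSize move to MutableDBOptionsInterface, which DBOptions now implements, so the same settings also become tunable on a live database. A sketch, assuming the MutableDBOptions builder and RocksDB#setDBOptions that accompany this interface:

    import org.rocksdb.MutableDBOptions;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class MutableDBOptionsSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Options options = new Options().setCreateIfMissing(true);
             final RocksDB db = RocksDB.open(options, "/tmp/mutable-db-options")) {
          // Adjust relocated options without re-opening the database.
          db.setDBOptions(MutableDBOptions.builder()
              .setMaxBackgroundJobs(8)
              .setBytesPerSync(1024 * 1024)
              .build());
        }
      }
    }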
With read-ahead we always pre-allocate buffer + * to the size instead of growing it up to a limit. * * This option is currently honored only on Windows * @@ -1166,30 +996,6 @@ public interface DBOptionsInterface { */ long randomAccessMaxBufferSize(); - /** - * This is the maximum buffer size that is used by WritableFileWriter. - * On Windows, we need to maintain an aligned buffer for writes. - * We allow the buffer to grow until it's size hits the limit. - * - * Default: 1024 * 1024 (1 MB) - * - * @param writableFileMaxBufferSize the maximum buffer size - * - * @return the reference to the current options. - */ - T setWritableFileMaxBufferSize(long writableFileMaxBufferSize); - - /** - * This is the maximum buffer size that is used by WritableFileWriter. - * On Windows, we need to maintain an aligned buffer for writes. - * We allow the buffer to grow until it's size hits the limit. - * - * Default: 1024 * 1024 (1 MB) - * - * @return the maximum buffer size - */ - long writableFileMaxBufferSize(); - /** * Use adaptive mutex, which spins in the user space before resorting * to kernel. This could reduce context switch when the mutex is not @@ -1213,45 +1019,24 @@ public interface DBOptionsInterface { */ boolean useAdaptiveMutex(); - /** - * Allows OS to incrementally sync files to disk while they are being - * written, asynchronously, in the background. - * Issue one request for every bytes_per_sync written. 0 turns it off. - * Default: 0 - * - * @param bytesPerSync size in bytes - * @return the instance of the current object. - */ - T setBytesPerSync(long bytesPerSync); - - /** - * Allows OS to incrementally sync files to disk while they are being - * written, asynchronously, in the background. - * Issue one request for every bytes_per_sync written. 0 turns it off. - * Default: 0 - * - * @return size in bytes - */ - long bytesPerSync(); - - /** - * Same as {@link #setBytesPerSync(long)} , but applies to WAL files - * - * Default: 0, turned off - * - * @param walBytesPerSync size in bytes - * @return the instance of the current object. - */ - T setWalBytesPerSync(long walBytesPerSync); - - /** - * Same as {@link #bytesPerSync()} , but applies to WAL files - * - * Default: 0, turned off - * - * @return size in bytes - */ - long walBytesPerSync(); + //TODO(AR) NOW +// /** +// * Sets the {@link EventListener}s whose callback functions +// * will be called when specific RocksDB event happens. +// * +// * @param listeners the listeners who should be notified on various events. +// * +// * @return the instance of the current object. +// */ +// T setListeners(final List listeners); +// +// /** +// * Gets the {@link EventListener}s whose callback functions +// * will be called when specific RocksDB event happens. +// * +// * @return a collection of Event listeners. +// */ +// Collection listeners(); /** * If true, then the status of the threads involved in this DB will @@ -1276,40 +1061,33 @@ public interface DBOptionsInterface { boolean enableThreadTracking(); /** - * The limited write rate to DB if - * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or - * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, - * or we are writing to the last mem table allowed and we allow more than 3 - * mem tables. It is calculated using size of user write requests before - * compression. RocksDB may decide to slow down more if the compaction still - * gets behind further. + * By default, a single write thread queue is maintained. 
The thread that gets + * to the head of the queue becomes the write batch group leader and is + * responsible for writing to the WAL and memtable for the batch group. * - * Unit: bytes per second. + * If {@link #enablePipelinedWrite()} is true, separate write thread queues are + * maintained for WAL writes and memtable writes. A write thread first enters + * the WAL writer queue and then the memtable writer queue. Pending threads on + * the WAL writer queue thus only have to wait for previous writers to finish + * their WAL writing but not the memtable writing. Enabling the feature may + * improve write throughput and reduce latency of the prepare phase of a + * two-phase commit. * - * Default: 16MB/s + * Default: false * - * @param delayedWriteRate the rate in bytes per second + * @param enablePipelinedWrite true to enable pipelined writes * * @return the reference to the current options. */ - T setDelayedWriteRate(long delayedWriteRate); + T setEnablePipelinedWrite(final boolean enablePipelinedWrite); /** - * The limited write rate to DB if - * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or - * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, - * or we are writing to the last mem table allowed and we allow more than 3 - * mem tables. It is calculated using size of user write requests before - * compression. RocksDB may decide to slow down more if the compaction still - * gets behind further. - * - * Unit: bytes per second. + * Returns true if pipelined writes are enabled. + * See {@link #setEnablePipelinedWrite(boolean)}. * - * Default: 16MB/s - * - * @return the rate in bytes per second + * @return true if pipelined writes are enabled, false otherwise. */ - long delayedWriteRate(); + boolean enablePipelinedWrite(); /** * If true, allow multi-writers to update mem tables in parallel. @@ -1511,6 +1289,27 @@ */ Cache rowCache(); + /** + * A filter object supplied to be invoked while processing write-ahead-logs + * (WALs) during recovery. The filter provides a way to inspect log + * records, ignoring a particular record or skipping replay. + * The filter is currently invoked at startup, from a single thread. + * + * @param walFilter the filter for processing WALs during recovery. + * + * @return the reference to the current options. + */ + T setWalFilter(final AbstractWalFilter walFilter); + + /** + * Gets the filter for processing WALs during recovery. + * See {@link #setWalFilter(AbstractWalFilter)}. + * + * @return the filter used for processing WALs during recovery. + */ + WalFilter walFilter(); + /** * If true, then DB::Open / CreateColumnFamily / DropColumnFamily * / SetOptions will fail if options file is not detected or properly @@ -1589,35 +1388,126 @@ boolean avoidFlushDuringRecovery(); /** - * By default RocksDB will flush all memtables on DB close if there are - * unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup - * DB close. Unpersisted data WILL BE LOST. + * Set this option to true during creation of the database if you want + * to be able to ingest behind (call IngestExternalFile() skipping keys + * that already exist, rather than overwriting matching keys). + * Setting this option to true has the following effects: + * 1) Disables some internal optimizations around SST file compression. + * 2) Reserves the bottom-most level for ingested files only. + * 3) Note that num_levels should be >= 3 if this option is turned on.
+ * + * DEFAULT: false + * + * @param allowIngestBehind true to allow ingest behind, false to disallow. + * + * @return the reference to the current options. + */ + T setAllowIngestBehind(final boolean allowIngestBehind); + + /** + * Returns true if ingest behind is allowed. + * See {@link #setAllowIngestBehind(boolean)}. + * + * @return true if ingest behind is allowed, false otherwise. + */ + boolean allowIngestBehind(); + + /** + * Needed to support differential snapshots. + * If set to true then DB will only process deletes with sequence number + * less than what was set by SetPreserveDeletesSequenceNumber(uint64_t ts). + * Clients are responsible to periodically call this method to advance + * the cutoff time. If this method is never called and preserve_deletes + * is set to true NO deletes will ever be processed. + * At the moment this only keeps normal deletes, SingleDeletes will + * not be preserved. * * DEFAULT: false * - * Dynamically changeable through - * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} - * API. + * @param preserveDeletes true to preserve deletes. + * + * @return the reference to the current options. + */ + T setPreserveDeletes(final boolean preserveDeletes); + + /** + * Returns true if deletes are preserved. + * See {@link #setPreserveDeletes(boolean)}. + * + * @return true if deletes are preserved, false otherwise. + */ + boolean preserveDeletes(); + + /** + * If enabled it uses two queues for writes, one for the ones with + * disable_memtable and one for the ones that also write to memtable. This + * allows the memtable writes not to lag behind other writes. It can be used + * to optimize MySQL 2PC in which only the commits, which are serial, write to + * memtable. + * + * DEFAULT: false * - * @param avoidFlushDuringShutdown true if we should avoid flush during - * shutdown + * @param twoWriteQueues true to enable two write queues, false otherwise. * * @return the reference to the current options. */ - T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown); + T setTwoWriteQueues(final boolean twoWriteQueues); /** - * By default RocksDB will flush all memtables on DB close if there are - * unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup - * DB close. Unpersisted data WILL BE LOST. + * Returns true if two write queues are enabled. + * + * @return true if two write queues are enabled, false otherwise. + */ + boolean twoWriteQueues(); + + /** + * If true WAL is not flushed automatically after each write. Instead it + * relies on manual invocation of FlushWAL to write the WAL buffer to its + * file. * * DEFAULT: false * - * Dynamically changeable through - * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} - * API. + * @param manualWalFlush true to set disable automatic WAL flushing, + * false otherwise. + * + * @return the reference to the current options. + */ + T setManualWalFlush(final boolean manualWalFlush); + + /** + * Returns true if automatic WAL flushing is disabled. + * See {@link #setManualWalFlush(boolean)}. + * + * @return true if automatic WAL flushing is disabled, false otherwise. + */ + boolean manualWalFlush(); + + /** + * If true, RocksDB supports flushing multiple column families and committing + * their results atomically to MANIFEST. Note that it is not + * necessary to set atomic_flush to true if WAL is always enabled since WAL + * allows the database to be restored to the last persistent state in WAL. 
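Taken together, the write-path toggles documented above combine on a DBOptions as in the following sketch; the flag choices are illustrative only, and with manualWalFlush enabled the WAL must be flushed explicitly (e.g. via RocksDB#flushWal) before durability can be assumed:

    import org.rocksdb.DBOptions;
    import org.rocksdb.RocksDB;

    public class WritePathTogglesSketch {
      public static void main(final String[] args) {
        RocksDB.loadLibrary();
        try (final DBOptions dbOptions = new DBOptions()
                 .setCreateIfMissing(true)
                 .setEnablePipelinedWrite(true) // separate WAL / memtable writer queues
                 .setAtomicFlush(true)          // flush column families atomically
                 .setManualWalFlush(true)) {    // WAL written only on an explicit flush
          // pass dbOptions (plus column-family descriptors) to RocksDB.open(...)
        }
      }
    }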
+ * This option is useful when there are column families with writes NOT + * protected by WAL. + * For manual flush, application has to specify which column families to + * flush atomically in {@link RocksDB#flush(FlushOptions, List)}. + * For auto-triggered flush, RocksDB atomically flushes ALL column families. + * + * Currently, any WAL-enabled writes after atomic flush may be replayed + * independently if the process crashes later and tries to recover. + * + * @param atomicFlush true to enable atomic flush of multiple column families. + * + * @return the reference to the current options. + */ + T setAtomicFlush(final boolean atomicFlush); + + /** + * Determine if atomic flush of multiple column families is enabled. + * + * See {@link #setAtomicFlush(boolean)}. * - * @return true if we should avoid flush during shutdown + * @return true if atomic flush is enabled. */ - boolean avoidFlushDuringShutdown(); + boolean atomicFlush(); } diff --git a/java/src/main/java/org/rocksdb/DataBlockIndexType.java b/java/src/main/java/org/rocksdb/DataBlockIndexType.java new file mode 100644 index 000000000..513e5b429 --- /dev/null +++ b/java/src/main/java/org/rocksdb/DataBlockIndexType.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + + +/** + * DataBlockIndexType used in conjunction with BlockBasedTable. + */ +public enum DataBlockIndexType { + /** + * traditional block type + */ + kDataBlockBinarySearch((byte)0x0), + + /** + * additional hash index + */ + kDataBlockBinaryAndHash((byte)0x1); + + private final byte value; + + DataBlockIndexType(final byte value) { + this.value = value; + } + + byte getValue() { + return value; + } +} diff --git a/java/src/main/java/org/rocksdb/Env.java b/java/src/main/java/org/rocksdb/Env.java index a46f06178..d7658f239 100644 --- a/java/src/main/java/org/rocksdb/Env.java +++ b/java/src/main/java/org/rocksdb/Env.java @@ -5,12 +5,23 @@ package org.rocksdb; +import java.util.Arrays; +import java.util.List; + /** * Base class for all Env implementations in RocksDB. */ public abstract class Env extends RocksObject { - public static final int FLUSH_POOL = 0; - public static final int COMPACTION_POOL = 1; + + private static final Env DEFAULT_ENV = new RocksEnv(getDefaultEnvInternal()); + static { + /** + * The Ownership of the Default Env belongs to C++ + * and so we disown the native handle here so that + * we cannot accidentally free it from Java. + */ + DEFAULT_ENV.disOwnNativeHandle(); + } /** *

Returns the default environment suitable for the current operating @@ -18,13 +29,13 @@ public abstract class Env extends RocksObject { * system. * * The result of {@code getDefault()} is a singleton whose ownership * belongs to rocksdb c++. As a result, the returned RocksEnv will not - * have the ownership of its c++ resource, and calling its dispose() + * have the ownership of its c++ resource, and calling its dispose()/close() * will be no-op. * * @return the default {@link org.rocksdb.RocksEnv} instance. */ public static Env getDefault() { - return default_env_; + return DEFAULT_ENV; } /** @@ -32,27 +43,36 @@ * Sets the number of background worker threads of the flush pool * for this environment. * * Default number: 1 * - * @param num the number of threads + * @param number the number of threads * * @return current {@link RocksEnv} instance. */ - public Env setBackgroundThreads(final int num) { - return setBackgroundThreads(num, FLUSH_POOL); + public Env setBackgroundThreads(final int number) { + return setBackgroundThreads(number, Priority.LOW); + } + + /** + * Gets the number of background worker threads of the pool + * for this environment. + * + * @return the number of threads. + */ + public int getBackgroundThreads(final Priority priority) { + return getBackgroundThreads(nativeHandle_, priority.getValue()); } /** * Sets the number of background worker threads of the specified thread * pool for this environment. * - * @param num the number of threads - * @param poolID the id to specified a thread pool. Should be either - * FLUSH_POOL or COMPACTION_POOL. + * @param number the number of threads + * @param priority the priority id of a specified thread pool. * * Default number: 1 * @return current {@link RocksEnv} instance. */ - public Env setBackgroundThreads(final int num, final int poolID) { - setBackgroundThreads(nativeHandle_, num, poolID); + public Env setBackgroundThreads(final int number, final Priority priority) { + setBackgroundThreads(nativeHandle_, number, priority.getValue()); return this; } @@ -60,33 +80,75 @@ * Returns the length of the queue associated with the specified * thread pool. * - * @param poolID the id to specified a thread pool. Should be either - * FLUSH_POOL or COMPACTION_POOL. + * @param priority the priority id of a specified thread pool. * * @return the thread pool queue length. */ - public int getThreadPoolQueueLen(final int poolID) { - return getThreadPoolQueueLen(nativeHandle_, poolID); + public int getThreadPoolQueueLen(final Priority priority) { + return getThreadPoolQueueLen(nativeHandle_, priority.getValue()); } + /** + * Enlarge number of background worker threads of a specific thread pool + * for this environment if it is smaller than specified. 'LOW' is the default + * pool. + * + * @param number the number of threads. + * + * @return current {@link RocksEnv} instance. + */ + public Env incBackgroundThreadsIfNeeded(final int number, + final Priority priority) { + incBackgroundThreadsIfNeeded(nativeHandle_, number, priority.getValue()); + return this; + } - protected Env(final long nativeHandle) { - super(nativeHandle); + /** + * Lower IO priority for threads from the specified pool. + * + * @param priority the priority id of a specified thread pool. + */ + public Env lowerThreadPoolIOPriority(final Priority priority) { + lowerThreadPoolIOPriority(nativeHandle_, priority.getValue()); + return this; } - static { - default_env_ = new RocksEnv(getDefaultEnvInternal()); + /** + * Lower CPU priority for threads from the specified pool. + * + * @param priority the priority id of a specified thread pool. + */ + public Env lowerThreadPoolCPUPriority(final Priority priority) { + lowerThreadPoolCPUPriority(nativeHandle_, priority.getValue()); + return this; } /** - * The static default Env. The ownership of its native handle - * belongs to rocksdb c++ and is not able to be released on the Java - * side.
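A sketch of the reworked thread-pool API, assuming the Priority enum and ThreadStatus class added elsewhere in this change:

    import java.util.List;
    import org.rocksdb.Env;
    import org.rocksdb.Priority;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.ThreadStatus;

    public class EnvThreadPoolSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        final Env env = Env.getDefault();            // singleton; close() is a no-op
        env.setBackgroundThreads(4, Priority.LOW);   // compaction pool
        env.setBackgroundThreads(2, Priority.HIGH);  // flush pool
        env.lowerThreadPoolIOPriority(Priority.LOW); // be gentle on IO for compactions
        final List<ThreadStatus> threads = env.getThreadList();
        System.out.println("Env threads: " + threads.size());
      }
    }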
    + * Returns the status of all threads that belong to the current Env. + * + * @return the status of all threads belong to this env. */ - static Env default_env_; + public List getThreadList() throws RocksDBException { + return Arrays.asList(getThreadList(nativeHandle_)); + } + + Env(final long nativeHandle) { + super(nativeHandle); + } private static native long getDefaultEnvInternal(); private native void setBackgroundThreads( - long handle, int num, int priority); - private native int getThreadPoolQueueLen(long handle, int poolID); + final long handle, final int number, final byte priority); + private native int getBackgroundThreads(final long handle, + final byte priority); + private native int getThreadPoolQueueLen(final long handle, + final byte priority); + private native void incBackgroundThreadsIfNeeded(final long handle, + final int number, final byte priority); + private native void lowerThreadPoolIOPriority(final long handle, + final byte priority); + private native void lowerThreadPoolCPUPriority(final long handle, + final byte priority); + private native ThreadStatus[] getThreadList(final long handle) + throws RocksDBException; } diff --git a/java/src/main/java/org/rocksdb/EnvOptions.java b/java/src/main/java/org/rocksdb/EnvOptions.java index 2bca0355e..6baddb310 100644 --- a/java/src/main/java/org/rocksdb/EnvOptions.java +++ b/java/src/main/java/org/rocksdb/EnvOptions.java @@ -5,203 +5,362 @@ package org.rocksdb; +/** + * Options while opening a file to read/write + */ public class EnvOptions extends RocksObject { static { RocksDB.loadLibrary(); } + /** + * Construct with default Options + */ public EnvOptions() { super(newEnvOptions()); } - public EnvOptions setUseOsBuffer(final boolean useOsBuffer) { - setUseOsBuffer(nativeHandle_, useOsBuffer); - return this; - } - - public boolean useOsBuffer() { - assert(isOwningHandle()); - return useOsBuffer(nativeHandle_); + /** + * Construct from {@link DBOptions}. + * + * @param dbOptions the database options. + */ + public EnvOptions(final DBOptions dbOptions) { + super(newEnvOptions(dbOptions.nativeHandle_)); } + /** + * Enable/Disable memory mapped reads. + * + * Default: false + * + * @param useMmapReads true to enable memory mapped reads, false to disable. + * + * @return the reference to these options. + */ public EnvOptions setUseMmapReads(final boolean useMmapReads) { setUseMmapReads(nativeHandle_, useMmapReads); return this; } + /** + * Determine if memory mapped reads are in-use. + * + * @return true if memory mapped reads are in-use, false otherwise. + */ public boolean useMmapReads() { assert(isOwningHandle()); return useMmapReads(nativeHandle_); } + /** + * Enable/Disable memory mapped Writes. + * + * Default: true + * + * @param useMmapWrites true to enable memory mapped writes, false to disable. + * + * @return the reference to these options. + */ public EnvOptions setUseMmapWrites(final boolean useMmapWrites) { setUseMmapWrites(nativeHandle_, useMmapWrites); return this; } + /** + * Determine if memory mapped writes are in-use. + * + * @return true if memory mapped writes are in-use, false otherwise. + */ public boolean useMmapWrites() { assert(isOwningHandle()); return useMmapWrites(nativeHandle_); } + /** + * Enable/Disable direct reads, i.e. {@code O_DIRECT}. + * + * Default: false + * + * @param useDirectReads true to enable direct reads, false to disable. + * + * @return the reference to these options. 
+ */ public EnvOptions setUseDirectReads(final boolean useDirectReads) { setUseDirectReads(nativeHandle_, useDirectReads); return this; } + /** + * Determine if direct reads are in-use. + * + * @return true if direct reads are in-use, false otherwise. + */ public boolean useDirectReads() { assert(isOwningHandle()); return useDirectReads(nativeHandle_); } + /** + * Enable/Disable direct writes, i.e. {@code O_DIRECT}. + * + * Default: false + * + * @param useDirectWrites true to enable direct writes, false to disable. + * + * @return the reference to these options. + */ public EnvOptions setUseDirectWrites(final boolean useDirectWrites) { setUseDirectWrites(nativeHandle_, useDirectWrites); return this; } + /** + * Determine if direct writes are in-use. + * + * @return true if direct writes are in-use, false otherwise. + */ public boolean useDirectWrites() { assert(isOwningHandle()); return useDirectWrites(nativeHandle_); } + /** + * Enable/Disable fallocate calls. + * + * Default: true + * + * If false, {@code fallocate()} calls are bypassed. + * + * @param allowFallocate true to enable fallocate calls, false to disable. + * + * @return the reference to these options. + */ public EnvOptions setAllowFallocate(final boolean allowFallocate) { setAllowFallocate(nativeHandle_, allowFallocate); return this; } + /** + * Determine if fallocate calls are used. + * + * @return true if fallocate calls are used, false otherwise. + */ public boolean allowFallocate() { assert(isOwningHandle()); return allowFallocate(nativeHandle_); } + /** + * Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors. + * + * Default: true + * + * @param setFdCloexec true to enable the {@code FD_CLOEXEC} bit, + * false to disable. + * + * @return the reference to these options. + */ public EnvOptions setSetFdCloexec(final boolean setFdCloexec) { setSetFdCloexec(nativeHandle_, setFdCloexec); return this; } + /** + * Determine if the {@code FD_CLOEXEC} bit is set when opening file + * descriptors. + * + * @return true if the {@code FD_CLOEXEC} bit is enabled, false otherwise. + */ public boolean setFdCloexec() { assert(isOwningHandle()); return setFdCloexec(nativeHandle_); } + /** + * Allows OS to incrementally sync files to disk while they are being + * written, in the background. Issue one request for every + * {@code bytesPerSync} written. + * + * Default: 0 + * + * @param bytesPerSync 0 to disable, otherwise the number of bytes. + * + * @return the reference to these options. + */ public EnvOptions setBytesPerSync(final long bytesPerSync) { setBytesPerSync(nativeHandle_, bytesPerSync); return this; } + /** + * Get the number of incremental bytes per sync written in the background. + * + * @return 0 if disabled, otherwise the number of bytes. + */ public long bytesPerSync() { assert(isOwningHandle()); return bytesPerSync(nativeHandle_); } - public EnvOptions setFallocateWithKeepSize(final boolean fallocateWithKeepSize) { + /** + * If true, we will preallocate the file with {@code FALLOC_FL_KEEP_SIZE} + * flag, which means that file size won't change as part of preallocation. + * If false, preallocation will also change the file size. This option will + * improve the performance in workloads where you sync the data on every + * write. By default, we set it to true for MANIFEST writes and false for + * WAL writes. + * + * @param fallocateWithKeepSize true to preallocate, false otherwise. + * + * @return the reference to these options.
+ */ + public EnvOptions setFallocateWithKeepSize( + final boolean fallocateWithKeepSize) { setFallocateWithKeepSize(nativeHandle_, fallocateWithKeepSize); return this; } + /** + * Determine if file is preallocated. + * + * @return true if the file is preallocated, false otherwise. + */ public boolean fallocateWithKeepSize() { assert(isOwningHandle()); return fallocateWithKeepSize(nativeHandle_); } - public EnvOptions setCompactionReadaheadSize(final long compactionReadaheadSize) { + /** + * See {@link DBOptions#setCompactionReadaheadSize(long)}. + * + * @param compactionReadaheadSize the compaction read-ahead size. + * + * @return the reference to these options. + */ + public EnvOptions setCompactionReadaheadSize( + final long compactionReadaheadSize) { setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize); return this; } + /** + * See {@link DBOptions#compactionReadaheadSize()}. + * + * @return the compaction read-ahead size. + */ public long compactionReadaheadSize() { assert(isOwningHandle()); return compactionReadaheadSize(nativeHandle_); } - public EnvOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) { + /** + * See {@link DBOptions#setRandomAccessMaxBufferSize(long)}. + * + * @param randomAccessMaxBufferSize the max buffer size for random access. + * + * @return the reference to these options. + */ + public EnvOptions setRandomAccessMaxBufferSize( + final long randomAccessMaxBufferSize) { setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize); return this; } + /** + * See {@link DBOptions#randomAccessMaxBufferSize()}. + * + * @return the max buffer size for random access. + */ public long randomAccessMaxBufferSize() { assert(isOwningHandle()); return randomAccessMaxBufferSize(nativeHandle_); } - public EnvOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) { + /** + * See {@link DBOptions#setWritableFileMaxBufferSize(long)}. + * + * @param writableFileMaxBufferSize the max buffer size. + * + * @return the reference to these options. + */ + public EnvOptions setWritableFileMaxBufferSize( + final long writableFileMaxBufferSize) { setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize); return this; } + /** + * See {@link DBOptions#writableFileMaxBufferSize()}. + * + * @return the max buffer size. + */ public long writableFileMaxBufferSize() { assert(isOwningHandle()); return writableFileMaxBufferSize(nativeHandle_); } + /** + * Set the write rate limiter for flush and compaction. + * + * @param rateLimiter the rate limiter. + * + * @return the reference to these options. + */ public EnvOptions setRateLimiter(final RateLimiter rateLimiter) { this.rateLimiter = rateLimiter; setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_); return this; } + /** + * Get the write rate limiter for flush and compaction. + * + * @return the rate limiter. 
+ */ public RateLimiter rateLimiter() { assert(isOwningHandle()); return rateLimiter; } private native static long newEnvOptions(); - + private native static long newEnvOptions(final long dboptions_handle); @Override protected final native void disposeInternal(final long handle); - private native void setUseOsBuffer(final long handle, final boolean useOsBuffer); - - private native boolean useOsBuffer(final long handle); - - private native void setUseMmapReads(final long handle, final boolean useMmapReads); - + private native void setUseMmapReads(final long handle, + final boolean useMmapReads); private native boolean useMmapReads(final long handle); - - private native void setUseMmapWrites(final long handle, final boolean useMmapWrites); - + private native void setUseMmapWrites(final long handle, + final boolean useMmapWrites); private native boolean useMmapWrites(final long handle); - - private native void setUseDirectReads(final long handle, final boolean useDirectReads); - + private native void setUseDirectReads(final long handle, + final boolean useDirectReads); private native boolean useDirectReads(final long handle); - - private native void setUseDirectWrites(final long handle, final boolean useDirectWrites); - + private native void setUseDirectWrites(final long handle, + final boolean useDirectWrites); private native boolean useDirectWrites(final long handle); - - private native void setAllowFallocate(final long handle, final boolean allowFallocate); - + private native void setAllowFallocate(final long handle, + final boolean allowFallocate); private native boolean allowFallocate(final long handle); - - private native void setSetFdCloexec(final long handle, final boolean setFdCloexec); - + private native void setSetFdCloexec(final long handle, + final boolean setFdCloexec); private native boolean setFdCloexec(final long handle); - - private native void setBytesPerSync(final long handle, final long bytesPerSync); - + private native void setBytesPerSync(final long handle, + final long bytesPerSync); private native long bytesPerSync(final long handle); - private native void setFallocateWithKeepSize( final long handle, final boolean fallocateWithKeepSize); - private native boolean fallocateWithKeepSize(final long handle); - private native void setCompactionReadaheadSize( final long handle, final long compactionReadaheadSize); - private native long compactionReadaheadSize(final long handle); - private native void setRandomAccessMaxBufferSize( final long handle, final long randomAccessMaxBufferSize); - private native long randomAccessMaxBufferSize(final long handle); - private native void setWritableFileMaxBufferSize( final long handle, final long writableFileMaxBufferSize); - private native long writableFileMaxBufferSize(final long handle); - - private native void setRateLimiter(final long handle, final long rateLimiterHandle); - + private native void setRateLimiter(final long handle, + final long rateLimiterHandle); private RateLimiter rateLimiter; } diff --git a/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java index 011be2085..7f490cf59 100644 --- a/java/src/main/java/org/rocksdb/Filter.java +++ b/java/src/main/java/org/rocksdb/Filter.java @@ -12,6 +12,7 @@ package org.rocksdb; * number of disk seeks form a handful to a single disk seek per * DB::Get() call. 
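The expanded EnvOptions can now be seeded from a DBOptions and carry a rate limiter; a minimal sketch (such an EnvOptions would typically be handed to an SstFileWriter):

    import org.rocksdb.DBOptions;
    import org.rocksdb.EnvOptions;
    import org.rocksdb.RateLimiter;
    import org.rocksdb.RocksDB;

    public class EnvOptionsSketch {
      public static void main(final String[] args) {
        RocksDB.loadLibrary();
        try (final RateLimiter rateLimiter = new RateLimiter(10 * 1024 * 1024); // 10 MB/s
             final DBOptions dbOptions = new DBOptions().setBytesPerSync(1024 * 1024);
             final EnvOptions envOptions = new EnvOptions(dbOptions) // inherit file settings
                 .setUseDirectWrites(true)
                 .setRateLimiter(rateLimiter)) {
          // pass envOptions to e.g. new SstFileWriter(envOptions, options)
        }
      }
    }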
*/ +//TODO(AR) should be renamed FilterPolicy public abstract class Filter extends RocksObject { protected Filter(final long nativeHandle) { diff --git a/java/src/main/java/org/rocksdb/FlushOptions.java b/java/src/main/java/org/rocksdb/FlushOptions.java index ce54a528b..760b515fd 100644 --- a/java/src/main/java/org/rocksdb/FlushOptions.java +++ b/java/src/main/java/org/rocksdb/FlushOptions.java @@ -1,3 +1,8 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + package org.rocksdb; /** @@ -41,9 +46,45 @@ public class FlushOptions extends RocksObject { return waitForFlush(nativeHandle_); } + /** + * Set to true so that the flush will proceed immediately even if it means + * writes will stall for the duration of the flush. + * + * Set to false so that the operation will wait until it's possible to do + * the flush without causing a stall, or until the required flush is performed + * by someone else (a foreground call or a background thread). + * + * Default: false + * + * @param allowWriteStall true to allow writes to stall for flush, false + * otherwise. + * + * @return instance of current FlushOptions. + */ + public FlushOptions setAllowWriteStall(final boolean allowWriteStall) { + assert(isOwningHandle()); + setAllowWriteStall(nativeHandle_, allowWriteStall); + return this; + } + + /** + * Returns true if writes are allowed to stall for flushes to complete, false + * otherwise. + * + * @return true if writes are allowed to stall for flushes + */ + public boolean allowWriteStall() { + assert(isOwningHandle()); + return allowWriteStall(nativeHandle_); + } + private native static long newFlushOptions(); @Override protected final native void disposeInternal(final long handle); - private native void setWaitForFlush(long handle, - boolean wait); - private native boolean waitForFlush(long handle); + + private native void setWaitForFlush(final long handle, + final boolean wait); + private native boolean waitForFlush(final long handle); + private native void setAllowWriteStall(final long handle, + final boolean allowWriteStall); + private native boolean allowWriteStall(final long handle); } diff --git a/java/src/main/java/org/rocksdb/HdfsEnv.java b/java/src/main/java/org/rocksdb/HdfsEnv.java new file mode 100644 index 000000000..4d8d3bff6 --- /dev/null +++ b/java/src/main/java/org/rocksdb/HdfsEnv.java @@ -0,0 +1,27 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * HDFS environment. + */ +public class HdfsEnv extends Env { + + /** +

Creates a new environment for use with HDFS. + + * The caller must delete the result when it is + * no longer needed.
    + * + * @param fsName the HDFS as a string in the form "hdfs://hostname:port/" + */ + public HdfsEnv(final String fsName) { + super(createHdfsEnv(fsName)); + } + + private static native long createHdfsEnv(final String fsName); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/IndexType.java b/java/src/main/java/org/rocksdb/IndexType.java index e0c113d39..04e481465 100644 --- a/java/src/main/java/org/rocksdb/IndexType.java +++ b/java/src/main/java/org/rocksdb/IndexType.java @@ -33,7 +33,7 @@ public enum IndexType { return value_; } - private IndexType(byte value) { + IndexType(byte value) { value_ = value; } diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java index 734369181..a6a308daa 100644 --- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java +++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java @@ -7,7 +7,8 @@ package org.rocksdb; import java.util.List; /** - * IngestExternalFileOptions is used by {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)} + * IngestExternalFileOptions is used by + * {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}. */ public class IngestExternalFileOptions extends RocksObject { @@ -41,9 +42,12 @@ public class IngestExternalFileOptions extends RocksObject { * Can be set to true to move the files instead of copying them. * * @param moveFiles true if files should be moved instead of copied + * + * @return the reference to the current IngestExternalFileOptions. */ - public void setMoveFiles(final boolean moveFiles) { + public IngestExternalFileOptions setMoveFiles(final boolean moveFiles) { setMoveFiles(nativeHandle_, moveFiles); + return this; } /** @@ -61,9 +65,13 @@ public class IngestExternalFileOptions extends RocksObject { * that where created before the file was ingested. * * @param snapshotConsistency true if snapshot consistency is required + * + * @return the reference to the current IngestExternalFileOptions. */ - public void setSnapshotConsistency(final boolean snapshotConsistency) { + public IngestExternalFileOptions setSnapshotConsistency( + final boolean snapshotConsistency) { setSnapshotConsistency(nativeHandle_, snapshotConsistency); + return this; } /** @@ -81,9 +89,13 @@ public class IngestExternalFileOptions extends RocksObject { * will fail if the file key range overlaps with existing keys or tombstones in the DB. * * @param allowGlobalSeqNo true if global seq numbers are required + * + * @return the reference to the current IngestExternalFileOptions. */ - public void setAllowGlobalSeqNo(final boolean allowGlobalSeqNo) { + public IngestExternalFileOptions setAllowGlobalSeqNo( + final boolean allowGlobalSeqNo) { setAllowGlobalSeqNo(nativeHandle_, allowGlobalSeqNo); + return this; } /** @@ -101,15 +113,100 @@ public class IngestExternalFileOptions extends RocksObject { * (memtable flush required), IngestExternalFile will fail. * * @param allowBlockingFlush true if blocking flushes are allowed + * + * @return the reference to the current IngestExternalFileOptions. 
*/ - public void setAllowBlockingFlush(final boolean allowBlockingFlush) { + public IngestExternalFileOptions setAllowBlockingFlush( + final boolean allowBlockingFlush) { setAllowBlockingFlush(nativeHandle_, allowBlockingFlush); + return this; + } + + /** + * Returns true if duplicate keys in the file being ingested are + * to be skipped rather than overwriting existing data under that key. + * + * @return true if duplicate keys in the file being ingested are to be + * skipped, false otherwise. + */ + public boolean ingestBehind() { + return ingestBehind(nativeHandle_); + } + + /** + * Set to true if you would like duplicate keys in the file being ingested + * to be skipped rather than overwriting existing data under that key. + * + * Use case: back-fill of some historical data in the database without + * over-writing the existing, newer version of the data. + * + * This option can only be used if the DB has been running + * with DBOptions#allowIngestBehind() == true since the dawn of time. + * + * All files will be ingested at the bottommost level with seqno=0. + * + * Default: false + * + * @param ingestBehind true if you would like duplicate keys in the file being + * ingested to be skipped. + * + * @return the reference to the current IngestExternalFileOptions. + */ + public IngestExternalFileOptions setIngestBehind(final boolean ingestBehind) { + setIngestBehind(nativeHandle_, ingestBehind); + return this; + } + + /** + * Returns true if the global_seqno is written to a given offset + * in the external SST file for backward compatibility. + * + * See {@link #setWriteGlobalSeqno(boolean)}. + * + * @return true if the global_seqno is written to a given offset, + * false otherwise. + */ + public boolean writeGlobalSeqno() { + return writeGlobalSeqno(nativeHandle_); + } + + /** + * Set to true if you would like to write the global_seqno to a given offset + * in the external SST file for backward compatibility. + * + * Older versions of RocksDB write the global_seqno to a given offset within + * the ingested SST files, and new versions of RocksDB do not. + * + * If you ingest an external SST using a new version of RocksDB and would like + * to be able to downgrade to an older version of RocksDB, you should set + * {@link #writeGlobalSeqno()} to true. + * + * If your service is just starting to use the new RocksDB, we recommend that + * you set this option to false, which brings two benefits: + * 1. No extra random write for global_seqno during ingestion. + * 2. Without writing to the external SST file, it's possible to checksum it. + * + * We have a plan to set this option to false by default in the future. + * + * Default: true + * + * @param writeGlobalSeqno true to write the global_seqno to a given offset, + * false otherwise + * + * @return the reference to the current IngestExternalFileOptions. 
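A usage sketch of the fluent API above (illustrative only, not part of the diff). It assumes a method that declares throws RocksDBException, an open RocksDB instance db, and a hypothetical SST file path; flush(FlushOptions) and ingestExternalFile(List, IngestExternalFileOptions) are existing RocksDB Java methods:

    final String sstPath = "/tmp/external.sst"; // hypothetical path
    try (final FlushOptions flushOptions = new FlushOptions()
             .setWaitForFlush(true)
             .setAllowWriteStall(true); // flush immediately, even if writes stall
         final IngestExternalFileOptions ingestOptions =
             new IngestExternalFileOptions()
                 .setMoveFiles(true) // move rather than copy the SST file
                 .setSnapshotConsistency(true)
                 .setWriteGlobalSeqno(false)) { // skip the extra random write
      db.flush(flushOptions);
      db.ingestExternalFile(java.util.Collections.singletonList(sstPath),
          ingestOptions);
    }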
+ */ + public IngestExternalFileOptions setWriteGlobalSeqno( + final boolean writeGlobalSeqno) { + setWriteGlobalSeqno(nativeHandle_, writeGlobalSeqno); + return this; } private native static long newIngestExternalFileOptions(); private native static long newIngestExternalFileOptions( final boolean moveFiles, final boolean snapshotConsistency, final boolean allowGlobalSeqNo, final boolean allowBlockingFlush); + @Override protected final native void disposeInternal(final long handle); + private native boolean moveFiles(final long handle); private native void setMoveFiles(final long handle, final boolean move_files); private native boolean snapshotConsistency(final long handle); @@ -121,5 +218,10 @@ public class IngestExternalFileOptions extends RocksObject { private native boolean allowBlockingFlush(final long handle); private native void setAllowBlockingFlush(final long handle, final boolean allowBlockingFlush); - @Override protected final native void disposeInternal(final long handle); + private native boolean ingestBehind(final long handle); + private native void setIngestBehind(final long handle, + final boolean ingestBehind); + private native boolean writeGlobalSeqno(final long handle); + private native void setWriteGlobalSeqno(final long handle, + final boolean writeGlobalSeqNo); } diff --git a/java/src/main/java/org/rocksdb/LevelMetaData.java b/java/src/main/java/org/rocksdb/LevelMetaData.java new file mode 100644 index 000000000..c5685098b --- /dev/null +++ b/java/src/main/java/org/rocksdb/LevelMetaData.java @@ -0,0 +1,56 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Arrays; +import java.util.List; + +/** + * The metadata that describes a level. + */ +public class LevelMetaData { + private final int level; + private final long size; + private final SstFileMetaData[] files; + + /** + * Called from JNI C++ + */ + private LevelMetaData(final int level, final long size, + final SstFileMetaData[] files) { + this.level = level; + this.size = size; + this.files = files; + } + + /** + * The level which this meta data describes. + * + * @return the level + */ + public int level() { + return level; + } + + /** + * The size of this level in bytes, which is equal to the sum of + * the file size of its {@link #files()}. + * + * @return the size + */ + public long size() { + return size; + } + + /** + * The metadata of all sst files in this level. + * + * @return the metadata of the files + */ + public List files() { + return Arrays.asList(files); + } +} diff --git a/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/java/src/main/java/org/rocksdb/LiveFileMetaData.java new file mode 100644 index 000000000..35d883e18 --- /dev/null +++ b/java/src/main/java/org/rocksdb/LiveFileMetaData.java @@ -0,0 +1,55 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The full set of metadata associated with each SST file. 
+ */ +public class LiveFileMetaData extends SstFileMetaData { + private final byte[] columnFamilyName; + private final int level; + + /** + * Called from JNI C++ + */ + private LiveFileMetaData( + final byte[] columnFamilyName, + final int level, + final String fileName, + final String path, + final long size, + final long smallestSeqno, + final long largestSeqno, + final byte[] smallestKey, + final byte[] largestKey, + final long numReadsSampled, + final boolean beingCompacted, + final long numEntries, + final long numDeletions) { + super(fileName, path, size, smallestSeqno, largestSeqno, smallestKey, + largestKey, numReadsSampled, beingCompacted, numEntries, numDeletions); + this.columnFamilyName = columnFamilyName; + this.level = level; + } + + /** + * Get the name of the column family. + * + * @return the name of the column family + */ + public byte[] columnFamilyName() { + return columnFamilyName; + } + + /** + * Get the level at which this file resides. + * + * @return the level at which the file resides. + */ + public int level() { + return level; + } +} diff --git a/java/src/main/java/org/rocksdb/LogFile.java b/java/src/main/java/org/rocksdb/LogFile.java new file mode 100644 index 000000000..ef24a6427 --- /dev/null +++ b/java/src/main/java/org/rocksdb/LogFile.java @@ -0,0 +1,75 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class LogFile { + private final String pathName; + private final long logNumber; + private final WalFileType type; + private final long startSequence; + private final long sizeFileBytes; + + /** + * Called from JNI C++ + */ + private LogFile(final String pathName, final long logNumber, + final byte walFileTypeValue, final long startSequence, + final long sizeFileBytes) { + this.pathName = pathName; + this.logNumber = logNumber; + this.type = WalFileType.fromValue(walFileTypeValue); + this.startSequence = startSequence; + this.sizeFileBytes = sizeFileBytes; + } + + /** + * Returns log file's pathname relative to the main db dir. + * E.g. For a live-log-file = /000003.log + * For an archived-log-file = /archive/000003.log + * + * @return log file's pathname + */ + public String pathName() { + return pathName; + } + + /** + * Primary identifier for log file. + * This is directly proportional to the creation time of the log file + * + * @return the log number + */ + public long logNumber() { + return logNumber; + } + + /** + * Log file can be either alive or archived. + * + * @return the type of the log file. + */ + public WalFileType type() { + return type; + } + + /** + * Starting sequence number of writebatch written in this log file. + * + * @return the starting sequence number + */ + public long startSequence() { + return startSequence; + } + + /** + * Size of log file on disk in bytes. 
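The metadata classes above are plain value holders populated from JNI. A minimal sketch of reading them back (illustrative; getLiveFilesMetaData() is existing RocksDB Java API, while a getSortedWalFiles() accessor returning LogFile instances is assumed to be added elsewhere in this change):

    // Dump SST file metadata, assuming an open RocksDB instance `db`.
    for (final LiveFileMetaData meta : db.getLiveFilesMetaData()) {
      System.out.printf("cf=%s level=%d file=%s size=%d%n",
          new String(meta.columnFamilyName(),
              java.nio.charset.StandardCharsets.UTF_8),
          meta.level(), meta.fileName(), meta.size());
    }
    // Dump WAL file metadata via the assumed accessor.
    for (final LogFile walFile : db.getSortedWalFiles()) {
      System.out.printf("wal=%s type=%s startSeq=%d bytes=%d%n",
          walFile.pathName(), walFile.type(), walFile.startSequence(),
          walFile.sizeFileBytes());
    }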
+ * + * @return size of log file + */ + public long sizeFileBytes() { + return sizeFileBytes; + } +} diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java index 3585318db..1d9ca0817 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java @@ -7,27 +7,20 @@ package org.rocksdb; import java.util.*; -public class MutableColumnFamilyOptions { - private final static String KEY_VALUE_PAIR_SEPARATOR = ";"; - private final static char KEY_VALUE_SEPARATOR = '='; - private final static String INT_ARRAY_INT_SEPARATOR = ","; - - private final String[] keys; - private final String[] values; - - // user must use builder pattern, or parser - private MutableColumnFamilyOptions(final String keys[], - final String values[]) { - this.keys = keys; - this.values = values; - } - - String[] getKeys() { - return keys; - } +public class MutableColumnFamilyOptions + extends AbstractMutableOptions { - String[] getValues() { - return values; + /** + * User must use builder pattern, or parser. + * + * @param keys the keys + * @param values the values + * + * See {@link #builder()} and {@link #parse(String)}. + */ + private MutableColumnFamilyOptions(final String[] keys, + final String[] values) { + super(keys, values); } /** @@ -60,7 +53,7 @@ public class MutableColumnFamilyOptions { final MutableColumnFamilyOptionsBuilder builder = new MutableColumnFamilyOptionsBuilder(); - final String options[] = str.trim().split(KEY_VALUE_PAIR_SEPARATOR); + final String[] options = str.trim().split(KEY_VALUE_PAIR_SEPARATOR); for(final String option : options) { final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR); if(equalsOffset <= 0) { @@ -69,12 +62,12 @@ public class MutableColumnFamilyOptions { } final String key = option.substring(0, equalsOffset); - if(key == null || key.isEmpty()) { + if(key.isEmpty()) { throw new IllegalArgumentException("options string is invalid"); } final String value = option.substring(equalsOffset + 1); - if(value == null || value.isEmpty()) { + if(value.isEmpty()) { throw new IllegalArgumentException("options string is invalid"); } @@ -84,37 +77,7 @@ public class MutableColumnFamilyOptions { return builder; } - /** - * Returns a string representation - * of MutableColumnFamilyOptions which is - * suitable for consumption by {@link #parse(String)} - * - * @return String representation of MutableColumnFamilyOptions - */ - @Override - public String toString() { - final StringBuilder buffer = new StringBuilder(); - for(int i = 0; i < keys.length; i++) { - buffer - .append(keys[i]) - .append(KEY_VALUE_SEPARATOR) - .append(values[i]); - - if(i + 1 < keys.length) { - buffer.append(KEY_VALUE_PAIR_SEPARATOR); - } - } - return buffer.toString(); - } - - public enum ValueType { - DOUBLE, - LONG, - INT, - BOOLEAN, - INT_ARRAY, - ENUM - } + private interface MutableColumnFamilyOptionKey extends MutableOptionKey {} public enum MemtableOption implements MutableColumnFamilyOptionKey { write_buffer_size(ValueType.LONG), @@ -153,7 +116,8 @@ public class MutableColumnFamilyOptions { target_file_size_multiplier(ValueType.INT), max_bytes_for_level_base(ValueType.LONG), max_bytes_for_level_multiplier(ValueType.INT), - max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY); + max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY), + ttl(ValueType.LONG); private final ValueType valueType; CompactionOption(final ValueType 
valueType) { @@ -183,356 +147,9 @@ public class MutableColumnFamilyOptions { } } - private interface MutableColumnFamilyOptionKey { - String name(); - ValueType getValueType(); - } - - private static abstract class MutableColumnFamilyOptionValue { - protected final T value; - - MutableColumnFamilyOptionValue(final T value) { - this.value = value; - } - - abstract double asDouble() throws NumberFormatException; - abstract long asLong() throws NumberFormatException; - abstract int asInt() throws NumberFormatException; - abstract boolean asBoolean() throws IllegalStateException; - abstract int[] asIntArray() throws IllegalStateException; - abstract String asString(); - abstract T asObject(); - } - - private static class MutableColumnFamilyOptionStringValue - extends MutableColumnFamilyOptionValue { - MutableColumnFamilyOptionStringValue(final String value) { - super(value); - } - - @Override - double asDouble() throws NumberFormatException { - return Double.parseDouble(value); - } - - @Override - long asLong() throws NumberFormatException { - return Long.parseLong(value); - } - - @Override - int asInt() throws NumberFormatException { - return Integer.parseInt(value); - } - - @Override - boolean asBoolean() throws IllegalStateException { - return Boolean.parseBoolean(value); - } - - @Override - int[] asIntArray() throws IllegalStateException { - throw new IllegalStateException("String is not applicable as int[]"); - } - - @Override - String asString() { - return value; - } - - @Override - String asObject() { - return value; - } - } - - private static class MutableColumnFamilyOptionDoubleValue - extends MutableColumnFamilyOptionValue { - MutableColumnFamilyOptionDoubleValue(final double value) { - super(value); - } - - @Override - double asDouble() { - return value; - } - - @Override - long asLong() throws NumberFormatException { - return value.longValue(); - } - - @Override - int asInt() throws NumberFormatException { - if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { - throw new NumberFormatException( - "double value lies outside the bounds of int"); - } - return value.intValue(); - } - - @Override - boolean asBoolean() throws IllegalStateException { - throw new IllegalStateException( - "double is not applicable as boolean"); - } - - @Override - int[] asIntArray() throws IllegalStateException { - if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { - throw new NumberFormatException( - "double value lies outside the bounds of int"); - } - return new int[] { value.intValue() }; - } - - @Override - String asString() { - return Double.toString(value); - } - - @Override - Double asObject() { - return value; - } - } - - private static class MutableColumnFamilyOptionLongValue - extends MutableColumnFamilyOptionValue { - MutableColumnFamilyOptionLongValue(final long value) { - super(value); - } - - @Override - double asDouble() { - if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) { - throw new NumberFormatException( - "long value lies outside the bounds of int"); - } - return value.doubleValue(); - } - - @Override - long asLong() throws NumberFormatException { - return value; - } - - @Override - int asInt() throws NumberFormatException { - if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { - throw new NumberFormatException( - "long value lies outside the bounds of int"); - } - return value.intValue(); - } - - @Override - boolean asBoolean() throws IllegalStateException { - throw new IllegalStateException( - "long is not applicable as boolean"); - } - - @Override 
- int[] asIntArray() throws IllegalStateException { - if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { - throw new NumberFormatException( - "long value lies outside the bounds of int"); - } - return new int[] { value.intValue() }; - } - - @Override - String asString() { - return Long.toString(value); - } - - @Override - Long asObject() { - return value; - } - } - - private static class MutableColumnFamilyOptionIntValue - extends MutableColumnFamilyOptionValue { - MutableColumnFamilyOptionIntValue(final int value) { - super(value); - } - - @Override - double asDouble() { - if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) { - throw new NumberFormatException("int value lies outside the bounds of int"); - } - return value.doubleValue(); - } - - @Override - long asLong() throws NumberFormatException { - return value; - } - - @Override - int asInt() throws NumberFormatException { - return value; - } - - @Override - boolean asBoolean() throws IllegalStateException { - throw new IllegalStateException("int is not applicable as boolean"); - } - - @Override - int[] asIntArray() throws IllegalStateException { - return new int[] { value }; - } - - @Override - String asString() { - return Integer.toString(value); - } - - @Override - Integer asObject() { - return value; - } - } - - private static class MutableColumnFamilyOptionBooleanValue - extends MutableColumnFamilyOptionValue { - MutableColumnFamilyOptionBooleanValue(final boolean value) { - super(value); - } - - @Override - double asDouble() { - throw new NumberFormatException("boolean is not applicable as double"); - } - - @Override - long asLong() throws NumberFormatException { - throw new NumberFormatException("boolean is not applicable as Long"); - } - - @Override - int asInt() throws NumberFormatException { - throw new NumberFormatException("boolean is not applicable as int"); - } - - @Override - boolean asBoolean() { - return value; - } - - @Override - int[] asIntArray() throws IllegalStateException { - throw new IllegalStateException("boolean is not applicable as int[]"); - } - - @Override - String asString() { - return Boolean.toString(value); - } - - @Override - Boolean asObject() { - return value; - } - } - - private static class MutableColumnFamilyOptionIntArrayValue - extends MutableColumnFamilyOptionValue { - MutableColumnFamilyOptionIntArrayValue(final int[] value) { - super(value); - } - - @Override - double asDouble() { - throw new NumberFormatException("int[] is not applicable as double"); - } - - @Override - long asLong() throws NumberFormatException { - throw new NumberFormatException("int[] is not applicable as Long"); - } - - @Override - int asInt() throws NumberFormatException { - throw new NumberFormatException("int[] is not applicable as int"); - } - - @Override - boolean asBoolean() { - throw new NumberFormatException("int[] is not applicable as boolean"); - } - - @Override - int[] asIntArray() throws IllegalStateException { - return value; - } - - @Override - String asString() { - final StringBuilder builder = new StringBuilder(); - for(int i = 0; i < value.length; i++) { - builder.append(Integer.toString(i)); - if(i + 1 < value.length) { - builder.append(INT_ARRAY_INT_SEPARATOR); - } - } - return builder.toString(); - } - - @Override - int[] asObject() { - return value; - } - } - - private static class MutableColumnFamilyOptionEnumValue> - extends MutableColumnFamilyOptionValue { - - MutableColumnFamilyOptionEnumValue(final T value) { - super(value); - } - - @Override - double asDouble() throws 
NumberFormatException { - throw new NumberFormatException("Enum is not applicable as double"); - } - - @Override - long asLong() throws NumberFormatException { - throw new NumberFormatException("Enum is not applicable as long"); - } - - @Override - int asInt() throws NumberFormatException { - throw new NumberFormatException("Enum is not applicable as int"); - } - - @Override - boolean asBoolean() throws IllegalStateException { - throw new NumberFormatException("Enum is not applicable as boolean"); - } - - @Override - int[] asIntArray() throws IllegalStateException { - throw new NumberFormatException("Enum is not applicable as int[]"); - } - - @Override - String asString() { - return value.name(); - } - - @Override - T asObject() { - return value; - } - } - public static class MutableColumnFamilyOptionsBuilder - implements MutableColumnFamilyOptionsInterface { + extends AbstractMutableOptionsBuilder + implements MutableColumnFamilyOptionsInterface { private final static Map ALL_KEYS_LOOKUP = new HashMap<>(); static { @@ -549,179 +166,24 @@ public class MutableColumnFamilyOptions { } } - private final Map> options = new LinkedHashMap<>(); - - public MutableColumnFamilyOptions build() { - final String keys[] = new String[options.size()]; - final String values[] = new String[options.size()]; - - int i = 0; - for(final Map.Entry> option : options.entrySet()) { - keys[i] = option.getKey().name(); - values[i] = option.getValue().asString(); - i++; - } - - return new MutableColumnFamilyOptions(keys, values); - } - - private MutableColumnFamilyOptionsBuilder setDouble( - final MutableColumnFamilyOptionKey key, final double value) { - if(key.getValueType() != ValueType.DOUBLE) { - throw new IllegalArgumentException( - key + " does not accept a double value"); - } - options.put(key, new MutableColumnFamilyOptionDoubleValue(value)); - return this; - } - - private double getDouble(final MutableColumnFamilyOptionKey key) - throws NoSuchElementException, NumberFormatException { - final MutableColumnFamilyOptionValue value = options.get(key); - if(value == null) { - throw new NoSuchElementException(key.name() + " has not been set"); - } - return value.asDouble(); - } - - private MutableColumnFamilyOptionsBuilder setLong( - final MutableColumnFamilyOptionKey key, final long value) { - if(key.getValueType() != ValueType.LONG) { - throw new IllegalArgumentException( - key + " does not accept a long value"); - } - options.put(key, new MutableColumnFamilyOptionLongValue(value)); - return this; - } - - private long getLong(final MutableColumnFamilyOptionKey key) - throws NoSuchElementException, NumberFormatException { - final MutableColumnFamilyOptionValue value = options.get(key); - if(value == null) { - throw new NoSuchElementException(key.name() + " has not been set"); - } - return value.asLong(); - } - - private MutableColumnFamilyOptionsBuilder setInt( - final MutableColumnFamilyOptionKey key, final int value) { - if(key.getValueType() != ValueType.INT) { - throw new IllegalArgumentException( - key + " does not accept an integer value"); - } - options.put(key, new MutableColumnFamilyOptionIntValue(value)); - return this; - } - - private int getInt(final MutableColumnFamilyOptionKey key) - throws NoSuchElementException, NumberFormatException { - final MutableColumnFamilyOptionValue value = options.get(key); - if(value == null) { - throw new NoSuchElementException(key.name() + " has not been set"); - } - return value.asInt(); - } - - private MutableColumnFamilyOptionsBuilder setBoolean( - final 
MutableColumnFamilyOptionKey key, final boolean value) { - if(key.getValueType() != ValueType.BOOLEAN) { - throw new IllegalArgumentException( - key + " does not accept a boolean value"); - } - options.put(key, new MutableColumnFamilyOptionBooleanValue(value)); - return this; - } - - private boolean getBoolean(final MutableColumnFamilyOptionKey key) - throws NoSuchElementException, NumberFormatException { - final MutableColumnFamilyOptionValue value = options.get(key); - if(value == null) { - throw new NoSuchElementException(key.name() + " has not been set"); - } - return value.asBoolean(); + private MutableColumnFamilyOptionsBuilder() { + super(); } - private MutableColumnFamilyOptionsBuilder setIntArray( - final MutableColumnFamilyOptionKey key, final int[] value) { - if(key.getValueType() != ValueType.INT_ARRAY) { - throw new IllegalArgumentException( - key + " does not accept an int array value"); - } - options.put(key, new MutableColumnFamilyOptionIntArrayValue(value)); - return this; - } - - private int[] getIntArray(final MutableColumnFamilyOptionKey key) - throws NoSuchElementException, NumberFormatException { - final MutableColumnFamilyOptionValue value = options.get(key); - if(value == null) { - throw new NoSuchElementException(key.name() + " has not been set"); - } - return value.asIntArray(); - } - - private > MutableColumnFamilyOptionsBuilder setEnum( - final MutableColumnFamilyOptionKey key, final T value) { - if(key.getValueType() != ValueType.ENUM) { - throw new IllegalArgumentException( - key + " does not accept a Enum value"); - } - options.put(key, new MutableColumnFamilyOptionEnumValue(value)); + @Override + protected MutableColumnFamilyOptionsBuilder self() { return this; - } - private > T getEnum(final MutableColumnFamilyOptionKey key) - throws NoSuchElementException, NumberFormatException { - final MutableColumnFamilyOptionValue value = options.get(key); - if(value == null) { - throw new NoSuchElementException(key.name() + " has not been set"); - } - - if(!(value instanceof MutableColumnFamilyOptionEnumValue)) { - throw new NoSuchElementException(key.name() + " is not of Enum type"); - } - - return ((MutableColumnFamilyOptionEnumValue)value).asObject(); + @Override + protected Map allKeys() { + return ALL_KEYS_LOOKUP; } - public MutableColumnFamilyOptionsBuilder fromString(final String keyStr, - final String valueStr) throws IllegalArgumentException { - Objects.requireNonNull(keyStr); - Objects.requireNonNull(valueStr); - - final MutableColumnFamilyOptionKey key = ALL_KEYS_LOOKUP.get(keyStr); - switch(key.getValueType()) { - case DOUBLE: - return setDouble(key, Double.parseDouble(valueStr)); - - case LONG: - return setLong(key, Long.parseLong(valueStr)); - - case INT: - return setInt(key, Integer.parseInt(valueStr)); - - case BOOLEAN: - return setBoolean(key, Boolean.parseBoolean(valueStr)); - - case INT_ARRAY: - final String[] strInts = valueStr - .trim().split(INT_ARRAY_INT_SEPARATOR); - if(strInts == null || strInts.length == 0) { - throw new IllegalArgumentException( - "int array value is not correctly formatted"); - } - - final int value[] = new int[strInts.length]; - int i = 0; - for(final String strInt : strInts) { - value[i++] = Integer.parseInt(strInt); - } - return setIntArray(key, value); - } - - throw new IllegalStateException( - key + " has unknown value type: " + key.getValueType()); + @Override + protected MutableColumnFamilyOptions build(final String[] keys, + final String[] values) { + return new MutableColumnFamilyOptions(keys, values); } @Override 
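With the per-type value classes hoisted into the shared AbstractMutableOptions/MutableOptionValue machinery, the public builder surface is unchanged. A brief sketch (illustrative; db and columnFamilyHandle are assumed to exist, and setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions) is the existing RocksDB Java method referenced in the javadocs below):

    final MutableColumnFamilyOptions mutableOptions =
        MutableColumnFamilyOptions.builder()
            .setWriteBufferSize(64 * 1024 * 1024)
            .setTtl(86_400) // the new CompactionOption.ttl key
            .build();
    db.setOptions(columnFamilyHandle, mutableOptions);

    // toString() (now inherited from AbstractMutableOptions) emits the
    // "key1=value1;key2=value2" form that parse(String) accepts:
    final MutableColumnFamilyOptions roundTripped =
        MutableColumnFamilyOptions.parse(mutableOptions.toString()).build();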
@@ -993,5 +455,15 @@ public class MutableColumnFamilyOptions { public boolean reportBgIoStats() { return getBoolean(MiscOption.report_bg_io_stats); } + + @Override + public MutableColumnFamilyOptionsBuilder setTtl(final long ttl) { + return setLong(CompactionOption.ttl, ttl); + } + + @Override + public long ttl() { + return getLong(CompactionOption.ttl); + } } } diff --git a/java/src/main/java/org/rocksdb/MutableDBOptions.java b/java/src/main/java/org/rocksdb/MutableDBOptions.java new file mode 100644 index 000000000..328f7f979 --- /dev/null +++ b/java/src/main/java/org/rocksdb/MutableDBOptions.java @@ -0,0 +1,286 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +public class MutableDBOptions extends AbstractMutableOptions { + + /** + * User must use builder pattern, or parser. + * + * @param keys the keys + * @param values the values + * + * See {@link #builder()} and {@link #parse(String)}. + */ + private MutableDBOptions(final String[] keys, final String[] values) { + super(keys, values); + } + + /** + * Creates a builder which allows you + * to set MutableDBOptions in a fluent + * manner + * + * @return A builder for MutableDBOptions + */ + public static MutableDBOptionsBuilder builder() { + return new MutableDBOptionsBuilder(); + } + + /** + * Parses a String representation of MutableDBOptions + * + * The format is: key1=value1;key2=value2;key3=value3 etc + * + * For int[] values, each int should be separated by a comma, e.g. + * + * key1=value1;intArrayKey1=1,2,3 + * + * @param str The string representation of the mutable db options + * + * @return A builder for the mutable db options + */ + public static MutableDBOptionsBuilder parse(final String str) { + Objects.requireNonNull(str); + + final MutableDBOptionsBuilder builder = + new MutableDBOptionsBuilder(); + + final String[] options = str.trim().split(KEY_VALUE_PAIR_SEPARATOR); + for(final String option : options) { + final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR); + if(equalsOffset <= 0) { + throw new IllegalArgumentException( + "options string has an invalid key=value pair"); + } + + final String key = option.substring(0, equalsOffset); + if(key.isEmpty()) { + throw new IllegalArgumentException("options string is invalid"); + } + + final String value = option.substring(equalsOffset + 1); + if(value.isEmpty()) { + throw new IllegalArgumentException("options string is invalid"); + } + + builder.fromString(key, value); + } + + return builder; + } + + private interface MutableDBOptionKey extends MutableOptionKey {} + + public enum DBOption implements MutableDBOptionKey { + max_background_jobs(ValueType.INT), + base_background_compactions(ValueType.INT), + max_background_compactions(ValueType.INT), + avoid_flush_during_shutdown(ValueType.BOOLEAN), + writable_file_max_buffer_size(ValueType.LONG), + delayed_write_rate(ValueType.LONG), + max_total_wal_size(ValueType.LONG), + delete_obsolete_files_period_micros(ValueType.LONG), + stats_dump_period_sec(ValueType.INT), + max_open_files(ValueType.INT), + bytes_per_sync(ValueType.LONG), + wal_bytes_per_sync(ValueType.LONG), + compaction_readahead_size(ValueType.LONG); + + private final ValueType valueType; + DBOption(final ValueType valueType) { + this.valueType 
= valueType; + } + + @Override + public ValueType getValueType() { + return valueType; + } + } + + public static class MutableDBOptionsBuilder + extends AbstractMutableOptionsBuilder + implements MutableDBOptionsInterface { + + private final static Map ALL_KEYS_LOOKUP = new HashMap<>(); + static { + for(final MutableDBOptionKey key : DBOption.values()) { + ALL_KEYS_LOOKUP.put(key.name(), key); + } + } + + private MutableDBOptionsBuilder() { + super(); + } + + @Override + protected MutableDBOptionsBuilder self() { + return this; + } + + @Override + protected Map allKeys() { + return ALL_KEYS_LOOKUP; + } + + @Override + protected MutableDBOptions build(final String[] keys, + final String[] values) { + return new MutableDBOptions(keys, values); + } + + @Override + public MutableDBOptionsBuilder setMaxBackgroundJobs( + final int maxBackgroundJobs) { + return setInt(DBOption.max_background_jobs, maxBackgroundJobs); + } + + @Override + public int maxBackgroundJobs() { + return getInt(DBOption.max_background_jobs); + } + + @Override + public void setBaseBackgroundCompactions( + final int baseBackgroundCompactions) { + setInt(DBOption.base_background_compactions, + baseBackgroundCompactions); + } + + @Override + public int baseBackgroundCompactions() { + return getInt(DBOption.base_background_compactions); + } + + @Override + public MutableDBOptionsBuilder setMaxBackgroundCompactions( + final int maxBackgroundCompactions) { + return setInt(DBOption.max_background_compactions, + maxBackgroundCompactions); + } + + @Override + public int maxBackgroundCompactions() { + return getInt(DBOption.max_background_compactions); + } + + @Override + public MutableDBOptionsBuilder setAvoidFlushDuringShutdown( + final boolean avoidFlushDuringShutdown) { + return setBoolean(DBOption.avoid_flush_during_shutdown, + avoidFlushDuringShutdown); + } + + @Override + public boolean avoidFlushDuringShutdown() { + return getBoolean(DBOption.avoid_flush_during_shutdown); + } + + @Override + public MutableDBOptionsBuilder setWritableFileMaxBufferSize( + final long writableFileMaxBufferSize) { + return setLong(DBOption.writable_file_max_buffer_size, + writableFileMaxBufferSize); + } + + @Override + public long writableFileMaxBufferSize() { + return getLong(DBOption.writable_file_max_buffer_size); + } + + @Override + public MutableDBOptionsBuilder setDelayedWriteRate( + final long delayedWriteRate) { + return setLong(DBOption.delayed_write_rate, + delayedWriteRate); + } + + @Override + public long delayedWriteRate() { + return getLong(DBOption.delayed_write_rate); + } + + @Override + public MutableDBOptionsBuilder setMaxTotalWalSize( + final long maxTotalWalSize) { + return setLong(DBOption.max_total_wal_size, maxTotalWalSize); + } + + @Override + public long maxTotalWalSize() { + return getLong(DBOption.max_total_wal_size); + } + + @Override + public MutableDBOptionsBuilder setDeleteObsoleteFilesPeriodMicros( + final long micros) { + return setLong(DBOption.delete_obsolete_files_period_micros, micros); + } + + @Override + public long deleteObsoleteFilesPeriodMicros() { + return getLong(DBOption.delete_obsolete_files_period_micros); + } + + @Override + public MutableDBOptionsBuilder setStatsDumpPeriodSec( + final int statsDumpPeriodSec) { + return setInt(DBOption.stats_dump_period_sec, statsDumpPeriodSec); + } + + @Override + public int statsDumpPeriodSec() { + return getInt(DBOption.stats_dump_period_sec); + } + + @Override + public MutableDBOptionsBuilder setMaxOpenFiles(final int maxOpenFiles) { + return 
setInt(DBOption.max_open_files, maxOpenFiles); + } + + @Override + public int maxOpenFiles() { + return getInt(DBOption.max_open_files); + } + + @Override + public MutableDBOptionsBuilder setBytesPerSync(final long bytesPerSync) { + return setLong(DBOption.bytes_per_sync, bytesPerSync); + } + + @Override + public long bytesPerSync() { + return getLong(DBOption.bytes_per_sync); + } + + @Override + public MutableDBOptionsBuilder setWalBytesPerSync( + final long walBytesPerSync) { + return setLong(DBOption.wal_bytes_per_sync, walBytesPerSync); + } + + @Override + public long walBytesPerSync() { + return getLong(DBOption.wal_bytes_per_sync); + } + + @Override + public MutableDBOptionsBuilder setCompactionReadaheadSize( + final long compactionReadaheadSize) { + return setLong(DBOption.compaction_readahead_size, + compactionReadaheadSize); + } + + @Override + public long compactionReadaheadSize() { + return getLong(DBOption.compaction_readahead_size); + } + } +} diff --git a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java new file mode 100644 index 000000000..5fe3215b3 --- /dev/null +++ b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java @@ -0,0 +1,336 @@ +package org.rocksdb; + +public interface MutableDBOptionsInterface { + + /** + * Specifies the maximum number of concurrent background jobs (both flushes + * and compactions combined). + * Default: 2 + * + * @param maxBackgroundJobs number of max concurrent background jobs + * @return the instance of the current object. + */ + T setMaxBackgroundJobs(int maxBackgroundJobs); + + /** + * Returns the maximum number of concurrent background jobs (both flushes + * and compactions combined). + * Default: 2 + * + * @return the maximum number of concurrent background jobs. + */ + int maxBackgroundJobs(); + + /** + * Suggested number of concurrent background compaction jobs, submitted to + * the default LOW priority thread pool. + * Default: 1 + * + * @param baseBackgroundCompactions Suggested number of background compaction + * jobs + * + * @deprecated Use {@link #setMaxBackgroundJobs(int)} + */ + @Deprecated + void setBaseBackgroundCompactions(int baseBackgroundCompactions); + + /** + * Suggested number of concurrent background compaction jobs, submitted to + * the default LOW priority thread pool. + * Default: 1 + * + * @return Suggested number of background compaction jobs + */ + int baseBackgroundCompactions(); + + /** + * Specifies the maximum number of concurrent background compaction jobs, + * submitted to the default LOW priority thread pool. + * If you're increasing this, also consider increasing the number of threads + * in the LOW priority thread pool. For more information, see + * {@link RocksEnv#setBackgroundThreads(int, Priority)}. + * Default: 1 + * + * @param maxBackgroundCompactions the maximum number of background + * compaction jobs. + * @return the instance of the current object. + * + * @see RocksEnv#setBackgroundThreads(int) + * @see RocksEnv#setBackgroundThreads(int, Priority) + * @see DBOptionsInterface#maxBackgroundFlushes() + */ + T setMaxBackgroundCompactions(int maxBackgroundCompactions); + + /** + * Returns the maximum number of concurrent background compaction jobs, + * submitted to the default LOW priority thread pool. + * When increasing this number, we may also want to consider increasing + * the number of threads in the LOW priority thread pool. + * Default: 1 + * + * @return the maximum number of concurrent background compaction jobs. 
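MutableDBOptions mirrors the column-family variant. A sketch (illustrative; a companion RocksDB#setDBOptions(MutableDBOptions) method is assumed to be introduced elsewhere in this change):

    final MutableDBOptions mutableDbOptions = MutableDBOptions.builder()
        .setMaxBackgroundJobs(4)
        .setBytesPerSync(1024 * 1024)
        .setDelayedWriteRate(16L * 1024 * 1024)
        .build();
    db.setDBOptions(mutableDbOptions); // assumed companion API

    // Or start from the string form understood by parse():
    final MutableDBOptions parsed =
        MutableDBOptions.parse("max_background_jobs=4;bytes_per_sync=1048576")
            .build();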
+ * @see RocksEnv#setBackgroundThreads(int) + * @see RocksEnv#setBackgroundThreads(int, Priority) + * + * @deprecated Use {@link #setMaxBackgroundJobs(int)} + */ + @Deprecated + int maxBackgroundCompactions(); + + /** + * By default RocksDB will flush all memtables on DB close if there is + * unpersisted data (i.e. with WAL disabled). The flush can be skipped to + * speed up DB close. Unpersisted data WILL BE LOST. + * + * DEFAULT: false + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} + * API. + * + * @param avoidFlushDuringShutdown true if we should avoid flush during + * shutdown + * + * @return the reference to the current options. + */ + T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown); + + /** + * By default RocksDB will flush all memtables on DB close if there is + * unpersisted data (i.e. with WAL disabled). The flush can be skipped to + * speed up DB close. Unpersisted data WILL BE LOST. + * + * DEFAULT: false + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} + * API. + * + * @return true if we should avoid flush during shutdown + */ + boolean avoidFlushDuringShutdown(); + + /** + * This is the maximum buffer size that is used by WritableFileWriter. + * On Windows, we need to maintain an aligned buffer for writes. + * We allow the buffer to grow until its size hits the limit. + * + * Default: 1024 * 1024 (1 MB) + * + * @param writableFileMaxBufferSize the maximum buffer size + * + * @return the reference to the current options. + */ + T setWritableFileMaxBufferSize(long writableFileMaxBufferSize); + + /** + * This is the maximum buffer size that is used by WritableFileWriter. + * On Windows, we need to maintain an aligned buffer for writes. + * We allow the buffer to grow until its size hits the limit. + * + * Default: 1024 * 1024 (1 MB) + * + * @return the maximum buffer size + */ + long writableFileMaxBufferSize(); + + /** + * The limited write rate to DB if + * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or + * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, + * or we are writing to the last mem table allowed and we allow more than 3 + * mem tables. It is calculated using the size of user write requests before + * compression. RocksDB may decide to slow down more if the compaction still + * gets further behind. + * + * Unit: bytes per second. + * + * Default: 16MB/s + * + * @param delayedWriteRate the rate in bytes per second + * + * @return the reference to the current options. + */ + T setDelayedWriteRate(long delayedWriteRate); + + /** + * The limited write rate to DB if + * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or + * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, + * or we are writing to the last mem table allowed and we allow more than 3 + * mem tables. It is calculated using the size of user write requests before + * compression. RocksDB may decide to slow down more if the compaction still + * gets further behind. + * + * Unit: bytes per second. + * + * Default: 16MB/s + * + * @return the rate in bytes per second + */ + long delayedWriteRate(); + + /** + *
Once write-ahead logs exceed this size, we will start forcing the + * flush of column families whose memtables are backed by the oldest live + * WAL file (i.e. the ones that are causing all the space amplification). + * + * If set to 0 (default), we will dynamically choose the WAL size limit to + * be [sum of all write_buffer_size * max_write_buffer_number] * 2. + * + * This option takes effect only when there is more than one column family, as + * otherwise the WAL size is dictated by the write_buffer_size. + * + * Default: 0
    + * + * @param maxTotalWalSize max total wal size. + * @return the instance of the current object. + */ + T setMaxTotalWalSize(long maxTotalWalSize); + + /** + *
Returns the max total wal size. Once write-ahead logs exceed this size, + * we will start forcing the flush of column families whose memtables are + * backed by the oldest live WAL file (i.e. the ones that are causing all + * the space amplification). + * + * If set to 0 (default), we will dynamically choose the WAL size limit + * to be [sum of all write_buffer_size * max_write_buffer_number] * 2.
+ * + * @return max total wal size + */ + long maxTotalWalSize(); + + /** + * The periodicity when obsolete files get deleted. The default + * value is 6 hours. The files that get out of scope by the compaction + * process will still get automatically deleted on every compaction, + * regardless of this setting. + * + * @param micros the time interval in micros + * @return the instance of the current object. + */ + T setDeleteObsoleteFilesPeriodMicros(long micros); + + /** + * The periodicity when obsolete files get deleted. The default + * value is 6 hours. The files that get out of scope by the compaction + * process will still get automatically deleted on every compaction, + * regardless of this setting. + * + * @return the time interval in micros when obsolete files will be deleted. + */ + long deleteObsoleteFilesPeriodMicros(); + + /** + * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + * Default: 600 (10 minutes) + * + * @param statsDumpPeriodSec time interval in seconds. + * @return the instance of the current object. + */ + T setStatsDumpPeriodSec(int statsDumpPeriodSec); + + /** + * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + * Default: 600 (10 minutes) + * + * @return time interval in seconds. + */ + int statsDumpPeriodSec(); + + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set. Value -1 means + * files opened are always kept open. You can estimate number of files based + * on {@code target_file_size_base} and {@code target_file_size_multiplier} + * for level-based compaction. For universal-style compaction, you can usually + * set it to -1. + * Default: 5000 + * + * @param maxOpenFiles the maximum number of open files. + * @return the instance of the current object. + */ + T setMaxOpenFiles(int maxOpenFiles); + + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set. Value -1 means + * files opened are always kept open. You can estimate number of files based + * on {@code target_file_size_base} and {@code target_file_size_multiplier} + * for level-based compaction. For universal-style compaction, you can usually + * set it to -1. + * + * @return the maximum number of open files. + */ + int maxOpenFiles(); + + /** + * Allows OS to incrementally sync files to disk while they are being + * written, asynchronously, in the background. + * Issue one request for every bytes_per_sync written. 0 turns it off. + * Default: 0 + * + * @param bytesPerSync size in bytes + * @return the instance of the current object. + */ + T setBytesPerSync(long bytesPerSync); + + /** + * Allows OS to incrementally sync files to disk while they are being + * written, asynchronously, in the background. + * Issue one request for every bytes_per_sync written. 0 turns it off. + * Default: 0 + * + * @return size in bytes + */ + long bytesPerSync(); + + /** + * Same as {@link #setBytesPerSync(long)}, but applies to WAL files + * + * Default: 0, turned off + * + * @param walBytesPerSync size in bytes + * @return the instance of the current object. + */ + T setWalBytesPerSync(long walBytesPerSync); + + /** + * Same as {@link #bytesPerSync()}, but applies to WAL files + * + * Default: 0, turned off + * + * @return size in bytes + */ + long walBytesPerSync(); + + + /** + * If non-zero, we perform bigger reads when doing compaction. If you're + * running RocksDB on spinning disks, you should set this to at least 2MB. 
+ * + * That way RocksDB's compaction is doing sequential instead of random reads. + * When non-zero, we also force + * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true. + * + * Default: 0 + * + * @param compactionReadaheadSize The compaction read-ahead size + * + * @return the reference to the current options. + */ + T setCompactionReadaheadSize(final long compactionReadaheadSize); + + /** + * If non-zero, we perform bigger reads when doing compaction. If you're + * running RocksDB on spinning disks, you should set this to at least 2MB. + * + * That way RocksDB's compaction is doing sequential instead of random reads. + * When non-zero, we also force + * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true. + * + * Default: 0 + * + * @return The compaction read-ahead size + */ + long compactionReadaheadSize(); +} diff --git a/java/src/main/java/org/rocksdb/MutableOptionKey.java b/java/src/main/java/org/rocksdb/MutableOptionKey.java new file mode 100644 index 000000000..7402471ff --- /dev/null +++ b/java/src/main/java/org/rocksdb/MutableOptionKey.java @@ -0,0 +1,15 @@ +package org.rocksdb; + +public interface MutableOptionKey { + enum ValueType { + DOUBLE, + LONG, + INT, + BOOLEAN, + INT_ARRAY, + ENUM + } + + String name(); + ValueType getValueType(); +} diff --git a/java/src/main/java/org/rocksdb/MutableOptionValue.java b/java/src/main/java/org/rocksdb/MutableOptionValue.java new file mode 100644 index 000000000..3727f7c1f --- /dev/null +++ b/java/src/main/java/org/rocksdb/MutableOptionValue.java @@ -0,0 +1,375 @@ +package org.rocksdb; + +import static org.rocksdb.AbstractMutableOptions.INT_ARRAY_INT_SEPARATOR; + +public abstract class MutableOptionValue { + + abstract double asDouble() throws NumberFormatException; + abstract long asLong() throws NumberFormatException; + abstract int asInt() throws NumberFormatException; + abstract boolean asBoolean() throws IllegalStateException; + abstract int[] asIntArray() throws IllegalStateException; + abstract String asString(); + abstract T asObject(); + + private static abstract class MutableOptionValueObject + extends MutableOptionValue { + protected final T value; + + private MutableOptionValueObject(final T value) { + this.value = value; + } + + @Override T asObject() { + return value; + } + } + + static MutableOptionValue fromString(final String s) { + return new MutableOptionStringValue(s); + } + + static MutableOptionValue fromDouble(final double d) { + return new MutableOptionDoubleValue(d); + } + + static MutableOptionValue fromLong(final long d) { + return new MutableOptionLongValue(d); + } + + static MutableOptionValue fromInt(final int i) { + return new MutableOptionIntValue(i); + } + + static MutableOptionValue fromBoolean(final boolean b) { + return new MutableOptionBooleanValue(b); + } + + static MutableOptionValue fromIntArray(final int[] ix) { + return new MutableOptionIntArrayValue(ix); + } + + static > MutableOptionValue fromEnum(final N value) { + return new MutableOptionEnumValue<>(value); + } + + static class MutableOptionStringValue + extends MutableOptionValueObject { + MutableOptionStringValue(final String value) { + super(value); + } + + @Override + double asDouble() throws NumberFormatException { + return Double.parseDouble(value); + } + + @Override + long asLong() throws NumberFormatException { + return Long.parseLong(value); + } + + @Override + int asInt() throws NumberFormatException { + return Integer.parseInt(value); + } + + @Override + boolean asBoolean() throws 
IllegalStateException { + return Boolean.parseBoolean(value); + } + + @Override + int[] asIntArray() throws IllegalStateException { + throw new IllegalStateException("String is not applicable as int[]"); + } + + @Override + String asString() { + return value; + } + } + + static class MutableOptionDoubleValue + extends MutableOptionValue { + private final double value; + MutableOptionDoubleValue(final double value) { + this.value = value; + } + + @Override + double asDouble() { + return value; + } + + @Override + long asLong() throws NumberFormatException { + return Double.valueOf(value).longValue(); + } + + @Override + int asInt() throws NumberFormatException { + if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + throw new NumberFormatException( + "double value lies outside the bounds of int"); + } + return Double.valueOf(value).intValue(); + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new IllegalStateException( + "double is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + throw new NumberFormatException( + "double value lies outside the bounds of int"); + } + return new int[] { Double.valueOf(value).intValue() }; + } + + @Override + String asString() { + return String.valueOf(value); + } + + @Override + Double asObject() { + return value; + } + } + + static class MutableOptionLongValue + extends MutableOptionValue { + private final long value; + + MutableOptionLongValue(final long value) { + this.value = value; + } + + @Override + double asDouble() { + if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) { + throw new NumberFormatException( + "long value lies outside the bounds of int"); + } + return Long.valueOf(value).doubleValue(); + } + + @Override + long asLong() throws NumberFormatException { + return value; + } + + @Override + int asInt() throws NumberFormatException { + if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + throw new NumberFormatException( + "long value lies outside the bounds of int"); + } + return Long.valueOf(value).intValue(); + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new IllegalStateException( + "long is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + throw new NumberFormatException( + "long value lies outside the bounds of int"); + } + return new int[] { Long.valueOf(value).intValue() }; + } + + @Override + String asString() { + return String.valueOf(value); + } + + @Override + Long asObject() { + return value; + } + } + + static class MutableOptionIntValue + extends MutableOptionValue { + private final int value; + + MutableOptionIntValue(final int value) { + this.value = value; + } + + @Override + double asDouble() { + if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) { + throw new NumberFormatException("int value lies outside the bounds of int"); + } + return Integer.valueOf(value).doubleValue(); + } + + @Override + long asLong() throws NumberFormatException { + return value; + } + + @Override + int asInt() throws NumberFormatException { + return value; + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new IllegalStateException("int is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + return new int[] { value }; + } + + @Override + String asString() { 
+ return String.valueOf(value); + } + + @Override + Integer asObject() { + return value; + } + } + + static class MutableOptionBooleanValue + extends MutableOptionValue { + private final boolean value; + + MutableOptionBooleanValue(final boolean value) { + this.value = value; + } + + @Override + double asDouble() { + throw new NumberFormatException("boolean is not applicable as double"); + } + + @Override + long asLong() throws NumberFormatException { + throw new NumberFormatException("boolean is not applicable as Long"); + } + + @Override + int asInt() throws NumberFormatException { + throw new NumberFormatException("boolean is not applicable as int"); + } + + @Override + boolean asBoolean() { + return value; + } + + @Override + int[] asIntArray() throws IllegalStateException { + throw new IllegalStateException("boolean is not applicable as int[]"); + } + + @Override + String asString() { + return String.valueOf(value); + } + + @Override + Boolean asObject() { + return value; + } + } + + static class MutableOptionIntArrayValue + extends MutableOptionValueObject { + MutableOptionIntArrayValue(final int[] value) { + super(value); + } + + @Override + double asDouble() { + throw new NumberFormatException("int[] is not applicable as double"); + } + + @Override + long asLong() throws NumberFormatException { + throw new NumberFormatException("int[] is not applicable as Long"); + } + + @Override + int asInt() throws NumberFormatException { + throw new NumberFormatException("int[] is not applicable as int"); + } + + @Override + boolean asBoolean() { + throw new NumberFormatException("int[] is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + return value; + } + + @Override + String asString() { + final StringBuilder builder = new StringBuilder(); + for(int i = 0; i < value.length; i++) { + builder.append(i); + if(i + 1 < value.length) { + builder.append(INT_ARRAY_INT_SEPARATOR); + } + } + return builder.toString(); + } + } + + static class MutableOptionEnumValue> + extends MutableOptionValueObject { + + MutableOptionEnumValue(final T value) { + super(value); + } + + @Override + double asDouble() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as double"); + } + + @Override + long asLong() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as long"); + } + + @Override + int asInt() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as int"); + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new NumberFormatException("Enum is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + throw new NumberFormatException("Enum is not applicable as int[]"); + } + + @Override + String asString() { + return value.name(); + } + } + +} diff --git a/java/src/main/java/org/rocksdb/OperationStage.java b/java/src/main/java/org/rocksdb/OperationStage.java new file mode 100644 index 000000000..6ac0a15a2 --- /dev/null +++ b/java/src/main/java/org/rocksdb/OperationStage.java @@ -0,0 +1,59 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The operation stage. 
+ */ +public enum OperationStage { + STAGE_UNKNOWN((byte)0x0), + STAGE_FLUSH_RUN((byte)0x1), + STAGE_FLUSH_WRITE_L0((byte)0x2), + STAGE_COMPACTION_PREPARE((byte)0x3), + STAGE_COMPACTION_RUN((byte)0x4), + STAGE_COMPACTION_PROCESS_KV((byte)0x5), + STAGE_COMPACTION_INSTALL((byte)0x6), + STAGE_COMPACTION_SYNC_FILE((byte)0x7), + STAGE_PICK_MEMTABLES_TO_FLUSH((byte)0x8), + STAGE_MEMTABLE_ROLLBACK((byte)0x9), + STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte)0xA); + + private final byte value; + + OperationStage(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ + byte getValue() { + return value; + } + + /** + * Get the Operation stage from the internal representation value. + * + * @param value the internal representation value. + * + * @return the operation stage + * + * @throws IllegalArgumentException if the value does not match + * an OperationStage + */ + static OperationStage fromValue(final byte value) + throws IllegalArgumentException { + for (final OperationStage threadType : OperationStage.values()) { + if (threadType.value == value) { + return threadType; + } + } + throw new IllegalArgumentException( + "Unknown value for OperationStage: " + value); + } +} diff --git a/java/src/main/java/org/rocksdb/OperationType.java b/java/src/main/java/org/rocksdb/OperationType.java new file mode 100644 index 000000000..7cc9b65cd --- /dev/null +++ b/java/src/main/java/org/rocksdb/OperationType.java @@ -0,0 +1,54 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The type used to refer to a thread operation. + * + * A thread operation describes high-level action of a thread, + * examples include compaction and flush. + */ +public enum OperationType { + OP_UNKNOWN((byte)0x0), + OP_COMPACTION((byte)0x1), + OP_FLUSH((byte)0x2); + + private final byte value; + + OperationType(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ + byte getValue() { + return value; + } + + /** + * Get the Operation type from the internal representation value. + * + * @param value the internal representation value. + * + * @return the operation type + * + * @throws IllegalArgumentException if the value does not match + * an OperationType + */ + static OperationType fromValue(final byte value) + throws IllegalArgumentException { + for (final OperationType threadType : OperationType.values()) { + if (threadType.value == value) { + return threadType; + } + } + throw new IllegalArgumentException( + "Unknown value for OperationType: " + value); + } +} diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java index 1610dc739..267cab1de 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java @@ -94,6 +94,54 @@ public class OptimisticTransactionDB extends RocksDB return otdb; } + + /** + * This is similar to {@link #close()} except that it + * throws an exception if any error occurs. + * + * This will not fsync the WAL files. 
+ * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + * + * @throws RocksDBException if an error occurs whilst closing. + */ + public void closeE() throws RocksDBException { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } finally { + disposeInternal(); + } + } + } + + /** + * This is similar to {@link #closeE()} except that it + * silently ignores any errors. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + */ + @Override + public void close() { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } catch (final RocksDBException e) { + // silently ignore the error report + } finally { + disposeInternal(); + } + } + } + @Override public Transaction beginTransaction(final WriteOptions writeOptions) { return new Transaction(this, beginTransaction(nativeHandle_, @@ -155,10 +203,14 @@ public class OptimisticTransactionDB extends RocksDB return db; } + @Override protected final native void disposeInternal(final long handle); + protected static native long open(final long optionsHandle, final String path) throws RocksDBException; protected static native long[] open(final long handle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions); + private native static void closeDatabase(final long handle) + throws RocksDBException; private native long beginTransaction(final long handle, final long writeOptionsHandle); private native long beginTransaction(final long handle, @@ -171,5 +223,4 @@ public class OptimisticTransactionDB extends RocksDB final long optimisticTransactionOptionsHandle, final long oldTransactionHandle); private native long getBaseDB(final long handle); - @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index cdd1b91fd..5831b1e29 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -19,7 +19,9 @@ import java.util.List; * automaticallyand native resources will be released as part of the process. 
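The close()/closeE() pair above deliberately leaves WAL syncing to the caller. A minimal sketch of the shutdown sequence this javadoc describes, assuming a hypothetical database path and a caller that handles RocksDBException:

    // Sync the WAL explicitly (neither close() nor closeE() will fsync it),
    // then close via closeE() so that any failure surfaces as an exception.
    try (final Options options = new Options().setCreateIfMissing(true)) {
      final OptimisticTransactionDB otdb =
          OptimisticTransactionDB.open(options, "/tmp/otdb-example");
      otdb.syncWal();
      otdb.closeE(); // close() would swallow the same error silently
    }
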
*/ public class Options extends RocksObject - implements DBOptionsInterface, ColumnFamilyOptionsInterface, + implements DBOptionsInterface, + MutableDBOptionsInterface, + ColumnFamilyOptionsInterface, MutableColumnFamilyOptionsInterface { static { RocksDB.loadLibrary(); @@ -472,9 +474,10 @@ public class Options extends RocksObject } @Override - public void setMaxSubcompactions(final int maxSubcompactions) { + public Options setMaxSubcompactions(final int maxSubcompactions) { assert(isOwningHandle()); setMaxSubcompactions(nativeHandle_, maxSubcompactions); + return this; } @Override @@ -905,6 +908,17 @@ public class Options extends RocksObject return delayedWriteRate(nativeHandle_); } + @Override + public Options setEnablePipelinedWrite(final boolean enablePipelinedWrite) { + setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite); + return this; + } + + @Override + public boolean enablePipelinedWrite() { + return enablePipelinedWrite(nativeHandle_); + } + @Override public Options setAllowConcurrentMemtableWrite( final boolean allowConcurrentMemtableWrite) { @@ -1006,6 +1020,20 @@ public class Options extends RocksObject return this.rowCache_; } + @Override + public Options setWalFilter(final AbstractWalFilter walFilter) { + assert(isOwningHandle()); + setWalFilter(nativeHandle_, walFilter.nativeHandle_); + this.walFilter_ = walFilter; + return this; + } + + @Override + public WalFilter walFilter() { + assert(isOwningHandle()); + return this.walFilter_; + } + @Override public Options setFailIfOptionsFileError(final boolean failIfOptionsFileError) { assert(isOwningHandle()); @@ -1058,6 +1086,58 @@ public class Options extends RocksObject return avoidFlushDuringShutdown(nativeHandle_); } + @Override + public Options setAllowIngestBehind(final boolean allowIngestBehind) { + assert(isOwningHandle()); + setAllowIngestBehind(nativeHandle_, allowIngestBehind); + return this; + } + + @Override + public boolean allowIngestBehind() { + assert(isOwningHandle()); + return allowIngestBehind(nativeHandle_); + } + + @Override + public Options setPreserveDeletes(final boolean preserveDeletes) { + assert(isOwningHandle()); + setPreserveDeletes(nativeHandle_, preserveDeletes); + return this; + } + + @Override + public boolean preserveDeletes() { + assert(isOwningHandle()); + return preserveDeletes(nativeHandle_); + } + + @Override + public Options setTwoWriteQueues(final boolean twoWriteQueues) { + assert(isOwningHandle()); + setTwoWriteQueues(nativeHandle_, twoWriteQueues); + return this; + } + + @Override + public boolean twoWriteQueues() { + assert(isOwningHandle()); + return twoWriteQueues(nativeHandle_); + } + + @Override + public Options setManualWalFlush(final boolean manualWalFlush) { + assert(isOwningHandle()); + setManualWalFlush(nativeHandle_, manualWalFlush); + return this; + } + + @Override + public boolean manualWalFlush() { + assert(isOwningHandle()); + return manualWalFlush(nativeHandle_); + } + @Override public MemTableConfig memTableConfig() { return this.memTableConfig_; @@ -1194,6 +1274,20 @@ public class Options extends RocksObject bottommostCompressionType(nativeHandle_)); } + @Override + public Options setBottommostCompressionOptions( + final CompressionOptions bottommostCompressionOptions) { + setBottommostCompressionOptions(nativeHandle_, + bottommostCompressionOptions.nativeHandle_); + this.bottommostCompressionOptions_ = bottommostCompressionOptions; + return this; + } + + @Override + public CompressionOptions bottommostCompressionOptions() { + return 
this.bottommostCompressionOptions_; + } + @Override public Options setCompressionOptions( final CompressionOptions compressionOptions) { @@ -1209,7 +1303,7 @@ public class Options extends RocksObject @Override public CompactionStyle compactionStyle() { - return CompactionStyle.values()[compactionStyle(nativeHandle_)]; + return CompactionStyle.fromValue(compactionStyle(nativeHandle_)); } @Override @@ -1581,6 +1675,17 @@ public class Options extends RocksObject return reportBgIoStats(nativeHandle_); } + @Override + public Options setTtl(final long ttl) { + setTtl(nativeHandle_, ttl); + return this; + } + + @Override + public long ttl() { + return ttl(nativeHandle_); + } + @Override public Options setCompactionOptionsUniversal( final CompactionOptionsUniversal compactionOptionsUniversal) { @@ -1619,6 +1724,17 @@ public class Options extends RocksObject return forceConsistencyChecks(nativeHandle_); } + @Override + public Options setAtomicFlush(final boolean atomicFlush) { + setAtomicFlush(nativeHandle_, atomicFlush); + return this; + } + + @Override + public boolean atomicFlush() { + return atomicFlush(nativeHandle_); + } + private native static long newOptions(); private native static long newOptions(long dbOptHandle, long cfOptHandle); @@ -1767,6 +1883,9 @@ public class Options extends RocksObject private native boolean enableThreadTracking(long handle); private native void setDelayedWriteRate(long handle, long delayedWriteRate); private native long delayedWriteRate(long handle); + private native void setEnablePipelinedWrite(final long handle, + final boolean pipelinedWrite); + private native boolean enablePipelinedWrite(final long handle); private native void setAllowConcurrentMemtableWrite(long handle, boolean allowConcurrentMemtableWrite); private native boolean allowConcurrentMemtableWrite(long handle); @@ -1789,7 +1908,9 @@ public class Options extends RocksObject final boolean allow2pc); private native boolean allow2pc(final long handle); private native void setRowCache(final long handle, - final long row_cache_handle); + final long rowCacheHandle); + private native void setWalFilter(final long handle, + final long walFilterHandle); private native void setFailIfOptionsFileError(final long handle, final boolean failIfOptionsFileError); private native boolean failIfOptionsFileError(final long handle); @@ -1802,6 +1923,19 @@ public class Options extends RocksObject private native void setAvoidFlushDuringShutdown(final long handle, final boolean avoidFlushDuringShutdown); private native boolean avoidFlushDuringShutdown(final long handle); + private native void setAllowIngestBehind(final long handle, + final boolean allowIngestBehind); + private native boolean allowIngestBehind(final long handle); + private native void setPreserveDeletes(final long handle, + final boolean preserveDeletes); + private native boolean preserveDeletes(final long handle); + private native void setTwoWriteQueues(final long handle, + final boolean twoWriteQueues); + private native boolean twoWriteQueues(final long handle); + private native void setManualWalFlush(final long handle, + final boolean manualWalFlush); + private native boolean manualWalFlush(final long handle); + // CF native handles private native void optimizeForSmallDb(final long handle); @@ -1839,6 +1973,8 @@ public class Options extends RocksObject private native void setBottommostCompressionType(long handle, byte bottommostCompressionType); private native byte bottommostCompressionType(long handle); + private native void 
setBottommostCompressionOptions(final long handle, + final long bottommostCompressionOptionsHandle); private native void setCompressionOptions(long handle, long compressionOptionsHandle); private native void useFixedLengthPrefixExtractor( @@ -1942,6 +2078,8 @@ public class Options extends RocksObject private native void setReportBgIoStats(final long handle, final boolean reportBgIoStats); private native boolean reportBgIoStats(final long handle); + private native void setTtl(final long handle, final long ttl); + private native long ttl(final long handle); private native void setCompactionOptionsUniversal(final long handle, final long compactionOptionsUniversalHandle); private native void setCompactionOptionsFIFO(final long handle, @@ -1949,6 +2087,9 @@ public class Options extends RocksObject private native void setForceConsistencyChecks(final long handle, final boolean forceConsistencyChecks); private native boolean forceConsistencyChecks(final long handle); + private native void setAtomicFlush(final long handle, + final boolean atomicFlush); + private native boolean atomicFlush(final long handle); // instance variables // NOTE: If you add new member variables, please update the copy constructor above! @@ -1962,7 +2103,9 @@ public class Options extends RocksObject compactionFilterFactory_; private CompactionOptionsUniversal compactionOptionsUniversal_; private CompactionOptionsFIFO compactionOptionsFIFO_; + private CompressionOptions bottommostCompressionOptions_; private CompressionOptions compressionOptions_; private Cache rowCache_; + private WalFilter walFilter_; private WriteBufferManager writeBufferManager_; } diff --git a/java/src/main/java/org/rocksdb/PersistentCache.java b/java/src/main/java/org/rocksdb/PersistentCache.java new file mode 100644 index 000000000..aed565297 --- /dev/null +++ b/java/src/main/java/org/rocksdb/PersistentCache.java @@ -0,0 +1,26 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Persistent cache for caching IO pages on a persistent medium. The + * cache is specifically designed for persistent read cache. + */ +public class PersistentCache extends RocksObject { + + public PersistentCache(final Env env, final String path, final long size, + final Logger logger, final boolean optimizedForNvm) + throws RocksDBException { + super(newPersistentCache(env.nativeHandle_, path, size, + logger.nativeHandle_, optimizedForNvm)); + } + + private native static long newPersistentCache(final long envHandle, + final String path, final long size, final long loggerHandle, + final boolean optimizedForNvm) throws RocksDBException; + + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/Priority.java b/java/src/main/java/org/rocksdb/Priority.java new file mode 100644 index 000000000..34a56edcb --- /dev/null +++ b/java/src/main/java/org/rocksdb/Priority.java @@ -0,0 +1,49 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The Thread Pool priority. 
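The Priority enum that follows uses the same byte-for-enum convention as OperationStage and OperationType above. A minimal sketch of the round trip (both methods are package-private, so this illustrates the JNI wire format rather than public API):

    final byte raw = Priority.HIGH.getValue();         // 0x2 on the JNI boundary
    assert Priority.getPriority(raw) == Priority.HIGH; // reverse lookup by value
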
+ */ +public enum Priority { + BOTTOM((byte) 0x0), + LOW((byte) 0x1), + HIGH((byte)0x2), + TOTAL((byte)0x3); + + private final byte value; + + Priority(final byte value) { + this.value = value; + } + + /** + *
Returns the byte value of the enumeration's value.
+ * + * @return byte representation + */ + byte getValue() { + return value; + } + + /** + * Get Priority by byte value. + * + * @param value byte representation of Priority. + * + * @return {@link org.rocksdb.Priority} instance. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + static Priority getPriority(final byte value) { + for (final Priority priority : Priority.values()) { + if (priority.getValue() == value){ + return priority; + } + } + throw new IllegalArgumentException("Illegal value provided for Priority."); + } +} diff --git a/java/src/main/java/org/rocksdb/Range.java b/java/src/main/java/org/rocksdb/Range.java new file mode 100644 index 000000000..74c85e5f0 --- /dev/null +++ b/java/src/main/java/org/rocksdb/Range.java @@ -0,0 +1,19 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Range from start to limit. + */ +public class Range { + final Slice start; + final Slice limit; + + public Range(final Slice start, final Slice limit) { + this.start = start; + this.limit = limit; + } +} diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java index f176d249b..8353e0fe8 100644 --- a/java/src/main/java/org/rocksdb/ReadOptions.java +++ b/java/src/main/java/org/rocksdb/ReadOptions.java @@ -16,6 +16,15 @@ public class ReadOptions extends RocksObject { super(newReadOptions()); } + /** + * @param verifyChecksums verification will be performed on every read + * when set to true + * @param fillCache if true, then fill-cache behavior will be performed. + */ + public ReadOptions(final boolean verifyChecksums, final boolean fillCache) { + super(newReadOptions(verifyChecksums, fillCache)); + } + /** * Copy constructor. * * @param other the ReadOptions to copy. * */ public ReadOptions(ReadOptions other) { super(copyReadOptions(other.nativeHandle_)); - iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_; - iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_; + this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_; + this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_; } /** @@ -182,8 +191,12 @@ public class ReadOptions extends RocksObject { /** * Returns whether managed iterators will be used. * - * @return the setting of whether managed iterators will be used, by default false + * @return the setting of whether managed iterators will be used, + * by default false + * + * @deprecated This option is not used anymore. */ + @Deprecated public boolean managed() { assert(isOwningHandle()); return managed(nativeHandle_); } @@ -196,7 +209,10 @@ public class ReadOptions extends RocksObject { * * @param managed if true, then managed iterators will be enabled. * @return the reference to the current ReadOptions. + * + * @deprecated This option is not used anymore. */ + @Deprecated public ReadOptions setManaged(final boolean managed) { assert(isOwningHandle()); setManaged(nativeHandle_, managed); @@ -238,7 +254,6 @@ public class ReadOptions extends RocksObject { return prefixSameAsStart(nativeHandle_); } - /** * Enforce that the iterator only iterates over the same prefix as the seek. * This option is effective only for prefix seeks, i.e.
prefix_extractor is @@ -346,6 +361,37 @@ public class ReadOptions extends RocksObject { return this; } + /** + * A threshold for the number of keys that can be skipped before failing an + * iterator seek as incomplete. + * + * @return the number of keys that can be skipped + * before failing an iterator seek as incomplete. + */ + public long maxSkippableInternalKeys() { + assert(isOwningHandle()); + return maxSkippableInternalKeys(nativeHandle_); + } + + /** + * A threshold for the number of keys that can be skipped before failing an + * iterator seek as incomplete. The default value of 0 should be used to + * never fail a request as incomplete, even on skipping too many keys. + * + * Default: 0 + * + * @param maxSkippableInternalKeys the number of keys that can be skipped + * before failing an iterator seek as incomplete. + * + * @return the reference to the current ReadOptions. + */ + public ReadOptions setMaxSkippableInternalKeys( + final long maxSkippableInternalKeys) { + assert(isOwningHandle()); + setMaxSkippableInternalKeys(nativeHandle_, maxSkippableInternalKeys); + return this; + } + /** * If true, keys deleted using the DeleteRange() API will be visible to * readers until they are naturally deleted during compaction. This improves @@ -378,14 +424,63 @@ } /** - * Defines the extent upto which the forward iterator can returns entries. - * Once the bound is reached, Valid() will be false. iterate_upper_bound - * is exclusive ie the bound value is not a valid entry. If - * iterator_extractor is not null, the Seek target and iterator_upper_bound + * Defines the smallest key at which the backward + * iterator can return an entry. Once the bound is passed, + * {@link RocksIterator#isValid()} will be false. + * + * The lower bound is inclusive i.e. the bound value is a valid + * entry. + * + * If prefix_extractor is not null, the Seek target and `iterate_lower_bound` * need to have the same prefix. This is because ordering is not guaranteed - * outside of prefix domain. There is no lower bound on the iterator. + * outside of prefix domain. * - * Default: nullptr + * Default: null + * + * @param iterateLowerBound Slice representing the lower bound + * @return the reference to the current ReadOptions. + */ + public ReadOptions setIterateLowerBound(final Slice iterateLowerBound) { + assert(isOwningHandle()); + if (iterateLowerBound != null) { + // Hold onto a reference so it doesn't get garbage collected out from under us. + iterateLowerBoundSlice_ = iterateLowerBound; + setIterateLowerBound(nativeHandle_, iterateLowerBoundSlice_.getNativeHandle()); + } + return this; + } + + /** + * Returns the smallest key at which the backward + * iterator can return an entry. + * + * The lower bound is inclusive i.e. the bound value is a valid entry. + * + * @return the smallest key, or null if there is no lower bound defined. + */ + public Slice iterateLowerBound() { + assert(isOwningHandle()); + final long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_); + if (lowerBoundSliceHandle != 0) { + // Disown the new slice - it's owned by the C++ side of the JNI boundary + // from the perspective of this method. + return new Slice(lowerBoundSliceHandle, false); + } + return null; + } + + /** + * Defines the extent up to which the forward iterator + * can return entries. Once the bound is reached, + * {@link RocksIterator#isValid()} will be false. + * + * The upper bound is exclusive i.e. the bound value is not a valid entry.
+ + * If prefix_extractor is not null, the Seek target and iterate_upper_bound + * need to have the same prefix. This is because ordering is not guaranteed + * outside of prefix domain. + * + * Default: null * * @param iterateUpperBound Slice representing the upper bound * @return the reference to the current ReadOptions. @@ -393,7 +488,7 @@ public ReadOptions setIterateUpperBound(final Slice iterateUpperBound) { assert(isOwningHandle()); if (iterateUpperBound != null) { - // Hold onto a reference so it doesn't get garbage collected out from under us. + // Hold onto a reference so it doesn't get garbage collected out from under us. iterateUpperBoundSlice_ = iterateUpperBound; setIterateUpperBound(nativeHandle_, iterateUpperBoundSlice_.getNativeHandle()); } @@ -401,21 +496,16 @@ } /** - * Defines the extent upto which the forward iterator can returns entries. - * Once the bound is reached, Valid() will be false. iterate_upper_bound - * is exclusive ie the bound value is not a valid entry. If - * iterator_extractor is not null, the Seek target and iterator_upper_bound - * need to have the same prefix. This is because ordering is not guaranteed - * outside of prefix domain. There is no lower bound on the iterator. + * Returns the largest key at which the forward + * iterator can return an entry. * - * Default: nullptr + * The upper bound is exclusive i.e. the bound value is not a valid entry. * - * @return Slice representing current iterate_upper_bound setting, or null if - * one does not exist. + * @return the largest key, or null if there is no upper bound defined. */ public Slice iterateUpperBound() { assert(isOwningHandle()); - long upperBoundSliceHandle = iterateUpperBound(nativeHandle_); + final long upperBoundSliceHandle = iterateUpperBound(nativeHandle_); if (upperBoundSliceHandle != 0) { // Disown the new slice - it's owned by the C++ side of the JNI boundary // from the perspective of this method. @@ -425,67 +515,70 @@ } /** - * Defines the smallest key at which the backward iterator can return an - * entry. Once the bound is passed, Valid() will be false. - * `iterate_lower_bound` is inclusive ie the bound value is a valid entry. + * A callback to determine whether relevant keys for this scan exist in a + * given table based on the table's properties. The callback is passed the + * properties of each table during iteration. If the callback returns false, + * the table will not be scanned. This option only affects Iterators and has + * no impact on point lookups. * - * If prefix_extractor is not null, the Seek target and `iterate_lower_bound` - * need to have the same prefix. This is because ordering is not guaranteed - * outside of prefix domain. + * Default: null (every table will be scanned) * - * Default: nullptr + * @param tableFilter the table filter for the callback. * - * @param iterateLowerBound Slice representing the lower bound * @return the reference to the current ReadOptions. */ - public ReadOptions setIterateLowerBound(final Slice iterateLowerBound) { + public ReadOptions setTableFilter(final AbstractTableFilter tableFilter) { assert(isOwningHandle()); - if (iterateLowerBound != null) { - // Hold onto a reference so it doesn't get garbaged collected out from under us.
- iterateLowerBoundSlice_ = iterateLowerBound; - setIterateLowerBound(nativeHandle_, iterateLowerBoundSlice_.getNativeHandle()); - } + setTableFilter(nativeHandle_, tableFilter.nativeHandle_); return this; } /** - * Defines the smallest key at which the backward iterator can return an - * entry. Once the bound is passed, Valid() will be false. - * `iterate_lower_bound` is inclusive ie the bound value is a valid entry. + * Needed to support differential snapshots. Has 2 effects: + * 1) Iterator will skip all internal keys with seqnum < iter_start_seqnum + * 2) if this param is > 0, the iterator will return INTERNAL keys instead of + * user keys; e.g. tombstones are returned as well. * - * If prefix_extractor is not null, the Seek target and `iterate_lower_bound` - * need to have the same prefix. This is because ordering is not guaranteed - * outside of prefix domain. + * Default: 0 (don't filter by seqnum, return user keys) * - * Default: nullptr + * @param startSeqnum the starting sequence number. * - * @return Slice representing current iterate_lower_bound setting, or null if - * one does not exist. + * @return the reference to the current ReadOptions. */ - public Slice iterateLowerBound() { + public ReadOptions setIterStartSeqnum(final long startSeqnum) { assert(isOwningHandle()); - long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_); - if (lowerBoundSliceHandle != 0) { - // Disown the new slice - it's owned by the C++ side of the JNI boundary - // from the perspective of this method. - return new Slice(lowerBoundSliceHandle, false); - } - return null; + setIterStartSeqnum(nativeHandle_, startSeqnum); + return this; + } + + /** + * Returns the starting sequence number of any iterator. + * See {@link #setIterStartSeqnum(long)}. + * + * @return the starting sequence number of any iterator. + */ + public long iterStartSeqnum() { + assert(isOwningHandle()); + return iterStartSeqnum(nativeHandle_); } // instance variables // NOTE: If you add new member variables, please update the copy constructor above! // - // Hold a reference to any iterate upper/lower bound that was set on this object - // until we're destroyed or it's overwritten. That way the caller can freely - // leave scope without us losing the Java Slice object, which during close() - // would also reap its associated rocksdb::Slice native object since it's - // possibly (likely) to be an owning handle. + // Hold a reference to any iterate lower or upper bound that was set on this + // object until we're destroyed or it's overwritten. That way the caller can + // freely leave scope without us losing the Java Slice object, which during + // close() would also reap its associated rocksdb::Slice native object since + // it's possibly (likely) an owning handle.
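Taken together, the bound setters, the skip threshold and the table filter above make iterator scans far more controllable. A minimal sketch of a bounded scan, assuming an already-open RocksDB instance named db and hypothetical keys:

    // The ReadOptions instance retains references to both Slices, as the
    // comment above explains, so they stay alive while the iterator is used.
    try (final Slice lower = new Slice("key001");   // inclusive lower bound
         final Slice upper = new Slice("key100");   // exclusive upper bound
         final ReadOptions readOptions = new ReadOptions()
             .setIterateLowerBound(lower)
             .setIterateUpperBound(upper)
             .setMaxSkippableInternalKeys(1000);    // give up after 1000 skipped keys
         final RocksIterator it = db.newIterator(readOptions)) {
      for (it.seekToFirst(); it.isValid(); it.next()) {
        // visits only keys in ["key001", "key100")
      }
    }
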
+ private Slice iterateLowerBoundSlice_; + private Slice iterateUpperBoundSlice_; private native static long newReadOptions(); + private native static long newReadOptions(final boolean verifyChecksums, + final boolean fillCache); private native static long copyReadOptions(long handle); + @Override protected final native void disposeInternal(final long handle); + private native boolean verifyChecksums(long handle); private native void setVerifyChecksums(long handle, boolean verifyChecksums); private native boolean fillCache(long handle); @@ -510,6 +603,9 @@ public class ReadOptions extends RocksObject { private native long readaheadSize(final long handle); private native void setReadaheadSize(final long handle, final long readaheadSize); + private native long maxSkippableInternalKeys(final long handle); + private native void setMaxSkippableInternalKeys(final long handle, + final long maxSkippableInternalKeys); private native boolean ignoreRangeDeletions(final long handle); private native void setIgnoreRangeDeletions(final long handle, final boolean ignoreRangeDeletions); @@ -517,9 +613,10 @@ public class ReadOptions extends RocksObject { final long upperBoundSliceHandle); private native long iterateUpperBound(final long handle); private native void setIterateLowerBound(final long handle, - final long upperBoundSliceHandle); + final long lowerBoundSliceHandle); private native long iterateLowerBound(final long handle); - - @Override protected final native void disposeInternal(final long handle); - + private native void setTableFilter(final long handle, + final long tableFilterHandle); + private native void setIterStartSeqnum(final long handle, final long seqNum); + private native long iterStartSeqnum(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index 791ff4100..b93a51e28 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -7,7 +7,6 @@ package org.rocksdb; import java.util.*; import java.io.IOException; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.rocksdb.util.Environment; @@ -139,6 +138,15 @@ public class RocksDB extends RocksObject { } } + /** + * Private constructor. + * + * @param nativeHandle The native handle of the C++ RocksDB object + */ + protected RocksDB(final long nativeHandle) { + super(nativeHandle); + } + /** * The factory constructor of RocksDB that opens a RocksDB instance given * the path to the database using the default options w/ createIfMissing @@ -153,9 +161,7 @@ public class RocksDB extends RocksObject { * @see Options#setCreateIfMissing(boolean) */ public static RocksDB open(final String path) throws RocksDBException { - // This allows to use the rocksjni default Options instead of - // the c++ one. - Options options = new Options(); + final Options options = new Options(); options.setCreateIfMissing(true); return open(options, path); } @@ -193,9 +199,7 @@ public class RocksDB extends RocksObject { final List columnFamilyDescriptors, final List columnFamilyHandles) throws RocksDBException { - // This allows to use the rocksjni default Options instead of - // the c++ one. 
- DBOptions options = new DBOptions(); + final DBOptions options = new DBOptions(); return open(options, path, columnFamilyDescriptors, columnFamilyHandles); } @@ -418,6 +422,54 @@ public class RocksDB extends RocksObject { return db; } + + /** + * This is similar to {@link #close()} except that it + * throws an exception if any error occurs. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + * + * @throws RocksDBException if an error occurs whilst closing. + */ + public void closeE() throws RocksDBException { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } finally { + disposeInternal(); + } + } + } + + /** + * This is similar to {@link #closeE()} except that it + * silently ignores any errors. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + */ + @Override + public void close() { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } catch (final RocksDBException e) { + // silently ignore the error report + } finally { + disposeInternal(); + } + } + } + /** * Static method to determine all available column families for a * rocksdb database identified by path @@ -435,16 +487,108 @@ public class RocksDB extends RocksObject { path)); } - protected void storeOptionsInstance(DBOptionsInterface options) { - options_ = options; + /** + * Creates a new column family with the name columnFamilyName and + * allocates a ColumnFamilyHandle within an internal structure. + * The ColumnFamilyHandle is automatically disposed with DB disposal. + * + * @param columnFamilyDescriptor column family to be created. + * @return {@link org.rocksdb.ColumnFamilyHandle} instance. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public ColumnFamilyHandle createColumnFamily( + final ColumnFamilyDescriptor columnFamilyDescriptor) + throws RocksDBException { + return new ColumnFamilyHandle(this, createColumnFamily(nativeHandle_, + columnFamilyDescriptor.getName(), + columnFamilyDescriptor.getName().length, + columnFamilyDescriptor.getOptions().nativeHandle_)); } - private static void checkBounds(int offset, int len, int size) { - if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { - throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size)); + /** + * Bulk create column families with the same column family options. + * + * @param columnFamilyOptions the options for the column families. + * @param columnFamilyNames the names of the column families. + * + * @return the handles to the newly created column families. 
+ */ + public List<ColumnFamilyHandle> createColumnFamilies( + final ColumnFamilyOptions columnFamilyOptions, + final List<byte[]> columnFamilyNames) throws RocksDBException { + final byte[][] cfNames = columnFamilyNames.toArray( + new byte[0][]); + final long[] cfHandles = createColumnFamilies(nativeHandle_, + columnFamilyOptions.nativeHandle_, cfNames); + final List<ColumnFamilyHandle> columnFamilyHandles = + new ArrayList<>(cfHandles.length); + for (int i = 0; i < cfHandles.length; i++) { + columnFamilyHandles.add(new ColumnFamilyHandle(this, cfHandles[i])); + } + return columnFamilyHandles; + } + + /** + * Bulk create column families with the same column family options. + * + * @param columnFamilyDescriptors the descriptions of the column families. + * + * @return the handles to the newly created column families. + */ + public List<ColumnFamilyHandle> createColumnFamilies( + final List<ColumnFamilyDescriptor> columnFamilyDescriptors) + throws RocksDBException { + final long[] cfOptsHandles = new long[columnFamilyDescriptors.size()]; + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor columnFamilyDescriptor + = columnFamilyDescriptors.get(i); + cfOptsHandles[i] = columnFamilyDescriptor.getOptions().nativeHandle_; + cfNames[i] = columnFamilyDescriptor.getName(); + } + final long[] cfHandles = createColumnFamilies(nativeHandle_, + cfOptsHandles, cfNames); + final List<ColumnFamilyHandle> columnFamilyHandles = + new ArrayList<>(cfHandles.length); + for (int i = 0; i < cfHandles.length; i++) { + columnFamilyHandles.add(new ColumnFamilyHandle(this, cfHandles[i])); + } + return columnFamilyHandles; + } + + /** + * Drops the column family specified by {@code columnFamilyHandle}. This call + * only records a drop record in the manifest and prevents the column + * family from flushing and compacting. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle) + throws RocksDBException { + dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_); + } + + // Bulk drop column families. This call only records drop records in the + // manifest and prevents the column families from flushing and compacting. + // In case of error, the request may succeed partially. User may call + // ListColumnFamilies to check the result. + public void dropColumnFamilies( + final List<ColumnFamilyHandle> columnFamilies) throws RocksDBException { + final long[] cfHandles = new long[columnFamilies.size()]; + for (int i = 0; i < columnFamilies.size(); i++) { + cfHandles[i] = columnFamilies.get(i).nativeHandle_; } + dropColumnFamilies(nativeHandle_, cfHandles); }
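A minimal sketch of the bulk column-family API above, assuming an already-open RocksDB instance named db and the java.util.Arrays/List imports:

    // Create two column families sharing one options object, then drop them.
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      final List<ColumnFamilyHandle> handles = db.createColumnFamilies(cfOpts,
          Arrays.asList("cf-a".getBytes(), "cf-b".getBytes()));
      db.dropColumnFamilies(handles);
      for (final ColumnFamilyHandle handle : handles) {
        handle.close(); // dropping does not release the Java-side handles
      }
    }
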
+ //TODO(AR) what about DestroyColumnFamilyHandle + /** * Set the database entry for "key" to "value". * * @param key The specified key to be inserted - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * must be non-negative and no larger than ("key".length - offset) + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param value the value associated with the specified key - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) - * - * @throws RocksDBException thrown if errors happens in underlying native library. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no longer than "key".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - offset) + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds */ - public void put(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException { + public void put(final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); put(nativeHandle_, key, offset, len, value, vOffset, vLen); @@ -508,19 +656,24 @@ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance * @param key The specified key to be inserted - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * must be non-negative and no larger than ("key".length - offset) + * @param offset the offset of the "key" array to be used, must + * be non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param value the value associated with the specified key - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) - * - * @throws RocksDBException thrown if errors happens in underlying native library. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no longer than "key".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - offset) + * + * @throws RocksDBException thrown if error happens in underlying native + * library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds */ - public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException { + public void put(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); put(nativeHandle_, key, offset, len, value, vOffset, vLen, @@ -548,27 +701,30 @@ public class RocksDB extends RocksObject { * * @param writeOpts {@link org.rocksdb.WriteOptions} instance. * @param key The specified key to be inserted - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * must be non-negative and no larger than ("key".length - offset) + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param value the value associated with the specified key - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no longer than "key".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - offset) * * @throws RocksDBException thrown if error happens in underlying * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds */ - public void put(final WriteOptions writeOpts, byte[] key, int offset, int len, byte[] value, int vOffset, int vLen) throws RocksDBException { + public void put(final WriteOptions writeOpts, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); put(nativeHandle_, writeOpts.nativeHandle_, key, offset, len, value, vOffset, vLen); } - /** * Set the database entry for "key" to "value" for the specified * column family. @@ -600,22 +756,25 @@ public class RocksDB extends RocksObject { * instance * @param writeOpts {@link org.rocksdb.WriteOptions} instance. 
* @param key The specified key to be inserted - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * must be non-negative and no larger than ("key".length - offset) + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param value the value associated with the specified key - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no longer than "key".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - offset) * * @throws RocksDBException thrown if error happens in underlying * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds */ public void put(final ColumnFamilyHandle columnFamilyHandle, - final WriteOptions writeOpts, final byte[] key, int offset, int len, - final byte[] value, int vOffset, int vLen) throws RocksDBException { + final WriteOptions writeOpts, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); put(nativeHandle_, writeOpts.nativeHandle_, key, offset, len, value, @@ -623,370 +782,624 @@ public class RocksDB extends RocksObject { } /** - * If the key definitely does not exist in the database, then this method - * returns false, else true. + * Remove the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * This check is potentially lighter-weight than invoking DB::Get(). One way - * to make this lighter weight is to avoid doing any IOs. + * @param key Key to delete within database * - * @param key byte array of a key to search for - * @param value StringBuilder instance which is a out parameter if a value is - * found in block-cache. - * @return boolean value indicating if key does not exist or might exist. + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @deprecated Use {@link #delete(byte[])} */ - public boolean keyMayExist(final byte[] key, final StringBuilder value) { - return keyMayExist(nativeHandle_, key, 0, key.length, value); + @Deprecated + public void remove(final byte[] key) throws RocksDBException { + delete(key); } /** - * If the key definitely does not exist in the database, then this method - * returns false, else true. - * - * This check is potentially lighter-weight than invoking DB::Get(). One way - * to make this lighter weight is to avoid doing any IOs. + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. 
* - * @param key byte array of a key to search for - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * @param value StringBuilder instance which is a out parameter if a value is - * found in block-cache. + * @param key Key to delete within database * - * @return boolean value indicating if key does not exist or might exist. + * @throws RocksDBException thrown if error happens in underlying + * native library. */ - public boolean keyMayExist(final byte[] key, int offset, int len, final StringBuilder value) { - checkBounds(offset, len, key.length); - return keyMayExist(nativeHandle_, key, offset, len, value); + public void delete(final byte[] key) throws RocksDBException { + delete(nativeHandle_, key, 0, key.length); } /** - * If the key definitely does not exist in the database, then this method - * returns false, else true. + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * This check is potentially lighter-weight than invoking DB::Get(). One way - * to make this lighter weight is to avoid doing any IOs. + * @param key Key to delete within database + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be + * non-negative and no larger than ("key".length - offset) * - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance - * @param key byte array of a key to search for - * @param value StringBuilder instance which is a out parameter if a value is - * found in block-cache. - * @return boolean value indicating if key does not exist or might exist. + * @throws RocksDBException thrown if error happens in underlying + * native library. */ - public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key, final StringBuilder value) { - return keyMayExist(nativeHandle_, key, 0, key.length, - columnFamilyHandle.nativeHandle_, value); + public void delete(final byte[] key, final int offset, final int len) + throws RocksDBException { + delete(nativeHandle_, key, offset, len); } /** - * If the key definitely does not exist in the database, then this method - * returns false, else true. + * Remove the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * This check is potentially lighter-weight than invoking DB::Get(). One way - * to make this lighter weight is to avoid doing any IOs. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key Key to delete within database * - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance - * @param key byte array of a key to search for - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * @param value StringBuilder instance which is a out parameter if a value is - * found in block-cache. - * @return boolean value indicating if key does not exist or might exist. + * @throws RocksDBException thrown if error happens in underlying + * native library. 
+ * + * @deprecated Use {@link #delete(ColumnFamilyHandle, byte[])} */ - public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key, int offset, int len, final StringBuilder value) { - checkBounds(offset, len, key.length); - return keyMayExist(nativeHandle_, key, offset, len, - columnFamilyHandle.nativeHandle_, value); + @Deprecated + public void remove(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + delete(columnFamilyHandle, key); } - /** - * If the key definitely does not exist in the database, then this method - * returns false, else true. + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * This check is potentially lighter-weight than invoking DB::Get(). One way - * to make this lighter weight is to avoid doing any IOs. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key Key to delete within database * - * @param readOptions {@link ReadOptions} instance - * @param key byte array of a key to search for - * @param value StringBuilder instance which is a out parameter if a value is - * found in block-cache. - * @return boolean value indicating if key does not exist or might exist. + * @throws RocksDBException thrown if error happens in underlying + * native library. */ - public boolean keyMayExist(final ReadOptions readOptions, - final byte[] key, final StringBuilder value) { - return keyMayExist(nativeHandle_, readOptions.nativeHandle_, - key, 0, key.length, value); + public void delete(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + delete(nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_); } /** - * If the key definitely does not exist in the database, then this method - * returns false, else true. + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * This check is potentially lighter-weight than invoking DB::Get(). One way - * to make this lighter weight is to avoid doing any IOs. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key Key to delete within database + * @param offset the offset of the "key" array to be used, + * must be non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * - * @param readOptions {@link ReadOptions} instance - * @param key byte array of a key to search for - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * @param value StringBuilder instance which is a out parameter if a value is - * found in block-cache. - * @return boolean value indicating if key does not exist or might exist. + * @throws RocksDBException thrown if error happens in underlying + * native library.
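The offset/length overloads above let callers address a window of a larger byte[] without copying; checkBounds() rejects out-of-range windows with IndexOutOfBoundsException. A minimal sketch with a hypothetical buffer, assuming an already-open RocksDB instance named db:

    final byte[] buf = "xxxmykeyvvv".getBytes();
    db.put(buf, 3, 5, buf, 8, 3); // key = bytes 3..7 ("mykey"), value = bytes 8..10 ("vvv")
    db.delete(buf, 3, 5);         // deletes the entry whose key is "mykey"
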
*/ - public boolean keyMayExist(final ReadOptions readOptions, - final byte[] key, int offset, int len, final StringBuilder value) { - checkBounds(offset, len, key.length); - return keyMayExist(nativeHandle_, readOptions.nativeHandle_, - key, offset, len, value); + public void delete(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len) + throws RocksDBException { + delete(nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_); } /** - * If the key definitely does not exist in the database, then this method - * returns false, else true. - * - * This check is potentially lighter-weight than invoking DB::Get(). One way - * to make this lighter weight is to avoid doing any IOs. + * Remove the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * @param readOptions {@link ReadOptions} instance - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance - * @param key byte array of a key to search for - * @param value StringBuilder instance which is a out parameter if a value is - * found in block-cache. - * @return boolean value indicating if key does not exist or might exist. - */ - public boolean keyMayExist(final ReadOptions readOptions, - final ColumnFamilyHandle columnFamilyHandle, final byte[] key, - final StringBuilder value) { - return keyMayExist(nativeHandle_, readOptions.nativeHandle_, - key, 0, key.length, columnFamilyHandle.nativeHandle_, - value); - } - - /** - * If the key definitely does not exist in the database, then this method - * returns false, else true. + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database * - * This check is potentially lighter-weight than invoking DB::Get(). One way - * to make this lighter weight is to avoid doing any IOs. + * @throws RocksDBException thrown if error happens in underlying + * native library. * - * @param readOptions {@link ReadOptions} instance - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance - * @param key byte array of a key to search for - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * @param value StringBuilder instance which is a out parameter if a value is - * found in block-cache. - * @return boolean value indicating if key does not exist or might exist. + * @deprecated Use {@link #delete(WriteOptions, byte[])} */ - public boolean keyMayExist(final ReadOptions readOptions, - final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len, - final StringBuilder value) { - checkBounds(offset, len, key.length); - return keyMayExist(nativeHandle_, readOptions.nativeHandle_, - key, offset, len, columnFamilyHandle.nativeHandle_, - value); + @Deprecated + public void remove(final WriteOptions writeOpt, final byte[] key) + throws RocksDBException { + delete(writeOpt, key); } /** - * Apply the specified updates to the database. + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. 
* - * @param writeOpts WriteOptions instance - * @param updates WriteBatch instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database * * @throws RocksDBException thrown if error happens in underlying * native library. */ - public void write(final WriteOptions writeOpts, final WriteBatch updates) + public void delete(final WriteOptions writeOpt, final byte[] key) throws RocksDBException { - write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); + delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length); } /** - * Apply the specified updates to the database. + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * @param writeOpts WriteOptions instance - * @param updates WriteBatchWithIndex instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be + * non-negative and no larger than ("key".length - offset) * * @throws RocksDBException thrown if error happens in underlying * native library. */ - public void write(final WriteOptions writeOpts, - final WriteBatchWithIndex updates) throws RocksDBException { - write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); + public void delete(final WriteOptions writeOpt, final byte[] key, + final int offset, final int len) throws RocksDBException { + delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len); } /** - * Add merge operand for key/value pair. + * Remove the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * @param key the specified key to be merged. - * @param value the value to be merged with the current value for - * the specified key. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database * * @throws RocksDBException thrown if error happens in underlying * native library. + * + * @deprecated Use {@link #delete(ColumnFamilyHandle, WriteOptions, byte[])} */ - public void merge(final byte[] key, final byte[] value) - throws RocksDBException { - merge(nativeHandle_, key, 0, key.length, value, 0, value.length); + @Deprecated + public void remove(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] key) throws RocksDBException { + delete(columnFamilyHandle, writeOpt, key); } /** - * Add merge operand for key/value pair. + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * @param key the specified key to be merged. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * @param value the value to be merged with the current value for the specified key. 
- * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database * * @throws RocksDBException thrown if error happens in underlying * native library. */ - public void merge(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) + public void delete(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] key) throws RocksDBException { - checkBounds(offset, len, key.length); - checkBounds(vOffset, vLen, value.length); - merge(nativeHandle_, key, offset, len, value, vOffset, vLen); + delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length, + columnFamilyHandle.nativeHandle_); } - /** - * Add merge operand for key/value pair in a ColumnFamily. + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. * - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance - * @param key the specified key to be merged. - * @param value the value to be merged with the current value for - * the specified key. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be + * non-negative and no larger than ("key".length - offset) * * @throws RocksDBException thrown if error happens in underlying * native library. */ - public void merge(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key, final byte[] value) throws RocksDBException { - merge(nativeHandle_, key, 0, key.length, value, 0, value.length, + public void delete(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] key, final int offset, + final int len) throws RocksDBException { + delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_); } /** - * Add merge operand for key/value pair in a ColumnFamily. + * Remove the database entry for {@code key}. Requires that the key exists + * and was not overwritten. It is not an error if the key did not exist + * in the database. * - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance - * @param key the specified key to be merged. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * @param value the value to be merged with the current value for - * the specified key. 
- * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple + * times), then the result of calling SingleDelete() on this key is undefined. + * SingleDelete() only behaves correctly if there has been only one Put() + * for this key since the previous call to SingleDelete() for this key. + * + * This feature is currently an experimental performance optimization + * for a very specific workload. It is up to the caller to ensure that + * SingleDelete is only used for a key that is not deleted using Delete() or + * written using Merge(). Mixing SingleDelete operations with Deletes and + * Merges can result in undefined behavior. + * + * @param key Key to delete within database * * @throws RocksDBException thrown if error happens in underlying - * native library. + * native library. */ - public void merge(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException { - checkBounds(offset, len, key.length); - checkBounds(vOffset, vLen, value.length); - merge(nativeHandle_, key, offset, len, value, vOffset, vLen, - columnFamilyHandle.nativeHandle_); + @Experimental("Performance optimization for a very specific workload") + public void singleDelete(final byte[] key) throws RocksDBException { + singleDelete(nativeHandle_, key, key.length); } /** - * Add merge operand for key/value pair. + * Remove the database entry for {@code key}. Requires that the key exists + * and was not overwritten. It is not an error if the key did not exist + * in the database. * - * @param writeOpts {@link WriteOptions} for this write. - * @param key the specified key to be merged. - * @param value the value to be merged with the current value for - * the specified key. + * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple + * times), then the result of calling SingleDelete() on this key is undefined. + * SingleDelete() only behaves correctly if there has been only one Put() + * for this key since the previous call to SingleDelete() for this key. + * + * This feature is currently an experimental performance optimization + * for a very specific workload. It is up to the caller to ensure that + * SingleDelete is only used for a key that is not deleted using Delete() or + * written using Merge(). Mixing SingleDelete operations with Deletes and + * Merges can result in undefined behavior. + * + * @param columnFamilyHandle The column family to delete the key from + * @param key Key to delete within database * * @throws RocksDBException thrown if error happens in underlying - * native library. + * native library. */ - public void merge(final WriteOptions writeOpts, final byte[] key, - final byte[] value) throws RocksDBException { - merge(nativeHandle_, writeOpts.nativeHandle_, - key, 0, key.length, value, 0, value.length); + @Experimental("Performance optimization for a very specific workload") + public void singleDelete(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + singleDelete(nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); } /** - * Add merge operand for key/value pair. + * Remove the database entry for {@code key}. 
Requires that the key exists + * and was not overwritten. It is not an error if the key did not exist + * in the database. * - * @param writeOpts {@link WriteOptions} for this write. - * @param key the specified key to be merged. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * @param value the value to be merged with the current value for - * the specified key. - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple + * times), then the result of calling SingleDelete() on this key is undefined. + * SingleDelete() only behaves correctly if there has been only one Put() + * for this key since the previous call to SingleDelete() for this key. + * + * This feature is currently an experimental performance optimization + * for a very specific workload. It is up to the caller to ensure that + * SingleDelete is only used for a key that is not deleted using Delete() or + * written using Merge(). Mixing SingleDelete operations with Deletes and + * Merges can result in undefined behavior. + * + * Note: consider setting {@link WriteOptions#setSync(boolean)} true. + * + * @param writeOpt Write options for the delete + * @param key Key to delete within database * * @throws RocksDBException thrown if error happens in underlying - * native library. + * native library. */ - public void merge(final WriteOptions writeOpts, final byte[] key, int offset, int len, - final byte[] value, int vOffset, int vLen) throws RocksDBException { - checkBounds(offset, len, key.length); - checkBounds(vOffset, vLen, value.length); - merge(nativeHandle_, writeOpts.nativeHandle_, - key, offset, len, value, vOffset, vLen); + @Experimental("Performance optimization for a very specific workload") + public void singleDelete(final WriteOptions writeOpt, final byte[] key) + throws RocksDBException { + singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length); } /** - * Add merge operand for key/value pair. + * Remove the database entry for {@code key}. Requires that the key exists + * and was not overwritten. It is not an error if the key did not exist + * in the database. * - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance - * @param writeOpts {@link WriteOptions} for this write. - * @param key the specified key to be merged. - * @param value the value to be merged with the current value for - * the specified key. + * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple + * times), then the result of calling SingleDelete() on this key is undefined. + * SingleDelete() only behaves correctly if there has been only one Put() + * for this key since the previous call to SingleDelete() for this key. + * + * This feature is currently an experimental performance optimization + * for a very specific workload. It is up to the caller to ensure that + * SingleDelete is only used for a key that is not deleted using Delete() or + * written using Merge(). Mixing SingleDelete operations with Deletes and + * Merges can result in undefined behavior. + * + * Note: consider setting {@link WriteOptions#setSync(boolean)} true. 
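Because the singleDelete() contract above is easy to violate, here is a hypothetical sketch of the one safe pattern — exactly one put() per key between single deletes (helper class and key are illustrative only):

```java
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteOptions;

class SingleDeleteSketch {
  // Assumes "key" has seen exactly one put() and no delete()/merge()
  // since the previous singleDelete(), as the Javadoc requires.
  static void evictOnce(final RocksDB db, final byte[] key)
      throws RocksDBException {
    try (final WriteOptions writeOpt = new WriteOptions().setSync(true)) {
      db.singleDelete(writeOpt, key);
    }
  }
}
```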
+ * + * @param columnFamilyHandle The column family to delete the key from + * @param writeOpt Write options for the delete + * @param key Key to delete within database * * @throws RocksDBException thrown if error happens in underlying - * native library. + * native library. */ - public void merge(final ColumnFamilyHandle columnFamilyHandle, - final WriteOptions writeOpts, final byte[] key, - final byte[] value) throws RocksDBException { - merge(nativeHandle_, writeOpts.nativeHandle_, - key, 0, key.length, value, 0, value.length, + @Experimental("Performance optimization for a very specific workload") + public void singleDelete(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] key) throws RocksDBException { + singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); } + /** - * Add merge operand for key/value pair. + * Removes the database entries in the range ["beginKey", "endKey"), i.e., + * including "beginKey" and excluding "endKey". Returns OK on success, and + * a non-OK status on error. It is not an error if no keys exist in the + * range ["beginKey", "endKey"). + * + * @param beginKey First key to delete within database (inclusive) + * @param endKey Last key to delete within database (exclusive) + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public void deleteRange(final byte[] beginKey, final byte[] endKey) + throws RocksDBException { + deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, + endKey.length); + } + + /** + * Removes the database entries in the range ["beginKey", "endKey"), i.e., + * including "beginKey" and excluding "endKey". Returns OK on success, and + * a non-OK status on error. It is not an error if no keys exist in the + * range ["beginKey", "endKey"). + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance + * @param beginKey First key to delete within database (inclusive) + * @param endKey Last key to delete within database (exclusive) + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, + final byte[] beginKey, final byte[] endKey) throws RocksDBException { + deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, + endKey.length, columnFamilyHandle.nativeHandle_); + } + + /** + * Removes the database entries in the range ["beginKey", "endKey"), i.e., + * including "beginKey" and excluding "endKey". Returns OK on success, and + * a non-OK status on error. It is not an error if no keys exist in the + * range ["beginKey", "endKey"). + * + * @param writeOpt WriteOptions to be used with delete operation + * @param beginKey First key to delete within database (inclusive) + * @param endKey Last key to delete within database (exclusive) + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
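The deleteRange() overloads added here operate on a half-open key range. A hypothetical usage sketch (class name and keys are placeholders):

```java
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

class DeleteRangeSketch {
  // Deletes ["user.0000", "user.9999"): beginKey is included, endKey is not.
  static void purgeUsers(final RocksDB db) throws RocksDBException {
    db.deleteRange("user.0000".getBytes(), "user.9999".getBytes());
  }
}
```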
+ */ + public void deleteRange(final WriteOptions writeOpt, final byte[] beginKey, + final byte[] endKey) throws RocksDBException { + deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, + beginKey.length, endKey, 0, endKey.length); + } + + /** + * Removes the database entries in the range ["beginKey", "endKey"), i.e., + * including "beginKey" and excluding "endKey". Returns OK on success, and + * a non-OK status on error. It is not an error if no keys exist in the + * range ["beginKey", "endKey"). + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance + * @param writeOpt WriteOptions to be used with delete operation + * @param beginKey First key to delete within database (inclusive) + * @param endKey Last key to delete within database (exclusive) + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] beginKey, final byte[] endKey) + throws RocksDBException { + deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, + beginKey.length, endKey, 0, endKey.length, + columnFamilyHandle.nativeHandle_); + } + + + /** + * Add merge operand for key/value pair. + * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for the + * specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final byte[] key, final byte[] value) + throws RocksDBException { + merge(nativeHandle_, key, 0, key.length, value, 0, value.length); + } + + /** + * Add merge operand for key/value pair. + * + * @param key the specified key to be merged. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value to be merged with the current value for the + * specified key. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void merge(final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + merge(nativeHandle_, key, offset, len, value, vOffset, vLen); + } + + /** + * Add merge operand for key/value pair in a ColumnFamily. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
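merge() is only meaningful when a merge operator is configured on the options. A sketch assuming the StringAppendOperator that ships with RocksJava (class name and path are placeholders):

```java
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.StringAppendOperator;

class MergeSketch {
  static void appendTags(final String path) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setMergeOperator(new StringAppendOperator());
         final RocksDB db = RocksDB.open(options, path)) {
      db.merge("tags".getBytes(), "red".getBytes());   // value: "red"
      db.merge("tags".getBytes(), "blue".getBytes());  // value: "red,blue"
    }
  }
}
```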
+ */ + public void merge(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final byte[] value) throws RocksDBException { + merge(nativeHandle_, key, 0, key.length, value, 0, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Add merge operand for key/value pair in a ColumnFamily. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key the specified key to be merged. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value to be merged with the current value for + * the specified key. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void merge(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len, final byte[] value, + final int vOffset, final int vLen) throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + merge(nativeHandle_, key, offset, len, value, vOffset, vLen, + columnFamilyHandle.nativeHandle_); + } + + /** + * Add merge operand for key/value pair. * - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance * @param writeOpts {@link WriteOptions} for this write. * @param key the specified key to be merged. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and * @param value the value to be merged with the current value for * the specified key. - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final WriteOptions writeOpts, final byte[] key, + final byte[] value) throws RocksDBException { + merge(nativeHandle_, writeOpts.nativeHandle_, + key, 0, key.length, value, 0, value.length); + } + + /** + * Add merge operand for key/value pair. + * + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value to be merged with the current value for + * the specified key. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void merge(final WriteOptions writeOpts, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + merge(nativeHandle_, writeOpts.nativeHandle_, + key, offset, len, value, vOffset, vLen); + } + + /** + * Add merge operand for key/value pair. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for the + * specified key. * * @throws RocksDBException thrown if error happens in underlying * native library. */ public void merge(final ColumnFamilyHandle columnFamilyHandle, - final WriteOptions writeOpts, final byte[] key, int offset, int len, - final byte[] value, int vOffset, int vLen) throws RocksDBException { + final WriteOptions writeOpts, final byte[] key, final byte[] value) + throws RocksDBException { + merge(nativeHandle_, writeOpts.nativeHandle_, + key, 0, key.length, value, 0, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Add merge operand for key/value pair. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value to be merged with the current value for + * the specified key. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void merge( + final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); merge(nativeHandle_, writeOpts.nativeHandle_, @@ -994,6 +1407,34 @@ public class RocksDB extends RocksObject { columnFamilyHandle.nativeHandle_); } + /** + * Apply the specified updates to the database. + * + * @param writeOpts WriteOptions instance + * @param updates WriteBatch instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void write(final WriteOptions writeOpts, final WriteBatch updates) + throws RocksDBException { + write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); + } + + /** + * Apply the specified updates to the database. + * + * @param writeOpts WriteOptions instance + * @param updates WriteBatchWithIndex instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
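The write() methods above apply a whole batch atomically (via the native write0()/write1()). A minimal hypothetical sketch:

```java
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

class WriteBatchSketch {
  // Either both updates are applied, or neither is.
  static void updateAtomically(final RocksDB db) throws RocksDBException {
    try (final WriteBatch batch = new WriteBatch();
         final WriteOptions writeOpts = new WriteOptions()) {
      batch.put("a".getBytes(), "1".getBytes());
      batch.delete("b".getBytes());
      db.write(writeOpts, batch);
    }
  }
}
```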
+ */ + public void write(final WriteOptions writeOpts, + final WriteBatchWithIndex updates) throws RocksDBException { + write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); + } + // TODO(AR) we should improve the #get() API, returning -1 (RocksDB.NOT_FOUND) is not very nice // when we could communicate better status into, also the C++ code shows that -2 could be returned @@ -1021,14 +1462,15 @@ public class RocksDB extends RocksObject { * Get the value associated with the specified key within column family* * * @param key the key to retrieve the value. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param value the out-value to receive the retrieved value. - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) * * @return The size of the actual value that matches the specified * {@code key} in byte. If the return value is greater than the @@ -1040,7 +1482,9 @@ public class RocksDB extends RocksObject { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public int get(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException { + public int get(final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); return get(nativeHandle_, key, offset, len, value, vOffset, vLen); @@ -1075,14 +1519,15 @@ public class RocksDB extends RocksObject { * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance * @param key the key to retrieve the value. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param value the out-value to receive the retrieved value.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) * * @return The size of the actual value that matches the specified * {@code key} in byte. If the return value is greater than the @@ -1094,8 +1539,9 @@ public class RocksDB extends RocksObject { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len, - final byte[] value, int vOffset, int vLen) throws RocksDBException, IllegalArgumentException { + public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final int offset, final int len, final byte[] value, final int vOffset, + final int vLen) throws RocksDBException, IllegalArgumentException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); return get(nativeHandle_, key, offset, len, value, vOffset, vLen, @@ -1129,14 +1575,15 @@ public class RocksDB extends RocksObject { * * @param opt {@link org.rocksdb.ReadOptions} instance. * @param key the key to retrieve the value. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param value the out-value to receive the retrieved value. - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * @return The size of the actual value that matches the specified * {@code key} in byte. If the return value is greater than the * length of {@code value}, then it indicates that the size of the @@ -1147,8 +1594,9 @@ public class RocksDB extends RocksObject { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public int get(final ReadOptions opt, final byte[] key, int offset, int len, - final byte[] value, int vOffset, int vLen) throws RocksDBException { + public int get(final ReadOptions opt, final byte[] key, final int offset, + final int len, final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); return get(nativeHandle_, opt.nativeHandle_, @@ -1187,14 +1635,15 @@ public class RocksDB extends RocksObject { * instance * @param opt {@link org.rocksdb.ReadOptions} instance. * @param key the key to retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be + * non-negative and no larger than ("key".length - offset) * @param value the out-value to receive the retrieved value. - * @param vOffset the offset of the "value" array to be used, must be non-negative and - * no longer than "key".length - * @param vLen the length of the "value" array to be used, must be non-negative and - * must be non-negative and no larger than ("value".length - offset) + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) * @return The size of the actual value that matches the specified * {@code key} in byte. If the return value is greater than the * length of {@code value}, then it indicates that the size of the @@ -1206,7 +1655,8 @@ public class RocksDB extends RocksObject { * native library. */ public int get(final ColumnFamilyHandle columnFamilyHandle, - final ReadOptions opt, final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) + final ReadOptions opt, final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); @@ -1221,7 +1671,7 @@ public class RocksDB extends RocksObject { * * @param key the key retrieve the value. * @return a byte array storing the value associated with the input key if - * any. null if it does not find the specified key. + * any. null if it does not find the specified key. * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1236,16 +1686,18 @@ public class RocksDB extends RocksObject { * returned if the specified key is not found. * * @param key the key retrieve the value. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @return a byte array storing the value associated with the input key if - * any. null if it does not find the specified key. + * any. null if it does not find the specified key. * * @throws RocksDBException thrown if error happens in underlying * native library. */ - public byte[] get(final byte[] key, int offset, int len) throws RocksDBException { + public byte[] get(final byte[] key, final int offset, + final int len) throws RocksDBException { checkBounds(offset, len, key.length); return get(nativeHandle_, key, offset, len); } @@ -1278,17 +1730,19 @@ public class RocksDB extends RocksObject { * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance * @param key the key retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @return a byte array storing the value associated with the input key if - * any. null if it does not find the specified key. + * any. null if it does not find the specified key. * * @throws RocksDBException thrown if error happens in underlying * native library. */ public byte[] get(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key, int offset, int len) throws RocksDBException { + final byte[] key, final int offset, final int len) + throws RocksDBException { checkBounds(offset, len, key.length); return get(nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_); @@ -1318,18 +1772,19 @@ public class RocksDB extends RocksObject { * returned if the specified key is not found. * * @param key the key retrieve the value. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param opt Read options. * @return a byte array storing the value associated with the input key if - * any. null if it does not find the specified key. + * any. null if it does not find the specified key. * * @throws RocksDBException thrown if error happens in underlying * native library. */ - public byte[] get(final ReadOptions opt, final byte[] key, int offset, int len) - throws RocksDBException { + public byte[] get(final ReadOptions opt, final byte[] key, final int offset, + final int len) throws RocksDBException { checkBounds(offset, len, key.length); return get(nativeHandle_, opt.nativeHandle_, key, offset, len); } @@ -1344,7 +1799,7 @@ public class RocksDB extends RocksObject { * @param key the key retrieve the value. * @param opt Read options. * @return a byte array storing the value associated with the input key if - * any. null if it does not find the specified key. + * any. null if it does not find the specified key. * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1363,18 +1818,20 @@ public class RocksDB extends RocksObject { * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance * @param key the key retrieve the value. - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) * @param opt Read options. * @return a byte array storing the value associated with the input key if - * any. null if it does not find the specified key. + * any. null if it does not find the specified key. 
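For the buffer-filling get() overloads earlier in this hunk, the int return value encodes all three outcomes. A hypothetical sketch (buffer size is arbitrary):

```java
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

class BufferGetSketch {
  static void readInto(final RocksDB db, final byte[] key)
      throws RocksDBException {
    final byte[] value = new byte[64];
    final int size = db.get(key, 0, key.length, value, 0, value.length);
    if (size == RocksDB.NOT_FOUND) {
      System.out.println("no such key");
    } else if (size > value.length) {
      System.out.println("buffer too small; value is " + size + " bytes");
    } else {
      System.out.println(new String(value, 0, size));
    }
  }
}
```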
* * @throws RocksDBException thrown if error happens in underlying * native library. */ public byte[] get(final ColumnFamilyHandle columnFamilyHandle, - final ReadOptions opt, final byte[] key, int offset, int len) throws RocksDBException { + final ReadOptions opt, final byte[] key, final int offset, final int len) + throws RocksDBException { checkBounds(offset, len, key.length); return get(nativeHandle_, opt.nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_); @@ -1397,7 +1854,7 @@ public class RocksDB extends RocksObject { throws RocksDBException { assert(keys.size() != 0); - final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final byte[][] keysArray = keys.toArray(new byte[0][]); final int keyOffsets[] = new int[keysArray.length]; final int keyLengths[] = new int[keysArray.length]; for(int i = 0; i < keyLengths.length; i++) { @@ -1420,12 +1877,6 @@ public class RocksDB extends RocksObject { return keyValueMap; } - private static int computeCapacityHint(final int estimatedNumberOfItems) { - // Default load factor for HashMap is 0.75, so N * 1.5 will be at the load - // limit. We add +1 for a buffer. - return (int)Math.ceil(estimatedNumberOfItems * 1.5 + 1.0); - } - /** * Returns a map of keys for which values were found in DB. *

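Between these multiGet() hunks (note the keys.toArray(new byte[0][]) idiom replacing pre-sized arrays), a hypothetical caller illustrating the contract that only found keys appear in the returned map:

```java
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

class MultiGetSketch {
  static Map<byte[], byte[]> lookup(final RocksDB db) throws RocksDBException {
    final List<byte[]> keys =
        Arrays.asList("k1".getBytes(), "k2".getBytes());
    return db.multiGet(keys);  // absent keys simply have no entry
  }
}
```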
@@ -1437,7 +1888,7 @@ public class RocksDB extends RocksObject { * {@link org.rocksdb.ColumnFamilyHandle} instances. * @param keys List of keys for which values need to be retrieved. * @return Map where key of map is the key passed by user and value for map - * entry is the corresponding value in DB. + * entry is the corresponding value in DB. * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1463,7 +1914,7 @@ public class RocksDB extends RocksObject { cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; } - final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final byte[][] keysArray = keys.toArray(new byte[0][]); final int keyOffsets[] = new int[keysArray.length]; final int keyLengths[] = new int[keysArray.length]; for(int i = 0; i < keyLengths.length; i++) { @@ -1490,7 +1941,7 @@ public class RocksDB extends RocksObject { * @param opt Read options. * @param keys of keys for which values need to be retrieved. * @return Map where key of map is the key passed by user and value for map - * entry is the corresponding value in DB. + * entry is the corresponding value in DB. * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1502,7 +1953,7 @@ public class RocksDB extends RocksObject { final List<byte[]> keys) throws RocksDBException { assert(keys.size() != 0); - final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final byte[][] keysArray = keys.toArray(new byte[0][]); final int keyOffsets[] = new int[keysArray.length]; final int keyLengths[] = new int[keysArray.length]; for(int i = 0; i < keyLengths.length; i++) { @@ -1537,7 +1988,7 @@ public class RocksDB extends RocksObject { * {@link org.rocksdb.ColumnFamilyHandle} instances. * @param keys of keys for which values need to be retrieved. * @return Map where key of map is the key passed by user and value for map - * entry is the corresponding value in DB. + * entry is the corresponding value in DB. * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1563,7 +2014,7 @@ public class RocksDB extends RocksObject { cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; } - final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final byte[][] keysArray = keys.toArray(new byte[0][]); final int keyOffsets[] = new int[keysArray.length]; final int keyLengths[] = new int[keysArray.length]; for(int i = 0; i < keyLengths.length; i++) { @@ -1731,344 +2182,339 @@ public class RocksDB extends RocksObject { } /** - * Remove the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. - - * @param key Key to delete within database + * If the key definitely does not exist in the database, then this method + * returns false, else true. * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. * - * @deprecated Use {@link #delete(byte[])} + * @param key byte array of a key to search for + * @param value StringBuilder instance which is an out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist.
*/ - @Deprecated - public void remove(final byte[] key) throws RocksDBException { - delete(key); + public boolean keyMayExist(final byte[] key, final StringBuilder value) { + return keyMayExist(nativeHandle_, key, 0, key.length, value); } /** - * Delete the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. + * If the key definitely does not exist in the database, then this method + * returns false, else true. * - * @param key Key to delete within database + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * @param key byte array of a key to search for + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value StringBuilder instance which is an out parameter if a value is + * found in block-cache. + * + * @return boolean value indicating if key does not exist or might exist. */ - public void delete(final byte[] key) throws RocksDBException { - delete(nativeHandle_, key, 0, key.length); + public boolean keyMayExist(final byte[] key, final int offset, final int len, + final StringBuilder value) { + checkBounds(offset, len, key.length); + return keyMayExist(nativeHandle_, key, offset, len, value); } /** - * Delete the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. + * If the key definitely does not exist in the database, then this method + * returns false, else true. * - * @param key Key to delete within database - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key byte array of a key to search for + * @param value StringBuilder instance which is an out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. */ - public void delete(final byte[] key, int offset, int len) throws RocksDBException { - delete(nativeHandle_, key, offset, len); + public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final StringBuilder value) { + return keyMayExist(nativeHandle_, key, 0, key.length, + columnFamilyHandle.nativeHandle_, value); } /** - * Remove the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. - - * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance - * @param key Key to delete within database + * If the key definitely does not exist in the database, then this method + * returns false, else true. * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * This check is potentially lighter-weight than invoking DB::Get().
One way + * to make this lighter weight is to avoid doing any IOs. * - * @deprecated Use {@link #delete(ColumnFamilyHandle, byte[])} + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key byte array of a key to search for + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value StringBuilder instance which is an out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. */ - @Deprecated - public void remove(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key) throws RocksDBException { - delete(columnFamilyHandle, key); + public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len, + final StringBuilder value) { + checkBounds(offset, len, key.length); + return keyMayExist(nativeHandle_, key, offset, len, + columnFamilyHandle.nativeHandle_, value); } /** - * Delete the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. + * If the key definitely does not exist in the database, then this method + * returns false, else true. * - * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance - * @param key Key to delete within database + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * @param readOptions {@link ReadOptions} instance + * @param key byte array of a key to search for + * @param value StringBuilder instance which is an out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. */ - public void delete(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key) throws RocksDBException { - delete(nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_); + public boolean keyMayExist(final ReadOptions readOptions, + final byte[] key, final StringBuilder value) { + return keyMayExist(nativeHandle_, readOptions.nativeHandle_, + key, 0, key.length, value); } /** - * Delete the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. + * If the key definitely does not exist in the database, then this method + * returns false, else true. * - * @param key Key to delete within database + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. * - * @throws RocksDBException thrown if error happens in underlying - * native library.
+ * @param readOptions {@link ReadOptions} instance + * @param key byte array of a key to search for + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value StringBuilder instance which is an out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. */ - public void delete(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key, int offset, int len) throws RocksDBException { - delete(nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_); + public boolean keyMayExist(final ReadOptions readOptions, + final byte[] key, final int offset, final int len, + final StringBuilder value) { + checkBounds(offset, len, key.length); + return keyMayExist(nativeHandle_, readOptions.nativeHandle_, + key, offset, len, value); } /** - * Remove the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. - - * @param writeOpt WriteOptions to be used with delete operation - * @param key Key to delete within database + * If the key definitely does not exist in the database, then this method + * returns false, else true. * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. * - * @deprecated Use {@link #delete(WriteOptions, byte[])} + * @param readOptions {@link ReadOptions} instance + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key byte array of a key to search for + * @param value StringBuilder instance which is an out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. */ - @Deprecated - public void remove(final WriteOptions writeOpt, final byte[] key) - throws RocksDBException { - delete(writeOpt, key); + public boolean keyMayExist(final ReadOptions readOptions, + final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final StringBuilder value) { + return keyMayExist(nativeHandle_, readOptions.nativeHandle_, + key, 0, key.length, columnFamilyHandle.nativeHandle_, + value); } /** - * Delete the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. + * If the key definitely does not exist in the database, then this method + * returns false, else true. * - * @param writeOpt WriteOptions to be used with delete operation - * @param key Key to delete within database + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. * - * @throws RocksDBException thrown if error happens in underlying - * native library.
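The keyMayExist() family is definitive only in the negative. A hypothetical sketch of acting on its three possible outcomes:

```java
import org.rocksdb.RocksDB;

class KeyMayExistSketch {
  static void probe(final RocksDB db, final byte[] key) {
    final StringBuilder value = new StringBuilder();
    if (!db.keyMayExist(key, value)) {
      System.out.println("definitely absent");
    } else if (value.length() > 0) {
      System.out.println("value found in block cache: " + value);
    } else {
      System.out.println("may exist; confirm with get()");
    }
  }
}
```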
+ * @param readOptions {@link ReadOptions} instance + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key byte array of a key to search for + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value StringBuilder instance which is an out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. */ - public void delete(final WriteOptions writeOpt, final byte[] key) - throws RocksDBException { - delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length); + public boolean keyMayExist(final ReadOptions readOptions, + final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final int offset, final int len, final StringBuilder value) { + checkBounds(offset, len, key.length); + return keyMayExist(nativeHandle_, readOptions.nativeHandle_, + key, offset, len, columnFamilyHandle.nativeHandle_, + value); } /** - * Delete the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. + *
<p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p>
    * - * @param writeOpt WriteOptions to be used with delete operation - * @param key Key to delete within database - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and + *
<p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed. + * </p>
    * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * @return instance of iterator object. */ - public void delete(final WriteOptions writeOpt, final byte[] key, int offset, int len) - throws RocksDBException { - delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len); + public RocksIterator newIterator() { + return new RocksIterator(this, iterator(nativeHandle_)); } /** - * Remove the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. - * - * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance - * @param writeOpt WriteOptions to be used with delete operation - * @param key Key to delete within database + *
<p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p>
    * - * @throws RocksDBException thrown if error happens in underlying - * native library. + *
<p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed. + * </p>
    * - * @deprecated Use {@link #delete(ColumnFamilyHandle, WriteOptions, byte[])} + * @param readOptions {@link ReadOptions} instance. + * @return instance of iterator object. */ - @Deprecated - public void remove(final ColumnFamilyHandle columnFamilyHandle, - final WriteOptions writeOpt, final byte[] key) - throws RocksDBException { - delete(columnFamilyHandle, writeOpt, key); + public RocksIterator newIterator(final ReadOptions readOptions) { + return new RocksIterator(this, iterator(nativeHandle_, + readOptions.nativeHandle_)); } /** - * Delete the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. + *
<p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p>
    + * + *
<p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed. + * </p>
    * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance - * @param writeOpt WriteOptions to be used with delete operation - * @param key Key to delete within database - * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * @return instance of iterator object. */ - public void delete(final ColumnFamilyHandle columnFamilyHandle, - final WriteOptions writeOpt, final byte[] key) - throws RocksDBException { - delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length, - columnFamilyHandle.nativeHandle_); + public RocksIterator newIterator( + final ColumnFamilyHandle columnFamilyHandle) { + return new RocksIterator(this, iteratorCF(nativeHandle_, + columnFamilyHandle.nativeHandle_)); } /** - * Delete the database entry (if any) for "key". Returns OK on - * success, and a non-OK status on error. It is not an error if "key" - * did not exist in the database. + *

    Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).

    + * + *

    Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed. + *

    * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance - * @param writeOpt WriteOptions to be used with delete operation - * @param key Key to delete within database - * @param offset the offset of the "key" array to be used, must be non-negative and - * no larger than "key".length - * @param len the length of the "key" array to be used, must be non-negative and - * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * @param readOptions {@link ReadOptions} instance. + * @return instance of iterator object. */ - public void delete(final ColumnFamilyHandle columnFamilyHandle, - final WriteOptions writeOpt, final byte[] key, int offset, int len) - throws RocksDBException { - delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len, - columnFamilyHandle.nativeHandle_); + public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions) { + return new RocksIterator(this, iteratorCF(nativeHandle_, + columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_)); } /** - * Remove the database entry for {@code key}. Requires that the key exists - * and was not overwritten. It is not an error if the key did not exist - * in the database. - * - * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple - * times), then the result of calling SingleDelete() on this key is undefined. - * SingleDelete() only behaves correctly if there has been only one Put() - * for this key since the previous call to SingleDelete() for this key. - * - * This feature is currently an experimental performance optimization - * for a very specific workload. It is up to the caller to ensure that - * SingleDelete is only used for a key that is not deleted using Delete() or - * written using Merge(). Mixing SingleDelete operations with Deletes and - * Merges can result in undefined behavior. + * Returns iterators from a consistent database state across multiple + * column families. Iterators are heap allocated and need to be deleted + * before the db is deleted * - * @param key Key to delete within database + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator} + * instances * * @throws RocksDBException thrown if error happens in underlying - * native library. + * native library. */ - @Experimental("Performance optimization for a very specific workload") - public void singleDelete(final byte[] key) throws RocksDBException { - singleDelete(nativeHandle_, key, key.length); + public List newIterators( + final List columnFamilyHandleList) + throws RocksDBException { + return newIterators(columnFamilyHandleList, new ReadOptions()); } /** - * Remove the database entry for {@code key}. Requires that the key exists - * and was not overwritten. It is not an error if the key did not exist - * in the database. - * - * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple - * times), then the result of calling SingleDelete() on this key is undefined. - * SingleDelete() only behaves correctly if there has been only one Put() - * for this key since the previous call to SingleDelete() for this key. - * - * This feature is currently an experimental performance optimization - * for a very specific workload. It is up to the caller to ensure that - * SingleDelete is only used for a key that is not deleted using Delete() or - * written using Merge(). 
Mixing SingleDelete operations with Deletes and - * Merges can result in undefined behavior. + * Returns iterators from a consistent database state across multiple + * column families. Iterators are heap allocated and need to be deleted + * before the db is deleted * - * @param columnFamilyHandle The column family to delete the key from - * @param key Key to delete within database + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param readOptions {@link ReadOptions} instance. + * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator} + * instances * * @throws RocksDBException thrown if error happens in underlying - * native library. + * native library. */ - @Experimental("Performance optimization for a very specific workload") - public void singleDelete(final ColumnFamilyHandle columnFamilyHandle, - final byte[] key) throws RocksDBException { - singleDelete(nativeHandle_, key, key.length, - columnFamilyHandle.nativeHandle_); + public List newIterators( + final List columnFamilyHandleList, + final ReadOptions readOptions) throws RocksDBException { + + final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + + final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles, + readOptions.nativeHandle_); + + final List iterators = new ArrayList<>( + columnFamilyHandleList.size()); + for (int i=0; iReturn a handle to the current DB state. Iterators created with + * this handle will all observe a stable snapshot of the current DB + * state. The caller must call ReleaseSnapshot(result) when the + * snapshot is no longer needed.
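Editor's note: a minimal usage sketch for the iterator API above (illustrative only, not part of the patch; assumes an already-open `db`, and `process(...)` is a placeholder):

```java
try (final RocksIterator it = db.newIterator()) {
  // The iterator is initially invalid; a Seek call positions it first.
  for (it.seekToFirst(); it.isValid(); it.next()) {
    process(it.key(), it.value());
  }
} // close() releases the native iterator before the db itself is closed
```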

+
+  /**
+   * <p>Return a handle to the current DB state. Iterators created with
+   * this handle will all observe a stable snapshot of the current DB
+   * state. The caller must call ReleaseSnapshot(result) when the
+   * snapshot is no longer needed.</p>
    *
-   * @param writeOpt Write options for the delete
-   * @param key Key to delete within database
+   *
+   * <p>nullptr will be returned if the DB fails to take a snapshot or does
+   * not support snapshot.</p>
    *
-   * @throws RocksDBException thrown if error happens in underlying
-   *     native library.
+   * @return Snapshot {@link Snapshot} instance
    */
-  @Experimental("Performance optimization for a very specific workload")
-  public void singleDelete(final WriteOptions writeOpt, final byte[] key)
-      throws RocksDBException {
-    singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
+  public Snapshot getSnapshot() {
+    long snapshotHandle = getSnapshot(nativeHandle_);
+    if (snapshotHandle != 0) {
+      return new Snapshot(snapshotHandle);
+    }
+    return null;
   }
 
   /**
-   * Remove the database entry for {@code key}. Requires that the key exists
-   * and was not overwritten. It is not an error if the key did not exist
-   * in the database.
-   *
-   * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
-   * times), then the result of calling SingleDelete() on this key is undefined.
-   * SingleDelete() only behaves correctly if there has been only one Put()
-   * for this key since the previous call to SingleDelete() for this key.
-   *
-   * This feature is currently an experimental performance optimization
-   * for a very specific workload. It is up to the caller to ensure that
-   * SingleDelete is only used for a key that is not deleted using Delete() or
-   * written using Merge(). Mixing SingleDelete operations with Deletes and
-   * Merges can result in undefined behavior.
-   *
-   * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
+   * Release a previously acquired snapshot.
    *
-   * @param columnFamilyHandle The column family to delete the key from
-   * @param writeOpt Write options for the delete
-   * @param key Key to delete within database
+   * The caller must not use "snapshot" after this call.
    *
-   * @throws RocksDBException thrown if error happens in underlying
-   *     native library.
+   * @param snapshot {@link Snapshot} instance
    */
-  @Experimental("Performance optimization for a very specific workload")
-  public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
-      final WriteOptions writeOpt, final byte[] key) throws RocksDBException {
-    singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length,
-        columnFamilyHandle.nativeHandle_);
+  public void releaseSnapshot(final Snapshot snapshot) {
+    if (snapshot != null) {
+      releaseSnapshot(nativeHandle_, snapshot.nativeHandle_);
+    }
   }
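Editor's note: a sketch of the snapshot read path documented above (illustrative only, not part of the patch; `db` and `key` are assumed to be in scope and RocksDBException propagates):

```java
final Snapshot snapshot = db.getSnapshot(); // may be null if unsupported
try (final ReadOptions readOptions = new ReadOptions()) {
  readOptions.setSnapshot(snapshot);
  // Reads through readOptions see the DB as of the snapshot; later
  // writes to this key are not visible here.
  final byte[] value = db.get(readOptions, key);
} finally {
  db.releaseSnapshot(snapshot); // snapshot must not be used afterwards
}
```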

  /**
@@ -2082,124 +2528,28 @@ public class RocksDB extends RocksObject {
    *
    * <p>Valid property names include:

      *
    • "rocksdb.num-files-at-level<N>" - return the number of files at - * level <N>, where <N> is an ASCII representation of a level - * number (e.g. "0").
    • - *
    • "rocksdb.stats" - returns a multi-line string that describes statistics - * about the internal operation of the DB.
    • - *
    • "rocksdb.sstables" - returns a multi-line string that describes all - * of the sstables that make up the db contents.
    • - *
    - * - * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance - * @param property to be fetched. See above for examples - * @return property value - * - * @throws RocksDBException thrown if error happens in underlying - * native library. - */ - public String getProperty(final ColumnFamilyHandle columnFamilyHandle, - final String property) throws RocksDBException { - return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_, - property, property.length()); - } - - /** - * Removes the database entries in the range ["beginKey", "endKey"), i.e., - * including "beginKey" and excluding "endKey". a non-OK status on error. It - * is not an error if no keys exist in the range ["beginKey", "endKey"). - * - * Delete the database entry (if any) for "key". Returns OK on success, and a - * non-OK status on error. It is not an error if "key" did not exist in the - * database. - * - * @param beginKey - * First key to delete within database (included) - * @param endKey - * Last key to delete within database (excluded) - * - * @throws RocksDBException - * thrown if error happens in underlying native library. - */ - public void deleteRange(final byte[] beginKey, final byte[] endKey) throws RocksDBException { - deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, endKey.length); - } - - /** - * Removes the database entries in the range ["beginKey", "endKey"), i.e., - * including "beginKey" and excluding "endKey". a non-OK status on error. It - * is not an error if no keys exist in the range ["beginKey", "endKey"). - * - * Delete the database entry (if any) for "key". Returns OK on success, and a - * non-OK status on error. It is not an error if "key" did not exist in the - * database. - * - * @param columnFamilyHandle - * {@link org.rocksdb.ColumnFamilyHandle} instance - * @param beginKey - * First key to delete within database (included) - * @param endKey - * Last key to delete within database (excluded) - * - * @throws RocksDBException - * thrown if error happens in underlying native library. - */ - public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final byte[] beginKey, - final byte[] endKey) throws RocksDBException { - deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, endKey.length, - columnFamilyHandle.nativeHandle_); - } - - /** - * Removes the database entries in the range ["beginKey", "endKey"), i.e., - * including "beginKey" and excluding "endKey". a non-OK status on error. It - * is not an error if no keys exist in the range ["beginKey", "endKey"). - * - * Delete the database entry (if any) for "key". Returns OK on success, and a - * non-OK status on error. It is not an error if "key" did not exist in the - * database. - * - * @param writeOpt - * WriteOptions to be used with delete operation - * @param beginKey - * First key to delete within database (included) - * @param endKey - * Last key to delete within database (excluded) - * - * @throws RocksDBException - * thrown if error happens in underlying native library. - */ - public void deleteRange(final WriteOptions writeOpt, final byte[] beginKey, final byte[] endKey) - throws RocksDBException { - deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, - endKey.length); - } - - /** - * Removes the database entries in the range ["beginKey", "endKey"), i.e., - * including "beginKey" and excluding "endKey". a non-OK status on error. It - * is not an error if no keys exist in the range ["beginKey", "endKey"). 
- * - * Delete the database entry (if any) for "key". Returns OK on success, and a - * non-OK status on error. It is not an error if "key" did not exist in the - * database. + * level <N>, where <N> is an ASCII representation of a level + * number (e.g. "0"). + *
  • "rocksdb.stats" - returns a multi-line string that describes statistics + * about the internal operation of the DB.
  • + *
  • "rocksdb.sstables" - returns a multi-line string that describes all + * of the sstables that make up the db contents.
  • + * * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance - * @param writeOpt - * WriteOptions to be used with delete operation - * @param beginKey - * First key to delete within database (included) - * @param endKey - * Last key to delete within database (excluded) + * instance, or null for the default column family. + * @param property to be fetched. See above for examples + * @return property value * - * @throws RocksDBException - * thrown if error happens in underlying native library. + * @throws RocksDBException thrown if error happens in underlying + * native library. */ - public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpt, - final byte[] beginKey, final byte[] endKey) throws RocksDBException { - deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, - endKey.length, columnFamilyHandle.nativeHandle_); + public String getProperty( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, + final String property) throws RocksDBException { + return getProperty(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + property, property.length()); } /** @@ -2226,7 +2576,41 @@ public class RocksDB extends RocksObject { * native library. */ public String getProperty(final String property) throws RocksDBException { - return getProperty0(nativeHandle_, property, property.length()); + return getProperty(null, property); + } + + + /** + * Gets a property map. + * + * @param property to be fetched. + * + * @return the property map + * + * @throws RocksDBException if an error happens in the underlying native code. + */ + public Map getMapProperty(final String property) + throws RocksDBException { + return getMapProperty(null, property); + } + + /** + * Gets a property map. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family. + * @param property to be fetched. + * + * @return the property map + * + * @throws RocksDBException if an error happens in the underlying native code. + */ + public Map getMapProperty( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, + final String property) throws RocksDBException { + return getMapProperty(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + property, property.length()); } /** @@ -2252,7 +2636,7 @@ public class RocksDB extends RocksObject { * @throws RocksDBException if an error happens in the underlying native code. */ public long getLongProperty(final String property) throws RocksDBException { - return getLongProperty(nativeHandle_, property, property.length()); + return getLongProperty(null, property); } /** @@ -2272,20 +2656,32 @@ public class RocksDB extends RocksObject { * unsigned long using provided methods of type {@link Long}.

    * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance + * instance, or null for the default column family * @param property to be fetched. * * @return numerical property value * * @throws RocksDBException if an error happens in the underlying native code. */ - public long getLongProperty(final ColumnFamilyHandle columnFamilyHandle, + public long getLongProperty( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, final String property) throws RocksDBException { - return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_, + return getLongProperty(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, property, property.length()); } - /** + /** + * Reset internal stats for DB and all column families. + * + * Note this doesn't reset {@link Options#statistics()} as it is not + * owned by DB. + */ + public void resetStats() throws RocksDBException { + resetStats(nativeHandle_); + } + + /** *

    Return sum of the getLongProperty of all the column families

    * *

    Note: As the returned property is of type @@ -2306,261 +2702,113 @@ public class RocksDB extends RocksObject { * * @throws RocksDBException if an error happens in the underlying native code. */ - public long getAggregatedLongProperty(final String property) throws RocksDBException { - return getAggregatedLongProperty(nativeHandle_, property, property.length()); - } - - /** - *

    Return a heap-allocated iterator over the contents of the - * database. The result of newIterator() is initially invalid - * (caller must call one of the Seek methods on the iterator - * before using it).

    - * - *

    Caller should close the iterator when it is no longer needed. - * The returned iterator should be closed before this db is closed. - *

    - * - * @return instance of iterator object. - */ - public RocksIterator newIterator() { - return new RocksIterator(this, iterator(nativeHandle_)); + public long getAggregatedLongProperty(final String property) + throws RocksDBException { + return getAggregatedLongProperty(nativeHandle_, property, + property.length()); } /** - *

    Return a heap-allocated iterator over the contents of the - * database. The result of newIterator() is initially invalid - * (caller must call one of the Seek methods on the iterator - * before using it).

    - * - *

    Caller should close the iterator when it is no longer needed. - * The returned iterator should be closed before this db is closed. - *

    + * Get the approximate file system space used by keys in each range. * - * @param readOptions {@link ReadOptions} instance. - * @return instance of iterator object. - */ - public RocksIterator newIterator(final ReadOptions readOptions) { - return new RocksIterator(this, iterator(nativeHandle_, - readOptions.nativeHandle_)); - } - - /** - *

    Return a handle to the current DB state. Iterators created with - * this handle will all observe a stable snapshot of the current DB - * state. The caller must call ReleaseSnapshot(result) when the - * snapshot is no longer needed.

    + * Note that the returned sizes measure file system space usage, so + * if the user data compresses by a factor of ten, the returned + * sizes will be one-tenth the size of the corresponding user data size. * - *

    nullptr will be returned if the DB fails to take a snapshot or does - * not support snapshot.

    + * If {@code sizeApproximationFlags} defines whether the returned size + * should include the recently written data in the mem-tables (if + * the mem-table type supports it), data serialized to disk, or both. * - * @return Snapshot {@link Snapshot} instance - */ - public Snapshot getSnapshot() { - long snapshotHandle = getSnapshot(nativeHandle_); - if (snapshotHandle != 0) { - return new Snapshot(snapshotHandle); - } - return null; - } - - /** - * Release a previously acquired snapshot. The caller must not - * use "snapshot" after this call. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family + * @param ranges the ranges over which to approximate sizes + * @param sizeApproximationFlags flags to determine what to include in the + * approximation. * - * @param snapshot {@link Snapshot} instance + * @return the sizes */ - public void releaseSnapshot(final Snapshot snapshot) { - if (snapshot != null) { - releaseSnapshot(nativeHandle_, snapshot.nativeHandle_); + public long[] getApproximateSizes( + /*@Nullable*/ final ColumnFamilyHandle columnFamilyHandle, + final List ranges, + final SizeApproximationFlag... sizeApproximationFlags) { + + byte flags = 0x0; + for (final SizeApproximationFlag sizeApproximationFlag + : sizeApproximationFlags) { + flags |= sizeApproximationFlag.getValue(); } - } - /** - *

    Return a heap-allocated iterator over the contents of the - * database. The result of newIterator() is initially invalid - * (caller must call one of the Seek methods on the iterator - * before using it).

    - * - *

    Caller should close the iterator when it is no longer needed. - * The returned iterator should be closed before this db is closed. - *

    - * - * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance - * @return instance of iterator object. - */ - public RocksIterator newIterator( - final ColumnFamilyHandle columnFamilyHandle) { - return new RocksIterator(this, iteratorCF(nativeHandle_, - columnFamilyHandle.nativeHandle_)); + return getApproximateSizes(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + toRangeSliceHandles(ranges), flags); } /** - *

    Return a heap-allocated iterator over the contents of the - * database. The result of newIterator() is initially invalid - * (caller must call one of the Seek methods on the iterator - * before using it).

    + * Get the approximate file system space used by keys in each range for + * the default column family. * - *

    Caller should close the iterator when it is no longer needed. - * The returned iterator should be closed before this db is closed. - *

    + * Note that the returned sizes measure file system space usage, so + * if the user data compresses by a factor of ten, the returned + * sizes will be one-tenth the size of the corresponding user data size. * - * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance - * @param readOptions {@link ReadOptions} instance. - * @return instance of iterator object. - */ - public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle, - final ReadOptions readOptions) { - return new RocksIterator(this, iteratorCF(nativeHandle_, - columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_)); - } - - /** - * Returns iterators from a consistent database state across multiple - * column families. Iterators are heap allocated and need to be deleted - * before the db is deleted + * If {@code sizeApproximationFlags} defines whether the returned size + * should include the recently written data in the mem-tables (if + * the mem-table type supports it), data serialized to disk, or both. * - * @param columnFamilyHandleList {@link java.util.List} containing - * {@link org.rocksdb.ColumnFamilyHandle} instances. - * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator} - * instances + * @param ranges the ranges over which to approximate sizes + * @param sizeApproximationFlags flags to determine what to include in the + * approximation. * - * @throws RocksDBException thrown if error happens in underlying - * native library. + * @return the sizes. */ - public List newIterators( - final List columnFamilyHandleList) - throws RocksDBException { - return newIterators(columnFamilyHandleList, new ReadOptions()); + public long[] getApproximateSizes(final List ranges, + final SizeApproximationFlag... sizeApproximationFlags) { + return getApproximateSizes(null, ranges, sizeApproximationFlags); } - /** - * Returns iterators from a consistent database state across multiple - * column families. Iterators are heap allocated and need to be deleted - * before the db is deleted - * - * @param columnFamilyHandleList {@link java.util.List} containing - * {@link org.rocksdb.ColumnFamilyHandle} instances. - * @param readOptions {@link ReadOptions} instance. - * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator} - * instances - * - * @throws RocksDBException thrown if error happens in underlying - * native library. - */ - public List newIterators( - final List columnFamilyHandleList, - final ReadOptions readOptions) throws RocksDBException { - - final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()]; - for (int i = 0; i < columnFamilyHandleList.size(); i++) { - columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; - } - - final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles, - readOptions.nativeHandle_); + public static class CountAndSize { + public final long count; + public final long size; - final List iterators = new ArrayList<>( - columnFamilyHandleList.size()); - for (int i=0; i columnFamilies) throws RocksDBException { - final long[] cfHandles = new long[columnFamilies.size()]; - for (int i = 0; i < columnFamilies.size(); i++) { - cfHandles[i] = columnFamilies.get(i).nativeHandle_; - } - dropColumnFamilies(nativeHandle_, cfHandles); - } - - /** - *

    Flush all memory table data.

    - * - *

    Note: it must be ensured that the FlushOptions instance - * is not GC'ed before this method finishes. If the wait parameter is - * set to false, flush processing is asynchronous.

    + * instance, or null for the default column family + * @param range the ranges over which to get the memtable stats * - * @param flushOptions {@link org.rocksdb.FlushOptions} instance. - * @throws RocksDBException thrown if an error occurs within the native - * part of the library. + * @return the count and size for the range */ - public void flush(final FlushOptions flushOptions) - throws RocksDBException { - flush(nativeHandle_, flushOptions.nativeHandle_); + public CountAndSize getApproximateMemTableStats( + /*@Nullable*/ final ColumnFamilyHandle columnFamilyHandle, + final Range range) { + final long[] result = getApproximateMemTableStats(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + range.start.getNativeHandle(), + range.limit.getNativeHandle()); + return new CountAndSize(result[0], result[1]); } /** - *

    Flush all memory table data.

    + * This method is similar to + * {@link #getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)}, + * except that it returns approximate number of records and size in memtables. * - *

    Note: it must be ensured that the FlushOptions instance - * is not GC'ed before this method finishes. If the wait parameter is - * set to false, flush processing is asynchronous.

    - * - * @param flushOptions {@link org.rocksdb.FlushOptions} instance. - * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance. - * @throws RocksDBException thrown if an error occurs within the native - * part of the library. + * @param range the ranges over which to get the memtable stats + * + * @return the count and size for the range */ - public void flush(final FlushOptions flushOptions, - final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException { - flush(nativeHandle_, flushOptions.nativeHandle_, - columnFamilyHandle.nativeHandle_); + public CountAndSize getApproximateMemTableStats( + final Range range) { + return getApproximateMemTableStats(null, range); } /** @@ -2580,7 +2828,40 @@ public class RocksDB extends RocksObject { * part of the library. */ public void compactRange() throws RocksDBException { - compactRange0(nativeHandle_, false, -1, 0); + compactRange(null); + } + + /** + *

    Range compaction of column family.

    + *

    Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.

    + * + *

    See also

    + *
      + *
    • + * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)} + *
    • + *
    • + * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])} + *
    • + *
    • + * {@link #compactRange(ColumnFamilyHandle, byte[], byte[], + * boolean, int, int)} + *
    • + *
    + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) + throws RocksDBException { + compactRange(nativeHandle_, null, -1, null, -1, 0, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_); } /** @@ -2604,45 +2885,44 @@ public class RocksDB extends RocksObject { */ public void compactRange(final byte[] begin, final byte[] end) throws RocksDBException { - compactRange0(nativeHandle_, begin, begin.length, end, - end.length, false, -1, 0); + compactRange(null, begin, end); } /** - *

    Range compaction of database.

    + *

    Range compaction of column family.

    *

    Note: After the database has been compacted, * all data will have been pushed down to the last level containing * any data.

    * - *

    Compaction outputs should be placed in options.db_paths - * [target_path_id]. Behavior is undefined if target_path_id is - * out of range.

    - * *

    See also

    *
      - *
    • {@link #compactRange()}
    • - *
    • {@link #compactRange(byte[], byte[])}
    • - *
    • {@link #compactRange(byte[], byte[], boolean, int, int)}
    • + *
    • {@link #compactRange(ColumnFamilyHandle)}
    • + *
    • + * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)} + *
    • + *
    • + * {@link #compactRange(ColumnFamilyHandle, byte[], byte[], + * boolean, int, int)} + *
    • *
    * - * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead - * - * @param reduce_level reduce level after compaction - * @param target_level target level to compact to - * @param target_path_id the target path id of output path + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family. + * @param begin start of key range (included in range) + * @param end end of key range (excluded from range) * * @throws RocksDBException thrown if an error occurs within the native * part of the library. */ - @Deprecated - public void compactRange(final boolean reduce_level, - final int target_level, final int target_path_id) - throws RocksDBException { - compactRange0(nativeHandle_, reduce_level, - target_level, target_path_id); + public void compactRange( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, + final byte[] begin, final byte[] end) throws RocksDBException { + compactRange(nativeHandle_, + begin, begin == null ? -1 : begin.length, + end, end == null ? -1 : end.length, + 0, columnFamilyHandle == null ? 0: columnFamilyHandle.nativeHandle_); } - /** *

    Range compaction of database.

    *

    Note: After the database has been compacted, @@ -2656,27 +2936,23 @@ public class RocksDB extends RocksObject { *

    See also

    *
      *
    • {@link #compactRange()}
    • - *
    • {@link #compactRange(boolean, int, int)}
    • *
    • {@link #compactRange(byte[], byte[])}
    • + *
    • {@link #compactRange(byte[], byte[], boolean, int, int)}
    • *
    * * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead * - * @param begin start of key range (included in range) - * @param end end of key range (excluded from range) - * @param reduce_level reduce level after compaction - * @param target_level target level to compact to - * @param target_path_id the target path id of output path + * @param changeLevel reduce level after compaction + * @param targetLevel target level to compact to + * @param targetPathId the target path id of output path * * @throws RocksDBException thrown if an error occurs within the native * part of the library. */ @Deprecated - public void compactRange(final byte[] begin, final byte[] end, - final boolean reduce_level, final int target_level, - final int target_path_id) throws RocksDBException { - compactRange0(nativeHandle_, begin, begin.length, end, end.length, - reduce_level, target_level, target_path_id); + public void compactRange(final boolean changeLevel, final int targetLevel, + final int targetPathId) throws RocksDBException { + compactRange(null, changeLevel, targetLevel, targetPathId); } /** @@ -2685,11 +2961,13 @@ public class RocksDB extends RocksObject { * all data will have been pushed down to the last level containing * any data.

    * + *

    Compaction outputs should be placed in options.db_paths + * [target_path_id]. Behavior is undefined if target_path_id is + * out of range.

    + * *

    See also

    *
      - *
    • - * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)} - *
    • + *
    • {@link #compactRange(ColumnFamilyHandle)}
    • *
    • * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])} *
    • @@ -2699,16 +2977,67 @@ public class RocksDB extends RocksObject { * *
    * + * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead + * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} - * instance. + * instance, or null for the default column family. + * @param changeLevel reduce level after compaction + * @param targetLevel target level to compact to + * @param targetPathId the target path id of output path * * @throws RocksDBException thrown if an error occurs within the native * part of the library. */ - public void compactRange(final ColumnFamilyHandle columnFamilyHandle) + @Deprecated + public void compactRange( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, + final boolean changeLevel, final int targetLevel, final int targetPathId) throws RocksDBException { - compactRange(nativeHandle_, false, -1, 0, - columnFamilyHandle.nativeHandle_); + final CompactRangeOptions options = new CompactRangeOptions(); + options.setChangeLevel(changeLevel); + options.setTargetLevel(targetLevel); + options.setTargetPathId(targetPathId); + compactRange(nativeHandle_, + null, -1, + null, -1, + options.nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_); + } + + /** + *

    Range compaction of database.

    + *

    Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.

    + * + *

    Compaction outputs should be placed in options.db_paths + * [target_path_id]. Behavior is undefined if target_path_id is + * out of range.

    + * + *

    See also

    + *
      + *
    • {@link #compactRange()}
    • + *
    • {@link #compactRange(boolean, int, int)}
    • + *
    • {@link #compactRange(byte[], byte[])}
    • + *
    + * + * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} + * instead + * + * @param begin start of key range (included in range) + * @param end end of key range (excluded from range) + * @param changeLevel reduce level after compaction + * @param targetLevel target level to compact to + * @param targetPathId the target path id of output path + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + @Deprecated + public void compactRange(final byte[] begin, final byte[] end, + final boolean changeLevel, final int targetLevel, + final int targetPathId) throws RocksDBException { + compactRange(null, begin, end, changeLevel, targetLevel, targetPathId); } /** @@ -2717,6 +3046,10 @@ public class RocksDB extends RocksObject { * all data will have been pushed down to the last level containing * any data.

    * + *

    Compaction outputs should be placed in options.db_paths + * [target_path_id]. Behavior is undefined if target_path_id is + * out of range.

    + * *

    See also

    *
      *
    • {@link #compactRange(ColumnFamilyHandle)}
    • @@ -2724,26 +3057,40 @@ public class RocksDB extends RocksObject { * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)} * *
    • - * {@link #compactRange(ColumnFamilyHandle, byte[], byte[], - * boolean, int, int)} + * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])} *
    • *
    * + * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead + * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance. * @param begin start of key range (included in range) * @param end end of key range (excluded from range) + * @param changeLevel reduce level after compaction + * @param targetLevel target level to compact to + * @param targetPathId the target path id of output path * * @throws RocksDBException thrown if an error occurs within the native * part of the library. */ - public void compactRange(final ColumnFamilyHandle columnFamilyHandle, - final byte[] begin, final byte[] end) throws RocksDBException { - compactRange(nativeHandle_, begin, begin.length, end, end.length, - false, -1, 0, columnFamilyHandle.nativeHandle_); + @Deprecated + public void compactRange( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, + final byte[] begin, final byte[] end, final boolean changeLevel, + final int targetLevel, final int targetPathId) + throws RocksDBException { + final CompactRangeOptions options = new CompactRangeOptions(); + options.setChangeLevel(changeLevel); + options.setTargetLevel(targetLevel); + options.setTargetPathId(targetPathId); + compactRange(nativeHandle_, + begin, begin == null ? -1 : begin.length, + end, end == null ? -1 : end.length, + options.nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_); } - /** *

    Range compaction of column family.

    *

    Note: After the database has been compacted, @@ -2759,115 +3106,325 @@ public class RocksDB extends RocksObject { * part of the library. */ public void compactRange(final ColumnFamilyHandle columnFamilyHandle, - final byte[] begin, final byte[] end, CompactRangeOptions compactRangeOptions) throws RocksDBException { - compactRange(nativeHandle_, begin, begin.length, end, end.length, - compactRangeOptions.nativeHandle_, columnFamilyHandle.nativeHandle_); + final byte[] begin, final byte[] end, + final CompactRangeOptions compactRangeOptions) throws RocksDBException { + compactRange(nativeHandle_, + begin, begin == null ? -1 : begin.length, + end, end == null ? -1 : end.length, + compactRangeOptions.nativeHandle_, columnFamilyHandle.nativeHandle_); } /** - *

    Range compaction of column family.

    - *

    Note: After the database has been compacted, - * all data will have been pushed down to the last level containing - * any data.

    + * Change the options for the column family handle. * - *

    Compaction outputs should be placed in options.db_paths - * [target_path_id]. Behavior is undefined if target_path_id is - * out of range.

    + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family. + * @param mutableColumnFamilyOptions the options. + */ + public void setOptions( + /* @Nullable */final ColumnFamilyHandle columnFamilyHandle, + final MutableColumnFamilyOptions mutableColumnFamilyOptions) + throws RocksDBException { + setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_, + mutableColumnFamilyOptions.getKeys(), + mutableColumnFamilyOptions.getValues()); + } + + /** + * Change the options for the default column family handle. * - *

    See also

    - *
      - *
    • {@link #compactRange(ColumnFamilyHandle)}
    • - *
    • - * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])} - *
    • - *
    • - * {@link #compactRange(ColumnFamilyHandle, byte[], byte[], - * boolean, int, int)} - *
    • - *
+   * @param mutableColumnFamilyOptions the options.
+   */
+  public void setOptions(
+      final MutableColumnFamilyOptions mutableColumnFamilyOptions)
+      throws RocksDBException {
+    setOptions(null, mutableColumnFamilyOptions);
+  }
+
+  /**
+   * Change the DB options.
    *
-   * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+   * @param mutableDBoptions the options.
+   */
+  public void setDBOptions(final MutableDBOptions mutableDBoptions)
+      throws RocksDBException {
+    setDBOptions(nativeHandle_,
+        mutableDBoptions.getKeys(),
+        mutableDBoptions.getValues());
+  }
+
+  /**
+   * Takes a list of files specified by file names and
+   * compacts them to the specified level.
+   *
+   * Note that the behavior is different from
+   * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+   * in that CompactFiles() performs the compaction job using the CURRENT
+   * thread.
+   *
+   * @param compactionOptions compaction options
+   * @param inputFileNames the name of the files to compact
+   * @param outputLevel the level to which they should be compacted
+   * @param outputPathId the id of the output path, or -1
+   * @param compactionJobInfo the compaction job info, this parameter
+   *     will be updated with the info from compacting the files,
+   *     can just be null if you don't need it.
+   */
+  public List<String> compactFiles(
+      final CompactionOptions compactionOptions,
+      final List<String> inputFileNames,
+      final int outputLevel,
+      final int outputPathId,
+      /* @Nullable */ final CompactionJobInfo compactionJobInfo)
+      throws RocksDBException {
+    return compactFiles(compactionOptions, null, inputFileNames, outputLevel,
+        outputPathId, compactionJobInfo);
+  }
+
+  /**
+   * Takes a list of files specified by file names and
+   * compacts them to the specified level.
+   *
+   * Note that the behavior is different from
+   * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+   * in that CompactFiles() performs the compaction job using the CURRENT
+   * thread.
+   *
+   * @param compactionOptions compaction options
+   * @param columnFamilyHandle columnFamilyHandle, or null for the
+   *     default column family
+   * @param inputFileNames the name of the files to compact
+   * @param outputLevel the level to which they should be compacted
+   * @param outputPathId the id of the output path, or -1
+   * @param compactionJobInfo the compaction job info, this parameter
+   *     will be updated with the info from compacting the files,
+   *     can just be null if you don't need it.
+   */
+  public List<String> compactFiles(
+      final CompactionOptions compactionOptions,
+      /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+      final List<String> inputFileNames,
+      final int outputLevel,
+      final int outputPathId,
+      /* @Nullable */ final CompactionJobInfo compactionJobInfo)
+      throws RocksDBException {
+    return Arrays.asList(compactFiles(nativeHandle_, compactionOptions.nativeHandle_,
+        columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+        inputFileNames.toArray(new String[0]),
+        outputLevel,
+        outputPathId,
+        compactionJobInfo == null ? 0 : compactionJobInfo.nativeHandle_));
+  }
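Editor's note: a usage sketch for the new compactFiles() API (illustrative only, not part of the patch). The input file names are hypothetical; real names would come from e.g. getLiveFilesMetaData():

```java
try (final CompactionOptions compactionOptions = new CompactionOptions();
     final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
  // Compact two (hypothetical) L0 files down to L1 on the calling thread.
  final List<String> outputFiles = db.compactFiles(compactionOptions,
      Arrays.asList("/000042.sst", "/000043.sst"),
      1,   // outputLevel
      -1,  // outputPathId: -1 selects the default output path
      compactionJobInfo); // populated with stats about the compaction
}
```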
+
+  /**
+   * This function will wait until all currently running background processes
+   * finish. After it returns, no background process will be run until
+   * {@link #continueBackgroundWork()} is called
    *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance.
-   * @param reduce_level reduce level after compaction
-   * @param target_level target level to compact to
-   * @param target_path_id the target path id of output path
+   */
+  public void pauseBackgroundWork() throws RocksDBException {
+    pauseBackgroundWork(nativeHandle_);
+  }
+
+  /**
+   * Resumes background work which was suspended by
+   * previously calling {@link #pauseBackgroundWork()}
+   *
+   * @throws RocksDBException If an error occurs when resuming background work
+   */
+  public void continueBackgroundWork() throws RocksDBException {
+    continueBackgroundWork(nativeHandle_);
+  }
+
+  /**
+   * Enable automatic compactions for the given column
+   * families if they were previously disabled.
+   *
+   * The function will first set the
+   * {@link ColumnFamilyOptions#disableAutoCompactions()} option for each
+   * column family to false, after which it will schedule a flush/compaction.
+   *
+   * NOTE: Setting disableAutoCompactions to 'false' through
+   * {@link #setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
+   * does NOT schedule a flush/compaction afterwards, and only changes the
+   * parameter itself within the column family option.
+   *
+   * @param columnFamilyHandles the column family handles
+   */
+  public void enableAutoCompaction(
+      final List<ColumnFamilyHandle> columnFamilyHandles)
+      throws RocksDBException {
+    enableAutoCompaction(nativeHandle_,
+        toNativeHandleList(columnFamilyHandles));
+  }
+
+  /**
+   * Number of levels used for this DB.
+   *
+   * @return the number of levels
+   */
+  public int numberLevels() {
+    return numberLevels(null);
+  }
+
+  /**
+   * Number of levels used for a column family in this DB.
+   *
+   * @param columnFamilyHandle the column family handle, or null
+   *     for the default column family
+   *
+   * @return the number of levels
+   */
+  public int numberLevels(/* @Nullable */final ColumnFamilyHandle columnFamilyHandle) {
+    return numberLevels(nativeHandle_,
+        columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+  }
+
+  /**
+   * Maximum level to which a new compacted memtable is pushed if it
+   * does not create overlap.
+   */
+  public int maxMemCompactionLevel() {
+    return maxMemCompactionLevel(null);
+  }
+
+  /**
+   * Maximum level to which a new compacted memtable is pushed if it
+   * does not create overlap.
+   *
+   * @param columnFamilyHandle the column family handle
+   */
+  public int maxMemCompactionLevel(
+      /* @Nullable */final ColumnFamilyHandle columnFamilyHandle) {
+    return maxMemCompactionLevel(nativeHandle_,
+        columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+  }
+
+  /**
+   * Number of files in level-0 that would stop writes.
+   */
+  public int level0StopWriteTrigger() {
+    return level0StopWriteTrigger(null);
+  }
+
+  /**
+   * Number of files in level-0 that would stop writes.
+   *
+   * @param columnFamilyHandle the column family handle
+   */
+  public int level0StopWriteTrigger(
+      /* @Nullable */final ColumnFamilyHandle columnFamilyHandle) {
+    return level0StopWriteTrigger(nativeHandle_,
+        columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+  }
+
+  /**
+   * Get DB name -- the exact same name that was provided as the path
+   * argument to {@link #open(Options, String)}.
+ * + * @return the DB name + */ + public String getName() { + return getName(nativeHandle_); + } + + /** + * Get the Env object from the DB + * + * @return the env + */ + public Env getEnv() { + final long envHandle = getEnv(nativeHandle_); + if (envHandle == Env.getDefault().nativeHandle_) { + return Env.getDefault(); + } else { + final Env env = new RocksEnv(envHandle); + env.disOwnNativeHandle(); // we do not own the Env! + return env; + } + } + + /** + *

    Flush all memory table data.

    + * + *

    Note: it must be ensured that the FlushOptions instance + * is not GC'ed before this method finishes. If the wait parameter is + * set to false, flush processing is asynchronous.

    + * + * @param flushOptions {@link org.rocksdb.FlushOptions} instance. + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void flush(final FlushOptions flushOptions) + throws RocksDBException { + flush(flushOptions, (List) null); + } + + /** + *

    Flush all memory table data.

    + * + *

    Note: it must be ensured that the FlushOptions instance + * is not GC'ed before this method finishes. If the wait parameter is + * set to false, flush processing is asynchronous.

    * + * @param flushOptions {@link org.rocksdb.FlushOptions} instance. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance. * @throws RocksDBException thrown if an error occurs within the native * part of the library. */ - @Deprecated - public void compactRange(final ColumnFamilyHandle columnFamilyHandle, - final boolean reduce_level, final int target_level, - final int target_path_id) throws RocksDBException { - compactRange(nativeHandle_, reduce_level, target_level, - target_path_id, columnFamilyHandle.nativeHandle_); + public void flush(final FlushOptions flushOptions, + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) + throws RocksDBException { + flush(flushOptions, + columnFamilyHandle == null ? null : Arrays.asList(columnFamilyHandle)); } /** - *

    Range compaction of column family.

    - *

    Note: After the database has been compacted, - * all data will have been pushed down to the last level containing - * any data.

    - * - *

    Compaction outputs should be placed in options.db_paths - * [target_path_id]. Behavior is undefined if target_path_id is - * out of range.

    - * - *

    See also

    - *
      - *
    • {@link #compactRange(ColumnFamilyHandle)}
    • - *
    • - * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)} - *
    • - *
    • - * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])} - *
    • - *
+   * Flushes multiple column families.
    *
-   * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+   * If atomic flush is not enabled, this is equivalent to calling
+   * {@link #flush(FlushOptions, ColumnFamilyHandle)} multiple times.
    *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance.
-   * @param begin start of key range (included in range)
-   * @param end end of key range (excluded from range)
-   * @param reduce_level reduce level after compaction
-   * @param target_level target level to compact to
-   * @param target_path_id the target path id of output path
+   * If atomic flush is enabled, this will flush all column families
+   * specified up to the latest sequence number at the time when flush is
+   * requested.
    *
+   * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+   * @param columnFamilyHandles column family handles.
    * @throws RocksDBException thrown if an error occurs within the native
    *     part of the library.
    */
-  @Deprecated
-  public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] begin, final byte[] end, final boolean reduce_level,
-      final int target_level, final int target_path_id)
+  public void flush(final FlushOptions flushOptions,
+      /* @Nullable */ final List<ColumnFamilyHandle> columnFamilyHandles)
       throws RocksDBException {
-    compactRange(nativeHandle_, begin, begin.length, end, end.length,
-        reduce_level, target_level, target_path_id,
-        columnFamilyHandle.nativeHandle_);
+    flush(nativeHandle_, flushOptions.nativeHandle_,
+        toNativeHandleList(columnFamilyHandles));
   }
 
   /**
-   * This function will wait until all currently running background processes
-   * finish. After it returns, no background process will be run until
-   * {@link #continueBackgroundWork()} is called
+   * Flush the WAL memory buffer to the file. If {@code sync} is true,
+   * it calls {@link #syncWal()} afterwards.
    *
-   * @throws RocksDBException If an error occurs when pausing background work
+   * @param sync true to also fsync to disk.
    */
-  public void pauseBackgroundWork() throws RocksDBException {
-    pauseBackgroundWork(nativeHandle_);
+  public void flushWal(final boolean sync) throws RocksDBException {
+    flushWal(nativeHandle_, sync);
   }
 
   /**
-   * Resumes backround work which was suspended by
-   * previously calling {@link #pauseBackgroundWork()}
+   * Sync the WAL.
    *
-   * @throws RocksDBException If an error occurs when resuming background work
+   * Note that {@link #write(WriteOptions, WriteBatch)} followed by
+   * {@link #syncWal()} is not exactly the same as
+   * {@link #write(WriteOptions, WriteBatch)} with
+   * {@link WriteOptions#sync()} set to true; in the latter case the changes
+   * won't be visible until the sync is done.
+   *
+   * Currently only works if {@link Options#allowMmapWrites()} is set to false.
    */
-  public void continueBackgroundWork() throws RocksDBException {
-    continueBackgroundWork(nativeHandle_);
+  public void syncWal() throws RocksDBException {
+    syncWal(nativeHandle_);
   }
 
   /**
@@ -2880,6 +3437,25 @@ public class RocksDB extends RocksObject {
     return getLatestSequenceNumber(nativeHandle_);
   }
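Editor's note: a sketch of the multi-column-family flush added here (illustrative only, not part of the patch). `cf1`/`cf2` are assumed open handles; the flush is atomic only if atomic flush was enabled in the DB options:

```java
try (final FlushOptions flushOptions = new FlushOptions()
         .setWaitForFlush(true)) {
  // One flush request covering both column families.
  db.flush(flushOptions, Arrays.asList(cf1, cf2));
}
db.flushWal(true); // flush the WAL buffer and fsync it to disk
```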
+
+  /**
+   * Instructs DB to preserve deletes with sequence numbers &gt;= sequenceNumber.
+   *
+   * Has no effect if DBOptions#preserveDeletes() is set to false.
+   *
+   * This function assumes that user calls this function with monotonically
+   * increasing seqnums (otherwise we can't guarantee that a particular delete
+   * hasn't been already processed).
+   *
+   * @param sequenceNumber the minimum sequence number to preserve
+   *
+   * @return true if the value was successfully updated,
+   *     false if user attempted to call it with
+   *     sequenceNumber &lt;= current value.
+   */
+  public boolean setPreserveDeletesSequenceNumber(final long sequenceNumber) {
+    return setPreserveDeletesSequenceNumber(nativeHandle_, sequenceNumber);
+  }
+
   /**

    * <p>Prevent file deletions. Compactions will continue to occur,
    * but no obsolete files will be deleted. Calling this multiple
@@ -2917,6 +3493,78 @@ public class RocksDB extends RocksObject {
     enableFileDeletions(nativeHandle_, force);
   }
 
+  public static class LiveFiles {
+    /**
+     * The valid size of the manifest file. The manifest file is an ever growing
+     * file, but only the portion specified here is valid for this snapshot.
+     */
+    public final long manifestFileSize;
+
+    /**
+     * The files are relative to the {@link #getName()} and are not
+     * absolute paths. Despite being relative paths, the file names begin
+     * with "/".
+     */
+    public final List<String> files;
+
+    LiveFiles(final long manifestFileSize, final List<String> files) {
+      this.manifestFileSize = manifestFileSize;
+      this.files = files;
+    }
+  }
+
+  /**
+   * Retrieve the list of all files in the database after flushing the memtable.
+   *
+   * See {@link #getLiveFiles(boolean)}.
+   *
+   * @return the live files
+   */
+  public LiveFiles getLiveFiles() throws RocksDBException {
+    return getLiveFiles(true);
+  }
+
+  /**
+   * Retrieve the list of all files in the database.
+   *
+   * In case you have multiple column families, even if {@code flushMemtable}
+   * is true, you still need to call {@link #getSortedWalFiles()}
+   * after {@link #getLiveFiles(boolean)} to compensate for new data that
+   * arrived to already-flushed column families while other column families
+   * were flushing.
+   *
+   * NOTE: Calling {@link #getLiveFiles(boolean)} followed by
+   * {@link #getSortedWalFiles()} can generate a lossless backup.
+   *
+   * @param flushMemtable set to true to flush before recording the live
+   *     files. Setting to false is useful when we don't want to wait for flush
+   *     which may have to wait for compaction to complete taking an
+   *     indeterminate time.
+   *
+   * @return the live files
+   */
+  public LiveFiles getLiveFiles(final boolean flushMemtable)
+      throws RocksDBException {
+    final String[] result = getLiveFiles(nativeHandle_, flushMemtable);
+    if (result == null) {
+      return null;
+    }
+    final String[] files = Arrays.copyOf(result, result.length - 1);
+    final long manifestFileSize = Long.parseLong(result[result.length - 1]);
+
+    return new LiveFiles(manifestFileSize, Arrays.asList(files));
+  }
+
+  /**
+   * Retrieve the sorted list of all wal files with earliest file first.
+   *
+   * @return the log files
+   */
+  public List<LogFile> getSortedWalFiles() throws RocksDBException {
+    final LogFile[] logFiles = getSortedWalFiles(nativeHandle_);
+    return Arrays.asList(logFiles);
+  }
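Editor's note: a sketch of the lossless-backup recipe described in the getLiveFiles(boolean) javadoc (illustrative only, not part of the patch; the copy steps are left as comments):

```java
final RocksDB.LiveFiles liveFiles = db.getLiveFiles(true); // flush, then list
for (final String file : liveFiles.files) {
  // Copy each file, resolved against db.getName(); for the MANIFEST,
  // copy only the first liveFiles.manifestFileSize bytes.
}
for (final LogFile walFile : db.getSortedWalFiles()) {
  // Then copy the WAL files, earliest first (walFile.pathName()),
  // to pick up writes that landed after the flush.
}
```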

   * <p>Returns an iterator that is positioned at a write-batch containing
   * seq_number. If the sequence number is non existent, it returns an iterator
@@ -2940,21 +3588,46 @@ public class RocksDB extends RocksObject {
         getUpdatesSince(nativeHandle_, sequenceNumber));
   }
 
-  public void setOptions(final ColumnFamilyHandle columnFamilyHandle,
-      final MutableColumnFamilyOptions mutableColumnFamilyOptions)
-      throws RocksDBException {
-    setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_,
-        mutableColumnFamilyOptions.getKeys(),
-        mutableColumnFamilyOptions.getValues());
+  /**
+   * Delete the file name from the db directory and update the internal state to
+   * reflect that. Supports deletion of sst and log files only. 'name' must be
+   * path relative to the db directory. eg. 000001.sst, /archive/000003.log
+   *
+   * @param name the file name
+   */
+  public void deleteFile(final String name) throws RocksDBException {
+    deleteFile(nativeHandle_, name);
   }
 
-  private long[] toNativeHandleList(final List<? extends RocksObject> objectList) {
-    final int len = objectList.size();
-    final long[] handleList = new long[len];
-    for (int i = 0; i < len; i++) {
-      handleList[i] = objectList.get(i).nativeHandle_;
-    }
-    return handleList;
+  /**
+   * Gets a list of all table files metadata.
+   *
+   * @return table files metadata.
+   */
+  public List<LiveFileMetaData> getLiveFilesMetaData() {
+    return Arrays.asList(getLiveFilesMetaData(nativeHandle_));
+  }
+
+  /**
+   * Obtains the meta data of the specified column family of the DB.
+   *
+   * @param columnFamilyHandle the column family
+   *
+   * @return the column family metadata
+   */
+  public ColumnFamilyMetaData getColumnFamilyMetaData(
+      /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) {
+    return getColumnFamilyMetaData(nativeHandle_,
+        columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+  }
+
+  /**
+   * Obtains the meta data of the default column family of the DB.
+   *
+   * @return the column family metadata
+   */
+  public ColumnFamilyMetaData getColumnFamilyMetaData() {
+    return getColumnFamilyMetaData(null);
   }
 
   /**
@@ -2978,7 +3651,7 @@ public class RocksDB extends RocksObject {
       final IngestExternalFileOptions ingestExternalFileOptions)
       throws RocksDBException {
     ingestExternalFile(nativeHandle_, getDefaultColumnFamily().nativeHandle_,
-        filePathList.toArray(new String[filePathList.size()]),
+        filePathList.toArray(new String[0]),
         filePathList.size(), ingestExternalFileOptions.nativeHandle_);
   }
 
@@ -3005,10 +3678,162 @@ public class RocksDB extends RocksObject {
       final IngestExternalFileOptions ingestExternalFileOptions)
       throws RocksDBException {
     ingestExternalFile(nativeHandle_, columnFamilyHandle.nativeHandle_,
-        filePathList.toArray(new String[filePathList.size()]),
+        filePathList.toArray(new String[0]),
         filePathList.size(), ingestExternalFileOptions.nativeHandle_);
   }
 
+  /**
+   * Verify checksum
+   *
+   * @throws RocksDBException if the checksum is not valid
+   */
+  public void verifyChecksum() throws RocksDBException {
+    verifyChecksum(nativeHandle_);
+  }
+
+  /**
+   * Gets the handle for the default column family
+   *
+   * @return The handle of the default column family
+   */
+  public ColumnFamilyHandle getDefaultColumnFamily() {
+    final ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this,
+        getDefaultColumnFamily(nativeHandle_));
+    cfHandle.disOwnNativeHandle();
+    return cfHandle;
+  }
+
+  /**
+   * Get the properties of all tables.
+   *
+   * @param columnFamilyHandle the column family handle, or null for the default
+   *     column family.
+ + /** + * Get the properties of all tables. + * + * @param columnFamilyHandle the column family handle, or null for the default + * column family. + * + * @return the properties + */ + public Map<String, TableProperties> getPropertiesOfAllTables( + /* @Nullable */final ColumnFamilyHandle columnFamilyHandle) + throws RocksDBException { + return getPropertiesOfAllTables(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_); + } + + /** + * Get the properties of all tables in the default column family. + * + * @return the properties + */ + public Map<String, TableProperties> getPropertiesOfAllTables() + throws RocksDBException { + return getPropertiesOfAllTables(null); + } + + /** + * Get the properties of tables in range. + * + * @param columnFamilyHandle the column family handle, or null for the default + * column family. + * @param ranges the ranges over which to get the table properties + * + * @return the properties + */ + public Map<String, TableProperties> getPropertiesOfTablesInRange( + /* @Nullable */final ColumnFamilyHandle columnFamilyHandle, + final List<Range> ranges) throws RocksDBException { + return getPropertiesOfTablesInRange(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + toRangeSliceHandles(ranges)); + } + + /** + * Get the properties of tables in range for the default column family. + * + * @param ranges the ranges over which to get the table properties + * + * @return the properties + */ + public Map<String, TableProperties> getPropertiesOfTablesInRange( + final List<Range> ranges) throws RocksDBException { + return getPropertiesOfTablesInRange(null, ranges); + } + + /** + * Suggest the range to compact. + * + * @param columnFamilyHandle the column family handle, or null for the default + * column family. + * + * @return the suggested range. + */ + public Range suggestCompactRange( + /* @Nullable */final ColumnFamilyHandle columnFamilyHandle) + throws RocksDBException { + final long[] rangeSliceHandles = suggestCompactRange(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_); + return new Range(new Slice(rangeSliceHandles[0]), + new Slice(rangeSliceHandles[1])); + } + + /** + * Suggest the range to compact for the default column family. + * + * @return the suggested range. + */ + public Range suggestCompactRange() + throws RocksDBException { + return suggestCompactRange(null); } + + /** + * Promote L0. + * + * @param columnFamilyHandle the column family handle, + * or null for the default column family. + * @param targetLevel the level to which the L0 files are moved + */ + public void promoteL0( + /* @Nullable */final ColumnFamilyHandle columnFamilyHandle, + final int targetLevel) throws RocksDBException { + promoteL0(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + targetLevel); + } + + /** + * Promote L0 for the default column family. + * + * @param targetLevel the level to which the L0 files are moved + */ + public void promoteL0(final int targetLevel) + throws RocksDBException { + promoteL0(null, targetLevel); + } + + /** + * Trace DB operations. + * + * Use {@link #endTrace()} to stop tracing. + * + * @param traceOptions the options + * @param traceWriter the trace writer + */ + public void startTrace(final TraceOptions traceOptions, + final AbstractTraceWriter traceWriter) throws RocksDBException { + startTrace(nativeHandle_, traceOptions.getMaxTraceFileSize(), + traceWriter.nativeHandle_); + /** + * NOTE: {@link #startTrace(long, long, long)} transfers the ownership + * of the native handle from Java to C++, so we must disown it here. + */ + traceWriter.disOwnNativeHandle(); + }
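To make the ownership note in startTrace(...) concrete, here is a hedged sketch of a complete trace session. It assumes AbstractTraceWriter exposes the three TraceWriter methods shown later in this patch (write, closeWriter, getFileSize); a real writer would persist the bytes instead of counting them.

```java
import org.rocksdb.*;

// Hedged sketch of a trace session using a byte-counting writer.
final class TraceSketch {
  static void traceSomeWork(final RocksDB db) throws RocksDBException {
    final AbstractTraceWriter countingWriter = new AbstractTraceWriter() {
      private long written = 0;
      @Override public void write(final Slice data) {
        written += data.data().length; // a real writer would persist the bytes
      }
      @Override public void closeWriter() { /* flush/close a real sink here */ }
      @Override public long getFileSize() { return written; }
    };
    // startTrace takes ownership of the writer's native handle (see above).
    db.startTrace(new TraceOptions(64 * 1024 * 1024), countingWriter);
    // ... DB operations issued here are captured ...
    db.endTrace();
  }
}
```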
+ + /** + * Stop tracing DB operations. + * + * See {@link #startTrace(TraceOptions, AbstractTraceWriter)} + */ + public void endTrace() throws RocksDBException { + endTrace(nativeHandle_); + } + /** * Static method to destroy the contents of the specified database. * Be very careful using this method. @@ -3024,17 +3849,47 @@ public class RocksDB extends RocksObject { destroyDB(path, options.nativeHandle_); } - /** - * Private constructor. - * - * @param nativeHandle The native handle of the C++ RocksDB object - */ - protected RocksDB(final long nativeHandle) { - super(nativeHandle); + private /* @Nullable */ long[] toNativeHandleList( + /* @Nullable */ final List<? extends RocksObject> objectList) { + if (objectList == null) { + return null; + } + final int len = objectList.size(); + final long[] handleList = new long[len]; + for (int i = 0; i < len; i++) { + handleList[i] = objectList.get(i).nativeHandle_; + } + return handleList; + } + + private static long[] toRangeSliceHandles(final List<Range> ranges) { + final long[] rangeSliceHandles = new long[ranges.size() * 2]; + for (int i = 0, j = 0; i < ranges.size(); i++) { + final Range range = ranges.get(i); + rangeSliceHandles[j++] = range.start.getNativeHandle(); + rangeSliceHandles[j++] = range.limit.getNativeHandle(); + } + return rangeSliceHandles; + } + + protected void storeOptionsInstance(DBOptionsInterface options) { + options_ = options; + } + + private static void checkBounds(int offset, int len, int size) { + // The bitwise OR is negative if, and only if, at least one operand is + // negative, i.e. if offset, len or offset + len is negative, or if + // offset + len exceeds size. + if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { + throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size)); + } + } + + private static int computeCapacityHint(final int estimatedNumberOfItems) { + // Default load factor for HashMap is 0.75, so N * 1.5 will be at the load + // limit. We add +1 for a buffer.
+ return (int)Math.ceil(estimatedNumberOfItems * 1.5 + 1.0); } // native methods - protected native static long open(final long optionsHandle, + private native static long open(final long optionsHandle, final String path) throws RocksDBException; /** @@ -3049,11 +3904,11 @@ public class RocksDB extends RocksObject { * * @throws RocksDBException thrown if the database could not be opened */ - protected native static long[] open(final long optionsHandle, + private native static long[] open(final long optionsHandle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions) throws RocksDBException; - protected native static long openROnly(final long optionsHandle, + private native static long openROnly(final long optionsHandle, final String path) throws RocksDBException; /** @@ -3068,177 +3923,258 @@ public class RocksDB extends RocksObject { * * @throws RocksDBException thrown if the database could not be opened */ - protected native static long[] openROnly(final long optionsHandle, + private native static long[] openROnly(final long optionsHandle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions ) throws RocksDBException; - protected native static byte[][] listColumnFamilies(long optionsHandle, - String path) throws RocksDBException; - protected native void put(long handle, byte[] key, int keyOffset, - int keyLength, byte[] value, int valueOffset, int valueLength) + @Override protected native void disposeInternal(final long handle); + + private native static void closeDatabase(final long handle) + throws RocksDBException; + private native static byte[][] listColumnFamilies(final long optionsHandle, + final String path) throws RocksDBException; + private native long createColumnFamily(final long handle, + final byte[] columnFamilyName, final int columnFamilyNamelen, + final long columnFamilyOptions) throws RocksDBException; + private native long[] createColumnFamilies(final long handle, + final long columnFamilyOptionsHandle, final byte[][] columnFamilyNames) + throws RocksDBException; + private native long[] createColumnFamilies(final long handle, + final long columnFamilyOptionsHandles[], final byte[][] columnFamilyNames) + throws RocksDBException; + private native void dropColumnFamily( + final long handle, final long cfHandle) throws RocksDBException; + private native void dropColumnFamilies(final long handle, + final long[] cfHandles) throws RocksDBException; + //TODO(AR) best way to express DestroyColumnFamilyHandle? ...maybe in ColumnFamilyHandle? 
+ private native void put(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final byte[] value, + final int valueOffset, int valueLength) throws RocksDBException; + private native void put(final long handle, final byte[] key, final int keyOffset, + final int keyLength, final byte[] value, final int valueOffset, + final int valueLength, final long cfHandle) throws RocksDBException; + private native void put(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength) + throws RocksDBException; + private native void put(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength, + final long cfHandle) throws RocksDBException; + private native void delete(final long handle, final byte[] key, + final int keyOffset, final int keyLength) throws RocksDBException; + private native void delete(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final long cfHandle) + throws RocksDBException; + private native void delete(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength) + throws RocksDBException; + private native void delete(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final long cfHandle) throws RocksDBException; + private native void singleDelete( + final long handle, final byte[] key, final int keyLen) + throws RocksDBException; + private native void singleDelete( + final long handle, final byte[] key, final int keyLen, + final long cfHandle) throws RocksDBException; + private native void singleDelete( + final long handle, final long writeOptHandle, final byte[] key, + final int keyLen) throws RocksDBException; + private native void singleDelete( + final long handle, final long writeOptHandle, + final byte[] key, final int keyLen, final long cfHandle) + throws RocksDBException; + private native void deleteRange(final long handle, final byte[] beginKey, + final int beginKeyOffset, final int beginKeyLength, final byte[] endKey, + final int endKeyOffset, final int endKeyLength) throws RocksDBException; + private native void deleteRange(final long handle, final byte[] beginKey, + final int beginKeyOffset, final int beginKeyLength, final byte[] endKey, + final int endKeyOffset, final int endKeyLength, final long cfHandle) + throws RocksDBException; + private native void deleteRange(final long handle, final long writeOptHandle, + final byte[] beginKey, final int beginKeyOffset, final int beginKeyLength, + final byte[] endKey, final int endKeyOffset, final int endKeyLength) throws RocksDBException; - protected native void put(long handle, byte[] key, int keyOffset, - int keyLength, byte[] value, int valueOffset, int valueLength, - long cfHandle) throws RocksDBException; - protected native void put(long handle, long writeOptHandle, byte[] key, - int keyOffset, int keyLength, byte[] value, int valueOffset, - int valueLength) throws RocksDBException; - protected native void put(long handle, long writeOptHandle, byte[] key, - int keyOffset, int keyLength, byte[] value, int valueOffset, - int valueLength, long cfHandle) throws RocksDBException; - protected native void write0(final long handle, long writeOptHandle, - long wbHandle) throws RocksDBException; - protected native void write1(final long 
handle, long writeOptHandle, - long wbwiHandle) throws RocksDBException; - protected native boolean keyMayExist(final long handle, final byte[] key, + private native void deleteRange( + final long handle, final long writeOptHandle, final byte[] beginKey, + final int beginKeyOffset, final int beginKeyLength, final byte[] endKey, + final int endKeyOffset, final int endKeyLength, final long cfHandle) + throws RocksDBException; + private native void merge(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final byte[] value, + final int valueOffset, final int valueLength) throws RocksDBException; + private native void merge(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final byte[] value, + final int valueOffset, final int valueLength, final long cfHandle) + throws RocksDBException; + private native void merge(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength) + throws RocksDBException; + private native void merge(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength, + final long cfHandle) throws RocksDBException; + private native void write0(final long handle, final long writeOptHandle, + final long wbHandle) throws RocksDBException; + private native void write1(final long handle, final long writeOptHandle, + final long wbwiHandle) throws RocksDBException; + private native int get(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final byte[] value, + final int valueOffset, final int valueLength) throws RocksDBException; + private native int get(final long handle, final byte[] key, + final int keyOffset, final int keyLength, byte[] value, + final int valueOffset, final int valueLength, final long cfHandle) + throws RocksDBException; + private native int get(final long handle, final long readOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength) + throws RocksDBException; + private native int get(final long handle, final long readOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength, + final long cfHandle) throws RocksDBException; + private native byte[] get(final long handle, byte[] key, final int keyOffset, + final int keyLength) throws RocksDBException; + private native byte[] get(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final long cfHandle) + throws RocksDBException; + private native byte[] get(final long handle, final long readOptHandle, + final byte[] key, final int keyOffset, final int keyLength) + throws RocksDBException; + private native byte[] get(final long handle, + final long readOptHandle, final byte[] key, final int keyOffset, + final int keyLength, final long cfHandle) throws RocksDBException; + private native byte[][] multiGet(final long dbHandle, final byte[][] keys, + final int[] keyOffsets, final int[] keyLengths); + private native byte[][] multiGet(final long dbHandle, final byte[][] keys, + final int[] keyOffsets, final int[] keyLengths, + final long[] columnFamilyHandles); + private native byte[][] multiGet(final long dbHandle, final long rOptHandle, + final byte[][] keys, final int[] keyOffsets, final int[] keyLengths); + 
private native byte[][] multiGet(final long dbHandle, final long rOptHandle, + final byte[][] keys, final int[] keyOffsets, final int[] keyLengths, + final long[] columnFamilyHandles); + private native boolean keyMayExist(final long handle, final byte[] key, final int keyOffset, final int keyLength, final StringBuilder stringBuilder); - protected native boolean keyMayExist(final long handle, final byte[] key, + private native boolean keyMayExist(final long handle, final byte[] key, final int keyOffset, final int keyLength, final long cfHandle, final StringBuilder stringBuilder); - protected native boolean keyMayExist(final long handle, + private native boolean keyMayExist(final long handle, final long optionsHandle, final byte[] key, final int keyOffset, final int keyLength, final StringBuilder stringBuilder); - protected native boolean keyMayExist(final long handle, + private native boolean keyMayExist(final long handle, final long optionsHandle, final byte[] key, final int keyOffset, final int keyLength, final long cfHandle, final StringBuilder stringBuilder); - protected native void merge(long handle, byte[] key, int keyOffset, - int keyLength, byte[] value, int valueOffset, int valueLength) + private native long iterator(final long handle); + private native long iterator(final long handle, final long readOptHandle); + private native long iteratorCF(final long handle, final long cfHandle); + private native long iteratorCF(final long handle, final long cfHandle, + final long readOptHandle); + private native long[] iterators(final long handle, + final long[] columnFamilyHandles, final long readOptHandle) throws RocksDBException; - protected native void merge(long handle, byte[] key, int keyOffset, - int keyLength, byte[] value, int valueOffset, int valueLength, - long cfHandle) throws RocksDBException; - protected native void merge(long handle, long writeOptHandle, byte[] key, - int keyOffset, int keyLength, byte[] value, int valueOffset, - int valueLength) throws RocksDBException; - protected native void merge(long handle, long writeOptHandle, byte[] key, - int keyOffset, int keyLength, byte[] value, int valueOffset, - int valueLength, long cfHandle) throws RocksDBException; - protected native int get(long handle, byte[] key, int keyOffset, - int keyLength, byte[] value, int valueOffset, int valueLength) + private native long getSnapshot(final long nativeHandle); + private native void releaseSnapshot( + final long nativeHandle, final long snapshotHandle); + private native String getProperty(final long nativeHandle, + final long cfHandle, final String property, final int propertyLength) throws RocksDBException; - protected native int get(long handle, byte[] key, int keyOffset, - int keyLength, byte[] value, int valueOffset, int valueLength, - long cfHandle) throws RocksDBException; - protected native int get(long handle, long readOptHandle, byte[] key, - int keyOffset, int keyLength, byte[] value, int valueOffset, - int valueLength) throws RocksDBException; - protected native int get(long handle, long readOptHandle, byte[] key, - int keyOffset, int keyLength, byte[] value, int valueOffset, - int valueLength, long cfHandle) throws RocksDBException; - protected native byte[][] multiGet(final long dbHandle, final byte[][] keys, - final int[] keyOffsets, final int[] keyLengths); - protected native byte[][] multiGet(final long dbHandle, final byte[][] keys, - final int[] keyOffsets, final int[] keyLengths, - final long[] columnFamilyHandles); - protected native byte[][] multiGet(final long 
dbHandle, final long rOptHandle, - final byte[][] keys, final int[] keyOffsets, final int[] keyLengths); - protected native byte[][] multiGet(final long dbHandle, final long rOptHandle, - final byte[][] keys, final int[] keyOffsets, final int[] keyLengths, - final long[] columnFamilyHandles); - protected native byte[] get(long handle, byte[] key, int keyOffset, - int keyLength) throws RocksDBException; - protected native byte[] get(long handle, byte[] key, int keyOffset, - int keyLength, long cfHandle) throws RocksDBException; - protected native byte[] get(long handle, long readOptHandle, - byte[] key, int keyOffset, int keyLength) throws RocksDBException; - protected native byte[] get(long handle, long readOptHandle, byte[] key, - int keyOffset, int keyLength, long cfHandle) throws RocksDBException; - protected native void delete(long handle, byte[] key, int keyOffset, - int keyLength) throws RocksDBException; - protected native void delete(long handle, byte[] key, int keyOffset, - int keyLength, long cfHandle) throws RocksDBException; - protected native void delete(long handle, long writeOptHandle, byte[] key, - int keyOffset, int keyLength) throws RocksDBException; - protected native void delete(long handle, long writeOptHandle, byte[] key, - int keyOffset, int keyLength, long cfHandle) throws RocksDBException; - protected native void singleDelete( - long handle, byte[] key, int keyLen) throws RocksDBException; - protected native void singleDelete( - long handle, byte[] key, int keyLen, long cfHandle) + private native Map getMapProperty(final long nativeHandle, + final long cfHandle, final String property, final int propertyLength) throws RocksDBException; - protected native void singleDelete( - long handle, long writeOptHandle, - byte[] key, int keyLen) throws RocksDBException; - protected native void singleDelete( - long handle, long writeOptHandle, - byte[] key, int keyLen, long cfHandle) throws RocksDBException; - protected native void deleteRange(long handle, byte[] beginKey, int beginKeyOffset, - int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength) + private native long getLongProperty(final long nativeHandle, + final long cfHandle, final String property, final int propertyLength) throws RocksDBException; - protected native void deleteRange(long handle, byte[] beginKey, int beginKeyOffset, - int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength, long cfHandle) + private native void resetStats(final long nativeHandle) throws RocksDBException; - protected native void deleteRange(long handle, long writeOptHandle, byte[] beginKey, - int beginKeyOffset, int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength) + private native long getAggregatedLongProperty(final long nativeHandle, + final String property, int propertyLength) throws RocksDBException; + private native long[] getApproximateSizes(final long nativeHandle, + final long columnFamilyHandle, final long[] rangeSliceHandles, + final byte includeFlags); + private final native long[] getApproximateMemTableStats( + final long nativeHandle, final long columnFamilyHandle, + final long rangeStartSliceHandle, final long rangeLimitSliceHandle); + private native void compactRange(final long handle, + /* @Nullable */ final byte[] begin, final int beginLen, + /* @Nullable */ final byte[] end, final int endLen, + final long compactRangeOptHandle, final long cfHandle) throws RocksDBException; - protected native void deleteRange(long handle, long writeOptHandle, byte[] beginKey, - int 
beginKeyOffset, int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength, - long cfHandle) throws RocksDBException; - protected native String getProperty0(long nativeHandle, - String property, int propertyLength) throws RocksDBException; - protected native String getProperty0(long nativeHandle, long cfHandle, - String property, int propertyLength) throws RocksDBException; - protected native long getLongProperty(long nativeHandle, String property, - int propertyLength) throws RocksDBException; - protected native long getLongProperty(long nativeHandle, long cfHandle, - String property, int propertyLength) throws RocksDBException; - protected native long getAggregatedLongProperty(long nativeHandle, String property, - int propertyLength) throws RocksDBException; - protected native long iterator(long handle); - protected native long iterator(long handle, long readOptHandle); - protected native long iteratorCF(long handle, long cfHandle); - protected native long iteratorCF(long handle, long cfHandle, - long readOptHandle); - protected native long[] iterators(final long handle, - final long[] columnFamilyHandles, final long readOptHandle) + private native void setOptions(final long handle, final long cfHandle, + final String[] keys, final String[] values) throws RocksDBException; + private native void setDBOptions(final long handle, + final String[] keys, final String[] values) throws RocksDBException; + private native String[] compactFiles(final long handle, + final long compactionOptionsHandle, + final long columnFamilyHandle, + final String[] inputFileNames, + final int outputLevel, + final int outputPathId, + final long compactionJobInfoHandle) throws RocksDBException; + private native void pauseBackgroundWork(final long handle) throws RocksDBException; - protected native long getSnapshot(long nativeHandle); - protected native void releaseSnapshot( - long nativeHandle, long snapshotHandle); - @Override protected native void disposeInternal(final long handle); - private native long getDefaultColumnFamily(long handle); - private native long createColumnFamily(final long handle, - final byte[] columnFamilyName, final long columnFamilyOptions) + private native void continueBackgroundWork(final long handle) throws RocksDBException; - private native void dropColumnFamily(final long handle, final long cfHandle) + private native void enableAutoCompaction(final long handle, + final long[] columnFamilyHandles) throws RocksDBException; + private native int numberLevels(final long handle, + final long columnFamilyHandle); + private native int maxMemCompactionLevel(final long handle, + final long columnFamilyHandle); + private native int level0StopWriteTrigger(final long handle, + final long columnFamilyHandle); + private native String getName(final long handle); + private native long getEnv(final long handle); + private native void flush(final long handle, final long flushOptHandle, + /* @Nullable */ final long[] cfHandles) throws RocksDBException; + private native void flushWal(final long handle, final boolean sync) throws RocksDBException; - private native void dropColumnFamilies(final long handle, - final long[] cfHandles) throws RocksDBException; - private native void flush(long handle, long flushOptHandle) + private native void syncWal(final long handle) throws RocksDBException; + private native long getLatestSequenceNumber(final long handle); + private native boolean setPreserveDeletesSequenceNumber(final long handle, + final long sequenceNumber); + private native void 
disableFileDeletions(long handle) throws RocksDBException; + private native void enableFileDeletions(long handle, boolean force) throws RocksDBException; - private native void flush(long handle, long flushOptHandle, long cfHandle) + private native String[] getLiveFiles(final long handle, + final boolean flushMemtable) throws RocksDBException; + private native LogFile[] getSortedWalFiles(final long handle) throws RocksDBException; - private native void compactRange0(long handle, boolean reduce_level, - int target_level, int target_path_id) throws RocksDBException; - private native void compactRange0(long handle, byte[] begin, int beginLen, - byte[] end, int endLen, boolean reduce_level, int target_level, - int target_path_id) throws RocksDBException; - private native void compactRange(long handle, byte[] begin, int beginLen, - byte[] end, int endLen, long compactRangeOptHandle, long cfHandle) - throws RocksDBException; - private native void compactRange(long handle, boolean reduce_level, - int target_level, int target_path_id, long cfHandle) + private native long getUpdatesSince(final long handle, + final long sequenceNumber) throws RocksDBException; + private native void deleteFile(final long handle, final String name) throws RocksDBException; - private native void compactRange(long handle, byte[] begin, int beginLen, - byte[] end, int endLen, boolean reduce_level, int target_level, - int target_path_id, long cfHandle) throws RocksDBException; - private native void pauseBackgroundWork(long handle) throws RocksDBException; - private native void continueBackgroundWork(long handle) throws RocksDBException; - private native long getLatestSequenceNumber(long handle); - private native void disableFileDeletions(long handle) throws RocksDBException; - private native void enableFileDeletions(long handle, boolean force) + private native LiveFileMetaData[] getLiveFilesMetaData(final long handle); + private native ColumnFamilyMetaData getColumnFamilyMetaData( + final long handle, final long columnFamilyHandle); + private native void ingestExternalFile(final long handle, + final long columnFamilyHandle, final String[] filePathList, + final int filePathListLen, final long ingestExternalFileOptionsHandle) throws RocksDBException; - private native long getUpdatesSince(long handle, long sequenceNumber) + private native void verifyChecksum(final long handle) throws RocksDBException; + private native long getDefaultColumnFamily(final long handle); + private native Map<String, TableProperties> getPropertiesOfAllTables( + final long handle, final long columnFamilyHandle) throws RocksDBException; + private native Map<String, TableProperties> getPropertiesOfTablesInRange( + final long handle, final long columnFamilyHandle, + final long[] rangeSliceHandles); + private native long[] suggestCompactRange(final long handle, + final long columnFamilyHandle) throws RocksDBException; + private native void promoteL0(final long handle, + final long columnFamilyHandle, final int targetLevel) throws RocksDBException; - private native void setOptions(long handle, long cfHandle, String[] keys, - String[] values) throws RocksDBException; - private native void ingestExternalFile(long handle, long cfHandle, - String[] filePathList, int filePathListLen, - long ingest_external_file_options_handle) throws RocksDBException; + private native void startTrace(final long handle, final long maxTraceFileSize, + final long traceWriterHandle) throws RocksDBException; + private native void endTrace(final long handle) throws RocksDBException; + + private native static void destroyDB(final
String path, final long optionsHandle) throws RocksDBException; + protected DBOptionsInterface options_; } diff --git a/java/src/main/java/org/rocksdb/RocksEnv.java b/java/src/main/java/org/rocksdb/RocksEnv.java index 8fe61fd45..b3681d77d 100644 --- a/java/src/main/java/org/rocksdb/RocksEnv.java +++ b/java/src/main/java/org/rocksdb/RocksEnv.java @@ -25,19 +25,8 @@ public class RocksEnv extends Env { */ RocksEnv(final long handle) { super(handle); - disOwnNativeHandle(); } - /** - *

    The helper function of {@link #dispose()} which all subclasses of - * {@link RocksObject} must implement to release their associated C++ - * resource.

    - * - *

    Note: this class is used to use the default - * RocksEnv with RocksJava. The default env allocation is managed - * by C++.

    - */ @Override - protected final void disposeInternal(final long handle) { - } + protected native final void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksMemEnv.java b/java/src/main/java/org/rocksdb/RocksMemEnv.java index d18d0ceb9..0afa5f662 100644 --- a/java/src/main/java/org/rocksdb/RocksMemEnv.java +++ b/java/src/main/java/org/rocksdb/RocksMemEnv.java @@ -6,22 +6,34 @@ package org.rocksdb; /** - * RocksDB memory environment. + * Memory environment. */ +//TODO(AR) rename to MemEnv public class RocksMemEnv extends Env { /** - *

    Creates a new RocksDB environment that stores its data + *

    Creates a new environment that stores its data * in memory and delegates all non-file-storage tasks to - * base_env. The caller must delete the result when it is + * {@code baseEnv}.

    + * + *

    The caller must delete the result when it is * no longer needed.

    * - *

    {@code *base_env} must remain live while the result is in use.

+ * @param baseEnv the base environment, + * must remain live while the result is in use. + */ + public RocksMemEnv(final Env baseEnv) { + super(createMemEnv(baseEnv.nativeHandle_)); + } + + /** + * @deprecated Use {@link #RocksMemEnv(Env)}. */ + @Deprecated public RocksMemEnv() { - super(createMemEnv()); + this(Env.getDefault()); } - private static native long createMemEnv(); + private static native long createMemEnv(final long baseEnvHandle); @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java new file mode 100644 index 000000000..7807e7c83 --- /dev/null +++ b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java @@ -0,0 +1,30 @@ +package org.rocksdb; + +import java.util.List; + +/** + * Flags for + * {@link RocksDB#getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)} + * that specify whether to include memtable stats, file stats, or both. + */ +public enum SizeApproximationFlag { + NONE((byte)0x0), + INCLUDE_MEMTABLES((byte)0x1), + INCLUDE_FILES((byte)0x2); + + private final byte value; + + SizeApproximationFlag(final byte value) { + this.value = value; + } + + /** + * Get the internal byte representation. + * + * @return the internal representation. + */ + byte getValue() { + return value; + } +}
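A hedged usage sketch for these flags follows; the getApproximateSizes overload is assumed from the javadoc reference above, and the Range and Slice constructors from their use in suggestCompactRange earlier in this patch.

```java
import java.util.Arrays;
import java.util.List;
import org.rocksdb.*;

// Hedged sketch: estimate the size of the key range ["a", "m"),
// counting both memtable and file contributions.
final class ApproxSizeSketch {
  static long estimate(final RocksDB db) {
    final List<Range> ranges = Arrays.asList(
        new Range(new Slice("a"), new Slice("m")));
    final long[] sizes = db.getApproximateSizes(
        db.getDefaultColumnFamily(), ranges,
        SizeApproximationFlag.INCLUDE_MEMTABLES,
        SizeApproximationFlag.INCLUDE_FILES);
    return sizes[0]; // one estimate per supplied range
  }
}
```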

diff --git a/java/src/main/java/org/rocksdb/Slice.java b/java/src/main/java/org/rocksdb/Slice.java index 08a940c3f..50d9f7652 100644 --- a/java/src/main/java/org/rocksdb/Slice.java +++ b/java/src/main/java/org/rocksdb/Slice.java @@ -55,7 +55,8 @@ * Slice instances using a handle. * * @param nativeHandle address of native instance. - * @param owningNativeHandle whether to own this reference from the C++ side or not + * @param owningNativeHandle true if the Java side owns the memory pointed to + * by this reference, false if ownership belongs to the C++ side */ Slice(final long nativeHandle, final boolean owningNativeHandle) { super(); diff --git a/java/src/main/java/org/rocksdb/SstFileMetaData.java b/java/src/main/java/org/rocksdb/SstFileMetaData.java new file mode 100644 index 000000000..52e984dff --- /dev/null +++ b/java/src/main/java/org/rocksdb/SstFileMetaData.java @@ -0,0 +1,150 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The metadata that describes an SST file. + */ +public class SstFileMetaData { + private final String fileName; + private final String path; + private final long size; + private final long smallestSeqno; + private final long largestSeqno; + private final byte[] smallestKey; + private final byte[] largestKey; + private final long numReadsSampled; + private final boolean beingCompacted; + private final long numEntries; + private final long numDeletions; + + /** + * Called from JNI C++. + */ + protected SstFileMetaData( + final String fileName, + final String path, + final long size, + final long smallestSeqno, + final long largestSeqno, + final byte[] smallestKey, + final byte[] largestKey, + final long numReadsSampled, + final boolean beingCompacted, + final long numEntries, + final long numDeletions) { + this.fileName = fileName; + this.path = path; + this.size = size; + this.smallestSeqno = smallestSeqno; + this.largestSeqno = largestSeqno; + this.smallestKey = smallestKey; + this.largestKey = largestKey; + this.numReadsSampled = numReadsSampled; + this.beingCompacted = beingCompacted; + this.numEntries = numEntries; + this.numDeletions = numDeletions; + } + + /** + * Get the name of the file. + * + * @return the name of the file. + */ + public String fileName() { + return fileName; + } + + /** + * Get the full path where the file is located. + * + * @return the full path + */ + public String path() { + return path; + } + + /** + * Get the file size in bytes. + * + * @return file size + */ + public long size() { + return size; + } + + /** + * Get the smallest sequence number in the file. + * + * @return the smallest sequence number + */ + public long smallestSeqno() { + return smallestSeqno; + } + + /** + * Get the largest sequence number in the file. + * + * @return the largest sequence number + */ + public long largestSeqno() { + return largestSeqno; + } + + /** + * Get the smallest user-defined key in the file. + * + * @return the smallest user-defined key + */ + public byte[] smallestKey() { + return smallestKey; + } + + /** + * Get the largest user-defined key in the file. + * + * @return the largest user-defined key + */ + public byte[] largestKey() { + return largestKey; + } + + /** + * Get the number of times the file has been read. + * + * @return the number of times the file has been read + */ + public long numReadsSampled() { + return numReadsSampled; + }
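For orientation, a hedged sketch that reads this metadata follows; it assumes, based on the fields shown here, that the LiveFileMetaData entries returned by getLiveFilesMetaData() expose the SstFileMetaData accessors.

```java
import org.rocksdb.*;

// Hedged sketch: dump per-SST-file metadata for a database.
final class SstMetaSketch {
  static void dump(final RocksDB db) {
    for (final LiveFileMetaData meta : db.getLiveFilesMetaData()) {
      System.out.println(meta.path() + meta.fileName()
          + " size=" + meta.size()
          + " entries=" + meta.numEntries()
          + " beingCompacted=" + meta.beingCompacted());
    }
  }
}
```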
+ + /** + * Returns true if the file is currently being compacted. + * + * @return true if the file is currently being compacted, false otherwise. + */ + public boolean beingCompacted() { + return beingCompacted; + } + + /** + * Get the number of entries. + * + * @return the number of entries. + */ + public long numEntries() { + return numEntries; + } + + /** + * Get the number of deletions. + * + * @return the number of deletions. + */ + public long numDeletions() { + return numDeletions; + } +} diff --git a/java/src/main/java/org/rocksdb/StateType.java b/java/src/main/java/org/rocksdb/StateType.java new file mode 100644 index 000000000..803456bb2 --- /dev/null +++ b/java/src/main/java/org/rocksdb/StateType.java @@ -0,0 +1,53 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The type used to refer to a thread state. + * + * A state describes the lower-level action of a thread, + * such as reading or writing a file, or waiting for a mutex. + */ +public enum StateType { + STATE_UNKNOWN((byte)0x0), + STATE_MUTEX_WAIT((byte)0x1); + + private final byte value; + + StateType(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ + byte getValue() { + return value; + } + + /** + * Get the state type from the internal representation value. + * + * @param value the internal representation value. + * + * @return the state type + * + * @throws IllegalArgumentException if the value does not match + * a StateType + */ + static StateType fromValue(final byte value) + throws IllegalArgumentException { + for (final StateType stateType : StateType.values()) { + if (stateType.value == value) { + return stateType; + } + } + throw new IllegalArgumentException( + "Unknown value for StateType: " + value); + } +} diff --git a/java/src/main/java/org/rocksdb/StatsLevel.java b/java/src/main/java/org/rocksdb/StatsLevel.java index cc2a87c6a..58504b84a 100644 --- a/java/src/main/java/org/rocksdb/StatsLevel.java +++ b/java/src/main/java/org/rocksdb/StatsLevel.java @@ -60,6 +60,6 @@ public enum StatsLevel { } } throw new IllegalArgumentException( - "Illegal value provided for InfoLogLevel."); + "Illegal value provided for StatsLevel."); } } diff --git a/java/src/main/java/org/rocksdb/TableFilter.java b/java/src/main/java/org/rocksdb/TableFilter.java new file mode 100644 index 000000000..45605063b --- /dev/null +++ b/java/src/main/java/org/rocksdb/TableFilter.java @@ -0,0 +1,20 @@ +package org.rocksdb; + +/** + * Filter for iterating a table. + */ +public interface TableFilter { + + /** + * A callback to determine whether relevant keys for this scan exist in a + * given table based on the table's properties. The callback is passed the + * properties of each table during iteration. If the callback returns false, + * the table will not be scanned. This option only affects Iterators and has + * no impact on point lookups. + * + * @param tableProperties the table properties. + * + * @return true if the table should be scanned, false otherwise.
+ */ + boolean filter(final TableProperties tableProperties); +}
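As a hedged illustration of the callback, the filter below skips SST files whose entries are all deletions; hooking it up through ReadOptions#setTableFilter(...) is an assumption, since only the callback interface itself appears in this patch.

```java
import org.rocksdb.*;

// Hedged sketch: scan only tables that hold at least one non-deletion entry.
final class NonEmptyTableFilter extends AbstractTableFilter {
  @Override
  public boolean filter(final TableProperties tableProperties) {
    return tableProperties.getNumEntries() > tableProperties.getNumDeletions();
  }
}
```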
diff --git a/java/src/main/java/org/rocksdb/TableProperties.java b/java/src/main/java/org/rocksdb/TableProperties.java new file mode 100644 index 000000000..5fe98da67 --- /dev/null +++ b/java/src/main/java/org/rocksdb/TableProperties.java @@ -0,0 +1,365 @@ +package org.rocksdb; + +import java.util.Map; + +/** + * TableProperties contains read-only properties of its associated + * table. + */ +public class TableProperties { + private final long dataSize; + private final long indexSize; + private final long indexPartitions; + private final long topLevelIndexSize; + private final long indexKeyIsUserKey; + private final long indexValueIsDeltaEncoded; + private final long filterSize; + private final long rawKeySize; + private final long rawValueSize; + private final long numDataBlocks; + private final long numEntries; + private final long numDeletions; + private final long numMergeOperands; + private final long numRangeDeletions; + private final long formatVersion; + private final long fixedKeyLen; + private final long columnFamilyId; + private final long creationTime; + private final long oldestKeyTime; + private final byte[] columnFamilyName; + private final String filterPolicyName; + private final String comparatorName; + private final String mergeOperatorName; + private final String prefixExtractorName; + private final String propertyCollectorsNames; + private final String compressionName; + private final Map<String, String> userCollectedProperties; + private final Map<String, String> readableProperties; + private final Map<String, Long> propertiesOffsets; + + /** + * Access is private as this will only be constructed from + * C++ via JNI. + */ + private TableProperties(final long dataSize, final long indexSize, + final long indexPartitions, final long topLevelIndexSize, + final long indexKeyIsUserKey, final long indexValueIsDeltaEncoded, + final long filterSize, final long rawKeySize, final long rawValueSize, + final long numDataBlocks, final long numEntries, final long numDeletions, + final long numMergeOperands, final long numRangeDeletions, + final long formatVersion, final long fixedKeyLen, + final long columnFamilyId, final long creationTime, + final long oldestKeyTime, final byte[] columnFamilyName, + final String filterPolicyName, final String comparatorName, + final String mergeOperatorName, final String prefixExtractorName, + final String propertyCollectorsNames, final String compressionName, + final Map<String, String> userCollectedProperties, + final Map<String, String> readableProperties, + final Map<String, Long> propertiesOffsets) { + this.dataSize = dataSize; + this.indexSize = indexSize; + this.indexPartitions = indexPartitions; + this.topLevelIndexSize = topLevelIndexSize; + this.indexKeyIsUserKey = indexKeyIsUserKey; + this.indexValueIsDeltaEncoded = indexValueIsDeltaEncoded; + this.filterSize = filterSize; + this.rawKeySize = rawKeySize; + this.rawValueSize = rawValueSize; + this.numDataBlocks = numDataBlocks; + this.numEntries = numEntries; + this.numDeletions = numDeletions; + this.numMergeOperands = numMergeOperands; + this.numRangeDeletions = numRangeDeletions; + this.formatVersion = formatVersion; + this.fixedKeyLen = fixedKeyLen; + this.columnFamilyId = columnFamilyId; + this.creationTime = creationTime; + this.oldestKeyTime = oldestKeyTime; + this.columnFamilyName = columnFamilyName; + this.filterPolicyName = filterPolicyName; + this.comparatorName = comparatorName; + this.mergeOperatorName = mergeOperatorName; + this.prefixExtractorName = prefixExtractorName; + this.propertyCollectorsNames = propertyCollectorsNames; + this.compressionName = compressionName; + this.userCollectedProperties = userCollectedProperties; + this.readableProperties = readableProperties; + this.propertiesOffsets = propertiesOffsets; + } + + /** + * Get the total size of all data blocks. + * + * @return the total size of all data blocks. + */ + public long getDataSize() { + return dataSize; + } + + /** + * Get the size of the index block. + * + * @return the size of the index block. + */ + public long getIndexSize() { + return indexSize; + } + + /** + * Get the total number of index partitions + * if {@link IndexType#kTwoLevelIndexSearch} is used. + * + * @return the total number of index partitions. + */ + public long getIndexPartitions() { + return indexPartitions; + } + + /** + * Size of the top-level index + * if {@link IndexType#kTwoLevelIndexSearch} is used. + * + * @return the size of the top-level index. + */ + public long getTopLevelIndexSize() { + return topLevelIndexSize; + } + + /** + * Whether the index key is the user key. + * If not, it includes an 8-byte sequence + * number added by the internal key format. + * + * @return non-zero if the index key is the user key, 0 otherwise. + */ + public long getIndexKeyIsUserKey() { + return indexKeyIsUserKey; + } + + /** + * Whether delta encoding is used to encode the index values. + * + * @return non-zero if delta encoding is used to encode the index values, + * 0 otherwise. + */ + public long getIndexValueIsDeltaEncoded() { + return indexValueIsDeltaEncoded; + } + + /** + * Get the size of the filter block. + * + * @return the size of the filter block. + */ + public long getFilterSize() { + return filterSize; + } + + /** + * Get the total raw key size. + * + * @return the total raw key size. + */ + public long getRawKeySize() { + return rawKeySize; + } + + /** + * Get the total raw value size. + * + * @return the total raw value size. + */ + public long getRawValueSize() { + return rawValueSize; + } + + /** + * Get the number of blocks in this table. + * + * @return the number of blocks in this table. + */ + public long getNumDataBlocks() { + return numDataBlocks; + } + + /** + * Get the number of entries in this table. + * + * @return the number of entries in this table. + */ + public long getNumEntries() { + return numEntries; + } + + /** + * Get the number of deletions in the table. + * + * @return the number of deletions in the table. + */ + public long getNumDeletions() { + return numDeletions; + } + + /** + * Get the number of merge operands in the table. + * + * @return the number of merge operands in the table. + */ + public long getNumMergeOperands() { + return numMergeOperands; + } + + /** + * Get the number of range deletions in this table. + * + * @return the number of range deletions in this table. + */ + public long getNumRangeDeletions() { + return numRangeDeletions; + } + + /** + * Get the format version, reserved for backward compatibility. + * + * @return the format version. + */ + public long getFormatVersion() { + return formatVersion; + } + + /** + * Get the length of the keys. + * + * @return 0 when the key is variable length, otherwise the number of + * bytes for each key. + */ + public long getFixedKeyLen() { + return fixedKeyLen; + } + + /** + * Get the ID of the column family for this SST file, + * corresponding to the column family identified by + * {@link #getColumnFamilyName()}. + * + * @return the ID of the column family. + */ + public long getColumnFamilyId() { + return columnFamilyId; + } + + /** + * The time when the SST file was created.
+ * Since SST files are immutable, this is equivalent + * to the last modified time. + * + * @return the creation time. + */ + public long getCreationTime() { + return creationTime; + } + + /** + * Get the timestamp of the earliest key. + * + * @return 0 means unknown, otherwise the timestamp. + */ + public long getOldestKeyTime() { + return oldestKeyTime; + } + + /** + * Get the name of the column family with which this + * SST file is associated. + * + * @return the name of the column family, or null if the + * column family is unknown. + */ + /*@Nullable*/ public byte[] getColumnFamilyName() { + return columnFamilyName; + } + + /** + * Get the name of the filter policy used in this table. + * + * @return the name of the filter policy, or null if + * no filter policy is used. + */ + /*@Nullable*/ public String getFilterPolicyName() { + return filterPolicyName; + } + + /** + * Get the name of the comparator used in this table. + * + * @return the name of the comparator. + */ + public String getComparatorName() { + return comparatorName; + } + + /** + * Get the name of the merge operator used in this table. + * + * @return the name of the merge operator, or null if no merge operator + * is used. + */ + /*@Nullable*/ public String getMergeOperatorName() { + return mergeOperatorName; + } + + /** + * Get the name of the prefix extractor used in this table. + * + * @return the name of the prefix extractor, or null if no prefix + * extractor is used. + */ + /*@Nullable*/ public String getPrefixExtractorName() { + return prefixExtractorName; + } + + /** + * Get the names of the property collector factories used in this table. + * + * @return the names of the property collector factories separated + * by commas, e.g. {collector_name[1]},{collector_name[2]},... + */ + public String getPropertyCollectorsNames() { + return propertyCollectorsNames; + } + + /** + * Get the name of the compression algorithm used to compress the SST files. + * + * @return the name of the compression algorithm. + */ + public String getCompressionName() { + return compressionName; + } + + /** + * Get the user collected properties. + * + * @return the user collected properties. + */ + public Map<String, String> getUserCollectedProperties() { + return userCollectedProperties; + } + + /** + * Get the readable properties. + * + * @return the readable properties. + */ + public Map<String, String> getReadableProperties() { + return readableProperties; + } + + /** + * The offset of the value of each property in the file. + * + * @return the offset of each property. + */ + public Map<String, Long> getPropertiesOffsets() { + return propertiesOffsets; + } +} diff --git a/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java new file mode 100644 index 000000000..062df5889 --- /dev/null +++ b/java/src/main/java/org/rocksdb/ThreadStatus.java @@ -0,0 +1,224 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +import java.util.Map; + +public class ThreadStatus { + private final long threadId; + private final ThreadType threadType; + private final String dbName; + private final String cfName; + private final OperationType operationType; + private final long operationElapsedTime; // microseconds + private final OperationStage operationStage; + private final long[] operationProperties; + private final StateType stateType; + + /** + * Invoked from C++ via JNI. + */ + private ThreadStatus(final long threadId, + final byte threadTypeValue, + final String dbName, + final String cfName, + final byte operationTypeValue, + final long operationElapsedTime, + final byte operationStageValue, + final long[] operationProperties, + final byte stateTypeValue) { + this.threadId = threadId; + this.threadType = ThreadType.fromValue(threadTypeValue); + this.dbName = dbName; + this.cfName = cfName; + this.operationType = OperationType.fromValue(operationTypeValue); + this.operationElapsedTime = operationElapsedTime; + this.operationStage = OperationStage.fromValue(operationStageValue); + this.operationProperties = operationProperties; + this.stateType = StateType.fromValue(stateTypeValue); + } + + /** + * Get the unique ID of the thread. + * + * @return the thread id + */ + public long getThreadId() { + return threadId; + } + + /** + * Get the type of the thread. + * + * @return the type of the thread. + */ + public ThreadType getThreadType() { + return threadType; + } + + /** + * The name of the DB instance that the thread is currently + * involved with. + * + * @return the name of the db, or null if the thread is not involved + * in any DB operation. + */ + /* @Nullable */ public String getDbName() { + return dbName; + } + + /** + * The name of the column family that the thread is currently + * involved with. + * + * @return the name of the column family, or null if the thread is not + * involved in any column family operation. + */ + /* @Nullable */ public String getCfName() { + return cfName; + } + + /** + * Get the operation (high-level action) that the current thread is involved + * with. + * + * @return the operation + */ + public OperationType getOperationType() { + return operationType; + } + + /** + * Get the elapsed time of the current thread operation in microseconds. + * + * @return the elapsed time + */ + public long getOperationElapsedTime() { + return operationElapsedTime; + } + + /** + * Get the current stage where the thread is involved in the current + * operation. + * + * @return the current stage of the current operation + */ + public OperationStage getOperationStage() { + return operationStage; + } + + /** + * Get the list of properties that describe some details about the current + * operation. + * + * Each field might have a different meaning for different operations. + * + * @return the properties + */ + public long[] getOperationProperties() { + return operationProperties; + } + + /** + * Get the state (lower-level action) that the current thread is involved + * with. + * + * @return the state + */ + public StateType getStateType() { + return stateType; + } + + /** + * Get the name of the thread type. + * + * @param threadType the thread type + * + * @return the name of the thread type. + */ + public static String getThreadTypeName(final ThreadType threadType) { + return getThreadTypeName(threadType.getValue()); + }
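A hedged sketch of consuming these statuses follows; it assumes Env#getThreadList() from the same body of RocksJava work, which is not shown in this hunk.

```java
import org.rocksdb.*;

// Hedged sketch: print a snapshot of RocksDB's threads and what they are doing.
final class ThreadDumpSketch {
  static void dump() throws RocksDBException {
    for (final ThreadStatus ts : Env.getDefault().getThreadList()) {
      System.out.println(ts.getThreadId()
          + " " + ThreadStatus.getThreadTypeName(ts.getThreadType())
          + " " + ThreadStatus.getOperationName(ts.getOperationType())
          + " " + ThreadStatus.microsToString(ts.getOperationElapsedTime()));
    }
  }
}
```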
+ + /** + * Get the name of an operation given its type. + * + * @param operationType the type of operation. + * + * @return the name of the operation. + */ + public static String getOperationName(final OperationType operationType) { + return getOperationName(operationType.getValue()); + } + + /** + * Convert an operation's elapsed time from microseconds to a + * human-readable string. + * + * @param operationElapsedTime the elapsed time in microseconds. + * + * @return the human-readable string. + */ + public static String microsToString(final long operationElapsedTime) { + return microsToStringNative(operationElapsedTime); + } + + /** + * Obtain a human-readable string describing the specified operation stage. + * + * @param operationStage the stage of the operation. + * + * @return the description of the operation stage. + */ + public static String getOperationStageName( + final OperationStage operationStage) { + return getOperationStageName(operationStage.getValue()); + } + + /** + * Obtain the name of the "i"th operation property of the + * specified operation. + * + * @param operationType the operation type. + * @param i the index of the operation property. + * + * @return the name of the operation property + */ + public static String getOperationPropertyName( + final OperationType operationType, final int i) { + return getOperationPropertyName(operationType.getValue(), i); + } + + /** + * Translate the properties of the specified operation into a map from + * property name to property value. + * + * @param operationType the operation type. + * @param operationProperties the operation properties. + * + * @return the property values keyed by property name. + */ + public static Map<String, Long> interpretOperationProperties( + final OperationType operationType, final long[] operationProperties) { + return interpretOperationProperties(operationType.getValue(), + operationProperties); + } + + /** + * Obtain the name of a state given its type. + * + * @param stateType the state type. + * + * @return the name of the state. + */ + public static String getStateName(final StateType stateType) { + return getStateName(stateType.getValue()); + } + + private static native String getThreadTypeName(final byte threadTypeValue); + private static native String getOperationName(final byte operationTypeValue); + private static native String microsToStringNative( + final long operationElapsedTime); + private static native String getOperationStageName( + final byte operationStageTypeValue); + private static native String getOperationPropertyName( + final byte operationTypeValue, final int i); + private static native Map<String, Long> interpretOperationProperties( + final byte operationTypeValue, final long[] operationProperties); + private static native String getStateName(final byte stateTypeValue); +} diff --git a/java/src/main/java/org/rocksdb/ThreadType.java b/java/src/main/java/org/rocksdb/ThreadType.java new file mode 100644 index 000000000..cc329f442 --- /dev/null +++ b/java/src/main/java/org/rocksdb/ThreadType.java @@ -0,0 +1,65 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The type of a thread. + */ +public enum ThreadType { + /** + * RocksDB BG thread in high-pri thread pool. + */ + HIGH_PRIORITY((byte)0x0), + + /** + * RocksDB BG thread in low-pri thread pool. + */ + LOW_PRIORITY((byte)0x1), + + /** + * User thread (Non-RocksDB BG thread). + */ + USER((byte)0x2), + + /** + * RocksDB BG thread in bottom-pri thread pool. + */ + BOTTOM_PRIORITY((byte)0x3); + + private final byte value; + + ThreadType(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value.
+ */ + byte getValue() { + return value; + } + + /** + * Get the Thread type from the internal representation value. + * + * @param value the internal representation value. + * + * @return the thread type + * + * @throws IllegalArgumentException if the value does not match a ThreadType + */ + static ThreadType fromValue(final byte value) + throws IllegalArgumentException { + for (final ThreadType threadType : ThreadType.values()) { + if (threadType.value == value) { + return threadType; + } + } + throw new IllegalArgumentException("Unknown value for ThreadType: " + value); + } +} diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java index d240acf6b..551e366dc 100644 --- a/java/src/main/java/org/rocksdb/TickerType.java +++ b/java/src/main/java/org/rocksdb/TickerType.java @@ -726,9 +726,10 @@ public enum TickerType { } /** - * @deprecated - * Exposes internal value of native enum mappings. This method will be marked private in the - * next major release. + * @deprecated Exposes internal value of native enum mappings. + * This method will be marked package private in the next major release. + * + * @return the internal representation */ @Deprecated public byte getValue() { diff --git a/java/src/main/java/org/rocksdb/TimedEnv.java b/java/src/main/java/org/rocksdb/TimedEnv.java new file mode 100644 index 000000000..dc8b5d6ef --- /dev/null +++ b/java/src/main/java/org/rocksdb/TimedEnv.java @@ -0,0 +1,30 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Timed environment. + */ +public class TimedEnv extends Env { + + /** + *
<p>Creates a new environment that measures function call times for + * filesystem operations, reporting results to variables in PerfContext.</p> + * + * + * <p>The caller must delete the result when it is + * no longer needed.</p>
+ * + * @param baseEnv the base environment, + * must remain live while the result is in use. + */ + public TimedEnv(final Env baseEnv) { + super(createTimedEnv(baseEnv.nativeHandle_)); + } + + private static native long createTimedEnv(final long baseEnvHandle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/TraceOptions.java b/java/src/main/java/org/rocksdb/TraceOptions.java new file mode 100644 index 000000000..657b263c6 --- /dev/null +++ b/java/src/main/java/org/rocksdb/TraceOptions.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * TraceOptions is used for + * {@link RocksDB#startTrace(TraceOptions, AbstractTraceWriter)}. + */ +public class TraceOptions { + private final long maxTraceFileSize; + + public TraceOptions() { + this.maxTraceFileSize = 64L * 1024 * 1024 * 1024; // 64 GB (long literal avoids int overflow) + } + + public TraceOptions(final long maxTraceFileSize) { + this.maxTraceFileSize = maxTraceFileSize; + } + + /** + * To avoid the trace file growing larger than the available storage space, + * the user can set the max trace file size in bytes. Default is 64 GB. + * + * @return the max trace size + */ + public long getMaxTraceFileSize() { + return maxTraceFileSize; + } +} diff --git a/java/src/main/java/org/rocksdb/TraceWriter.java b/java/src/main/java/org/rocksdb/TraceWriter.java new file mode 100644 index 000000000..cb0234e9b --- /dev/null +++ b/java/src/main/java/org/rocksdb/TraceWriter.java @@ -0,0 +1,36 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * TraceWriter allows exporting RocksDB traces to any system, + * one operation at a time. + */ +public interface TraceWriter { + + /** + * Write the data. + * + * @param data the data + * + * @throws RocksDBException if an error occurs whilst writing. + */ + void write(final Slice data) throws RocksDBException; + + /** + * Close the writer. + * + * @throws RocksDBException if an error occurs whilst closing the writer. + */ + void closeWriter() throws RocksDBException; + + /** + * Get the size of the file that this writer is writing to. + * + * @return the file size + */ + long getFileSize(); +} diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java index fcecf3faf..a1a09cf96 100644 --- a/java/src/main/java/org/rocksdb/TransactionDB.java +++ b/java/src/main/java/org/rocksdb/TransactionDB.java @@ -104,6 +104,53 @@ public class TransactionDB extends RocksDB return tdb; } + /** + * This is similar to {@link #close()} except that it + * throws an exception if any error occurs. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + * + * @throws RocksDBException if an error occurs whilst closing.
+ */ + public void closeE() throws RocksDBException { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } finally { + disposeInternal(); + } + } + } + + /** + * This is similar to {@link #closeE()} except that it + * silently ignores any errors. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + */ + @Override + public void close() { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } catch (final RocksDBException e) { + // silently ignore the error report + } finally { + disposeInternal(); + } + } + } + @Override public Transaction beginTransaction(final WriteOptions writeOptions) { return new Transaction(this, beginTransaction(nativeHandle_, @@ -327,12 +374,16 @@ public class TransactionDB extends RocksDB this.transactionDbOptions_ = transactionDbOptions; } + @Override protected final native void disposeInternal(final long handle); + private static native long open(final long optionsHandle, final long transactionDbOptionsHandle, final String path) throws RocksDBException; private static native long[] open(final long dbOptionsHandle, final long transactionDbOptionsHandle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions); + private native static void closeDatabase(final long handle) + throws RocksDBException; private native long beginTransaction(final long handle, final long writeOptionsHandle); private native long beginTransaction(final long handle, @@ -350,5 +401,4 @@ public class TransactionDB extends RocksDB private native DeadlockPath[] getDeadlockInfoBuffer(final long handle); private native void setDeadlockInfoBufferSize(final long handle, final int targetSize); - @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/TtlDB.java b/java/src/main/java/org/rocksdb/TtlDB.java index 740f51268..26eee4a87 100644 --- a/java/src/main/java/org/rocksdb/TtlDB.java +++ b/java/src/main/java/org/rocksdb/TtlDB.java @@ -139,6 +139,55 @@ public class TtlDB extends RocksDB { return ttlDB; } + /** + *
<p>Close the TtlDB instance and release resources.</p>
    + * + * This is similar to {@link #close()} except that it + * throws an exception if any error occurs. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + * + * @throws RocksDBException if an error occurs whilst closing. + */ + public void closeE() throws RocksDBException { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } finally { + disposeInternal(); + } + } + } + + /** + *
<p>Close the TtlDB instance and release resources.</p>
    + * + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + */ + @Override + public void close() { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } catch (final RocksDBException e) { + // silently ignore the error report + } finally { + disposeInternal(); + } + } + } + /** *
<p>
    Creates a new ttl based column family with a name defined * in given ColumnFamilyDescriptor and allocates a @@ -160,22 +209,8 @@ public class TtlDB extends RocksDB { final int ttl) throws RocksDBException { return new ColumnFamilyHandle(this, createColumnFamilyWithTtl(nativeHandle_, - columnFamilyDescriptor.columnFamilyName(), - columnFamilyDescriptor.columnFamilyOptions().nativeHandle_, ttl)); - } - - /** - *
<p>Close the TtlDB instance and release resources.</p> - * - * <p>Internally, TtlDB owns the {@code rocksdb::DB} pointer - * to its associated {@link org.rocksdb.RocksDB}. The release - * of that RocksDB pointer is handled in the destructor of the - * c++ {@code rocksdb::TtlDB} and should be transparent to - * Java developers.</p>
    - */ - @Override - public void close() { - super.close(); + columnFamilyDescriptor.getName(), + columnFamilyDescriptor.getOptions().nativeHandle_, ttl)); } /** @@ -193,10 +228,7 @@ public class TtlDB extends RocksDB { super(nativeHandle); } - @Override protected void finalize() throws Throwable { - close(); //TODO(AR) revisit here when implementing AutoCloseable - super.finalize(); - } + @Override protected native void disposeInternal(final long handle); private native static long open(final long optionsHandle, final String db_path, final int ttl, final boolean readOnly) @@ -208,4 +240,6 @@ public class TtlDB extends RocksDB { private native long createColumnFamilyWithTtl(final long handle, final byte[] columnFamilyName, final long columnFamilyOptions, int ttl) throws RocksDBException; + private native static void closeDatabase(final long handle) + throws RocksDBException; } diff --git a/java/src/main/java/org/rocksdb/WalFileType.java b/java/src/main/java/org/rocksdb/WalFileType.java new file mode 100644 index 000000000..fed27ed11 --- /dev/null +++ b/java/src/main/java/org/rocksdb/WalFileType.java @@ -0,0 +1,55 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum WalFileType { + /** + * Indicates that WAL file is in archive directory. WAL files are moved from + * the main db directory to archive directory once they are not live and stay + * there until cleaned up. Files are cleaned depending on archive size + * (Options::WAL_size_limit_MB) and time since last cleaning + * (Options::WAL_ttl_seconds). + */ + kArchivedLogFile((byte)0x0), + + /** + * Indicates that WAL file is live and resides in the main db directory + */ + kAliveLogFile((byte)0x1); + + private final byte value; + + WalFileType(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value + */ + byte getValue() { + return value; + } + + /** + * Get the WalFileType from the internal representation value. + * + * @return the wal file type. + * + * @throws IllegalArgumentException if the value is unknown. + */ + static WalFileType fromValue(final byte value) { + for (final WalFileType walFileType : WalFileType.values()) { + if(walFileType.value == value) { + return walFileType; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for WalFileType: " + value); + } +} diff --git a/java/src/main/java/org/rocksdb/WalFilter.java b/java/src/main/java/org/rocksdb/WalFilter.java new file mode 100644 index 000000000..37e36213a --- /dev/null +++ b/java/src/main/java/org/rocksdb/WalFilter.java @@ -0,0 +1,87 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Map; + +/** + * WALFilter allows an application to inspect write-ahead-log (WAL) + * records or modify their processing on recovery. + */ +public interface WalFilter { + + /** + * Provide ColumnFamily->LogNumber map to filter + * so that filter can determine whether a log number applies to a given + * column family (i.e. 
that log hasn't been flushed to SST already for the + * column family). + * + * We also pass in a name->id map, as only the name is known during + * recovery (handles are opened post-recovery), + * while write batch callbacks happen in terms of column family id. + * + * @param cfLognumber column_family_id to lognumber map + * @param cfNameId column_family_name to column_family_id map + */ + void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber, + final Map<String, Integer> cfNameId); + + /** + * logRecordFound is invoked for each log record encountered in all the logs + * during replay on recovery. This method can be used to: + * * inspect the record (using the batch parameter) + * * ignore the current record + * (by returning WalProcessingOption::kIgnoreCurrentRecord) + * * report a corrupted record + * (by returning WalProcessingOption::kCorruptedRecord) + * * stop log replay + * (by returning WalProcessingOption::kStopReplay) - please note that this + * implies discarding the logs from the current record onwards. + * + * @param logNumber log number of the current log. + * Filter might use this to determine if the log + * record is applicable to a certain column family. + * @param logFileName log file name - only for informational purposes + * @param batch batch encountered in the log during recovery + * @param newBatch new batch to populate if filter wants to change + * the batch (for example to filter some records out, or alter some + * records). Please note that the new batch MUST NOT contain + * more records than the original, else recovery will fail. + * + * @return Processing option for the current record. + */ + LogRecordFoundResult logRecordFound(final long logNumber, + final String logFileName, final WriteBatch batch, + final WriteBatch newBatch); + + class LogRecordFoundResult { + public static LogRecordFoundResult CONTINUE_UNCHANGED = + new LogRecordFoundResult(WalProcessingOption.CONTINUE_PROCESSING, false); + + final WalProcessingOption walProcessingOption; + final boolean batchChanged; + + /** + * @param walProcessingOption the processing option + * @param batchChanged Whether batch was changed by the filter. + * It must be set to true if newBatch was populated, + * else newBatch has no effect. + */ + public LogRecordFoundResult(final WalProcessingOption walProcessingOption, + final boolean batchChanged) { + this.walProcessingOption = walProcessingOption; + this.batchChanged = batchChanged; + } + } + + /** + * Returns a name that identifies this WAL filter. + * The name will be printed to the LOG file on start-up for diagnosis. + * + * @return the name + */ + String name(); +} diff --git a/java/src/main/java/org/rocksdb/WalProcessingOption.java b/java/src/main/java/org/rocksdb/WalProcessingOption.java new file mode 100644 index 000000000..889602edc --- /dev/null +++ b/java/src/main/java/org/rocksdb/WalProcessingOption.java @@ -0,0 +1,54 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum WalProcessingOption { + /** + * Continue processing as usual. + */ + CONTINUE_PROCESSING((byte)0x0), + + /** + * Ignore the current record but continue processing of log(s). + */ + IGNORE_CURRENT_RECORD((byte)0x1), + + /** + * Stop replay of logs and discard logs. + * Logs won't be replayed on subsequent recovery.
+ */ + STOP_REPLAY((byte)0x2), + + /** + * Corrupted record detected by filter. + */ + CORRUPTED_RECORD((byte)0x3); + + private final byte value; + + WalProcessingOption(final byte value) { + this.value = value; + } + + /** + * Get the internal representation. + * + * @return the internal representation. + */ + byte getValue() { + return value; + } + + public static WalProcessingOption fromValue(final byte value) { + for (final WalProcessingOption walProcessingOption : WalProcessingOption.values()) { + if (walProcessingOption.value == value) { + return walProcessingOption; + } + } + throw new IllegalArgumentException( + "Illegal value provided for WalProcessingOption: " + value); + } +} diff --git a/java/src/main/java/org/rocksdb/WriteBufferManager.java b/java/src/main/java/org/rocksdb/WriteBufferManager.java index a5f80644f..b244aa952 100644 --- a/java/src/main/java/org/rocksdb/WriteBufferManager.java +++ b/java/src/main/java/org/rocksdb/WriteBufferManager.java @@ -1,6 +1,9 @@ -package org.rocksdb; +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). -import org.rocksdb.Cache; +package org.rocksdb; /** * Java wrapper over native write_buffer_manager class diff --git a/java/src/main/java/org/rocksdb/WriteOptions.java b/java/src/main/java/org/rocksdb/WriteOptions.java index db662aa50..ad987a859 100644 --- a/java/src/main/java/org/rocksdb/WriteOptions.java +++ b/java/src/main/java/org/rocksdb/WriteOptions.java @@ -163,8 +163,41 @@ public class WriteOptions extends RocksObject { return noSlowdown(nativeHandle_); } + /** + * If true, this write request is of lower priority if compaction is + * behind. In the case that {@link #noSlowdown()} == true, the request + * will be cancelled immediately with {@link Status.Code#Incomplete} returned. + * Otherwise, it will be slowed down. The slowdown value is determined by + * RocksDB to guarantee minimal impact on high-priority writes. + * + * Default: false + * + * @param lowPri true if the write request should be of lower priority than + * compactions which are behind. + * + * @return the instance of the current WriteOptions. + */ + public WriteOptions setLowPri(final boolean lowPri) { + setLowPri(nativeHandle_, lowPri); + return this; + } + + /** + * Returns true if this write request is of lower priority if compaction is + * behind. + * + * See {@link #setLowPri(boolean)}. + * + * @return true if this write request is of lower priority, false otherwise.
+ */ + public boolean lowPri() { + return lowPri(nativeHandle_); + } + private native static long newWriteOptions(); private native static long copyWriteOptions(long handle); + @Override protected final native void disposeInternal(final long handle); + private native void setSync(long handle, boolean flag); private native boolean sync(long handle); private native void setDisableWAL(long handle, boolean flag); @@ -175,5 +208,6 @@ public class WriteOptions extends RocksObject { private native void setNoSlowdown(final long handle, final boolean noSlowdown); private native boolean noSlowdown(final long handle); - @Override protected final native void disposeInternal(final long handle); + private native void setLowPri(final long handle, final boolean lowPri); + private native boolean lowPri(final long handle); } diff --git a/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java b/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java index c223014fd..0b4992184 100644 --- a/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java +++ b/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java @@ -45,7 +45,7 @@ public class BackupableDBOptionsTest { assertThat(backupableDBOptions.backupEnv()). isNull(); - try(final Env env = new RocksMemEnv()) { + try(final Env env = new RocksMemEnv(Env.getDefault())) { backupableDBOptions.setBackupEnv(env); assertThat(backupableDBOptions.backupEnv()) .isEqualTo(env); diff --git a/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java index 754cf11c0..fe9f86325 100644 --- a/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java +++ b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java @@ -6,6 +6,7 @@ package org.rocksdb; import org.junit.ClassRule; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -22,23 +23,94 @@ public class BlockBasedTableConfigTest { @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + @Test + public void cacheIndexAndFilterBlocks() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setCacheIndexAndFilterBlocks(true); + assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocks()). + isTrue(); + + } + + @Test + public void cacheIndexAndFilterBlocksWithHighPriority() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true); + assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocksWithHighPriority()). + isTrue(); + } + + @Test + public void pinL0FilterAndIndexBlocksInCache() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true); + assertThat(blockBasedTableConfig.pinL0FilterAndIndexBlocksInCache()). + isTrue(); + } + + @Test + public void pinTopLevelIndexAndFilter() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setPinTopLevelIndexAndFilter(false); + assertThat(blockBasedTableConfig.pinTopLevelIndexAndFilter()). 
+ isFalse(); + } + + @Test + public void indexType() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + assertThat(IndexType.values().length).isEqualTo(3); + blockBasedTableConfig.setIndexType(IndexType.kHashSearch); + assertThat(blockBasedTableConfig.indexType().equals( + IndexType.kHashSearch)); + assertThat(IndexType.valueOf("kBinarySearch")).isNotNull(); + blockBasedTableConfig.setIndexType(IndexType.valueOf("kBinarySearch")); + assertThat(blockBasedTableConfig.indexType().equals( + IndexType.kBinarySearch)); + } + + @Test + public void dataBlockIndexType() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinaryAndHash); + assertThat(blockBasedTableConfig.dataBlockIndexType().equals( + DataBlockIndexType.kDataBlockBinaryAndHash)); + blockBasedTableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch); + assertThat(blockBasedTableConfig.dataBlockIndexType().equals( + DataBlockIndexType.kDataBlockBinarySearch)); + } + + @Test + public void checksumType() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + assertThat(ChecksumType.values().length).isEqualTo(3); + assertThat(ChecksumType.valueOf("kxxHash")). + isEqualTo(ChecksumType.kxxHash); + blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum); + blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash); + assertThat(blockBasedTableConfig.checksumType().equals( + ChecksumType.kxxHash)); + } + @Test public void noBlockCache() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); blockBasedTableConfig.setNoBlockCache(true); assertThat(blockBasedTableConfig.noBlockCache()).isTrue(); } @Test - public void blockCacheSize() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setBlockCacheSize(8 * 1024); - assertThat(blockBasedTableConfig.blockCacheSize()). - isEqualTo(8 * 1024); + public void blockCache() { + try ( + final Cache cache = new LRUCache(17 * 1024 * 1024); + final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setBlockCache(cache))) { + assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable"); + } } @Test - public void sharedBlockCache() throws RocksDBException { + public void blockCacheIntegration() throws RocksDBException { try (final Cache cache = new LRUCache(8 * 1024 * 1024); final Statistics statistics = new Statistics()) { for (int shard = 0; shard < 8; shard++) { @@ -63,188 +135,259 @@ public class BlockBasedTableConfigTest { } @Test - public void blockSizeDeviation() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setBlockSizeDeviation(12); - assertThat(blockBasedTableConfig.blockSizeDeviation()). - isEqualTo(12); + public void persistentCache() throws RocksDBException { + try (final DBOptions dbOptions = new DBOptions(). + setInfoLogLevel(InfoLogLevel.INFO_LEVEL). 
+ setCreateIfMissing(true); + final Logger logger = new Logger(dbOptions) { + @Override + protected void log(final InfoLogLevel infoLogLevel, final String logMsg) { + System.out.println(infoLogLevel.name() + ": " + logMsg); + } + }) { + try (final PersistentCache persistentCache = + new PersistentCache(Env.getDefault(), dbFolder.getRoot().getPath(), 1024 * 1024 * 100, logger, false); + final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setPersistentCache(persistentCache))) { + assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable"); + } + } } @Test - public void blockRestartInterval() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setBlockRestartInterval(15); - assertThat(blockBasedTableConfig.blockRestartInterval()). - isEqualTo(15); + public void blockCacheCompressed() { + try (final Cache cache = new LRUCache(17 * 1024 * 1024); + final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setBlockCacheCompressed(cache))) { + assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable"); + } } + @Ignore("See issue: https://github.com/facebook/rocksdb/issues/4822") @Test - public void wholeKeyFiltering() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setWholeKeyFiltering(false); - assertThat(blockBasedTableConfig.wholeKeyFiltering()). - isFalse(); + public void blockCacheCompressedIntegration() throws RocksDBException { + final byte[] key1 = "some-key1".getBytes(StandardCharsets.UTF_8); + final byte[] key2 = "some-key1".getBytes(StandardCharsets.UTF_8); + final byte[] key3 = "some-key1".getBytes(StandardCharsets.UTF_8); + final byte[] key4 = "some-key1".getBytes(StandardCharsets.UTF_8); + final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8); + + try (final Cache compressedCache = new LRUCache(8 * 1024 * 1024); + final Statistics statistics = new Statistics()) { + + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig() + .setNoBlockCache(true) + .setBlockCache(null) + .setBlockCacheCompressed(compressedCache) + .setFormatVersion(4); + + try (final Options options = new Options() + .setCreateIfMissing(true) + .setStatistics(statistics) + .setTableFormatConfig(blockBasedTableConfig)) { + + for (int shard = 0; shard < 8; shard++) { + try (final FlushOptions flushOptions = new FlushOptions(); + final WriteOptions writeOptions = new WriteOptions(); + final ReadOptions readOptions = new ReadOptions(); + final RocksDB db = + RocksDB.open(options, dbFolder.getRoot().getAbsolutePath() + "/" + shard)) { + + db.put(writeOptions, key1, value); + db.put(writeOptions, key2, value); + db.put(writeOptions, key3, value); + db.put(writeOptions, key4, value); + db.flush(flushOptions); + + db.get(readOptions, key1); + db.get(readOptions, key2); + db.get(readOptions, key3); + db.get(readOptions, key4); + + assertThat(statistics.getTickerCount(TickerType.BLOCK_CACHE_COMPRESSED_ADD)).isEqualTo(shard + 1); + } + } + } + } } @Test - public void cacheIndexAndFilterBlocks() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setCacheIndexAndFilterBlocks(true); - assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocks()). 
- isTrue(); - + public void blockSize() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockSize(10); + assertThat(blockBasedTableConfig.blockSize()).isEqualTo(10); } @Test - public void cacheIndexAndFilterBlocksWithHighPriority() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true); - assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocksWithHighPriority()). - isTrue(); + public void blockSizeDeviation() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockSizeDeviation(12); + assertThat(blockBasedTableConfig.blockSizeDeviation()). + isEqualTo(12); } @Test - public void pinL0FilterAndIndexBlocksInCache() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true); - assertThat(blockBasedTableConfig.pinL0FilterAndIndexBlocksInCache()). - isTrue(); + public void blockRestartInterval() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockRestartInterval(15); + assertThat(blockBasedTableConfig.blockRestartInterval()). + isEqualTo(15); } @Test - public void partitionFilters() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setPartitionFilters(true); - assertThat(blockBasedTableConfig.partitionFilters()). - isTrue(); + public void indexBlockRestartInterval() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setIndexBlockRestartInterval(15); + assertThat(blockBasedTableConfig.indexBlockRestartInterval()). + isEqualTo(15); } @Test public void metadataBlockSize() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); blockBasedTableConfig.setMetadataBlockSize(1024); assertThat(blockBasedTableConfig.metadataBlockSize()). - isEqualTo(1024); + isEqualTo(1024); } @Test - public void pinTopLevelIndexAndFilter() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setPinTopLevelIndexAndFilter(false); - assertThat(blockBasedTableConfig.pinTopLevelIndexAndFilter()). - isFalse(); + public void partitionFilters() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setPartitionFilters(true); + assertThat(blockBasedTableConfig.partitionFilters()). + isTrue(); } @Test - public void hashIndexAllowCollision() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setHashIndexAllowCollision(false); - assertThat(blockBasedTableConfig.hashIndexAllowCollision()). + public void useDeltaEncoding() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setUseDeltaEncoding(false); + assertThat(blockBasedTableConfig.useDeltaEncoding()). isFalse(); } @Test - public void blockCacheCompressedSize() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setBlockCacheCompressedSize(40); - assertThat(blockBasedTableConfig.blockCacheCompressedSize()). 
- isEqualTo(40); + public void blockBasedTableWithFilterPolicy() { + try(final Options options = new Options() + .setTableFormatConfig(new BlockBasedTableConfig() + .setFilterPolicy(new BloomFilter(10)))) { + assertThat(options.tableFactoryName()). + isEqualTo("BlockBasedTable"); + } } @Test - public void checksumType() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - assertThat(ChecksumType.values().length).isEqualTo(3); - assertThat(ChecksumType.valueOf("kxxHash")). - isEqualTo(ChecksumType.kxxHash); - blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum); - blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash); - assertThat(blockBasedTableConfig.checksumType().equals( - ChecksumType.kxxHash)); + public void blockBasedTableWithoutFilterPolicy() { + try(final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setFilterPolicy(null))) { + assertThat(options.tableFactoryName()). + isEqualTo("BlockBasedTable"); + } } @Test - public void indexType() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - assertThat(IndexType.values().length).isEqualTo(3); - blockBasedTableConfig.setIndexType(IndexType.kHashSearch); - assertThat(blockBasedTableConfig.indexType().equals( - IndexType.kHashSearch)); - assertThat(IndexType.valueOf("kBinarySearch")).isNotNull(); - blockBasedTableConfig.setIndexType(IndexType.valueOf("kBinarySearch")); - assertThat(blockBasedTableConfig.indexType().equals( - IndexType.kBinarySearch)); + public void wholeKeyFiltering() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setWholeKeyFiltering(false); + assertThat(blockBasedTableConfig.wholeKeyFiltering()). + isFalse(); } @Test - public void blockCacheCompressedNumShardBits() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4); - assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()). - isEqualTo(4); + public void verifyCompression() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setVerifyCompression(true); + assertThat(blockBasedTableConfig.verifyCompression()). + isTrue(); } @Test - public void cacheNumShardBits() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setCacheNumShardBits(5); - assertThat(blockBasedTableConfig.cacheNumShardBits()). - isEqualTo(5); + public void readAmpBytesPerBit() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setReadAmpBytesPerBit(2); + assertThat(blockBasedTableConfig.readAmpBytesPerBit()). 
+ isEqualTo(2); } @Test - public void blockSize() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); - blockBasedTableConfig.setBlockSize(10); - assertThat(blockBasedTableConfig.blockSize()).isEqualTo(10); + public void formatVersion() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + for (int version = 0; version < 5; version++) { + blockBasedTableConfig.setFormatVersion(version); + assertThat(blockBasedTableConfig.formatVersion()).isEqualTo(version); + } + } + + @Test(expected = AssertionError.class) + public void formatVersionFailNegative() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setFormatVersion(-1); } + @Test(expected = AssertionError.class) + public void formatVersionFailIllegalVersion() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setFormatVersion(99); + } @Test - public void blockBasedTableWithFilter() { - try(final Options options = new Options() - .setTableFormatConfig(new BlockBasedTableConfig() - .setFilter(new BloomFilter(10)))) { - assertThat(options.tableFactoryName()). - isEqualTo("BlockBasedTable"); - } + public void enableIndexCompression() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setEnableIndexCompression(false); + assertThat(blockBasedTableConfig.enableIndexCompression()). + isFalse(); } @Test - public void blockBasedTableWithoutFilter() { - try(final Options options = new Options().setTableFormatConfig( - new BlockBasedTableConfig().setFilter(null))) { - assertThat(options.tableFactoryName()). - isEqualTo("BlockBasedTable"); - } + public void blockAlign() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockAlign(true); + assertThat(blockBasedTableConfig.blockAlign()). + isTrue(); } + @Deprecated @Test - public void blockBasedTableWithBlockCache() { - try (final Options options = new Options().setTableFormatConfig( - new BlockBasedTableConfig().setBlockCache(new LRUCache(17 * 1024 * 1024)))) { - assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable"); - } + public void hashIndexAllowCollision() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setHashIndexAllowCollision(false); + assertThat(blockBasedTableConfig.hashIndexAllowCollision()). + isTrue(); // NOTE: setHashIndexAllowCollision should do nothing! } + @Deprecated @Test - public void blockBasedTableFormatVersion() { - BlockBasedTableConfig config = new BlockBasedTableConfig(); - for (int version=0; version<=2; version++) { - config.setFormatVersion(version); - assertThat(config.formatVersion()).isEqualTo(version); - } + public void blockCacheSize() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockCacheSize(8 * 1024); + assertThat(blockBasedTableConfig.blockCacheSize()). + isEqualTo(8 * 1024); } - @Test(expected = AssertionError.class) - public void blockBasedTableFormatVersionFailNegative() { - BlockBasedTableConfig config = new BlockBasedTableConfig(); - config.setFormatVersion(-1); + @Deprecated + @Test + public void blockCacheNumShardBits() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setCacheNumShardBits(5); + assertThat(blockBasedTableConfig.cacheNumShardBits()). 
+ isEqualTo(5); } - @Test(expected = AssertionError.class) - public void blockBasedTableFormatVersionFailIllegalVersion() { - BlockBasedTableConfig config = new BlockBasedTableConfig(); - config.setFormatVersion(3); + @Deprecated + @Test + public void blockCacheCompressedSize() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockCacheCompressedSize(40); + assertThat(blockBasedTableConfig.blockCacheCompressedSize()). + isEqualTo(40); + } + + @Deprecated + @Test + public void blockCacheCompressedNumShardBits() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4); + assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()). + isEqualTo(4); } } diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java index 8121a09ca..2cd8f0de9 100644 --- a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java +++ b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java @@ -464,6 +464,23 @@ public class ColumnFamilyOptionsTest { } } + @Test + public void bottommostCompressionOptions() { + try (final ColumnFamilyOptions columnFamilyOptions = + new ColumnFamilyOptions(); + final CompressionOptions bottommostCompressionOptions = + new CompressionOptions() + .setMaxDictBytes(123)) { + + columnFamilyOptions.setBottommostCompressionOptions( + bottommostCompressionOptions); + assertThat(columnFamilyOptions.bottommostCompressionOptions()) + .isEqualTo(bottommostCompressionOptions); + assertThat(columnFamilyOptions.bottommostCompressionOptions() + .maxDictBytes()).isEqualTo(123); + } + } + @Test public void compressionOptions() { try (final ColumnFamilyOptions columnFamilyOptions @@ -542,6 +559,15 @@ public class ColumnFamilyOptionsTest { } } + @Test + public void ttl() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { + options.setTtl(1000 * 60); + assertThat(options.ttl()). + isEqualTo(1000 * 60); + } + } + @Test public void compactionOptionsUniversal() { try (final ColumnFamilyOptions opt = new ColumnFamilyOptions(); diff --git a/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java b/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java new file mode 100644 index 000000000..6c920439c --- /dev/null +++ b/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java @@ -0,0 +1,114 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
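The ColumnFamilyOptionsTest changes above exercise the new setBottommostCompressionOptions and setTtl setters. A minimal sketch of how they compose; the class name, compression type, and TTL value are illustrative assumptions, not part of the patch:

```java
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.CompressionOptions;
import org.rocksdb.CompressionType;

public class BottommostCompressionSketch {
  public static void main(final String[] args) {
    try (final CompressionOptions bottommostOpts = new CompressionOptions()
             .setEnabled(true)             // new in this patch
             .setMaxDictBytes(16 * 1024);
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
             .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION)
             .setBottommostCompressionOptions(bottommostOpts) // new in this patch
             .setTtl(30 * 24 * 60 * 60)) {                    // new in this patch
      // hand cfOpts to RocksDB.open(...) via a ColumnFamilyDescriptor
    }
  }
}
```

Note that ttl is expressed in seconds; the test above passes 1000 * 60 without a unit comment, which is easy to misread as milliseconds.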
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionJobInfoTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void columnFamilyName() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.columnFamilyName()) + .isEmpty(); + } + } + + @Test + public void status() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.status().getCode()) + .isEqualTo(Status.Code.Ok); + } + } + + @Test + public void threadId() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.threadId()) + .isEqualTo(0); + } + } + + @Test + public void jobId() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.jobId()) + .isEqualTo(0); + } + } + + @Test + public void baseInputLevel() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.baseInputLevel()) + .isEqualTo(0); + } + } + + @Test + public void outputLevel() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.outputLevel()) + .isEqualTo(0); + } + } + + @Test + public void inputFiles() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.inputFiles()) + .isEmpty(); + } + } + + @Test + public void outputFiles() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.outputFiles()) + .isEmpty(); + } + } + + @Test + public void tableProperties() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.tableProperties()) + .isEmpty(); + } + } + + @Test + public void compactionReason() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.compactionReason()) + .isEqualTo(CompactionReason.kUnknown); + } + } + + @Test + public void compression() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.compression()) + .isEqualTo(CompressionType.NO_COMPRESSION); + } + } + + @Test + public void stats() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.stats()) + .isNotNull(); + } + } +} diff --git a/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java b/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java new file mode 100644 index 000000000..7be7226da --- /dev/null +++ b/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java @@ -0,0 +1,196 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
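CompactionJobInfoTest above only checks a freshly constructed CompactionJobInfo, whose fields are all defaults. In practice the object is populated by a compaction; the following hedged sketch assumes the RocksDB#compactFiles overload that fills in a CompactionJobInfo, and the database path and SST file name are placeholders:

```java
import java.util.Arrays;
import java.util.List;

import org.rocksdb.*;

public class CompactFilesSketch {
  public static void main(final String[] args) throws RocksDBException {
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/compaction-db");
         final CompactionOptions compactionOptions = new CompactionOptions()
             .setMaxSubcompactions(2);
         final CompactionJobInfo jobInfo = new CompactionJobInfo()) {
      // "000012.sst" is a placeholder; real input names would come from
      // the live files of the database
      final List<String> outputs = db.compactFiles(compactionOptions,
          Arrays.asList("000012.sst"), 1 /* outputLevel */,
          -1 /* outputPathId */, jobInfo);
      System.out.println(jobInfo.status().getCodeString() + " -> " + outputs);
    }
  }
}
```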
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionJobStatsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void reset() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + compactionJobStats.reset(); + assertThat(compactionJobStats.elapsedMicros()).isEqualTo(0); + } + } + + @Test + public void add() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats(); + final CompactionJobStats otherCompactionJobStats = new CompactionJobStats()) { + compactionJobStats.add(otherCompactionJobStats); + } + } + + @Test + public void elapsedMicros() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.elapsedMicros()).isEqualTo(0); + } + } + + @Test + public void numInputRecords() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numInputRecords()).isEqualTo(0); + } + } + + @Test + public void numInputFiles() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numInputFiles()).isEqualTo(0); + } + } + + @Test + public void numInputFilesAtOutputLevel() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numInputFilesAtOutputLevel()).isEqualTo(0); + } + } + + @Test + public void numOutputRecords() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numOutputRecords()).isEqualTo(0); + } + } + + @Test + public void numOutputFiles() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numOutputFiles()).isEqualTo(0); + } + } + + @Test + public void isManualCompaction() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.isManualCompaction()).isFalse(); + } + } + + @Test + public void totalInputBytes() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.totalInputBytes()).isEqualTo(0); + } + } + + @Test + public void totalOutputBytes() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.totalOutputBytes()).isEqualTo(0); + } + } + + + @Test + public void numRecordsReplaced() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numRecordsReplaced()).isEqualTo(0); + } + } + + @Test + public void totalInputRawKeyBytes() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.totalInputRawKeyBytes()).isEqualTo(0); + } + } + + @Test + public void totalInputRawValueBytes() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.totalInputRawValueBytes()).isEqualTo(0); + } + } + + @Test + public void numInputDeletionRecords() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numInputDeletionRecords()).isEqualTo(0); + } + } + + @Test + public void numExpiredDeletionRecords() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + 
assertThat(compactionJobStats.numExpiredDeletionRecords()).isEqualTo(0); + } + } + + @Test + public void numCorruptKeys() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numCorruptKeys()).isEqualTo(0); + } + } + + @Test + public void fileWriteNanos() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.fileWriteNanos()).isEqualTo(0); + } + } + + @Test + public void fileRangeSyncNanos() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.fileRangeSyncNanos()).isEqualTo(0); + } + } + + @Test + public void fileFsyncNanos() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.fileFsyncNanos()).isEqualTo(0); + } + } + + @Test + public void filePrepareWriteNanos() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.filePrepareWriteNanos()).isEqualTo(0); + } + } + + @Test + public void smallestOutputKeyPrefix() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.smallestOutputKeyPrefix()).isEmpty(); + } + } + + @Test + public void largestOutputKeyPrefix() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.largestOutputKeyPrefix()).isEmpty(); + } + } + + @Test + public void numSingleDelFallthru() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numSingleDelFallthru()).isEqualTo(0); + } + } + + @Test + public void numSingleDelMismatch() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numSingleDelMismatch()).isEqualTo(0); + } + } +} diff --git a/java/src/test/java/org/rocksdb/CompactionOptionsTest.java b/java/src/test/java/org/rocksdb/CompactionOptionsTest.java new file mode 100644 index 000000000..b1726e866 --- /dev/null +++ b/java/src/test/java/org/rocksdb/CompactionOptionsTest.java @@ -0,0 +1,52 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
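Beyond reading individual counters as the tests above do, CompactionJobStats supports aggregation via add() and clearing via reset(). A minimal sketch, with an illustrative class name; the counters print as zero here because no compaction ran:

```java
import org.rocksdb.CompactionJobStats;

public class CompactionStatsAggregation {
  public static void main(final String[] args) {
    try (final CompactionJobStats totals = new CompactionJobStats();
         final CompactionJobStats jobStats = new CompactionJobStats()) {
      totals.add(jobStats);  // merge one job's counters into the running total
      System.out.println("records in: " + totals.numInputRecords()
          + ", records out: " + totals.numOutputRecords());
      totals.reset();        // clear the aggregate for the next run
    }
  }
}
```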
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionOptionsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void compression() { + try (final CompactionOptions compactionOptions = new CompactionOptions()) { + assertThat(compactionOptions.compression()) + .isEqualTo(CompressionType.SNAPPY_COMPRESSION); + compactionOptions.setCompression(CompressionType.NO_COMPRESSION); + assertThat(compactionOptions.compression()) + .isEqualTo(CompressionType.NO_COMPRESSION); + } + } + + @Test + public void outputFileSizeLimit() { + final long mb250 = 1024 * 1024 * 250; + try (final CompactionOptions compactionOptions = new CompactionOptions()) { + assertThat(compactionOptions.outputFileSizeLimit()) + .isEqualTo(-1); + compactionOptions.setOutputFileSizeLimit(mb250); + assertThat(compactionOptions.outputFileSizeLimit()) + .isEqualTo(mb250); + } + } + + @Test + public void maxSubcompactions() { + try (final CompactionOptions compactionOptions = new CompactionOptions()) { + assertThat(compactionOptions.maxSubcompactions()) + .isEqualTo(0); + compactionOptions.setMaxSubcompactions(9); + assertThat(compactionOptions.maxSubcompactions()) + .isEqualTo(9); + } + } +} diff --git a/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java index c49224ca3..116552c32 100644 --- a/java/src/test/java/org/rocksdb/CompressionOptionsTest.java +++ b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java @@ -50,4 +50,22 @@ public class CompressionOptionsTest { assertThat(opt.maxDictBytes()).isEqualTo(maxDictBytes); } } + + @Test + public void zstdMaxTrainBytes() { + final int zstdMaxTrainBytes = 999; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setZStdMaxTrainBytes(zstdMaxTrainBytes); + assertThat(opt.zstdMaxTrainBytes()).isEqualTo(zstdMaxTrainBytes); + } + } + + @Test + public void enabled() { + try(final CompressionOptions opt = new CompressionOptions()) { + assertThat(opt.enabled()).isFalse(); + opt.setEnabled(true); + assertThat(opt.enabled()).isTrue(); + } + } } diff --git a/java/src/test/java/org/rocksdb/DBOptionsTest.java b/java/src/test/java/org/rocksdb/DBOptionsTest.java index bad01c435..e6ebc46cd 100644 --- a/java/src/test/java/org/rocksdb/DBOptionsTest.java +++ b/java/src/test/java/org/rocksdb/DBOptionsTest.java @@ -534,6 +534,15 @@ public class DBOptionsTest { } } + + @Test + public void enablePipelinedWrite() { + try(final DBOptions opt = new DBOptions()) { + assertThat(opt.enablePipelinedWrite()).isFalse(); + opt.setEnablePipelinedWrite(true); + assertThat(opt.enablePipelinedWrite()).isTrue(); + } + } + @Test public void allowConcurrentMemtableWrite() { try (final DBOptions opt = new DBOptions()) { @@ -615,6 +624,38 @@ public class DBOptionsTest { } } + + @Test + public void walFilter() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.walFilter()).isNull(); + + try (final AbstractWalFilter walFilter = new AbstractWalFilter() { + @Override + public void columnFamilyLogNumberMap( + final Map<Integer, Long> cfLognumber, + final Map<String, Integer> cfNameId) { + // no-op + } + + @Override + public LogRecordFoundResult logRecordFound(final long logNumber, + final String logFileName, final WriteBatch batch, + final WriteBatch newBatch) { + return new LogRecordFoundResult( + WalProcessingOption.CONTINUE_PROCESSING, false); + } + + @Override + public
String name() { + return "test-wal-filter"; + } + }) { + opt.setWalFilter(walFilter); + assertThat(opt.walFilter()).isEqualTo(walFilter); + } + } + } + @Test public void failIfOptionsFileError() { try (final DBOptions opt = new DBOptions()) { @@ -651,6 +692,51 @@ public class DBOptionsTest { } } + @Test + public void allowIngestBehind() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.allowIngestBehind()).isFalse(); + opt.setAllowIngestBehind(true); + assertThat(opt.allowIngestBehind()).isTrue(); + } + } + + @Test + public void preserveDeletes() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.preserveDeletes()).isFalse(); + opt.setPreserveDeletes(true); + assertThat(opt.preserveDeletes()).isTrue(); + } + } + + @Test + public void twoWriteQueues() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.twoWriteQueues()).isFalse(); + opt.setTwoWriteQueues(true); + assertThat(opt.twoWriteQueues()).isTrue(); + } + } + + @Test + public void manualWalFlush() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.manualWalFlush()).isFalse(); + opt.setManualWalFlush(true); + assertThat(opt.manualWalFlush()).isTrue(); + } + } + + @Test + public void atomicFlush() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.atomicFlush()).isFalse(); + opt.setAtomicFlush(true); + assertThat(opt.atomicFlush()).isTrue(); + } + } + @Test public void rateLimiter() { try(final DBOptions options = new DBOptions(); diff --git a/java/src/test/java/org/rocksdb/DefaultEnvTest.java b/java/src/test/java/org/rocksdb/DefaultEnvTest.java new file mode 100644 index 000000000..a9cd0c021 --- /dev/null +++ b/java/src/test/java/org/rocksdb/DefaultEnvTest.java @@ -0,0 +1,113 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
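The walFilter test above installs a no-op filter. A slightly fuller sketch of an AbstractWalFilter subclass that ignores records from an arbitrary cut-off log number onwards; the cut-off rule is invented for illustration:

```java
import java.util.Map;

import org.rocksdb.AbstractWalFilter;
import org.rocksdb.WalProcessingOption;
import org.rocksdb.WriteBatch;

public class SkipFromLogNumberWalFilter extends AbstractWalFilter {
  private final long cutOffLogNumber;

  public SkipFromLogNumberWalFilter(final long cutOffLogNumber) {
    this.cutOffLogNumber = cutOffLogNumber;
  }

  @Override
  public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
      final Map<String, Integer> cfNameId) {
    // a real filter might derive its cut-off from these maps; no-op here
  }

  @Override
  public LogRecordFoundResult logRecordFound(final long logNumber,
      final String logFileName, final WriteBatch batch,
      final WriteBatch newBatch) {
    if (logNumber >= cutOffLogNumber) {
      // skip this record but keep replaying the rest of the logs
      return new LogRecordFoundResult(
          WalProcessingOption.IGNORE_CURRENT_RECORD, false);
    }
    return LogRecordFoundResult.CONTINUE_UNCHANGED;
  }

  @Override
  public String name() {
    return "skip-from-log-number-wal-filter";
  }
}
```

A filter like this is registered with DBOptions#setWalFilter, as in the test above, and must remain open for the lifetime of the database.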
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.Collection; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class DefaultEnvTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void backgroundThreads() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + defaultEnv.setBackgroundThreads(5, Priority.BOTTOM); + assertThat(defaultEnv.getBackgroundThreads(Priority.BOTTOM)).isEqualTo(5); + + defaultEnv.setBackgroundThreads(5); + assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(5); + + defaultEnv.setBackgroundThreads(5, Priority.LOW); + assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(5); + + defaultEnv.setBackgroundThreads(5, Priority.HIGH); + assertThat(defaultEnv.getBackgroundThreads(Priority.HIGH)).isEqualTo(5); + } + } + + @Test + public void threadPoolQueueLen() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + assertThat(defaultEnv.getThreadPoolQueueLen(Priority.BOTTOM)).isEqualTo(0); + assertThat(defaultEnv.getThreadPoolQueueLen(Priority.LOW)).isEqualTo(0); + assertThat(defaultEnv.getThreadPoolQueueLen(Priority.HIGH)).isEqualTo(0); + } + } + + @Test + public void incBackgroundThreadsIfNeeded() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.BOTTOM); + assertThat(defaultEnv.getBackgroundThreads(Priority.BOTTOM)).isEqualTo(20); + + defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.LOW); + assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(20); + + defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.HIGH); + assertThat(defaultEnv.getBackgroundThreads(Priority.HIGH)).isEqualTo(20); + } + } + + @Test + public void lowerThreadPoolIOPriority() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + defaultEnv.lowerThreadPoolIOPriority(Priority.BOTTOM); + + defaultEnv.lowerThreadPoolIOPriority(Priority.LOW); + + defaultEnv.lowerThreadPoolIOPriority(Priority.HIGH); + } + } + + @Test + public void lowerThreadPoolCPUPriority() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + defaultEnv.lowerThreadPoolCPUPriority(Priority.BOTTOM); + + defaultEnv.lowerThreadPoolCPUPriority(Priority.LOW); + + defaultEnv.lowerThreadPoolCPUPriority(Priority.HIGH); + } + } + + @Test + public void threadList() throws RocksDBException { + try (final Env defaultEnv = RocksEnv.getDefault()) { + final Collection<ThreadStatus> threadList = defaultEnv.getThreadList(); + assertThat(threadList.size()).isGreaterThan(0); + } + } + + @Test + public void threadList_integration() throws RocksDBException { + try (final Env env = RocksEnv.getDefault(); + final Options opt = new Options() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true) + .setEnv(env)) { + // open database + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + + final List<ThreadStatus> threadList = env.getThreadList(); + assertThat(threadList.size()).isGreaterThan(0); + } + } + } +} diff --git a/java/src/test/java/org/rocksdb/EnvOptionsTest.java b/java/src/test/java/org/rocksdb/EnvOptionsTest.java index 9933b1e1d..9be61b7d7 100644 --- a/java/src/test/java/org/rocksdb/EnvOptionsTest.java +++ b/java/src/test/java/org/rocksdb/EnvOptionsTest.java @@ -18,6 +18,18 @@ public class EnvOptionsTest {
public static final Random rand = PlatformRandomHelper.getPlatformSpecificRandomFactory(); + @Test + public void dbOptionsConstructor() { + final long compactionReadaheadSize = 4 * 1024 * 1024; + try (final DBOptions dbOptions = new DBOptions() + .setCompactionReadaheadSize(compactionReadaheadSize)) { + try (final EnvOptions envOptions = new EnvOptions(dbOptions)) { + assertThat(envOptions.compactionReadaheadSize()) + .isEqualTo(compactionReadaheadSize); + } + } + } + @Test public void useMmapReads() { try (final EnvOptions envOptions = new EnvOptions()) { diff --git a/java/src/test/java/org/rocksdb/FlushOptionsTest.java b/java/src/test/java/org/rocksdb/FlushOptionsTest.java new file mode 100644 index 000000000..f90ae911d --- /dev/null +++ b/java/src/test/java/org/rocksdb/FlushOptionsTest.java @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class FlushOptionsTest { + + @Test + public void waitForFlush() { + try (final FlushOptions flushOptions = new FlushOptions()) { + assertThat(flushOptions.waitForFlush()).isTrue(); + flushOptions.setWaitForFlush(false); + assertThat(flushOptions.waitForFlush()).isFalse(); + } + } + + @Test + public void allowWriteStall() { + try (final FlushOptions flushOptions = new FlushOptions()) { + assertThat(flushOptions.allowWriteStall()).isFalse(); + flushOptions.setAllowWriteStall(true); + assertThat(flushOptions.allowWriteStall()).isTrue(); + } + } +} diff --git a/java/src/test/java/org/rocksdb/HdfsEnvTest.java b/java/src/test/java/org/rocksdb/HdfsEnvTest.java new file mode 100644 index 000000000..3a91c5cad --- /dev/null +++ b/java/src/test/java/org/rocksdb/HdfsEnvTest.java @@ -0,0 +1,45 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static java.nio.charset.StandardCharsets.UTF_8; + +public class HdfsEnvTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + // expect org.rocksdb.RocksDBException: Not compiled with hdfs support + @Test(expected = RocksDBException.class) + public void construct() throws RocksDBException { + try (final Env env = new HdfsEnv("hdfs://localhost:5000")) { + // no-op + } + } + + // expect org.rocksdb.RocksDBException: Not compiled with hdfs support + @Test(expected = RocksDBException.class) + public void construct_integration() throws RocksDBException { + try (final Env env = new HdfsEnv("hdfs://localhost:5000"); + final Options options = new Options() + .setCreateIfMissing(true) + .setEnv(env); + ) { + try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getPath())) { + db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + } + } + } +} diff --git a/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java b/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java index 83e0dd17a..a3973ccd9 100644 --- a/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java +++ b/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java @@ -84,4 +84,24 @@ public class IngestExternalFileOptionsTest { assertThat(options.allowBlockingFlush()).isEqualTo(allowBlockingFlush); } } + + @Test + public void ingestBehind() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + assertThat(options.ingestBehind()).isFalse(); + options.setIngestBehind(true); + assertThat(options.ingestBehind()).isTrue(); + } + } + + @Test + public void writeGlobalSeqno() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + assertThat(options.writeGlobalSeqno()).isTrue(); + options.setWriteGlobalSeqno(false); + assertThat(options.writeGlobalSeqno()).isFalse(); + } + } } diff --git a/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java b/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java new file mode 100644 index 000000000..1ce3e1177 --- /dev/null +++ b/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java @@ -0,0 +1,84 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+package org.rocksdb; + +import org.junit.Test; +import org.rocksdb.MutableDBOptions.MutableDBOptionsBuilder; + +import java.util.NoSuchElementException; + +import static org.assertj.core.api.Assertions.assertThat; + +public class MutableDBOptionsTest { + + @Test + public void builder() { + final MutableDBOptionsBuilder builder = + MutableDBOptions.builder(); + builder + .setBytesPerSync(1024 * 1024 * 7) + .setMaxBackgroundJobs(5) + .setAvoidFlushDuringShutdown(false); + + assertThat(builder.bytesPerSync()).isEqualTo(1024 * 1024 * 7); + assertThat(builder.maxBackgroundJobs()).isEqualTo(5); + assertThat(builder.avoidFlushDuringShutdown()).isEqualTo(false); + } + + @Test(expected = NoSuchElementException.class) + public void builder_getWhenNotSet() { + final MutableDBOptionsBuilder builder = + MutableDBOptions.builder(); + + builder.bytesPerSync(); + } + + @Test + public void builder_build() { + final MutableDBOptions options = MutableDBOptions + .builder() + .setBytesPerSync(1024 * 1024 * 7) + .setMaxBackgroundJobs(5) + .build(); + + assertThat(options.getKeys().length).isEqualTo(2); + assertThat(options.getValues().length).isEqualTo(2); + assertThat(options.getKeys()[0]) + .isEqualTo( + MutableDBOptions.DBOption.bytes_per_sync.name()); + assertThat(options.getValues()[0]).isEqualTo("7340032"); + assertThat(options.getKeys()[1]) + .isEqualTo( + MutableDBOptions.DBOption.max_background_jobs.name()); + assertThat(options.getValues()[1]).isEqualTo("5"); + } + + @Test + public void mutableColumnFamilyOptions_toString() { + final String str = MutableDBOptions + .builder() + .setMaxOpenFiles(99) + .setDelayedWriteRate(789) + .setAvoidFlushDuringShutdown(true) + .build() + .toString(); + + assertThat(str).isEqualTo("max_open_files=99;delayed_write_rate=789;" + + "avoid_flush_during_shutdown=true"); + } + + @Test + public void mutableColumnFamilyOptions_parse() { + final String str = "max_open_files=99;delayed_write_rate=789;" + + "avoid_flush_during_shutdown=true"; + + final MutableDBOptionsBuilder builder = + MutableDBOptions.parse(str); + + assertThat(builder.maxOpenFiles()).isEqualTo(99); + assertThat(builder.delayedWriteRate()).isEqualTo(789); + assertThat(builder.avoidFlushDuringShutdown()).isEqualTo(true); + } +} diff --git a/java/src/test/java/org/rocksdb/OptionsTest.java b/java/src/test/java/org/rocksdb/OptionsTest.java index e145a5e7c..e27a33d7d 100644 --- a/java/src/test/java/org/rocksdb/OptionsTest.java +++ b/java/src/test/java/org/rocksdb/OptionsTest.java @@ -6,10 +6,7 @@ package org.rocksdb; import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Random; +import java.util.*; import org.junit.ClassRule; import org.junit.Test; @@ -756,6 +753,15 @@ public class OptionsTest { } } + @Test + public void enablePipelinedWrite() { + try(final Options opt = new Options()) { + assertThat(opt.enablePipelinedWrite()).isFalse(); + opt.setEnablePipelinedWrite(true); + assertThat(opt.enablePipelinedWrite()).isTrue(); + } + } + @Test public void allowConcurrentMemtableWrite() { try (final Options opt = new Options()) { @@ -837,6 +843,38 @@ public class OptionsTest { } } + @Test + public void walFilter() { + try (final Options opt = new Options()) { + assertThat(opt.walFilter()).isNull(); + + try (final AbstractWalFilter walFilter = new AbstractWalFilter() { + @Override + public void columnFamilyLogNumberMap( + final Map cfLognumber, + final Map cfNameId) { + // no-op + } + + @Override + public LogRecordFoundResult 
logRecordFound(final long logNumber, + final String logFileName, final WriteBatch batch, + final WriteBatch newBatch) { + return new LogRecordFoundResult( + WalProcessingOption.CONTINUE_PROCESSING, false); + } + + @Override + public String name() { + return "test-wal-filter"; + } + }) { + opt.setWalFilter(walFilter); + assertThat(opt.walFilter()).isEqualTo(walFilter); + } + } + } + @Test public void failIfOptionsFileError() { try (final Options opt = new Options()) { @@ -873,6 +911,52 @@ public class OptionsTest { } } + + @Test + public void allowIngestBehind() { + try (final Options opt = new Options()) { + assertThat(opt.allowIngestBehind()).isFalse(); + opt.setAllowIngestBehind(true); + assertThat(opt.allowIngestBehind()).isTrue(); + } + } + + @Test + public void preserveDeletes() { + try (final Options opt = new Options()) { + assertThat(opt.preserveDeletes()).isFalse(); + opt.setPreserveDeletes(true); + assertThat(opt.preserveDeletes()).isTrue(); + } + } + + @Test + public void twoWriteQueues() { + try (final Options opt = new Options()) { + assertThat(opt.twoWriteQueues()).isFalse(); + opt.setTwoWriteQueues(true); + assertThat(opt.twoWriteQueues()).isTrue(); + } + } + + @Test + public void manualWalFlush() { + try (final Options opt = new Options()) { + assertThat(opt.manualWalFlush()).isFalse(); + opt.setManualWalFlush(true); + assertThat(opt.manualWalFlush()).isTrue(); + } + } + + @Test + public void atomicFlush() { + try (final Options opt = new Options()) { + assertThat(opt.atomicFlush()).isFalse(); + opt.setAtomicFlush(true); + assertThat(opt.atomicFlush()).isTrue(); + } + } + @Test public void env() { try (final Options options = new Options(); @@ -966,6 +1050,20 @@ public class OptionsTest { } } + @Test + public void bottommostCompressionOptions() { + try (final Options options = new Options(); + final CompressionOptions bottommostCompressionOptions = new CompressionOptions() + .setMaxDictBytes(123)) { + + options.setBottommostCompressionOptions(bottommostCompressionOptions); + assertThat(options.bottommostCompressionOptions()) + .isEqualTo(bottommostCompressionOptions); + assertThat(options.bottommostCompressionOptions().maxDictBytes()) + .isEqualTo(123); + } + } + @Test public void compressionOptions() { try (final Options options = new Options(); @@ -1108,6 +1206,15 @@ public class OptionsTest { } } + @Test + public void ttl() { + try (final Options options = new Options()) { + options.setTtl(1000 * 60); + assertThat(options.ttl()). 
+ isEqualTo(1000 * 60); + } + } + @Test public void compactionOptionsUniversal() { try (final Options options = new Options(); diff --git a/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/java/src/test/java/org/rocksdb/ReadOptionsTest.java index 4e860ae4c..9708cd0b1 100644 --- a/java/src/test/java/org/rocksdb/ReadOptionsTest.java +++ b/java/src/test/java/org/rocksdb/ReadOptionsTest.java @@ -24,6 +24,30 @@ public class ReadOptionsTest { @Rule public ExpectedException exception = ExpectedException.none(); + @Test + public void altConstructor() { + try (final ReadOptions opt = new ReadOptions(true, true)) { + assertThat(opt.verifyChecksums()).isTrue(); + assertThat(opt.fillCache()).isTrue(); + } + } + + @Test + public void copyConstructor() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setVerifyChecksums(false); + opt.setFillCache(false); + opt.setIterateUpperBound(buildRandomSlice()); + opt.setIterateLowerBound(buildRandomSlice()); + try (final ReadOptions other = new ReadOptions(opt)) { + assertThat(opt.verifyChecksums()).isEqualTo(other.verifyChecksums()); + assertThat(opt.fillCache()).isEqualTo(other.fillCache()); + assertThat(Arrays.equals(opt.iterateUpperBound().data(), other.iterateUpperBound().data())).isTrue(); + assertThat(Arrays.equals(opt.iterateLowerBound().data(), other.iterateLowerBound().data())).isTrue(); + } + } + } + @Test public void verifyChecksum() { try (final ReadOptions opt = new ReadOptions()) { @@ -161,17 +185,20 @@ public class ReadOptionsTest { } @Test - public void copyConstructor() { + public void tableFilter() { + try (final ReadOptions opt = new ReadOptions(); + final AbstractTableFilter allTablesFilter = new AllTablesFilter()) { + opt.setTableFilter(allTablesFilter); + } + } + + @Test + public void iterStartSeqnum() { try (final ReadOptions opt = new ReadOptions()) { - opt.setVerifyChecksums(false); - opt.setFillCache(false); - opt.setIterateUpperBound(buildRandomSlice()); - opt.setIterateLowerBound(buildRandomSlice()); - ReadOptions other = new ReadOptions(opt); - assertThat(opt.verifyChecksums()).isEqualTo(other.verifyChecksums()); - assertThat(opt.fillCache()).isEqualTo(other.fillCache()); - assertThat(Arrays.equals(opt.iterateUpperBound().data(), other.iterateUpperBound().data())).isTrue(); - assertThat(Arrays.equals(opt.iterateLowerBound().data(), other.iterateLowerBound().data())).isTrue(); + assertThat(opt.iterStartSeqnum()).isEqualTo(0); + + opt.setIterStartSeqnum(10); + assertThat(opt.iterStartSeqnum()).isEqualTo(10); } } @@ -286,4 +313,10 @@ public class ReadOptionsTest { return new Slice(sliceBytes); } + private static class AllTablesFilter extends AbstractTableFilter { + @Override + public boolean filter(final TableProperties tableProperties) { + return true; + } + } } diff --git a/java/src/test/java/org/rocksdb/RocksDBTest.java b/java/src/test/java/org/rocksdb/RocksDBTest.java index 495215ac2..da1b1c971 100644 --- a/java/src/test/java/org/rocksdb/RocksDBTest.java +++ b/java/src/test/java/org/rocksdb/RocksDBTest.java @@ -4,17 +4,14 @@ // (found in the LICENSE.Apache file in the root directory). 
 package org.rocksdb;
 
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.*;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.TemporaryFolder;
 
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.fail;
 
@@ -62,6 +59,130 @@ public class RocksDBTest {
     }
   }
 
+  @Test
+  public void createColumnFamily() throws RocksDBException {
+    final byte[] col1Name = "col1".getBytes(UTF_8);
+
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+    ) {
+      try (final ColumnFamilyHandle col1 =
+               db.createColumnFamily(new ColumnFamilyDescriptor(col1Name, cfOpts))) {
+        assertThat(col1).isNotNull();
+        assertThat(col1.getName()).isEqualTo(col1Name);
+      }
+    }
+
+    final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+        Arrays.asList(
+            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor(col1Name)),
+        cfHandles)) {
+      try {
+        assertThat(cfHandles.size()).isEqualTo(2);
+        assertThat(cfHandles.get(1)).isNotNull();
+        assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+      } finally {
+        for (final ColumnFamilyHandle cfHandle :
+            cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+  }
+
+
+  @Test
+  public void createColumnFamilies() throws RocksDBException {
+    final byte[] col1Name = "col1".getBytes(UTF_8);
+    final byte[] col2Name = "col2".getBytes(UTF_8);
+
+    List<ColumnFamilyHandle> cfHandles;
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+    ) {
+      cfHandles =
+          db.createColumnFamilies(cfOpts, Arrays.asList(col1Name, col2Name));
+      try {
+        assertThat(cfHandles).isNotNull();
+        assertThat(cfHandles.size()).isEqualTo(2);
+        assertThat(cfHandles.get(0).getName()).isEqualTo(col1Name);
+        assertThat(cfHandles.get(1).getName()).isEqualTo(col2Name);
+      } finally {
+        for (final ColumnFamilyHandle cfHandle : cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+
+    cfHandles = new ArrayList<>();
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+        Arrays.asList(
+            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor(col1Name),
+            new ColumnFamilyDescriptor(col2Name)),
+        cfHandles)) {
+      try {
+        assertThat(cfHandles.size()).isEqualTo(3);
+        assertThat(cfHandles.get(1)).isNotNull();
+        assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+        assertThat(cfHandles.get(2)).isNotNull();
+        assertThat(cfHandles.get(2).getName()).isEqualTo(col2Name);
+      } finally {
+        for (final ColumnFamilyHandle cfHandle : cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void createColumnFamiliesFromDescriptors() throws RocksDBException {
+    final byte[] col1Name = "col1".getBytes(UTF_8);
+    final byte[] col2Name = "col2".getBytes(UTF_8);
+
+    List<ColumnFamilyHandle> cfHandles;
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+    ) {
+      cfHandles =
+          db.createColumnFamilies(Arrays.asList(
+              new ColumnFamilyDescriptor(col1Name, cfOpts),
+              new ColumnFamilyDescriptor(col2Name, cfOpts)));
+      try {
+        assertThat(cfHandles).isNotNull();
+        assertThat(cfHandles.size()).isEqualTo(2);
+
assertThat(cfHandles.get(0).getName()).isEqualTo(col1Name); + assertThat(cfHandles.get(1).getName()).isEqualTo(col2Name); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + + cfHandles = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor(col1Name), + new ColumnFamilyDescriptor(col2Name)), + cfHandles)) { + try { + assertThat(cfHandles.size()).isEqualTo(3); + assertThat(cfHandles.get(1)).isNotNull(); + assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name); + assertThat(cfHandles.get(2)).isNotNull(); + assertThat(cfHandles.get(2).getName()).isEqualTo(col2Name); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + @Test public void put() throws RocksDBException { try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); @@ -934,4 +1055,500 @@ public class RocksDBTest { } } } + + @Test + public void getApproximateSizes() throws RocksDBException { + final byte key1[] = "key1".getBytes(UTF_8); + final byte key2[] = "key2".getBytes(UTF_8); + final byte key3[] = "key3".getBytes(UTF_8); + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.put(key1, key1); + db.put(key2, key2); + db.put(key3, key3); + + final long[] sizes = db.getApproximateSizes( + Arrays.asList( + new Range(new Slice(key1), new Slice(key2)), + new Range(new Slice(key2), new Slice(key3)) + ), + SizeApproximationFlag.INCLUDE_FILES, + SizeApproximationFlag.INCLUDE_MEMTABLES); + + assertThat(sizes.length).isEqualTo(2); + assertThat(sizes[0]).isEqualTo(0); + assertThat(sizes[1]).isGreaterThanOrEqualTo(1); + } + } + } + + @Test + public void getApproximateMemTableStats() throws RocksDBException { + final byte key1[] = "key1".getBytes(UTF_8); + final byte key2[] = "key2".getBytes(UTF_8); + final byte key3[] = "key3".getBytes(UTF_8); + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.put(key1, key1); + db.put(key2, key2); + db.put(key3, key3); + + final RocksDB.CountAndSize stats = + db.getApproximateMemTableStats( + new Range(new Slice(key1), new Slice(key3))); + + assertThat(stats).isNotNull(); + assertThat(stats.count).isGreaterThan(1); + assertThat(stats.size).isGreaterThan(1); + } + } + } + + @Ignore("TODO(AR) re-enable when ready!") + @Test + public void compactFiles() throws RocksDBException { + final int kTestKeySize = 16; + final int kTestValueSize = 984; + final int kEntrySize = kTestKeySize + kTestValueSize; + final int kEntriesPerBuffer = 100; + final int writeBufferSize = kEntrySize * kEntriesPerBuffer; + final byte[] cfName = "pikachu".getBytes(UTF_8); + + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWriteBufferSize(writeBufferSize) + .setCompactionStyle(CompactionStyle.LEVEL) + .setTargetFileSizeBase(writeBufferSize) + .setMaxBytesForLevelBase(writeBufferSize * 2) + .setLevel0StopWritesTrigger(2) + .setMaxBytesForLevelMultiplier(2) + .setCompressionType(CompressionType.NO_COMPRESSION) + .setMaxSubcompactions(4)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, 
dbPath); + final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(options)) { + db.createColumnFamily(new ColumnFamilyDescriptor(cfName, + cfOptions)).close(); + } + + try (final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(options)) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions), + new ColumnFamilyDescriptor(cfName, cfOptions) + ); + final List cfHandles = new ArrayList<>(); + try (final DBOptions dbOptions = new DBOptions(options); + final RocksDB db = RocksDB.open(dbOptions, dbPath, cfDescriptors, + cfHandles); + ) { + try (final FlushOptions flushOptions = new FlushOptions() + .setWaitForFlush(true) + .setAllowWriteStall(true); + final CompactionOptions compactionOptions = new CompactionOptions()) { + final Random rnd = new Random(301); + for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) { + final byte[] value = new byte[kTestValueSize]; + rnd.nextBytes(value); + db.put(cfHandles.get(1), Integer.toString(key).getBytes(UTF_8), + value); + } + db.flush(flushOptions, cfHandles); + + final RocksDB.LiveFiles liveFiles = db.getLiveFiles(); + final List compactedFiles = + db.compactFiles(compactionOptions, cfHandles.get(1), + liveFiles.files, 1, -1, null); + assertThat(compactedFiles).isNotEmpty(); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + } + } + + @Test + public void enableAutoCompaction() throws RocksDBException { + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true)) { + final List cfDescs = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY) + ); + final List cfHandles = new ArrayList<>(); + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) { + try { + db.enableAutoCompaction(cfHandles); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + } + + @Test + public void numberLevels() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + assertThat(db.numberLevels()).isEqualTo(7); + } + } + } + + @Test + public void maxMemCompactionLevel() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + assertThat(db.maxMemCompactionLevel()).isEqualTo(0); + } + } + } + + @Test + public void level0StopWriteTrigger() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + assertThat(db.level0StopWriteTrigger()).isEqualTo(36); + } + } + } + + @Test + public void getName() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + assertThat(db.getName()).isEqualTo(dbPath); + } + } + } + + @Test + public void getEnv() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = 
RocksDB.open(options, dbPath)) { + assertThat(db.getEnv()).isEqualTo(Env.getDefault()); + } + } + } + + @Test + public void flush() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath); + final FlushOptions flushOptions = new FlushOptions()) { + db.flush(flushOptions); + } + } + } + + @Test + public void flushWal() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.flushWal(true); + } + } + } + + @Test + public void syncWal() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.syncWal(); + } + } + } + + @Test + public void setPreserveDeletesSequenceNumber() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + assertThat(db.setPreserveDeletesSequenceNumber(db.getLatestSequenceNumber())) + .isFalse(); + } + } + } + + @Test + public void getLiveFiles() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + final RocksDB.LiveFiles livefiles = db.getLiveFiles(true); + assertThat(livefiles).isNotNull(); + assertThat(livefiles.manifestFileSize).isEqualTo(13); + assertThat(livefiles.files.size()).isEqualTo(3); + assertThat(livefiles.files.get(0)).isEqualTo("/CURRENT"); + assertThat(livefiles.files.get(1)).isEqualTo("/MANIFEST-000001"); + assertThat(livefiles.files.get(2)).isEqualTo("/OPTIONS-000005"); + } + } + } + + @Test + public void getSortedWalFiles() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + final List logFiles = db.getSortedWalFiles(); + assertThat(logFiles).isNotNull(); + assertThat(logFiles.size()).isEqualTo(1); + assertThat(logFiles.get(0).type()) + .isEqualTo(WalFileType.kAliveLogFile); + } + } + } + + @Test + public void deleteFile() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.deleteFile("unknown"); + } + } + } + + @Test + public void getLiveFilesMetaData() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + final List liveFilesMetaData + = db.getLiveFilesMetaData(); + assertThat(liveFilesMetaData).isEmpty(); + } + } + } + + @Test + public void getColumnFamilyMetaData() throws RocksDBException { + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true)) { + final List cfDescs = Arrays.asList( + new 
ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY) + ); + final List cfHandles = new ArrayList<>(); + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) { + db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + try { + final ColumnFamilyMetaData cfMetadata = + db.getColumnFamilyMetaData(cfHandles.get(0)); + assertThat(cfMetadata).isNotNull(); + assertThat(cfMetadata.name()).isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY); + assertThat(cfMetadata.levels().size()).isEqualTo(7); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + } + + @Test + public void verifyChecksum() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.verifyChecksum(); + } + } + } + + @Test + public void getPropertiesOfAllTables() throws RocksDBException { + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true)) { + final List cfDescs = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY) + ); + final List cfHandles = new ArrayList<>(); + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) { + db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + try { + final Map properties = + db.getPropertiesOfAllTables(cfHandles.get(0)); + assertThat(properties).isNotNull(); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + } + + @Test + public void getPropertiesOfTablesInRange() throws RocksDBException { + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true)) { + final List cfDescs = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY) + ); + final List cfHandles = new ArrayList<>(); + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) { + db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + db.put(cfHandles.get(0), "key2".getBytes(UTF_8), "value2".getBytes(UTF_8)); + db.put(cfHandles.get(0), "key3".getBytes(UTF_8), "value3".getBytes(UTF_8)); + try { + final Range range = new Range( + new Slice("key1".getBytes(UTF_8)), + new Slice("key3".getBytes(UTF_8))); + final Map properties = + db.getPropertiesOfTablesInRange( + cfHandles.get(0), Arrays.asList(range)); + assertThat(properties).isNotNull(); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + } + + @Test + public void suggestCompactRange() throws RocksDBException { + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true)) { + final List cfDescs = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY) + ); + final List cfHandles = new ArrayList<>(); + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) { + db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + db.put(cfHandles.get(0), "key2".getBytes(UTF_8), "value2".getBytes(UTF_8)); + db.put(cfHandles.get(0), "key3".getBytes(UTF_8), "value3".getBytes(UTF_8)); + try { + final Range range = db.suggestCompactRange(cfHandles.get(0)); + 
assertThat(range).isNotNull(); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + } + + @Test + public void promoteL0() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.promoteL0(2); + } + } + } + + @Test + public void startTrace() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + final TraceOptions traceOptions = new TraceOptions(); + + try (final InMemoryTraceWriter traceWriter = new InMemoryTraceWriter()) { + db.startTrace(traceOptions, traceWriter); + + db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + + db.endTrace(); + + final List writes = traceWriter.getWrites(); + assertThat(writes.size()).isGreaterThan(0); + } + } + } + } + + @Test + public void setDBOptions() throws RocksDBException { + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions() + .setWriteBufferSize(4096)) { + + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)); + + // open database + final List columnFamilyHandles = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) { + try { + final MutableDBOptions mutableOptions = + MutableDBOptions.builder() + .setBytesPerSync(1024 * 1027 * 7) + .setAvoidFlushDuringShutdown(false) + .build(); + + db.setDBOptions(mutableOptions); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + private static class InMemoryTraceWriter extends AbstractTraceWriter { + private final List writes = new ArrayList<>(); + private volatile boolean closed = false; + + @Override + public void write(final Slice slice) { + if (closed) { + return; + } + final byte[] data = slice.data(); + final byte[] dataCopy = new byte[data.length]; + System.arraycopy(data, 0, dataCopy, 0, data.length); + writes.add(dataCopy); + } + + @Override + public void closeWriter() { + closed = true; + } + + @Override + public long getFileSize() { + long size = 0; + for (int i = 0; i < writes.size(); i++) { + size += writes.get(i).length; + } + return size; + } + + public List getWrites() { + return writes; + } + } } diff --git a/java/src/test/java/org/rocksdb/RocksEnvTest.java b/java/src/test/java/org/rocksdb/RocksEnvTest.java deleted file mode 100644 index dfb796107..000000000 --- a/java/src/test/java/org/rocksdb/RocksEnvTest.java +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
- -package org.rocksdb; - -import org.junit.ClassRule; -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThat; - -public class RocksEnvTest { - - @ClassRule - public static final RocksMemoryResource rocksMemoryResource = - new RocksMemoryResource(); - - @Test - public void rocksEnv() { - try (final Env rocksEnv = RocksEnv.getDefault()) { - rocksEnv.setBackgroundThreads(5); - // default rocksenv will always return zero for flush pool - // no matter what was set via setBackgroundThreads - assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)). - isEqualTo(0); - rocksEnv.setBackgroundThreads(5, RocksEnv.FLUSH_POOL); - // default rocksenv will always return zero for flush pool - // no matter what was set via setBackgroundThreads - assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)). - isEqualTo(0); - rocksEnv.setBackgroundThreads(5, RocksEnv.COMPACTION_POOL); - // default rocksenv will always return zero for compaction pool - // no matter what was set via setBackgroundThreads - assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL)). - isEqualTo(0); - } - } -} diff --git a/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java index 04fae2e95..8e429d4ec 100644 --- a/java/src/test/java/org/rocksdb/RocksMemEnvTest.java +++ b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java @@ -33,7 +33,7 @@ public class RocksMemEnvTest { "baz".getBytes() }; - try (final Env env = new RocksMemEnv(); + try (final Env env = new RocksMemEnv(Env.getDefault()); final Options options = new Options() .setCreateIfMissing(true) .setEnv(env); @@ -107,7 +107,7 @@ public class RocksMemEnvTest { "baz".getBytes() }; - try (final Env env = new RocksMemEnv(); + try (final Env env = new RocksMemEnv(Env.getDefault()); final Options options = new Options() .setCreateIfMissing(true) .setEnv(env); @@ -136,7 +136,7 @@ public class RocksMemEnvTest { @Test(expected = RocksDBException.class) public void createIfMissingFalse() throws RocksDBException { - try (final Env env = new RocksMemEnv(); + try (final Env env = new RocksMemEnv(Env.getDefault()); final Options options = new Options() .setCreateIfMissing(false) .setEnv(env); diff --git a/java/src/test/java/org/rocksdb/TableFilterTest.java b/java/src/test/java/org/rocksdb/TableFilterTest.java new file mode 100644 index 000000000..862696763 --- /dev/null +++ b/java/src/test/java/org/rocksdb/TableFilterTest.java @@ -0,0 +1,105 @@ +package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +public class TableFilterTest { + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void readOptions() throws RocksDBException { + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). 
+ setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions() + ) { + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + + final List columnFamilyHandles = new ArrayList<>(); + + // open database + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, + columnFamilyHandles)) { + + try (final CfNameCollectionTableFilter cfNameCollectingTableFilter = + new CfNameCollectionTableFilter(); + final FlushOptions flushOptions = + new FlushOptions().setWaitForFlush(true); + final ReadOptions readOptions = + new ReadOptions().setTableFilter(cfNameCollectingTableFilter)) { + + db.put(columnFamilyHandles.get(0), + "key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(0), + "key2".getBytes(UTF_8), "value2".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(0), + "key3".getBytes(UTF_8), "value3".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(1), + "key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(1), + "key2".getBytes(UTF_8), "value2".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(1), + "key3".getBytes(UTF_8), "value3".getBytes(UTF_8)); + + db.flush(flushOptions, columnFamilyHandles); + + try (final RocksIterator iterator = + db.newIterator(columnFamilyHandles.get(0), readOptions)) { + iterator.seekToFirst(); + while (iterator.isValid()) { + iterator.key(); + iterator.value(); + iterator.next(); + } + } + + try (final RocksIterator iterator = + db.newIterator(columnFamilyHandles.get(1), readOptions)) { + iterator.seekToFirst(); + while (iterator.isValid()) { + iterator.key(); + iterator.value(); + iterator.next(); + } + } + + assertThat(cfNameCollectingTableFilter.cfNames.size()).isEqualTo(2); + assertThat(cfNameCollectingTableFilter.cfNames.get(0)) + .isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY); + assertThat(cfNameCollectingTableFilter.cfNames.get(1)) + .isEqualTo("new_cf".getBytes(UTF_8)); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + columnFamilyHandle.close(); + } + } + } + } + } + + private static class CfNameCollectionTableFilter extends AbstractTableFilter { + private final List cfNames = new ArrayList<>(); + + @Override + public boolean filter(final TableProperties tableProperties) { + cfNames.add(tableProperties.getColumnFamilyName()); + return true; + } + } +} diff --git a/java/src/test/java/org/rocksdb/TimedEnvTest.java b/java/src/test/java/org/rocksdb/TimedEnvTest.java new file mode 100644 index 000000000..2eb5eea82 --- /dev/null +++ b/java/src/test/java/org/rocksdb/TimedEnvTest.java @@ -0,0 +1,43 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static java.nio.charset.StandardCharsets.UTF_8; + +public class TimedEnvTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void construct() throws RocksDBException { + try (final Env env = new TimedEnv(Env.getDefault())) { + // no-op + } + } + + @Test + public void construct_integration() throws RocksDBException { + try (final Env env = new TimedEnv(Env.getDefault()); + final Options options = new Options() + .setCreateIfMissing(true) + .setEnv(env); + ) { + try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getPath())) { + db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + } + } + } +} diff --git a/java/src/test/java/org/rocksdb/WalFilterTest.java b/java/src/test/java/org/rocksdb/WalFilterTest.java new file mode 100644 index 000000000..aeb49165d --- /dev/null +++ b/java/src/test/java/org/rocksdb/WalFilterTest.java @@ -0,0 +1,164 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.rocksdb.util.TestUtil.*; + +public class WalFilterTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void walFilter() throws RocksDBException { + // Create 3 batches with two keys each + final byte[][][] batchKeys = { + new byte[][] { + u("key1"), + u("key2") + }, + new byte[][] { + u("key3"), + u("key4") + }, + new byte[][] { + u("key5"), + u("key6") + } + + }; + + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor(u("pikachu")) + ); + final List cfHandles = new ArrayList<>(); + + // Test with all WAL processing options + for (final WalProcessingOption option : WalProcessingOption.values()) { + try (final Options options = optionsForLogIterTest(); + final DBOptions dbOptions = new DBOptions(options) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(dbOptions, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, cfHandles)) { + try (final WriteOptions writeOptions = new WriteOptions()) { + // Write given keys in given batches + for (int i = 0; i < batchKeys.length; i++) { + final WriteBatch batch = new WriteBatch(); + for (int j = 0; j < batchKeys[i].length; j++) { + batch.put(cfHandles.get(0), batchKeys[i][j], dummyString(1024)); + } + db.write(writeOptions, batch); + } + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + cfHandles.clear(); + } + } + + // Create a test filter that would apply wal_processing_option at the first + // record + final int applyOptionForRecordIndex = 1; + try (final TestableWalFilter walFilter = + new TestableWalFilter(option, 
               applyOptionForRecordIndex)) {
+
+        try (final Options options = optionsForLogIterTest();
+             final DBOptions dbOptions = new DBOptions(options)
+                 .setWalFilter(walFilter)) {
+
+          try (final RocksDB db = RocksDB.open(dbOptions,
+              dbFolder.getRoot().getAbsolutePath(),
+              cfDescriptors, cfHandles)) {
+
+            try {
+              assertThat(walFilter.logNumbers).isNotEmpty();
+              assertThat(walFilter.logFileNames).isNotEmpty();
+            } finally {
+              for (final ColumnFamilyHandle cfHandle : cfHandles) {
+                cfHandle.close();
+              }
+              cfHandles.clear();
+            }
+          } catch (final RocksDBException e) {
+            if (option != WalProcessingOption.CORRUPTED_RECORD) {
+              // exception is expected when CORRUPTED_RECORD!
+              throw e;
+            }
+          }
+        }
+      }
+    }
+  }
+
+
+  private static class TestableWalFilter extends AbstractWalFilter {
+    private final WalProcessingOption walProcessingOption;
+    private final int applyOptionForRecordIndex;
+    Map<Integer, Long> cfLognumber;
+    Map<String, Integer> cfNameId;
+    final List<Long> logNumbers = new ArrayList<>();
+    final List<String> logFileNames = new ArrayList<>();
+    private int currentRecordIndex = 0;
+
+    public TestableWalFilter(final WalProcessingOption walProcessingOption,
+        final int applyOptionForRecordIndex) {
+      super();
+      this.walProcessingOption = walProcessingOption;
+      this.applyOptionForRecordIndex = applyOptionForRecordIndex;
+    }
+
+    @Override
+    public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
+        final Map<String, Integer> cfNameId) {
+      this.cfLognumber = cfLognumber;
+      this.cfNameId = cfNameId;
+    }
+
+    @Override
+    public LogRecordFoundResult logRecordFound(
+        final long logNumber, final String logFileName, final WriteBatch batch,
+        final WriteBatch newBatch) {
+
+      logNumbers.add(logNumber);
+      logFileNames.add(logFileName);
+
+      final WalProcessingOption optionToReturn;
+      if (currentRecordIndex == applyOptionForRecordIndex) {
+        optionToReturn = walProcessingOption;
+      }
+      else {
+        optionToReturn = WalProcessingOption.CONTINUE_PROCESSING;
+      }
+
+      currentRecordIndex++;
+
+      return new LogRecordFoundResult(optionToReturn, false);
+    }
+
+    @Override
+    public String name() {
+      return "testable-wal-filter";
+    }
+  }
+}
diff --git a/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/java/src/test/java/org/rocksdb/WriteOptionsTest.java
index 27071e8f2..00c1d7239 100644
--- a/java/src/test/java/org/rocksdb/WriteOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/WriteOptionsTest.java
@@ -45,6 +45,11 @@ public class WriteOptionsTest {
     assertThat(writeOptions.noSlowdown()).isTrue();
     writeOptions.setNoSlowdown(false);
     assertThat(writeOptions.noSlowdown()).isFalse();
+
+    writeOptions.setLowPri(true);
+    assertThat(writeOptions.lowPri()).isTrue();
+    writeOptions.setLowPri(false);
+    assertThat(writeOptions.lowPri()).isFalse();
   }
 }
diff --git a/java/src/test/java/org/rocksdb/util/TestUtil.java b/java/src/test/java/org/rocksdb/util/TestUtil.java
new file mode 100644
index 000000000..12b3bbbbd
--- /dev/null
+++ b/java/src/test/java/org/rocksdb/util/TestUtil.java
@@ -0,0 +1,72 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.rocksdb.CompactionPriority;
+import org.rocksdb.Options;
+import org.rocksdb.WALRecoveryMode;
+
+import java.util.Random;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * General test utilities.
+ */
+public class TestUtil {
+
+  /**
+   * Get the options for log iteration tests.
+ * + * @return the options + */ + public static Options optionsForLogIterTest() { + return defaultOptions() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000); + } + + /** + * Get the default options. + * + * @return the options + */ + public static Options defaultOptions() { + return new Options() + .setWriteBufferSize(4090 * 4096) + .setTargetFileSizeBase(2 * 1024 * 1024) + .setMaxBytesForLevelBase(10 * 1024 * 1024) + .setMaxOpenFiles(5000) + .setWalRecoveryMode(WALRecoveryMode.TolerateCorruptedTailRecords) + .setCompactionPriority(CompactionPriority.ByCompensatedSize); + } + + private static final Random random = new Random(); + + /** + * Generate a random string of bytes. + * + * @param len the length of the string to generate. + * + * @return the random string of bytes + */ + public static byte[] dummyString(final int len) { + final byte[] str = new byte[len]; + random.nextBytes(str); + return str; + } + + /** + * Convert a UTF-8 String to a byte array. + * + * @param str the string + * + * @return the byte array. + */ + public static byte[] u(final String str) { + return str.getBytes(UTF_8); + } +} diff --git a/src.mk b/src.mk index 39ba3f99b..728332905 100644 --- a/src.mk +++ b/src.mk @@ -403,10 +403,13 @@ JNI_NATIVE_SOURCES = \ java/rocksjni/checkpoint.cc \ java/rocksjni/clock_cache.cc \ java/rocksjni/columnfamilyhandle.cc \ + java/rocksjni/compact_range_options.cc \ java/rocksjni/compaction_filter.cc \ java/rocksjni/compaction_filter_factory.cc \ java/rocksjni/compaction_filter_factory_jnicallback.cc \ - java/rocksjni/compact_range_options.cc \ + java/rocksjni/compaction_job_info.cc \ + java/rocksjni/compaction_job_stats.cc \ + java/rocksjni/compaction_options.cc \ java/rocksjni/compaction_options_fifo.cc \ java/rocksjni/compaction_options_universal.cc \ java/rocksjni/comparator.cc \ @@ -428,6 +431,7 @@ JNI_NATIVE_SOURCES = \ java/rocksjni/optimistic_transaction_options.cc \ java/rocksjni/options.cc \ java/rocksjni/options_util.cc \ + java/rocksjni/persistent_cache.cc \ java/rocksjni/ratelimiterjni.cc \ java/rocksjni/remove_emptyvalue_compactionfilterjni.cc \ java/rocksjni/cassandra_compactionfilterjni.cc \ @@ -443,6 +447,11 @@ JNI_NATIVE_SOURCES = \ java/rocksjni/statistics.cc \ java/rocksjni/statisticsjni.cc \ java/rocksjni/table.cc \ + java/rocksjni/table_filter.cc \ + java/rocksjni/table_filter_jnicallback.cc \ + java/rocksjni/thread_status.cc \ + java/rocksjni/trace_writer.cc \ + java/rocksjni/trace_writer_jnicallback.cc \ java/rocksjni/transaction.cc \ java/rocksjni/transaction_db.cc \ java/rocksjni/transaction_options.cc \ @@ -451,6 +460,8 @@ JNI_NATIVE_SOURCES = \ java/rocksjni/transaction_notifier.cc \ java/rocksjni/transaction_notifier_jnicallback.cc \ java/rocksjni/ttl.cc \ + java/rocksjni/wal_filter.cc \ + java/rocksjni/wal_filter_jnicallback.cc \ java/rocksjni/write_batch.cc \ java/rocksjni/writebatchhandlerjnicallback.cc \ java/rocksjni/write_batch_test.cc \
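A minimal sketch of wiring up the WAL filter API that this patch adds, using only the surface exercised by WalFilterTest and OptionsTest above (AbstractWalFilter, LogRecordFoundResult, WalProcessingOption, Options.setWalFilter). The database path, filter name, and no-op filter body below are illustrative assumptions, not part of the patch:

// WalFilterUsage.java -- usage sketch, not part of the patch.
import java.util.Map;
import org.rocksdb.*;

public class WalFilterUsage {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final AbstractWalFilter walFilter = new AbstractWalFilter() {
      @Override
      public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
          final Map<String, Integer> cfNameId) {
        // Called during recovery with the column family -> WAL number mapping.
      }

      @Override
      public LogRecordFoundResult logRecordFound(final long logNumber,
          final String logFileName, final WriteBatch batch,
          final WriteBatch newBatch) {
        // Keep every WAL record unchanged; newBatch is left untouched.
        return new LogRecordFoundResult(
            WalProcessingOption.CONTINUE_PROCESSING, false);
      }

      @Override
      public String name() {
        return "pass-through-wal-filter";  // illustrative name
      }
    };
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setWalFilter(walFilter)) {
      // illustrative path; the filter is consulted on WAL recovery at open
      try (final RocksDB db = RocksDB.open(options, "/tmp/wal-filter-example")) {
        db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
      }
    }
  }
}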