diff --git a/java/Makefile b/java/Makefile
index 5725c9920..33e47a8fd 100644
--- a/java/Makefile
+++ b/java/Makefile
@@ -6,10 +6,14 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
   org.rocksdb.BlockBasedTableConfig\
   org.rocksdb.BloomFilter\
   org.rocksdb.Checkpoint\
+  org.rocksdb.ClockCache\
   org.rocksdb.ColumnFamilyHandle\
   org.rocksdb.ColumnFamilyOptions\
+  org.rocksdb.CompactionOptionsFIFO\
+  org.rocksdb.CompactionOptionsUniversal\
   org.rocksdb.Comparator\
   org.rocksdb.ComparatorOptions\
+  org.rocksdb.CompressionOptions\
   org.rocksdb.DBOptions\
   org.rocksdb.DirectComparator\
   org.rocksdb.DirectSlice\
@@ -21,6 +25,7 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
   org.rocksdb.HashLinkedListMemTableConfig\
   org.rocksdb.HashSkipListMemTableConfig\
   org.rocksdb.Logger\
+  org.rocksdb.LRUCache\
   org.rocksdb.MergeOperator\
   org.rocksdb.Options\
   org.rocksdb.PlainTableConfig\
@@ -67,11 +72,17 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
   org.rocksdb.BlockBasedTableConfigTest\
   org.rocksdb.util.BytewiseComparatorTest\
   org.rocksdb.CheckPointTest\
+  org.rocksdb.ClockCacheTest\
   org.rocksdb.ColumnFamilyOptionsTest\
   org.rocksdb.ColumnFamilyTest\
+  org.rocksdb.CompactionOptionsFIFOTest\
+  org.rocksdb.CompactionOptionsUniversalTest\
+  org.rocksdb.CompactionPriorityTest\
+  org.rocksdb.CompactionStopStyleTest\
   org.rocksdb.ComparatorOptionsTest\
   org.rocksdb.ComparatorTest\
   org.rocksdb.CompressionOptionsTest\
+  org.rocksdb.CompressionTypesTest\
   org.rocksdb.DBOptionsTest\
   org.rocksdb.DirectComparatorTest\
   org.rocksdb.DirectSliceTest\
@@ -83,6 +94,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
   org.rocksdb.InfoLogLevelTest\
   org.rocksdb.KeyMayExistTest\
   org.rocksdb.LoggerTest\
+  org.rocksdb.LRUCacheTest\
   org.rocksdb.MemTableTest\
   org.rocksdb.MergeTest\
   org.rocksdb.MixedOptionsTest\
@@ -105,6 +117,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
   org.rocksdb.TransactionLogIteratorTest\
   org.rocksdb.TtlDBTest\
   org.rocksdb.StatisticsCollectorTest\
+  org.rocksdb.WALRecoveryModeTest\
   org.rocksdb.WriteBatchHandlerTest\
   org.rocksdb.WriteBatchTest\
   org.rocksdb.WriteBatchThreadedTest\
diff --git a/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java b/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
index 1708587da..899a79198 100644
--- a/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
+++ b/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
@@ -582,12 +582,6 @@ public class DbBenchmark {
         (Integer)flags_.get(Flag.level0_slowdown_writes_trigger));
     options.setLevelZeroFileNumCompactionTrigger(
         (Integer)flags_.get(Flag.level0_file_num_compaction_trigger));
-    options.setSoftRateLimit(
-        (Double)flags_.get(Flag.soft_rate_limit));
-    options.setHardRateLimit(
-        (Double)flags_.get(Flag.hard_rate_limit));
-    options.setRateLimitDelayMaxMilliseconds(
-        (Integer)flags_.get(Flag.rate_limit_delay_max_milliseconds));
     options.setMaxCompactionBytes(
         (Long) flags_.get(Flag.max_compaction_bytes));
     options.setDisableAutoCompactions(
diff --git a/java/rocksjni/backupablejni.cc b/java/rocksjni/backupablejni.cc
index 31bbcfd29..c31dbdbab 100644
--- a/java/rocksjni/backupablejni.cc
+++ b/java/rocksjni/backupablejni.cc
@@ -48,6 +48,18 @@ jstring Java_org_rocksdb_BackupableDBOptions_backupDir(
   return env->NewStringUTF(bopt->backup_dir.c_str());
 }
 
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setBackupEnv
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setBackupEnv(
+    JNIEnv* env, jobject jopt, jlong jhandle,
+    jlong jrocks_env_handle) {
+  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
+  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jrocks_env_handle);
+  bopt->backup_env = rocks_env;
+}
+
 /*
  * Class: org_rocksdb_BackupableDBOptions
  * Method: setShareTableFiles
@@ -70,6 +82,19 @@ jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles(
   return bopt->share_table_files;
 }
 
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setInfoLog
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setInfoLog(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jlogger_handle) {
+  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
+  auto* sptr_logger =
+      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(
+          jlogger_handle);
+  bopt->info_log = sptr_logger->get();
+}
+
 /*
  * Class: org_rocksdb_BackupableDBOptions
  * Method: setSync
@@ -158,6 +183,19 @@ jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit(
   return bopt->backup_rate_limit;
 }
 
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setBackupRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimiter(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
+  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
+  auto* sptr_rate_limiter =
+      reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(
+          jrate_limiter_handle);
+  bopt->backup_rate_limiter = *sptr_rate_limiter;
+}
+
 /*
  * Class: org_rocksdb_BackupableDBOptions
  * Method: setRestoreRateLimit
@@ -180,6 +218,19 @@ jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit(
   return bopt->restore_rate_limit;
 }
 
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setRestoreRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimiter(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
+  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
+  auto* sptr_rate_limiter =
+      reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(
+          jrate_limiter_handle);
+  bopt->restore_rate_limiter = *sptr_rate_limiter;
+}
+
 /*
  * Class: org_rocksdb_BackupableDBOptions
  * Method: setShareFilesWithChecksum
@@ -202,6 +253,53 @@ jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum(
   return bopt->share_files_with_checksum;
 }
 
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setMaxBackgroundOperations
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setMaxBackgroundOperations(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_operations) {
+  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
+  bopt->max_background_operations =
+      static_cast<int>(max_background_operations);
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: maxBackgroundOperations
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_BackupableDBOptions_maxBackgroundOperations(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
+  return static_cast<jint>(bopt->max_background_operations);
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setCallbackTriggerIntervalSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setCallbackTriggerIntervalSize(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jcallback_trigger_interval_size) {
+  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
+  bopt->callback_trigger_interval_size =
+      static_cast<uint64_t>(jcallback_trigger_interval_size);
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: callbackTriggerIntervalSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_BackupableDBOptions_callbackTriggerIntervalSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
+  return static_cast<jlong>(bopt->callback_trigger_interval_size);
+}
+
 /*
  * Class: org_rocksdb_BackupableDBOptions
  * Method: disposeInternal
diff --git a/java/rocksjni/clock_cache.cc b/java/rocksjni/clock_cache.cc
new file mode 100644
index 000000000..fa6cda27b
--- /dev/null
+++ b/java/rocksjni/clock_cache.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::ClockCache.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_ClockCache.h"
+#include "util/clock_cache.h"
+
+/*
+ * Class: org_rocksdb_ClockCache
+ * Method: newClockCache
+ * Signature: (JIZ)J
+ */
+jlong Java_org_rocksdb_ClockCache_newClockCache(
+    JNIEnv* env, jclass jcls, jlong jcapacity, jint jnum_shard_bits,
+    jboolean jstrict_capacity_limit) {
+  auto* sptr_clock_cache =
+      new std::shared_ptr<rocksdb::Cache>(rocksdb::NewClockCache(
+          static_cast<size_t>(jcapacity),
+          static_cast<int>(jnum_shard_bits),
+          static_cast<bool>(jstrict_capacity_limit)));
+  return reinterpret_cast<jlong>(sptr_clock_cache);
+}
+
+/*
+ * Class: org_rocksdb_ClockCache
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ClockCache_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* sptr_clock_cache =
+      reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jhandle);
+  delete sptr_clock_cache;  // delete std::shared_ptr
+}
diff --git a/java/rocksjni/compaction_options_fifo.cc b/java/rocksjni/compaction_options_fifo.cc
new file mode 100644
index 000000000..99a2847cb
--- /dev/null
+++ b/java/rocksjni/compaction_options_fifo.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompactionOptionsFIFO.
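(For orientation, a Java-side usage sketch for the class this file backs. The sketch is not part of the patch: it assumes the org.rocksdb.CompactionOptionsFIFO class added by this change is AutoCloseable like its sibling option classes, that CompactionStyle.FIFO and Options#setCompactionOptionsFIFO are available as the JNI methods below suggest, and the 1 GiB cap is an arbitrary example value.)

    try (final CompactionOptionsFIFO fifoOpts = new CompactionOptionsFIFO()) {
      // FIFO compaction drops the oldest table files once their total
      // size exceeds this cap.
      fifoOpts.setMaxTableFilesSize(1024L * 1024L * 1024L);
      try (final Options options = new Options()
               .setCompactionStyle(CompactionStyle.FIFO)
               .setCompactionOptionsFIFO(fifoOpts)) {
        // ... open the database with these options ...
      }
    }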
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionOptionsFIFO.h"
+#include "rocksdb/advanced_options.h"
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: newCompactionOptionsFIFO
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
+    JNIEnv* env, jclass jcls) {
+  const auto* opt = new rocksdb::CompactionOptionsFIFO();
+  return reinterpret_cast<jlong>(opt);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: setMaxTableFilesSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_table_files_size) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
+  opt->max_table_files_size = static_cast<uint64_t>(jmax_table_files_size);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: maxTableFilesSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
+  return static_cast<jlong>(opt->max_table_files_size);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  delete reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
+}
diff --git a/java/rocksjni/compaction_options_universal.cc b/java/rocksjni/compaction_options_universal.cc
new file mode 100644
index 000000000..74c02886e
--- /dev/null
+++ b/java/rocksjni/compaction_options_universal.cc
@@ -0,0 +1,194 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompactionOptionsUniversal.
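(Again a hedged Java-side sketch, not part of the patch: it assumes the new org.rocksdb.CompactionOptionsUniversal class uses the fluent, AutoCloseable setter style of its siblings, and that the CompactionStopStyle enum constant is named after its C++ counterpart; the specific values are illustrative only.)

    try (final CompactionOptionsUniversal univOpts = new CompactionOptionsUniversal()) {
      univOpts.setSizeRatio(1)                  // size-similarity tolerance, in percent
          .setMinMergeWidth(2)                  // merge at least two sorted runs at a time
          .setMaxSizeAmplificationPercent(200)  // tolerate up to 2x space amplification
          .setStopStyle(CompactionStopStyle.CompactionStopStyleTotalSize);
      try (final Options options = new Options()
               .setCompactionStyle(CompactionStyle.UNIVERSAL)
               .setCompactionOptionsUniversal(univOpts)) {
        // ... open the database with these options ...
      }
    }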
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionOptionsUniversal.h"
+#include "rocksdb/advanced_options.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: newCompactionOptionsUniversal
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
+    JNIEnv* env, jclass jcls) {
+  const auto* opt = new rocksdb::CompactionOptionsUniversal();
+  return reinterpret_cast<jlong>(opt);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setSizeRatio
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jsize_ratio) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  opt->size_ratio = static_cast<unsigned int>(jsize_ratio);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: sizeRatio
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  return static_cast<jint>(opt->size_ratio);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setMinMergeWidth
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jmin_merge_width) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  opt->min_merge_width = static_cast<unsigned int>(jmin_merge_width);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: minMergeWidth
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  return static_cast<jint>(opt->min_merge_width);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setMaxMergeWidth
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_merge_width) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  opt->max_merge_width = static_cast<unsigned int>(jmax_merge_width);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: maxMergeWidth
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  return static_cast<jint>(opt->max_merge_width);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setMaxSizeAmplificationPercent
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmax_size_amplification_percent) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  opt->max_size_amplification_percent =
+      static_cast<unsigned int>(jmax_size_amplification_percent);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: maxSizeAmplificationPercent
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  return static_cast<jint>(opt->max_size_amplification_percent);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setCompressionSizePercent
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jcompression_size_percent) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  opt->compression_size_percent =
+      static_cast<int>(jcompression_size_percent);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: compressionSizePercent
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  return static_cast<jint>(opt->compression_size_percent);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setStopStyle
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jstop_style_value) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  opt->stop_style =
+      rocksdb::CompactionStopStyleJni::toCppCompactionStopStyle(
+          jstop_style_value);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: stopStyle
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  return rocksdb::CompactionStopStyleJni::toJavaCompactionStopStyle(
+      opt->stop_style);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setAllowTrivialMove
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_trivial_move) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  opt->allow_trivial_move = static_cast<bool>(jallow_trivial_move);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: allowTrivialMove
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+  return opt->allow_trivial_move;
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  delete reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
+}
diff --git a/java/rocksjni/compression_options.cc b/java/rocksjni/compression_options.cc
new file mode 100644
index 000000000..a1c2aa154
--- /dev/null
+++ b/java/rocksjni/compression_options.cc
@@ -0,0 +1,121 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompressionOptions.
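(A hedged Java-side sketch of these compression knobs, not part of the patch: it assumes fluent setters on the new org.rocksdb.CompressionOptions class and the existing CompressionType enum; the first three values mirror the C++ defaults, while the dictionary size is an arbitrary example.)

    try (final CompressionOptions compressionOpts = new CompressionOptions()
             .setWindowBits(-14)           // zlib window bits; negative selects raw deflate
             .setLevel(-1)                 // library-default compression level
             .setStrategy(0)               // default zlib strategy
             .setMaxDictBytes(16 * 1024);  // enable a 16 KiB compression dictionary
         final Options options = new Options()
             .setCompressionType(CompressionType.ZLIB_COMPRESSION)
             .setCompressionOptions(compressionOpts)) {
      // ... open the database with these options ...
    }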
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompressionOptions.h"
+#include "rocksdb/advanced_options.h"
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: newCompressionOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
+    JNIEnv* env, jclass jcls) {
+  const auto* opt = new rocksdb::CompressionOptions();
+  return reinterpret_cast<jlong>(opt);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setWindowBits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setWindowBits(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jwindow_bits) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  opt->window_bits = static_cast<int>(jwindow_bits);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: windowBits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_windowBits(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  return static_cast<jint>(opt->window_bits);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setLevel
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jlevel) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  opt->level = static_cast<int>(jlevel);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: level
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_level(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  return static_cast<jint>(opt->level);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setStrategy
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setStrategy(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jstrategy) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  opt->strategy = static_cast<int>(jstrategy);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: strategy
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_strategy(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  return static_cast<jint>(opt->strategy);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setMaxDictBytes
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_dict_bytes) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  opt->max_dict_bytes = static_cast<uint32_t>(jmax_dict_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: maxDictBytes
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_maxDictBytes(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+  return static_cast<jint>(opt->max_dict_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompressionOptions_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  delete reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+}
diff --git a/java/rocksjni/lru_cache.cc b/java/rocksjni/lru_cache.cc
new file mode 100644
index 000000000..5157c2a07
--- /dev/null
+++ b/java/rocksjni/lru_cache.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::LRUCache.
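(The cache classes follow the same pattern; a minimal Java-side sketch, not part of the patch: it assumes the LRUCache constructor mirrors the (JIZD) signature of newLRUCache below and that Options#setRowCache accepts any org.rocksdb.Cache, as the JNI in this change suggests. Substituting a ClockCache here would exercise the clock_cache.cc bridge above instead.)

    try (final Cache cache = new LRUCache(
             64 * 1024 * 1024,   // 64 MiB capacity
             6,                  // 2^6 = 64 shards
             false,              // no strict capacity limit
             0.1);               // 10% of capacity reserved for high-priority entries
         final Options options = new Options().setRowCache(cache)) {
      // ... open the database with these options ...
    }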
+
+#include <jni.h>
+
+#include "include/org_rocksdb_LRUCache.h"
+#include "util/lru_cache.h"
+
+/*
+ * Class: org_rocksdb_LRUCache
+ * Method: newLRUCache
+ * Signature: (JIZD)J
+ */
+jlong Java_org_rocksdb_LRUCache_newLRUCache(
+    JNIEnv* env, jclass jcls, jlong jcapacity, jint jnum_shard_bits,
+    jboolean jstrict_capacity_limit, jdouble jhigh_pri_pool_ratio) {
+  auto* sptr_lru_cache =
+      new std::shared_ptr<rocksdb::Cache>(rocksdb::NewLRUCache(
+          static_cast<size_t>(jcapacity),
+          static_cast<int>(jnum_shard_bits),
+          static_cast<bool>(jstrict_capacity_limit),
+          static_cast<double>(jhigh_pri_pool_ratio)));
+  return reinterpret_cast<jlong>(sptr_lru_cache);
+}
+
+/*
+ * Class: org_rocksdb_LRUCache
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_LRUCache_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* sptr_lru_cache =
+      reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jhandle);
+  delete sptr_lru_cache;  // delete std::shared_ptr
+}
diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc
index cf9f30199..4999c3f2f 100644
--- a/java/rocksjni/options.cc
+++ b/java/rocksjni/options.cc
@@ -9,6 +9,7 @@
 #include <jni.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <vector>
 
 #include "include/org_rocksdb_Options.h"
 #include "include/org_rocksdb_DBOptions.h"
@@ -75,7 +76,7 @@ void Java_org_rocksdb_Options_disposeInternal(
  * Signature: (JI)V
  */
 void Java_org_rocksdb_Options_setIncreaseParallelism(
-    JNIEnv * evnv, jobject jobj, jlong jhandle, jint totalThreads) {
+    JNIEnv * env, jobject jobj, jlong jhandle, jint totalThreads) {
   reinterpret_cast<rocksdb::Options*>
       (jhandle)->IncreaseParallelism(static_cast<int>(totalThreads));
 }
@@ -350,6 +351,28 @@ void Java_org_rocksdb_Options_setMaxOpenFiles(
       static_cast<int>(max_open_files);
 }
 
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxFileOpeningThreads
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxFileOpeningThreads(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_file_opening_threads) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->max_file_opening_threads =
+      static_cast<int>(jmax_file_opening_threads);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxFileOpeningThreads
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxFileOpeningThreads(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jint>(opt->max_file_opening_threads);
+}
+
 /*
  * Class: org_rocksdb_Options
  * Method: useFsync
@@ -371,6 +394,106 @@ void Java_org_rocksdb_Options_setUseFsync(
       static_cast<bool>(use_fsync);
 }
 
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDbPaths
+ * Signature: (J[Ljava/lang/String;[J)V
+ */
+void Java_org_rocksdb_Options_setDbPaths(
+    JNIEnv* env, jobject jobj, jlong jhandle, jobjectArray jpaths,
+    jlongArray jtarget_sizes) {
+  std::vector<rocksdb::DbPath> db_paths;
+  jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
+  if(ptr_jtarget_size == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  jboolean has_exception = JNI_FALSE;
+  const jsize len = env->GetArrayLength(jpaths);
+  for(jsize i = 0; i < len; i++) {
+    jobject jpath = reinterpret_cast<jstring>(env->
+        GetObjectArrayElement(jpaths, i));
+    if(env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->ReleaseLongArrayElements(
+          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+    std::string path = rocksdb::JniUtil::copyString(
+        env, static_cast<jstring>(jpath), &has_exception);
+    env->DeleteLocalRef(jpath);
+
+    if(has_exception == JNI_TRUE) {
+      env->ReleaseLongArrayElements(
+          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+
+    jlong jtarget_size = ptr_jtarget_size[i];
+
+    db_paths.push_back(
+        rocksdb::DbPath(path, static_cast<uint64_t>(jtarget_size)));
+  }
+
+  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->db_paths = db_paths;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: dbPathsLen
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_dbPathsLen(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opt->db_paths.size());
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: dbPaths
+ * Signature: (J[Ljava/lang/String;[J)V
+ */
+void Java_org_rocksdb_Options_dbPaths(
+    JNIEnv* env, jobject jobj, jlong jhandle, jobjectArray jpaths,
+    jlongArray jtarget_sizes) {
+  jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
+  if(ptr_jtarget_size == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  const jsize len = env->GetArrayLength(jpaths);
+  for(jsize i = 0; i < len; i++) {
+    rocksdb::DbPath db_path = opt->db_paths[i];
+
+    jstring jpath = env->NewStringUTF(db_path.path.c_str());
+    if(jpath == nullptr) {
+      // exception thrown: OutOfMemoryError
+      env->ReleaseLongArrayElements(
+          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+    env->SetObjectArrayElement(jpaths, i, jpath);
+    if(env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->DeleteLocalRef(jpath);
+      env->ReleaseLongArrayElements(
+          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+
+    ptr_jtarget_size[i] = static_cast<jlong>(db_path.target_size);
+  }
+
+  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_COMMIT);
+}
+
 /*
  * Class: org_rocksdb_Options
  * Method: dbLogDir
@@ -615,23 +738,23 @@ void Java_org_rocksdb_Options_setKeepLogFileNum(
 
 /*
  * Class: org_rocksdb_Options
- * Method: recycleLogFiles
+ * Method: recycleLogFileNum
  * Signature: (J)J
  */
-jlong Java_org_rocksdb_Options_recycleLogFileNum(JNIEnv* env, jobject jobj,
-                                                 jlong jhandle) {
+jlong Java_org_rocksdb_Options_recycleLogFileNum(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)->recycle_log_file_num;
 }
 
 /*
  * Class: org_rocksdb_Options
- * Method: setRecycleLogFiles
+ * Method: setRecycleLogFileNum
  * Signature: (JJ)V
  */
-void Java_org_rocksdb_Options_setRecycleLogFiles(JNIEnv* env, jobject jobj,
-                                                 jlong jhandle,
-                                                 jlong recycle_log_file_num) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num);
+void Java_org_rocksdb_Options_setRecycleLogFileNum(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong recycle_log_file_num) {
+  rocksdb::Status s =
+      rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num);
   if (s.ok()) {
     reinterpret_cast<rocksdb::Options*>(jhandle)->recycle_log_file_num =
         recycle_log_file_num;
@@ -949,6 +1072,28 @@ void Java_org_rocksdb_Options_setUseDirectWrites(JNIEnv* env, jobject jobj,
       static_cast<bool>(use_direct_writes);
 }
 
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllowFAllocate
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllowFAllocate(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_fallocate) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->allow_fallocate =
+      static_cast<bool>(jallow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allowFAllocate
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allowFAllocate(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->allow_fallocate);
+}
+
 /*
  * Class: org_rocksdb_Options
  * Method: isFdCloseOnExec
@@ -1012,6 +1157,147 @@ void Java_org_rocksdb_Options_setAdviseRandomOnOpen(
       static_cast<bool>(advise_random_on_open);
 }
 
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDbWriteBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setDbWriteBufferSize(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jdb_write_buffer_size) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->db_write_buffer_size = static_cast<size_t>(jdb_write_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: dbWriteBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_dbWriteBufferSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opt->db_write_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAccessHintOnCompactionStart
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setAccessHintOnCompactionStart(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jaccess_hint_value) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->access_hint_on_compaction_start =
+      rocksdb::AccessHintJni::toCppAccessHint(jaccess_hint_value);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: accessHintOnCompactionStart
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return rocksdb::AccessHintJni::toJavaAccessHint(
+      opt->access_hint_on_compaction_start);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setNewTableReaderForCompactionInputs
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setNewTableReaderForCompactionInputs(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean jnew_table_reader_for_compaction_inputs) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->new_table_reader_for_compaction_inputs =
+      static_cast<bool>(jnew_table_reader_for_compaction_inputs);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: newTableReaderForCompactionInputs
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_newTableReaderForCompactionInputs(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->new_table_reader_for_compaction_inputs);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionReadaheadSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompactionReadaheadSize(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jcompaction_readahead_size) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->compaction_readahead_size =
+      static_cast<size_t>(jcompaction_readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compactionReadaheadSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_compactionReadaheadSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opt->compaction_readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setRandomAccessMaxBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRandomAccessMaxBufferSize(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jrandom_access_max_buffer_size) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->random_access_max_buffer_size =
+      static_cast<size_t>(jrandom_access_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: randomAccessMaxBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opt->random_access_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWritableFileMaxBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWritableFileMaxBufferSize(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jwritable_file_max_buffer_size) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->writable_file_max_buffer_size =
+      static_cast<size_t>(jwritable_file_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: writableFileMaxBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opt->writable_file_max_buffer_size);
+}
+
 /*
  * Class: org_rocksdb_Options
  * Method: useAdaptiveMutex
@@ -1054,6 +1340,73 @@ void Java_org_rocksdb_Options_setBytesPerSync(
       static_cast<int64_t>(bytes_per_sync);
 }
 
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWalBytesPerSync
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalBytesPerSync(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jwal_bytes_per_sync) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->wal_bytes_per_sync =
+      static_cast<int64_t>(jwal_bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: walBytesPerSync
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_walBytesPerSync(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opt->wal_bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setEnableThreadTracking
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setEnableThreadTracking(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean jenable_thread_tracking) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->enable_thread_tracking = static_cast<bool>(jenable_thread_tracking);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: enableThreadTracking
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_enableThreadTracking(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->enable_thread_tracking);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDelayedWriteRate
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setDelayedWriteRate(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jdelayed_write_rate) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: delayedWriteRate
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_delayedWriteRate(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opt->delayed_write_rate);
+}
+
 /*
  * Class: org_rocksdb_Options
  * Method: setAllowConcurrentMemtableWrite
@@ -1143,52 +1496,226 @@ jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(
 }
 
 /*
- * Method: tableFactoryName
- * Signature: (J)Ljava/lang/String
+ * Class: org_rocksdb_Options
+ * Method: setSkipStatsUpdateOnDbOpen
+ * Signature: (JZ)V
  */
-jstring Java_org_rocksdb_Options_tableFactoryName(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
+void Java_org_rocksdb_Options_setSkipStatsUpdateOnDbOpen(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean jskip_stats_update_on_db_open) {
   auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  rocksdb::TableFactory* tf = opt->table_factory.get();
-
-  // Should never be nullptr.
-  // Default memtable factory is SkipListFactory
-  assert(tf);
-
-  return env->NewStringUTF(tf->Name());
+  opt->skip_stats_update_on_db_open =
+      static_cast<bool>(jskip_stats_update_on_db_open);
 }
 
-
 /*
  * Class: org_rocksdb_Options
- * Method: minWriteBufferNumberToMerge
- * Signature: (J)I
+ * Method: skipStatsUpdateOnDbOpen
+ * Signature: (J)Z
  */
-jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(
+jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(
     JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->min_write_buffer_number_to_merge;
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->skip_stats_update_on_db_open);
 }
 
 /*
  * Class: org_rocksdb_Options
- * Method: setMinWriteBufferNumberToMerge
- * Signature: (JI)V
+ * Method: setWalRecoveryMode
+ * Signature: (JB)V
  */
-void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jmin_write_buffer_number_to_merge) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->min_write_buffer_number_to_merge =
-          static_cast<int>(jmin_write_buffer_number_to_merge);
+void Java_org_rocksdb_Options_setWalRecoveryMode(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jwal_recovery_mode_value) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->wal_recovery_mode =
+      rocksdb::WALRecoveryModeJni::toCppWALRecoveryMode(
+          jwal_recovery_mode_value);
 }
+
 /*
  * Class: org_rocksdb_Options
- * Method: maxWriteBufferNumberToMaintain
- * Signature: (J)I
+ * Method: walRecoveryMode
+ * Signature: (J)B
  */
-jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(JNIEnv* env,
-                                                             jobject jobj,
+jbyte Java_org_rocksdb_Options_walRecoveryMode(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return rocksdb::WALRecoveryModeJni::toJavaWALRecoveryMode(
+      opt->wal_recovery_mode);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllow2pc
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllow2pc(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_2pc) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->allow_2pc = static_cast<bool>(jallow_2pc);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allow2pc
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allow2pc(JNIEnv* env, jobject jobj,
+    jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->allow_2pc);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setRowCache
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRowCache(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrow_cache_handle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  auto* row_cache =
+      reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(jrow_cache_handle);
+  opt->row_cache = *row_cache;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setFailIfOptionsFileError
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setFailIfOptionsFileError(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean jfail_if_options_file_error) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->fail_if_options_file_error =
+      static_cast<bool>(jfail_if_options_file_error);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: failIfOptionsFileError
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_failIfOptionsFileError(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->fail_if_options_file_error);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDumpMallocStats
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setDumpMallocStats(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jdump_malloc_stats) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: dumpMallocStats
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_dumpMallocStats(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->dump_malloc_stats);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAvoidFlushDuringRecovery
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAvoidFlushDuringRecovery(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean javoid_flush_during_recovery) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->avoid_flush_during_recovery =
+      static_cast<bool>(javoid_flush_during_recovery);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: avoidFlushDuringRecovery
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->avoid_flush_during_recovery);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAvoidFlushDuringShutdown
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAvoidFlushDuringShutdown(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean javoid_flush_during_shutdown) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opt->avoid_flush_during_shutdown =
+      static_cast<bool>(javoid_flush_during_shutdown);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: avoidFlushDuringShutdown
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_avoidFlushDuringShutdown(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opt->avoid_flush_during_shutdown);
+}
+
+/*
+ * Method: tableFactoryName
+ * Signature: (J)Ljava/lang/String
+ */
+jstring Java_org_rocksdb_Options_tableFactoryName(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  rocksdb::TableFactory* tf = opt->table_factory.get();
+
+  // Should never be nullptr.
+  // Default memtable factory is SkipListFactory
+  assert(tf);
+
+  return env->NewStringUTF(tf->Name());
+}
+
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: minWriteBufferNumberToMerge
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->min_write_buffer_number_to_merge;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMinWriteBufferNumberToMerge
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmin_write_buffer_number_to_merge) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->min_write_buffer_number_to_merge =
+          static_cast<int>(jmin_write_buffer_number_to_merge);
+}
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxWriteBufferNumberToMaintain
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(JNIEnv* env,
+                                                             jobject jobj,
     jlong jhandle) {
   return reinterpret_cast<rocksdb::Options*>(jhandle)
       ->max_write_buffer_number_to_maintain;
@@ -1213,9 +1740,10 @@ void Java_org_rocksdb_Options_setMaxWriteBufferNumberToMaintain(
  * Signature: (JB)V
  */
 void Java_org_rocksdb_Options_setCompressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte compression) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->compression =
-      static_cast<rocksdb::CompressionType>(compression);
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jcompression_type_value) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType(
+      jcompression_type_value);
 }
 
 /*
@@ -1225,7 +1753,9 @@ void Java_org_rocksdb_Options_setCompressionType(
  */
 jbyte Java_org_rocksdb_Options_compressionType(
     JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->compression;
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return rocksdb::CompressionTypeJni::toJavaCompressionType(
+      opts->compression);
 }
 
 /**
@@ -1332,6 +1862,44 @@ jbyteArray Java_org_rocksdb_Options_compressionPerLevel(
       options->compression_per_level);
 }
 
+/*
+ * Class: org_rocksdb_Options
+ * Method: setBottommostCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setBottommostCompressionType(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jcompression_type_value) {
+  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+  options->bottommost_compression =
+      rocksdb::CompressionTypeJni::toCppCompressionType(
+          jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: bottommostCompressionType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_bottommostCompressionType(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return rocksdb::CompressionTypeJni::toJavaCompressionType(
+      options->bottommost_compression);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompressionOptions
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompressionOptions(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jcompression_options_handle) {
+  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+  auto* compression_options =
+      reinterpret_cast<rocksdb::CompressionOptions*>(
+          jcompression_options_handle);
+  options->compression_opts = *compression_options;
+}
 
 /*
  * Class: org_rocksdb_Options
@@ -1607,72 +2175,6 @@ void Java_org_rocksdb_Options_setMaxCompactionBytes(
       static_cast<uint64_t>(jmax_compaction_bytes);
 }
 
-/*
- * Class: org_rocksdb_Options
- * Method: softRateLimit
- * Signature: (J)D
- */
-jdouble Java_org_rocksdb_Options_softRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->soft_rate_limit;
-}
-
-/*
- * Class: org_rocksdb_Options
- * Method: setSoftRateLimit
- * Signature: (JD)V
- */
-void Java_org_rocksdb_Options_setSoftRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jdouble jsoft_rate_limit) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->soft_rate_limit =
-      static_cast<double>(jsoft_rate_limit);
-}
-
-/*
- * Class: org_rocksdb_Options
- * Method: hardRateLimit
- * Signature: (J)D
- */
-jdouble Java_org_rocksdb_Options_hardRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->hard_rate_limit;
-}
-
-/*
- * Class: org_rocksdb_Options
- * Method: setHardRateLimit
- * Signature: (JD)V
- */
-void Java_org_rocksdb_Options_setHardRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jdouble jhard_rate_limit) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->hard_rate_limit =
-      static_cast<double>(jhard_rate_limit);
-}
-
-/*
- * Class: org_rocksdb_Options
- * Method: rateLimitDelayMaxMilliseconds
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_rateLimitDelayMaxMilliseconds(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->rate_limit_delay_max_milliseconds;
-}
-
-/*
- * Class: org_rocksdb_Options
- * Method: setRateLimitDelayMaxMilliseconds
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setRateLimitDelayMaxMilliseconds(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jrate_limit_delay_max_milliseconds) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->rate_limit_delay_max_milliseconds =
-          static_cast<int>(jrate_limit_delay_max_milliseconds);
-}
-
 /*
  * Class: org_rocksdb_Options
  * Method: arenaBlockSize
@@ -1723,30 +2225,6 @@ void Java_org_rocksdb_Options_setDisableAutoCompactions(
       static_cast<bool>(jdisable_auto_compactions);
 }
 
-/*
- * Class: org_rocksdb_Options
- * Method: purgeRedundantKvsWhileFlush
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_purgeRedundantKvsWhileFlush(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->purge_redundant_kvs_while_flush;
-}
-
-/*
- * Class: org_rocksdb_Options
- * Method: setPurgeRedundantKvsWhileFlush
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setPurgeRedundantKvsWhileFlush(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jpurge_redundant_kvs_while_flush) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->purge_redundant_kvs_while_flush =
-          static_cast<bool>(jpurge_redundant_kvs_while_flush);
-}
-
 /*
  * Class: org_rocksdb_Options
  * Method: maxSequentialSkipInIterations
@@ -1923,6 +2401,17 @@ void Java_org_rocksdb_Options_setOptimizeFiltersForHits(
 }
 
 /*
+ * Class: org_rocksdb_Options
+ * Method: optimizeForSmallDb
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Options_optimizeForSmallDb(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->OptimizeForSmallDb();
+}
+
+/*
+ * Class: org_rocksdb_Options
  * Method: optimizeForPointLookup
  * Signature: (JJ)V
  */
@@ -1934,6 +2423,7 @@ void Java_org_rocksdb_Options_optimizeForPointLookup(
 }
 
 /*
+ * Class: org_rocksdb_Options
  * Method: optimizeLevelStyleCompaction
  * Signature: (JJ)V
  */
@@ -2203,34 +2693,133 @@ void Java_org_rocksdb_Options_setParanoidFileChecks(
       static_cast<bool>(jparanoid_file_checks);
 }
 
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::ColumnFamilyOptions
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setCompactionPriority(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jbyte jcompaction_priority_value) {
+  auto* opts =
+      reinterpret_cast<rocksdb::Options*>(jhandle);
+  opts->compaction_pri =
+      rocksdb::CompactionPriorityJni::toCppCompactionPriority(jcompaction_priority_value);
+}
 
 /*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: newColumnFamilyOptions
- * Signature: ()J
+ * Class: org_rocksdb_Options
+ * Method: compactionPriority
+ * Signature: (J)B
  */
-jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
-    JNIEnv* env, jclass jcls) {
-  auto* op = new rocksdb::ColumnFamilyOptions();
-  return reinterpret_cast<jlong>(op);
+jbyte Java_org_rocksdb_Options_compactionPriority(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return rocksdb::CompactionPriorityJni::toJavaCompactionPriority(
+      opts->compaction_pri);
 }
 
 /*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: getColumnFamilyOptionsFromProps
- * Signature: (Ljava/util/String;)J
+ * Class: org_rocksdb_Options
+ * Method: setReportBgIoStats
+ * Signature: (JZ)V
  */
-jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
-    JNIEnv* env, jclass jclazz, jstring jopt_string) {
-  const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
-  if(opt_string == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
+void Java_org_rocksdb_Options_setReportBgIoStats(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jreport_bg_io_stats) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
+}
 
-  auto* cf_options = new rocksdb::ColumnFamilyOptions();
+/*
+ * Class: org_rocksdb_Options
+ * Method: reportBgIoStats
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_reportBgIoStats(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opts->report_bg_io_stats);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionOptionsUniversal
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompactionOptionsUniversal(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jcompaction_options_universal_handle) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  auto* opts_uni =
+      reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(
+          jcompaction_options_universal_handle);
+  opts->compaction_options_universal = *opts_uni;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionOptionsFIFO
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompactionOptionsFIFO(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jcompaction_options_fifo_handle) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  auto* opts_fifo =
+      reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(
+          jcompaction_options_fifo_handle);
+  opts->compaction_options_fifo = *opts_fifo;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setForceConsistencyChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setForceConsistencyChecks(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean jforce_consistency_checks) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opts->force_consistency_checks =
+      static_cast<bool>(jforce_consistency_checks);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: forceConsistencyChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_forceConsistencyChecks(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jboolean>(opts->force_consistency_checks);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::ColumnFamilyOptions
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: newColumnFamilyOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
+    JNIEnv* env, jclass jcls) {
+  auto* op = new rocksdb::ColumnFamilyOptions();
+  return reinterpret_cast<jlong>(op);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: getColumnFamilyOptionsFromProps
+ * Signature: (Ljava/util/String;)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
+    JNIEnv* env, jclass jclazz, jstring jopt_string) {
+  const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
+  if(opt_string == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return 0;
+  }
+
+  auto* cf_options = new rocksdb::ColumnFamilyOptions();
   rocksdb::Status status = rocksdb::GetColumnFamilyOptionsFromString(
       rocksdb::ColumnFamilyOptions(), opt_string, cf_options);
@@ -2260,6 +2849,17 @@ void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(
   delete cfo;
 }
 
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeForSmallDb
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      OptimizeForSmallDb();
+}
+
 /*
  * Class: org_rocksdb_ColumnFamilyOptions
  * Method: optimizeForPointLookup
@@ -2556,9 +3156,10 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumberToMaintain(
  * Signature: (JB)V
  */
 void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte compression) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      compression = static_cast<rocksdb::CompressionType>(compression);
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jcompression_type_value) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  cf_opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType(
+      jcompression_type_value);
 }
 
 /*
@@ -2568,8 +3169,9 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType(
  */
 jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(
     JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      compression;
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  return rocksdb::CompressionTypeJni::toJavaCompressionType(
+      cf_opts->compression);
 }
 
 /*
@@ -2602,6 +3204,45 @@ jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
       cf_options->compression_per_level);
 }
 
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setBottommostCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionType(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jcompression_type_value) {
+  auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  cf_options->bottommost_compression =
+      rocksdb::CompressionTypeJni::toCppCompressionType(
+          jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: bottommostCompressionType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_ColumnFamilyOptions_bottommostCompressionType(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  return rocksdb::CompressionTypeJni::toJavaCompressionType(
+      cf_options->bottommost_compression);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompressionOptions
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompressionOptions(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jcompression_options_handle) {
+  auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  auto* compression_options =
+      reinterpret_cast<rocksdb::CompressionOptions*>(
+          jcompression_options_handle);
+  cf_options->compression_opts = *compression_options;
+}
+
 /*
  * Class: org_rocksdb_ColumnFamilyOptions
  * Method: setCompactionStyle
@@ -2738,26 +3379,6 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger(
       jlevel0_stop_writes_trigger);
 }
 
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: maxMemCompactionLevel
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_maxMemCompactionLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return 0;  // deprecated and intentionally not implemented, see the Java code
-}
-
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: setMaxMemCompactionLevel
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxMemCompactionLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_mem_compaction_level) {
-  // deprecated and intentionally not implemented, see the Java code
-}
-
 /*
  * Class: org_rocksdb_ColumnFamilyOptions
  * Method: targetFileSizeBase
@@ -2901,74 +3522,6 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxCompactionBytes(
       ->max_compaction_bytes = static_cast<uint64_t>(jmax_compaction_bytes);
 }
 
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: softRateLimit
- * Signature: (J)D
- */
-jdouble Java_org_rocksdb_ColumnFamilyOptions_softRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      soft_rate_limit;
-}
-
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: setSoftRateLimit
- * Signature: (JD)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setSoftRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jdouble jsoft_rate_limit) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->soft_rate_limit =
-      static_cast<double>(jsoft_rate_limit);
-}
-
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: hardRateLimit
- * Signature: (J)D
- */
-jdouble Java_org_rocksdb_ColumnFamilyOptions_hardRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      hard_rate_limit;
-}
-
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: setHardRateLimit
- * Signature: (JD)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setHardRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jdouble jhard_rate_limit) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->hard_rate_limit =
-      static_cast<double>(jhard_rate_limit);
-}
-
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: rateLimitDelayMaxMilliseconds
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_rateLimitDelayMaxMilliseconds(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->rate_limit_delay_max_milliseconds;
-}
-
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: setRateLimitDelayMaxMilliseconds
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setRateLimitDelayMaxMilliseconds(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jrate_limit_delay_max_milliseconds) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->rate_limit_delay_max_milliseconds =
-          static_cast<int>(jrate_limit_delay_max_milliseconds);
-}
-
 /*
  * Class: org_rocksdb_ColumnFamilyOptions
  * Method: arenaBlockSize
@@ -3020,30 +3573,6 @@ void Java_org_rocksdb_ColumnFamilyOptions_setDisableAutoCompactions(
       static_cast<bool>(jdisable_auto_compactions);
 }
 
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: purgeRedundantKvsWhileFlush
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ColumnFamilyOptions_purgeRedundantKvsWhileFlush(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->purge_redundant_kvs_while_flush;
-}
-
-/*
- * Class: org_rocksdb_ColumnFamilyOptions
- * Method: setPurgeRedundantKvsWhileFlush
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setPurgeRedundantKvsWhileFlush(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jpurge_redundant_kvs_while_flush) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->purge_redundant_kvs_while_flush =
-          static_cast<bool>(jpurge_redundant_kvs_while_flush);
-}
-
 /*
  * Class: org_rocksdb_ColumnFamilyOptions
  * Method: maxSequentialSkipInIterations
@@ -3455,6 +3984,104 @@ void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks(
       static_cast<bool>(jparanoid_file_checks);
 }
 
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionPriority(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jbyte jcompaction_priority_value) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  cf_opts->compaction_pri =
+      rocksdb::CompactionPriorityJni::toCppCompactionPriority(jcompaction_priority_value);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: compactionPriority
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  return rocksdb::CompactionPriorityJni::toJavaCompactionPriority(
+      cf_opts->compaction_pri);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setReportBgIoStats
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setReportBgIoStats(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jreport_bg_io_stats) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  cf_opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: reportBgIoStats
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_reportBgIoStats(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  return static_cast<jboolean>(cf_opts->report_bg_io_stats);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionOptionsUniversal
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsUniversal(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jcompaction_options_universal_handle) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  auto* opts_uni =
+      reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(
+          jcompaction_options_universal_handle);
+  cf_opts->compaction_options_universal = *opts_uni;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionOptionsFIFO
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsFIFO(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jcompaction_options_fifo_handle) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  auto* opts_fifo =
+      reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(
+          jcompaction_options_fifo_handle);
+  cf_opts->compaction_options_fifo = *opts_fifo;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setForceConsistencyChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setForceConsistencyChecks(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean jforce_consistency_checks) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  cf_opts->force_consistency_checks =
+      static_cast<bool>(jforce_consistency_checks);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: forceConsistencyChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_forceConsistencyChecks(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  return
static_cast(cf_opts->force_consistency_checks); +} ///////////////////////////////////////////////////////////////////// // rocksdb::DBOptions @@ -3513,6 +4140,27 @@ void Java_org_rocksdb_DBOptions_disposeInternal( delete dbo; } +/* + * Class: org_rocksdb_DBOptions + * Method: optimizeForSmallDb + * Signature: (J)V + */ +void Java_org_rocksdb_DBOptions_optimizeForSmallDb( + JNIEnv* env, jobject jobj, jlong jhandle) { + reinterpret_cast(jhandle)->OptimizeForSmallDb(); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setEnv + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setEnv( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jenv_handle) { + reinterpret_cast(jhandle)->env = + reinterpret_cast(jenv_handle); +} + /* * Class: org_rocksdb_DBOptions * Method: setIncreaseParallelism @@ -3702,6 +4350,28 @@ jint Java_org_rocksdb_DBOptions_maxOpenFiles( return reinterpret_cast(jhandle)->max_open_files; } +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxFileOpeningThreads + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxFileOpeningThreads( + JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_file_opening_threads) { + reinterpret_cast(jhandle)->max_file_opening_threads = + static_cast(jmax_file_opening_threads); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxFileOpeningThreads + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->max_file_opening_threads); +} + /* * Class: org_rocksdb_DBOptions * Method: createStatistics @@ -3748,14 +4418,114 @@ jboolean Java_org_rocksdb_DBOptions_useFsync( /* * Class: org_rocksdb_DBOptions - * Method: setDbLogDir - * Signature: (JLjava/lang/String)V - */ -void Java_org_rocksdb_DBOptions_setDbLogDir( - JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) { - const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr); - if(log_dir == nullptr) { - // exception thrown: OutOfMemoryError + * Method: setDbPaths + * Signature: (J[Ljava/lang/String;[J)V + */ +void Java_org_rocksdb_DBOptions_setDbPaths( + JNIEnv* env, jobject jobj, jlong jhandle, jobjectArray jpaths, + jlongArray jtarget_sizes) { + std::vector db_paths; + jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr); + if(ptr_jtarget_size == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + jboolean has_exception = JNI_FALSE; + const jsize len = env->GetArrayLength(jpaths); + for(jsize i = 0; i < len; i++) { + jobject jpath = reinterpret_cast(env-> + GetObjectArrayElement(jpaths, i)); + if(env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseLongArrayElements( + jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + std::string path = rocksdb::JniUtil::copyString( + env, static_cast(jpath), &has_exception); + env->DeleteLocalRef(jpath); + + if(has_exception == JNI_TRUE) { + env->ReleaseLongArrayElements( + jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + + jlong jtarget_size = ptr_jtarget_size[i]; + + db_paths.push_back( + rocksdb::DbPath(path, static_cast(jtarget_size))); + } + + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + + auto* opt = reinterpret_cast(jhandle); + opt->db_paths = db_paths; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dbPathsLen + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_dbPathsLen( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = 
reinterpret_cast(jhandle); + return static_cast(opt->db_paths.size()); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dbPaths + * Signature: (J[Ljava/lang/String;[J)V + */ +void Java_org_rocksdb_DBOptions_dbPaths( + JNIEnv* env, jobject jobj, jlong jhandle, jobjectArray jpaths, + jlongArray jtarget_sizes) { + jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr); + if(ptr_jtarget_size == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + auto* opt = reinterpret_cast(jhandle); + const jsize len = env->GetArrayLength(jpaths); + for(jsize i = 0; i < len; i++) { + rocksdb::DbPath db_path = opt->db_paths[i]; + + jstring jpath = env->NewStringUTF(db_path.path.c_str()); + if(jpath == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseLongArrayElements( + jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + env->SetObjectArrayElement(jpaths, i, jpath); + if(env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jpath); + env->ReleaseLongArrayElements( + jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + + ptr_jtarget_size[i] = static_cast(db_path.target_size); + } + + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_COMMIT); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDbLogDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_DBOptions_setDbLogDir( + JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) { + const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr); + if(log_dir == nullptr) { + // exception thrown: OutOfMemoryError return; } @@ -3989,7 +4759,7 @@ jlong Java_org_rocksdb_DBOptions_keepLogFileNum( /* * Class: org_rocksdb_DBOptions - * Method: setRecycleLogFiles + * Method: setRecycleLogFileNum * Signature: (JJ)V */ void Java_org_rocksdb_DBOptions_setRecycleLogFileNum( @@ -4005,7 +4775,7 @@ void Java_org_rocksdb_DBOptions_setRecycleLogFileNum( /* * Class: org_rocksdb_DBOptions - * Method: recycleLogFiles + * Method: recycleLogFileNum * Signature: (J)J */ jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(JNIEnv* env, jobject jobj, @@ -4170,6 +4940,28 @@ void Java_org_rocksdb_DBOptions_setUseDirectWrites(JNIEnv* env, jobject jobj, static_cast(use_direct_writes); } +/* + * Class: org_rocksdb_DBOptions + * Method: setAllowFAllocate + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAllowFAllocate( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_fallocate) { + reinterpret_cast(jhandle)->allow_fallocate = + static_cast(jallow_fallocate); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: allowFAllocate + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_allowFAllocate( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->allow_fallocate); +} + /* * Class: org_rocksdb_DBOptions * Method: setAllowMmapReads @@ -4275,6 +5067,147 @@ jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen( return reinterpret_cast(jhandle)->advise_random_on_open; } +/* + * Class: org_rocksdb_DBOptions + * Method: setDbWriteBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setDbWriteBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jdb_write_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->db_write_buffer_size = static_cast(jdb_write_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dbWriteBufferSize + * Signature: (J)J + */ +jlong 
Java_org_rocksdb_DBOptions_dbWriteBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->db_write_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAccessHintOnCompactionStart + * Signature: (JB)V + */ +void Java_org_rocksdb_DBOptions_setAccessHintOnCompactionStart( + JNIEnv* env, jobject jobj, jlong jhandle, jbyte jaccess_hint_value) { + auto* opt = reinterpret_cast(jhandle); + opt->access_hint_on_compaction_start = + rocksdb::AccessHintJni::toCppAccessHint(jaccess_hint_value); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: accessHintOnCompactionStart + * Signature: (J)B + */ +jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return rocksdb::AccessHintJni::toJavaAccessHint( + opt->access_hint_on_compaction_start); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setNewTableReaderForCompactionInputs + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setNewTableReaderForCompactionInputs( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jnew_table_reader_for_compaction_inputs) { + auto* opt = reinterpret_cast(jhandle); + opt->new_table_reader_for_compaction_inputs = + static_cast(jnew_table_reader_for_compaction_inputs); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: newTableReaderForCompactionInputs + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_newTableReaderForCompactionInputs( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->new_table_reader_for_compaction_inputs); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setCompactionReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setCompactionReadaheadSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jcompaction_readahead_size) { + auto* opt = reinterpret_cast(jhandle); + opt->compaction_readahead_size = + static_cast(jcompaction_readahead_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: compactionReadaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->compaction_readahead_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setRandomAccessMaxBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setRandomAccessMaxBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jrandom_access_max_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->random_access_max_buffer_size = + static_cast(jrandom_access_max_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: randomAccessMaxBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->random_access_max_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWritableFileMaxBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWritableFileMaxBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jwritable_file_max_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->writable_file_max_buffer_size = + static_cast(jwritable_file_max_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: writableFileMaxBufferSize + * Signature: (J)J + */ 
+jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->writable_file_max_buffer_size); +} + /* * Class: org_rocksdb_DBOptions * Method: setUseAdaptiveMutex @@ -4317,6 +5250,73 @@ jlong Java_org_rocksdb_DBOptions_bytesPerSync( return reinterpret_cast(jhandle)->bytes_per_sync; } +/* + * Class: org_rocksdb_DBOptions + * Method: setWalBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWalBytesPerSync( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwal_bytes_per_sync) { + reinterpret_cast(jhandle)->wal_bytes_per_sync = + static_cast(jwal_bytes_per_sync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: walBytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_walBytesPerSync( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->wal_bytes_per_sync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setEnableThreadTracking + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setEnableThreadTracking( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jenable_thread_tracking) { + auto* opt = reinterpret_cast(jhandle); + opt->enable_thread_tracking = static_cast(jenable_thread_tracking); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: enableThreadTracking + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_enableThreadTracking( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->enable_thread_tracking); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDelayedWriteRate + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setDelayedWriteRate( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jdelayed_write_rate) { + auto* opt = reinterpret_cast(jhandle); + opt->delayed_write_rate = static_cast(jdelayed_write_rate); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: delayedWriteRate + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_delayedWriteRate( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->delayed_write_rate); +} + /* * Class: org_rocksdb_DBOptions * Method: setAllowConcurrentMemtableWrite @@ -4405,16 +5405,178 @@ jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec( write_thread_slow_yield_usec; } -void Java_org_rocksdb_DBOptions_setDelayedWriteRate( - JNIEnv* env, jobject jobj, jlong jhandle, jlong delay_write_rate) { - reinterpret_cast(jhandle)->delayed_write_rate = - static_cast(delay_write_rate); +/* + * Class: org_rocksdb_DBOptions + * Method: setSkipStatsUpdateOnDbOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setSkipStatsUpdateOnDbOpen( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jskip_stats_update_on_db_open) { + auto* opt = reinterpret_cast(jhandle); + opt->skip_stats_update_on_db_open = + static_cast(jskip_stats_update_on_db_open); } -jlong Java_org_rocksdb_DBOptions_delayedWriteRate( +/* + * Class: org_rocksdb_DBOptions + * Method: skipStatsUpdateOnDbOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen( JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast(jhandle)-> - delayed_write_rate; + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->skip_stats_update_on_db_open); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWalRecoveryMode + * Signature: (JB)V + */ +void 
Java_org_rocksdb_DBOptions_setWalRecoveryMode( + JNIEnv* env, jobject jobj, jlong jhandle, jbyte jwal_recovery_mode_value) { + auto* opt = reinterpret_cast(jhandle); + opt->wal_recovery_mode = + rocksdb::WALRecoveryModeJni::toCppWALRecoveryMode( + jwal_recovery_mode_value); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: walRecoveryMode + * Signature: (J)B + */ +jbyte Java_org_rocksdb_DBOptions_walRecoveryMode( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return rocksdb::WALRecoveryModeJni::toJavaWALRecoveryMode( + opt->wal_recovery_mode); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAllow2pc + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAllow2pc( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_2pc) { + auto* opt = reinterpret_cast(jhandle); + opt->allow_2pc = static_cast(jallow_2pc); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: allow2pc + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_allow2pc(JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->allow_2pc); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setRowCache + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setRowCache( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jrow_cache_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* row_cache = reinterpret_cast*>(jrow_cache_handle); + opt->row_cache = *row_cache; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setFailIfOptionsFileError + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setFailIfOptionsFileError( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jfail_if_options_file_error) { + auto* opt = reinterpret_cast(jhandle); + opt->fail_if_options_file_error = + static_cast(jfail_if_options_file_error); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: failIfOptionsFileError + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_failIfOptionsFileError( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->fail_if_options_file_error); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDumpMallocStats + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setDumpMallocStats( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jdump_malloc_stats) { + auto* opt = reinterpret_cast(jhandle); + opt->dump_malloc_stats = static_cast(jdump_malloc_stats); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dumpMallocStats + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_dumpMallocStats( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->dump_malloc_stats); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAvoidFlushDuringRecovery + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAvoidFlushDuringRecovery( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean javoid_flush_during_recovery) { + auto* opt = reinterpret_cast(jhandle); + opt->avoid_flush_during_recovery = static_cast(javoid_flush_during_recovery); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: avoidFlushDuringRecovery + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringRecovery( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->avoid_flush_during_recovery); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAvoidFlushDuringShutdown + * 
Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAvoidFlushDuringShutdown( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean javoid_flush_during_shutdown) { + auto* opt = reinterpret_cast(jhandle); + opt->avoid_flush_during_shutdown = static_cast(javoid_flush_during_shutdown); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: avoidFlushDuringShutdown + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->avoid_flush_during_shutdown); } ////////////////////////////////////////////////////////////////////////////// @@ -4483,6 +5645,51 @@ jboolean Java_org_rocksdb_WriteOptions_disableWAL( return reinterpret_cast(jhandle)->disableWAL; } +/* + * Class: org_rocksdb_WriteOptions + * Method: setIgnoreMissingColumnFamilies + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setIgnoreMissingColumnFamilies( + JNIEnv* env, jobject jwrite_options, jlong jhandle, + jboolean jignore_missing_column_families) { + reinterpret_cast(jhandle)-> + ignore_missing_column_families = + static_cast(jignore_missing_column_families); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: ignoreMissingColumnFamilies + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_ignoreMissingColumnFamilies( + JNIEnv* env, jobject jwrite_options, jlong jhandle) { + return reinterpret_cast(jhandle)-> + ignore_missing_column_families; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: setNoSlowdown + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setNoSlowdown( + JNIEnv* env, jobject jwrite_options, jlong jhandle, jboolean jno_slowdown) { + reinterpret_cast(jhandle)->no_slowdown = + static_cast(jno_slowdown); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: noSlowdown + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_noSlowdown( + JNIEnv* env, jobject jwrite_options, jlong jhandle) { + return reinterpret_cast(jhandle)->no_slowdown; +} + ///////////////////////////////////////////////////////////////////// // rocksdb::ReadOptions @@ -4658,6 +5865,75 @@ void Java_org_rocksdb_ReadOptions_setPinData( static_cast(jpin_data); } +/* + * Class: org_rocksdb_ReadOptions + * Method: backgroundPurgeOnIteratorCleanup + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_backgroundPurgeOnIteratorCleanup( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->background_purge_on_iterator_cleanup); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setBackgroundPurgeOnIteratorCleanup + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setBackgroundPurgeOnIteratorCleanup( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jbackground_purge_on_iterator_cleanup) { + auto* opt = reinterpret_cast(jhandle); + opt->background_purge_on_iterator_cleanup = + static_cast(jbackground_purge_on_iterator_cleanup); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: readaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_readaheadSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->readahead_size); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setReadaheadSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jreadahead_size) { + auto* opt = reinterpret_cast(jhandle); + 
opt->readahead_size = static_cast(jreadahead_size); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: ignoreRangeDeletions + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->ignore_range_deletions); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setIgnoreRangeDeletions + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setIgnoreRangeDeletions( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jignore_range_deletions) { + auto* opt = reinterpret_cast(jhandle); + opt->ignore_range_deletions = static_cast(jignore_range_deletions); +} + /* * Class: org_rocksdb_ReadOptions * Method: setSnapshot diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index 62ab2f99c..c429d996d 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -2069,6 +2069,220 @@ class BatchResultJni : public JavaClass { } }; +// The portal class for org.rocksdb.CompactionStopStyle +class CompactionStopStyleJni { + public: + // Returns the equivalent org.rocksdb.CompactionStopStyle for the provided + // C++ rocksdb::CompactionStopStyle enum + static jbyte toJavaCompactionStopStyle( + const rocksdb::CompactionStopStyle& compaction_stop_style) { + switch(compaction_stop_style) { + case rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize: + return 0x0; + case rocksdb::CompactionStopStyle::kCompactionStopStyleTotalSize: + return 0x1; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ rocksdb::CompactionStopStyle enum for the + // provided Java org.rocksdb.CompactionStopStyle + static rocksdb::CompactionStopStyle toCppCompactionStopStyle( + jbyte jcompaction_stop_style) { + switch(jcompaction_stop_style) { + case 0x0: + return rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize; + case 0x1: + return rocksdb::CompactionStopStyle::kCompactionStopStyleTotalSize; + default: + // undefined/default + return rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize; + } + } +}; + +// The portal class for org.rocksdb.CompressionType +class CompressionTypeJni { + public: + // Returns the equivalent org.rocksdb.CompressionType for the provided + // C++ rocksdb::CompressionType enum + static jbyte toJavaCompressionType( + const rocksdb::CompressionType& compression_type) { + switch(compression_type) { + case rocksdb::CompressionType::kNoCompression: + return 0x0; + case rocksdb::CompressionType::kSnappyCompression: + return 0x1; + case rocksdb::CompressionType::kZlibCompression: + return 0x2; + case rocksdb::CompressionType::kBZip2Compression: + return 0x3; + case rocksdb::CompressionType::kLZ4Compression: + return 0x4; + case rocksdb::CompressionType::kLZ4HCCompression: + return 0x5; + case rocksdb::CompressionType::kXpressCompression: + return 0x6; + case rocksdb::CompressionType::kZSTD: + return 0x7; + case rocksdb::CompressionType::kDisableCompressionOption: + default: + return 0x7F; + } + } + + // Returns the equivalent C++ rocksdb::CompressionType enum for the + // provided Java org.rocksdb.CompressionType + static rocksdb::CompressionType toCppCompressionType( + jbyte jcompression_type) { + switch(jcompression_type) { + case 0x0: + return rocksdb::CompressionType::kNoCompression; + case 0x1: + return rocksdb::CompressionType::kSnappyCompression; + case 0x2: + return rocksdb::CompressionType::kZlibCompression; + case 0x3: + return rocksdb::CompressionType::kBZip2Compression; + case 0x4: + 
return rocksdb::CompressionType::kLZ4Compression; + case 0x5: + return rocksdb::CompressionType::kLZ4HCCompression; + case 0x6: + return rocksdb::CompressionType::kXpressCompression; + case 0x7: + return rocksdb::CompressionType::kZSTD; + case 0x7F: + default: + return rocksdb::CompressionType::kDisableCompressionOption; + } + } +}; + +// The portal class for org.rocksdb.CompactionPriority +class CompactionPriorityJni { + public: + // Returns the equivalent org.rocksdb.CompactionPriority for the provided + // C++ rocksdb::CompactionPri enum + static jbyte toJavaCompactionPriority( + const rocksdb::CompactionPri& compaction_priority) { + switch(compaction_priority) { + case rocksdb::CompactionPri::kByCompensatedSize: + return 0x0; + case rocksdb::CompactionPri::kOldestLargestSeqFirst: + return 0x1; + case rocksdb::CompactionPri::kOldestSmallestSeqFirst: + return 0x2; + case rocksdb::CompactionPri::kMinOverlappingRatio: + return 0x3; + default: + return 0x0; // undefined + } + } + + // Returns the equivalent C++ rocksdb::CompactionPri enum for the + // provided Java org.rocksdb.CompactionPriority + static rocksdb::CompactionPri toCppCompactionPriority( + jbyte jcompaction_priority) { + switch(jcompaction_priority) { + case 0x0: + return rocksdb::CompactionPri::kByCompensatedSize; + case 0x1: + return rocksdb::CompactionPri::kOldestLargestSeqFirst; + case 0x2: + return rocksdb::CompactionPri::kOldestSmallestSeqFirst; + case 0x3: + return rocksdb::CompactionPri::kMinOverlappingRatio; + default: + // undefined/default + return rocksdb::CompactionPri::kByCompensatedSize; + } + } +}; + +// The portal class for org.rocksdb.AccessHint +class AccessHintJni { + public: + // Returns the equivalent org.rocksdb.AccessHint for the provided + // C++ rocksdb::DBOptions::AccessHint enum + static jbyte toJavaAccessHint( + const rocksdb::DBOptions::AccessHint& access_hint) { + switch(access_hint) { + case rocksdb::DBOptions::AccessHint::NONE: + return 0x0; + case rocksdb::DBOptions::AccessHint::NORMAL: + return 0x1; + case rocksdb::DBOptions::AccessHint::SEQUENTIAL: + return 0x2; + case rocksdb::DBOptions::AccessHint::WILLNEED: + return 0x3; + default: + // undefined/default + return 0x1; + } + } + + // Returns the equivalent C++ rocksdb::DBOptions::AccessHint enum for the + // provided Java org.rocksdb.AccessHint + static rocksdb::DBOptions::AccessHint toCppAccessHint(jbyte jaccess_hint) { + switch(jaccess_hint) { + case 0x0: + return rocksdb::DBOptions::AccessHint::NONE; + case 0x1: + return rocksdb::DBOptions::AccessHint::NORMAL; + case 0x2: + return rocksdb::DBOptions::AccessHint::SEQUENTIAL; + case 0x3: + return rocksdb::DBOptions::AccessHint::WILLNEED; + default: + // undefined/default + return rocksdb::DBOptions::AccessHint::NORMAL; + } + } +}; + +// The portal class for org.rocksdb.WALRecoveryMode +class WALRecoveryModeJni { + public: + // Returns the equivalent org.rocksdb.WALRecoveryMode for the provided + // C++ rocksdb::WALRecoveryMode enum + static jbyte toJavaWALRecoveryMode( + const rocksdb::WALRecoveryMode& wal_recovery_mode) { + switch(wal_recovery_mode) { + case rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords: + return 0x0; + case rocksdb::WALRecoveryMode::kAbsoluteConsistency: + return 0x1; + case rocksdb::WALRecoveryMode::kPointInTimeRecovery: + return 0x2; + case rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords: + return 0x3; + default: + // undefined/default + return 0x2; + } + } + + // Returns the equivalent C++ rocksdb::WALRecoveryMode enum for the + // provided Java 
org.rocksdb.WALRecoveryMode + static rocksdb::WALRecoveryMode toCppWALRecoveryMode(jbyte jwal_recovery_mode) { + switch(jwal_recovery_mode) { + case 0x0: + return rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords; + case 0x1: + return rocksdb::WALRecoveryMode::kAbsoluteConsistency; + case 0x2: + return rocksdb::WALRecoveryMode::kPointInTimeRecovery; + case 0x3: + return rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords; + default: + // undefined/default + return rocksdb::WALRecoveryMode::kPointInTimeRecovery; + } + } +}; + // various utility functions for working with RocksDB and JNI class JniUtil { public: diff --git a/java/src/main/java/org/rocksdb/AccessHint.java b/java/src/main/java/org/rocksdb/AccessHint.java new file mode 100644 index 000000000..8202e89a8 --- /dev/null +++ b/java/src/main/java/org/rocksdb/AccessHint.java @@ -0,0 +1,53 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * File access pattern once a compaction has started + */ +public enum AccessHint { + NONE((byte)0x0), + NORMAL((byte)0x1), + SEQUENTIAL((byte)0x2), + WILLNEED((byte)0x3); + + private final byte value; + + AccessHint(final byte value) { + this.value = value; + } + + /** + *

Returns the byte value of the enumeration's value.

+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + *

Get the AccessHint enumeration value by + * passing the byte identifier to this method.
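A quick usage sketch of this byte mapping (the wrapper class is hypothetical; it assumes a Java-side DBOptions#setAccessHintOnCompactionStart setter matching the native binding added in this change, and the same byte-identifier scheme backs the other new enums such as WALRecoveryMode and CompactionPriority):

    import org.rocksdb.AccessHint;
    import org.rocksdb.DBOptions;

    public class AccessHintExample {  // hypothetical example class
      public static void main(final String[] args) {
        // round-trip the byte identifier that crosses the JNI boundary
        final byte id = AccessHint.SEQUENTIAL.getValue();
        assert AccessHint.getAccessHint(id) == AccessHint.SEQUENTIAL;

        // assumed setter mirroring Java_org_rocksdb_DBOptions_setAccessHintOnCompactionStart
        try (final DBOptions options = new DBOptions()) {
          options.setAccessHintOnCompactionStart(AccessHint.SEQUENTIAL);
        }
      }
    }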

+ * + * @param byteIdentifier of AccessHint. + * + * @return AccessHint instance. + * + * @throws IllegalArgumentException if the access hint for the byteIdentifier + * cannot be found + */ + public static AccessHint getAccessHint(final byte byteIdentifier) { + for (final AccessHint accessHint : AccessHint.values()) { + if (accessHint.getValue() == byteIdentifier) { + return accessHint; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for AccessHint."); + } +} diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java new file mode 100644 index 000000000..54011b983 --- /dev/null +++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java @@ -0,0 +1,465 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.List; + +/** + * Advanced Column Family Options which are not + * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface} + * + * Taken from include/rocksdb/advanced_options.h + */ +public interface AdvancedColumnFamilyOptionsInterface + { + + /** + * The minimum number of write buffers that will be merged together + * before writing to storage. If set to 1, then + * all write buffers are flushed to L0 as individual files and this increases + * read amplification because a get request has to check in all of these + * files. Also, an in-memory merge may result in writing lesser + * data to storage if there are duplicate records in each of these + * individual write buffers. Default: 1 + * + * @param minWriteBufferNumberToMerge the minimum number of write buffers + * that will be merged together. + * @return the reference to the current options. + */ + T setMinWriteBufferNumberToMerge( + int minWriteBufferNumberToMerge); + + /** + * The minimum number of write buffers that will be merged together + * before writing to storage. If set to 1, then + * all write buffers are flushed to L0 as individual files and this increases + * read amplification because a get request has to check in all of these + * files. Also, an in-memory merge may result in writing lesser + * data to storage if there are duplicate records in each of these + * individual write buffers. Default: 1 + * + * @return the minimum number of write buffers that will be merged together. + */ + int minWriteBufferNumberToMerge(); + + /** + * The total maximum number of write buffers to maintain in memory including + * copies of buffers that have already been flushed. Unlike + * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}, + * this parameter does not affect flushing. + * This controls the minimum amount of write history that will be available + * in memory for conflict checking when Transactions are used. + * + * When using an OptimisticTransactionDB: + * If this value is too low, some transactions may fail at commit time due + * to not being able to determine whether there were any write conflicts. + * + * When using a TransactionDB: + * If Transaction::SetSnapshot is used, TransactionDB will read either + * in-memory write buffers or SST files to do write-conflict checking. 
+ * Increasing this value can reduce the number of reads to SST files + * done for conflict detection. + * + * Setting this value to 0 will cause write buffers to be freed immediately + * after they are flushed. + * If this value is set to -1, + * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()} + * will be used. + * + * Default: + * If using a TransactionDB/OptimisticTransactionDB, the default value will + * be set to the value of + * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()} + * if it is not explicitly set by the user. Otherwise, the default is 0. + * + * @param maxWriteBufferNumberToMaintain The maximum number of write + * buffers to maintain + * + * @return the reference to the current options. + */ + T setMaxWriteBufferNumberToMaintain( + int maxWriteBufferNumberToMaintain); + + /** + * The total maximum number of write buffers to maintain in memory including + * copies of buffers that have already been flushed. + * + * @return maxWriteBufferNumberToMaintain The maximum number of write buffers + * to maintain + */ + int maxWriteBufferNumberToMaintain(); + + /** + * Allows thread-safe inplace updates. + * If inplace_callback function is not set, + * Put(key, new_value) will update inplace the existing_value iff + * * key exists in current memtable + * * new sizeof(new_value) ≤ sizeof(existing_value) + * * existing_value for that key is a put i.e. kTypeValue + * If inplace_callback function is set, check doc for inplace_callback. + * Default: false. + * + * @param inplaceUpdateSupport true if thread-safe inplace updates + * are allowed. + * @return the reference to the current options. + */ + T setInplaceUpdateSupport( + boolean inplaceUpdateSupport); + + /** + * Allows thread-safe inplace updates. + * If inplace_callback function is not set, + * Put(key, new_value) will update inplace the existing_value iff + * * key exists in current memtable + * * new sizeof(new_value) ≤ sizeof(existing_value) + * * existing_value for that key is a put i.e. kTypeValue + * If inplace_callback function is set, check doc for inplace_callback. + * Default: false. + * + * @return true if thread-safe inplace updates are allowed. + */ + boolean inplaceUpdateSupport(); + + /** + * Control locality of bloom filter probes to improve cache miss rate. + * This option only applies to memtable prefix bloom and plaintable + * prefix bloom. It essentially limits the max number of cache lines each + * bloom filter check can touch. + * This optimization is turned off when set to 0. The number should never + * be greater than number of probes. This option can boost performance + * for in-memory workload but should use with care since it can cause + * higher false positive rate. + * Default: 0 + * + * @param bloomLocality the level of locality of bloom-filter probes. + * @return the reference to the current options. + */ + T setBloomLocality(int bloomLocality); + + /** + * Control locality of bloom filter probes to improve cache miss rate. + * This option only applies to memtable prefix bloom and plaintable + * prefix bloom. It essentially limits the max number of cache lines each + * bloom filter check can touch. + * This optimization is turned off when set to 0. The number should never + * be greater than number of probes. This option can boost performance + * for in-memory workload but should use with care since it can cause + * higher false positive rate. + * Default: 0 + * + * @return the level of locality of bloom-filter probes. 
+ * @see #setBloomLocality(int) + */ + int bloomLocality(); + + /** + *

Different levels can have different compression + * policies. There are cases where most lower levels + * would like to use quick compression algorithms while + * the higher levels (which have more data) use + * compression algorithms that have better compression + * but could be slower. This array, if non-empty, should + * have an entry for each level of the database; + * these override the value specified in the previous + * field 'compression'.

+ * + * NOTICE + *

If {@code level_compaction_dynamic_level_bytes=true}, + * {@code compression_per_level[0]} still determines {@code L0}, + * but other elements of the array are based on the base level + * (the level {@code L0} files are merged to), and may not + * match the levels users see in the info log for metadata. + *

+ *

If {@code L0} files are merged to {@code level-n}, + * then, for {@code i>0}, {@code compression_per_level[i]} + * determines the compression type for level {@code n+i-1}.

+ * + * Example + *

For example, if we have 5 levels, and we determine to + * merge {@code L0} data to {@code L4} (which means {@code L1..L3} + * will be empty), then new files written to {@code L4} use + * compression type {@code compression_per_level[1]}.

+ * + *

If {@code L0} is instead merged to {@code L2}, data going to + * {@code L2} will be compressed according to + * {@code compression_per_level[1]}, {@code L3} using + * {@code compression_per_level[2]} and {@code L4} using + * {@code compression_per_level[3]}. The compression used for each + * level can change as the data grows.

+ * + *

Default: empty
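As a minimal sketch of the per-level array described above (assuming ColumnFamilyOptions implements this interface and keeps its usual fluent setters; the example class is hypothetical):

    import java.util.Arrays;
    import java.util.List;
    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.CompressionType;

    public class CompressionPerLevelExample {  // hypothetical example class
      public static void main(final String[] args) {
        // one entry per level: no compression for the small, hot lower levels,
        // stronger compression for the larger, colder levels
        final List<CompressionType> perLevel = Arrays.asList(
            CompressionType.NO_COMPRESSION,      // L0
            CompressionType.NO_COMPRESSION,      // L1
            CompressionType.SNAPPY_COMPRESSION,  // L2
            CompressionType.ZLIB_COMPRESSION);   // L3

        try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
            .setNumLevels(4)
            .setCompressionPerLevel(perLevel)) {
          // pass cfOpts to a ColumnFamilyDescriptor / RocksDB.open(...) as usual
        }
      }
    }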

+ * + * @param compressionLevels list of + * {@link org.rocksdb.CompressionType} instances. + * + * @return the reference to the current options. + */ + T setCompressionPerLevel( + List compressionLevels); + + /** + *

Return the currently set list of {@link org.rocksdb.CompressionType} + * instances, one per level.

+ * + *

See: {@link #setCompressionPerLevel(java.util.List)}

+ * + * @return list of {@link org.rocksdb.CompressionType} + * instances. + */ + List compressionPerLevel(); + + /** + * Set the number of levels for this database + * If level-styled compaction is used, then this number determines + * the total number of levels. + * + * @param numLevels the number of levels. + * @return the reference to the current options. + */ + T setNumLevels(int numLevels); + + /** + * If level-styled compaction is used, then this number determines + * the total number of levels. + * + * @return the number of levels. + */ + int numLevels(); + + /** + *

If {@code true}, RocksDB will pick the target size of each level + * dynamically. We will pick a base level b >= 1. L0 will be + * directly merged into level b, instead of always into level 1. + * Levels 1 to b-1 need to be empty. We try to pick b and its target + * size so that

+ * + *
  1. target size is in the range of + * (max_bytes_for_level_base / max_bytes_for_level_multiplier, + * max_bytes_for_level_base]
  2. target size of the last level (level num_levels-1) equals the extra size + * of the level.
+ * + *

At the same time, max_bytes_for_level_multiplier and + * max_bytes_for_level_multiplier_additional are still satisfied.

+ * + *

With this option on, starting from an empty DB, we make the last level the base + * level, which means merging L0 data into the last level, until it exceeds + * max_bytes_for_level_base. Then we make the second-to-last level the + * base level and start merging L0 data into it, with its + * target size set to {@code 1/max_bytes_for_level_multiplier} of the last + * level's extra size. As the data accumulates further, the base level + * moves to the third-to-last level, and so on.

+ * + *

Example

+ *

For example, assume {@code max_bytes_for_level_multiplier=10}, + * {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.

+ * + *

Target sizes of levels 1 to 5 start with:

+ * {@code [- - - - 10MB]} + *

with the base level being level 5. Target sizes of levels 1 to 4 are not applicable + * because they will not be used. + * Once the size of level 5 grows to more than 10MB, say 11MB, we make + * level 4 the base level and the targets now look like:

+ * {@code [- - - 1.1MB 11MB]} + *

As data accumulates, size targets are tuned based on the actual data + * in level 5. When level 5 has 50MB of data, the targets look like:

+ * {@code [- - - 5MB 50MB]} + *

Eventually level 5's actual size grows to more than 100MB, say 101MB. Now if we + * keep level 4 as the base level, its target size needs to be 10.1MB, + * which doesn't satisfy the target size range. So now we make level 3 + * the base level, and the target sizes of the levels look like:

+ * {@code [- - 1.01MB 10.1MB 101MB]} + *

In the same way, as level 5 grows further, all levels' targets grow, + * like

+ * {@code [- - 5MB 50MB 500MB]} + *

Once level 5 exceeds 1000MB, say at 1001MB, we make level 2 the + * base level and the levels' target sizes become:

+ * {@code [- 1.001MB 10.01MB 100.1MB 1001MB]} + *

and go on...

+ * + *

By doing this, we give {@code max_bytes_for_level_multiplier} priority + * over {@code max_bytes_for_level_base}, for a more predictable LSM tree + * shape. It is useful for limiting worst-case space amplification.

+ * + *

{@code max_bytes_for_level_multiplier_additional} is ignored with + * this flag on.

+ * + *

Turning this feature on or off for an existing DB can cause an unexpected + * LSM tree structure, so it's not recommended.

+ * + *

Caution: this option is experimental

+ * + *

Default: false
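A hedged configuration sketch tying the walkthrough above together (values mirror the example: num_levels=6, max_bytes_for_level_base=10MB, multiplier=10; ColumnFamilyOptions is assumed to implement this interface):

    import org.rocksdb.ColumnFamilyOptions;

    public class DynamicLevelBytesExample {  // hypothetical example class
      public static void main(final String[] args) {
        try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
            .setNumLevels(6)
            .setMaxBytesForLevelBase(10L * 1024 * 1024)  // 10MB, as in the example
            .setMaxBytesForLevelMultiplier(10)
            .setLevelCompactionDynamicLevelBytes(true)) {
          // L0 now merges into the current base level (initially level 5) and
          // per-level targets are derived from the actual size of the last level
        }
      }
    }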

+ * + * @param enableLevelCompactionDynamicLevelBytes boolean value indicating + * if {@code LevelCompactionDynamicLevelBytes} shall be enabled. + * @return the reference to the current options. + */ + @Experimental("Turning this feature on or off for an existing DB can cause" + + "unexpected LSM tree structure so it's not recommended") + T setLevelCompactionDynamicLevelBytes( + boolean enableLevelCompactionDynamicLevelBytes); + + /** + *

Return true if {@code LevelCompactionDynamicLevelBytes} is enabled. + *

+ * + *

For further information see + * {@link #setLevelCompactionDynamicLevelBytes(boolean)}

+ * + * @return boolean value indicating if + * {@code levelCompactionDynamicLevelBytes} is enabled. + */ + @Experimental("Caution: this option is experimental") + boolean levelCompactionDynamicLevelBytes(); + + /** + * Maximum size of each compaction (not guarantee) + * + * @param maxCompactionBytes the compaction size limit + * @return the reference to the current options. + */ + T setMaxCompactionBytes( + long maxCompactionBytes); + + /** + * Control maximum size of each compaction (not guaranteed) + * + * @return compaction size threshold + */ + long maxCompactionBytes(); + + /** + * Set compaction style for DB. + * + * Default: LEVEL. + * + * @param compactionStyle Compaction style. + * @return the reference to the current options. + */ + ColumnFamilyOptionsInterface setCompactionStyle( + CompactionStyle compactionStyle); + + /** + * Compaction style for DB. + * + * @return Compaction style. + */ + CompactionStyle compactionStyle(); + + /** + * If level {@link #compactionStyle()} == {@link CompactionStyle#LEVEL}, + * for each level, which files are prioritized to be picked to compact. + * + * Default: {@link CompactionPriority#ByCompensatedSize} + * + * @param compactionPriority The compaction priority + * + * @return the reference to the current options. + */ + T setCompactionPriority( + CompactionPriority compactionPriority); + + /** + * Get the Compaction priority if level compaction + * is used for all levels + * + * @return The compaction priority + */ + CompactionPriority compactionPriority(); + + /** + * Set the options needed to support Universal Style compactions + * + * @param compactionOptionsUniversal The Universal Style compaction options + * + * @return the reference to the current options. + */ + T setCompactionOptionsUniversal( + CompactionOptionsUniversal compactionOptionsUniversal); + + /** + * The options needed to support Universal Style compactions + * + * @return The Universal Style compaction options + */ + CompactionOptionsUniversal compactionOptionsUniversal(); + + /** + * The options for FIFO compaction style + * + * @param compactionOptionsFIFO The FIFO compaction options + * + * @return the reference to the current options. + */ + T setCompactionOptionsFIFO( + CompactionOptionsFIFO compactionOptionsFIFO); + + /** + * The options for FIFO compaction style + * + * @return The FIFO compaction options + */ + CompactionOptionsFIFO compactionOptionsFIFO(); + + /** + *

This flag specifies that the implementation should optimize the filters + * mainly for cases where keys are found rather than also optimize for keys + * missed. This would be used in cases where the application knows that + * there are very few misses or the performance in the case of misses is not + * important.

+ * + *

For now, this flag allows us to not store filters for the last level, i.e. + * the largest level which contains data of the LSM store. For keys which + * are hits, the filters in this level are not useful because we will search + * for the data anyway.

+ * + *

NOTE: the filters in other levels are still useful + * even for key hits, because they tell us whether to look in that level or go + * to a higher level.

+ * + *

Default: false
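A short sketch combining this flag with the other new compaction knobs introduced in this change (CompactionPriority, force_consistency_checks); setter names are taken from this diff, the example class is hypothetical:

    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.CompactionPriority;

    public class CompactionTuningExample {  // hypothetical example class
      public static void main(final String[] args) {
        try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
          cfOpts.setOptimizeFiltersForHits(true);  // skip filters on the last level
          cfOpts.setCompactionPriority(CompactionPriority.MinOverlappingRatio);
          cfOpts.setForceConsistencyChecks(true);  // keep LSM checks in release mode
        }
      }
    }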

+ * + * @param optimizeFiltersForHits boolean value indicating if this flag is set. + * @return the reference to the current options. + */ + T setOptimizeFiltersForHits( + boolean optimizeFiltersForHits); + + /** + *

Returns the current state of the {@code optimize_filters_for_hits} + * setting.

+ * + * @return boolean value indicating if the flag + * {@code optimize_filters_for_hits} was set. + */ + boolean optimizeFiltersForHits(); + + /** + * In debug mode, RocksDB run consistency checks on the LSM everytime the LSM + * change (Flush, Compaction, AddFile). These checks are disabled in release + * mode, use this option to enable them in release mode as well. + * + * Default: false + * + * @param forceConsistencyChecks true to force consistency checks + * + * @return the reference to the current options. + */ + T setForceConsistencyChecks( + boolean forceConsistencyChecks); + + /** + * In debug mode, RocksDB run consistency checks on the LSM everytime the LSM + * change (Flush, Compaction, AddFile). These checks are disabled in release + * mode. + * + * @return true if consistency checks are enforced + */ + boolean forceConsistencyChecks(); +} diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java new file mode 100644 index 000000000..16cafbf0c --- /dev/null +++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java @@ -0,0 +1,437 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Advanced Column Family Options which are mutable + * + * Taken from include/rocksdb/advanced_options.h + * and MutableCFOptions in util/cf_options.h + */ +public interface AdvancedMutableColumnFamilyOptionsInterface + { + + /** + * The maximum number of write buffers that are built up in memory. + * The default is 2, so that when 1 write buffer is being flushed to + * storage, new writes can continue to the other write buffer. + * Default: 2 + * + * @param maxWriteBufferNumber maximum number of write buffers. + * @return the instance of the current options. + */ + T setMaxWriteBufferNumber( + int maxWriteBufferNumber); + + /** + * Returns maximum number of write buffers. + * + * @return maximum number of write buffers. + * @see #setMaxWriteBufferNumber(int) + */ + int maxWriteBufferNumber(); + + /** + * Number of locks used for inplace update + * Default: 10000, if inplace_update_support = true, else 0. + * + * @param inplaceUpdateNumLocks the number of locks used for + * inplace updates. + * @return the reference to the current options. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + T setInplaceUpdateNumLocks( + long inplaceUpdateNumLocks); + + /** + * Number of locks used for inplace update + * Default: 10000, if inplace_update_support = true, else 0. + * + * @return the number of locks used for inplace update. + */ + long inplaceUpdateNumLocks(); + + /** + * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, + * create prefix bloom for memtable with the size of + * write_buffer_size * memtable_prefix_bloom_size_ratio. + * If it is larger than 0.25, it is santinized to 0.25. + * + * Default: 0 (disable) + * + * @param memtablePrefixBloomSizeRatio The ratio + * @return the reference to the current options. 
+ */ + T setMemtablePrefixBloomSizeRatio( + double memtablePrefixBloomSizeRatio); + + /** + * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, + * create prefix bloom for memtable with the size of + * write_buffer_size * memtable_prefix_bloom_size_ratio. + * If it is larger than 0.25, it is sanitized to 0.25. + * + * Default: 0 (disable) + * + * @return the ratio + */ + double memtablePrefixBloomSizeRatio(); + + /** + * Page size for huge page TLB for bloom in memtable. If ≤ 0, allocate + * from malloc instead of the huge page TLB. + * Need to reserve huge pages for it to be allocated. For example: + * sysctl -w vm.nr_hugepages=20 + * See the linux doc Documentation/vm/hugetlbpage.txt + * + * @param memtableHugePageSize The page size of the huge + * page TLB + * @return the reference to the current options.
*/ + T setLevel0StopWritesTrigger( + int level0StopWritesTrigger); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @return The maximum number of level-0 files + */ + int level0StopWritesTrigger(); + + /** + * The target file size for compaction. + * This targetFileSizeBase determines a level-1 file size. + * The target file size for level L can be calculated by + * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) + * For example, if targetFileSizeBase is 2MB and + * targetFileSizeMultiplier is 10, then each file on level-1 will + * be 2MB, each file on level-2 will be 20MB, + * and each file on level-3 will be 200MB. + * By default, targetFileSizeBase is 2MB. + * + * @param targetFileSizeBase the target size of a level-1 file. + * @return the reference to the current options. + * + * @see #setTargetFileSizeMultiplier(int) + */ + T setTargetFileSizeBase( + long targetFileSizeBase); + + /** + * The target file size for compaction. + * This targetFileSizeBase determines a level-1 file size. + * The target file size for level L can be calculated by + * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) + * For example, if targetFileSizeBase is 2MB and + * targetFileSizeMultiplier is 10, then each file on level-1 will + * be 2MB, each file on level-2 will be 20MB, + * and each file on level-3 will be 200MB. + * By default, targetFileSizeBase is 2MB. + * + * @return the target size of a level-1 file. + * + * @see #targetFileSizeMultiplier() + */ + long targetFileSizeBase(); + + /** + * targetFileSizeMultiplier defines the size ratio between a + * level-(L+1) file and a level-L file. + * By default targetFileSizeMultiplier is 1, meaning + * files in different levels have the same target. + * + * @param multiplier the size ratio between a level-(L+1) file + * and a level-L file. + * @return the reference to the current options. + */ + T setTargetFileSizeMultiplier( + int multiplier); + + /** + * targetFileSizeMultiplier defines the size ratio between a + * level-(L+1) file and a level-L file. + * By default targetFileSizeMultiplier is 1, meaning + * files in different levels have the same target. + * + * @return the size ratio between a level-(L+1) file and a level-L file. + */ + int targetFileSizeMultiplier(); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @param multiplier the ratio between the total size of level-(L+1) + * files and the total size of level-L files for all L. + * @return the reference to the current options. + * + * See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)} + */ + T setMaxBytesForLevelMultiplier(double multiplier); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @return the ratio between the total size of level-(L+1) files and + * the total size of level-L files for all L. + * + * See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()} + */ + double maxBytesForLevelMultiplier(); + + /** + * Different max-size multipliers for different levels. + * These are multiplied by max_bytes_for_level_multiplier to arrive + * at the max-size of each level. + * + * Default: 1 + * + * @param maxBytesForLevelMultiplierAdditional The max-size multipliers + * for each level + * @return the reference to the current options.
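The level-size formula above can be sanity-checked in a few lines; this is a worked example of the documented 2MB base / 10x multiplier case, not an API call:

// targetFileSize(L) = targetFileSizeBase * targetFileSizeMultiplier^(L-1)
final long targetFileSizeBase = 2L * 1024 * 1024; // 2MB
final int targetFileSizeMultiplier = 10;
for (int level = 1; level <= 3; level++) {
  final long target = targetFileSizeBase
      * (long) Math.pow(targetFileSizeMultiplier, level - 1);
  System.out.println("level-" + level + " target: " + target + " bytes");
}
// prints 2097152 (2MB), 20971520 (20MB) and 209715200 (200MB)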
*/ + T setMaxBytesForLevelMultiplierAdditional( + int[] maxBytesForLevelMultiplierAdditional); + + /** + * Different max-size multipliers for different levels. + * These are multiplied by max_bytes_for_level_multiplier to arrive + * at the max-size of each level. + * + * Default: 1 + * + * @return The max-size multipliers for each level + */ + int[] maxBytesForLevelMultiplierAdditional(); + + /** + * All writes will be slowed down to at least delayed_write_rate if the + * estimated bytes needed for compaction exceed this threshold. + * + * Default: 64GB + * + * @param softPendingCompactionBytesLimit The soft limit to impose on + * compaction + * @return the reference to the current options. + */ + T setSoftPendingCompactionBytesLimit( + long softPendingCompactionBytesLimit); + + /** + * All writes will be slowed down to at least delayed_write_rate if the + * estimated bytes needed for compaction exceed this threshold. + * + * Default: 64GB + * + * @return The soft limit to impose on compaction + */ + long softPendingCompactionBytesLimit(); + + /** + * All writes are stopped if the estimated bytes needed for compaction + * exceed this threshold. + * + * Default: 256GB + * + * @param hardPendingCompactionBytesLimit The hard limit to impose on + * compaction + * @return the reference to the current options. + */ + T setHardPendingCompactionBytesLimit( + long hardPendingCompactionBytesLimit); + + /** + * All writes are stopped if the estimated bytes needed for compaction + * exceed this threshold. + * + * Default: 256GB + * + * @return The hard limit to impose on compaction + */ + long hardPendingCompactionBytesLimit(); + + /** + * An iterator's Next() call sequentially skips over keys with the same + * user-key unless this option is set. This number specifies the number + * of keys (with the same user key) that will be sequentially + * skipped before a reseek is issued. + * Default: 8 + * + * @param maxSequentialSkipInIterations the number of keys that can + * be skipped in an iteration. + * @return the reference to the current options. + */ + T setMaxSequentialSkipInIterations( + long maxSequentialSkipInIterations); + + /** + * An iterator's Next() call sequentially skips over keys with the same + * user-key unless this option is set. This number specifies the number + * of keys (with the same user key) that will be sequentially + * skipped before a reseek is issued. + * Default: 8 + * + * @return the number of keys that can be skipped in an iteration. + */ + long maxSequentialSkipInIterations(); + + /** + * Maximum number of successive merge operations on a key in the memtable. + * + * When a merge operation is added to the memtable and the maximum number of + * successive merges is reached, the value of the key will be calculated and + * inserted into the memtable instead of the merge operation. This will + * ensure that there are never more than max_successive_merges merge + * operations in the memtable. + * + * Default: 0 (disabled) + * + * @param maxSuccessiveMerges the maximum number of successive merges. + * @return the reference to the current options. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + T setMaxSuccessiveMerges( + long maxSuccessiveMerges); + + /** + * Maximum number of successive merge operations on a key in the memtable.
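A hedged sketch of the compaction backpressure thresholds documented above, set here to their stated defaults (64GB soft, 256GB hard); it is assumed that ColumnFamilyOptions exposes these mutable interface methods:

final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions();
cfOpts.setSoftPendingCompactionBytesLimit(64L * 1024 * 1024 * 1024);  // slow writes
cfOpts.setHardPendingCompactionBytesLimit(256L * 1024 * 1024 * 1024); // stop writes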
+ * + * When a merge operation is added to the memtable and the maximum number of + * successive merges is reached, the value of the key will be calculated and + * inserted into the memtable instead of the merge operation. This will + * ensure that there are never more than max_successive_merges merge + * operations in the memtable. + * + * Default: 0 (disabled) + * + * @return the maximum number of successive merges. + */ + long maxSuccessiveMerges(); + + /** + * After writing every SST file, reopen it and read all the keys. + * + * Default: false + * + * @param paranoidFileChecks true to enable paranoid file checks + * @return the reference to the current options. + */ + T setParanoidFileChecks( + boolean paranoidFileChecks); + + /** + * After writing every SST file, reopen it and read all the keys. + * + * Default: false + * + * @return true if paranoid file checks are enabled + */ + boolean paranoidFileChecks(); + + /** + * Measure IO stats in compactions and flushes, if true. + * + * Default: false + * + * @param reportBgIoStats true to enable reporting + * @return the reference to the current options. + */ + T setReportBgIoStats( + boolean reportBgIoStats); + + /** + * Determine whether IO stats in compactions and flushes are being measured + * + * @return true if reporting is enabled + */ + boolean reportBgIoStats(); +} diff --git a/java/src/main/java/org/rocksdb/BackupableDBOptions.java b/java/src/main/java/org/rocksdb/BackupableDBOptions.java index 89591de82..453dd9832 100644 --- a/java/src/main/java/org/rocksdb/BackupableDBOptions.java +++ b/java/src/main/java/org/rocksdb/BackupableDBOptions.java @@ -18,6 +18,11 @@ import java.io.File; */ public class BackupableDBOptions extends RocksObject { + private Env backupEnv = null; + private Logger infoLog = null; + private RateLimiter backupRateLimiter = null; + private RateLimiter restoreRateLimiter = null; + /** *

BackupableDBOptions constructor.

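A running sketch used in the notes below; the backup directory is an illustrative path, and RocksDB.loadLibrary() is assumed to have been called:

final BackupableDBOptions backupOpts =
    new BackupableDBOptions("/tmp/rocksdb_backups"); // illustrative path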
* @@ -49,6 +54,40 @@ public class BackupableDBOptions extends RocksObject { return backupDir(nativeHandle_); } + /** + * Backup Env object. It will be used for backup file I/O. If it's + * null, backups will be written out using the DB's Env. Otherwise the + * backup's I/O will be performed using this object. + * + * If you want to have backups on HDFS, use HDFS Env here! + * + * Default: null + * + * @param env The environment to use + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setBackupEnv(final Env env) { + assert(isOwningHandle()); + setBackupEnv(nativeHandle_, env.nativeHandle_); + this.backupEnv = env; + return this; + } + + /** + * Backup Env object. It will be used for backup file I/O. If it's + * null, backups will be written out using the DB's Env. Otherwise the + * backup's I/O will be performed using this object. + * + * If you want to have backups on HDFS, use HDFS Env here! + * + * Default: null + * + * @return The environment in use + */ + public Env backupEnv() { + return this.backupEnv; + } + /** *

Share table files between backups.

* @@ -79,6 +118,30 @@ public class BackupableDBOptions extends RocksObject { return shareTableFiles(nativeHandle_); } + /** + * Set the logger to use for Backup info and error messages + * + * @param logger The logger to use for the backup + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setInfoLog(final Logger logger) { + assert(isOwningHandle()); + setInfoLog(nativeHandle_, logger.nativeHandle_); + this.infoLog = logger; + return this; + } + + /** + * Set the logger to use for Backup info and error messages + * + * Default: null + * + * @return The logger in use for the backup + */ + public Logger infoLog() { + return this.infoLog; + } + /** *

Set synchronous backups.

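Continuing the sketch above: trading backup speed for durability with synchronous writes; the destroyOldData value shown is an assumed default, not verified here:

backupOpts.setSync(true);            // fsync backup files as they are written
backupOpts.setDestroyOldData(false); // keep existing backups (assumed default)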
* @@ -189,6 +252,35 @@ public class BackupableDBOptions extends RocksObject { return backupRateLimit(nativeHandle_); } + /** + * Backup rate limiter. Used to control transfer speed for backup. If this is + * not null, {@link #backupRateLimit()} is ignored. + * + * Default: null + * + * @param backupRateLimiter The rate limiter to use for the backup + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setBackupRateLimiter(final RateLimiter backupRateLimiter) { + assert(isOwningHandle()); + setBackupRateLimiter(nativeHandle_, backupRateLimiter.nativeHandle_); + this.backupRateLimiter = backupRateLimiter; + return this; + } + + /** + * Backup rate limiter. Used to control transfer speed for backup. If this is + * not null, {@link #backupRateLimit()} is ignored. + * + * Default: null + * + * @return The rate limiter in use for the backup + */ + public RateLimiter backupRateLimiter() { + assert(isOwningHandle()); + return this.backupRateLimiter; + } + /** *

Set restore rate limit.

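Continuing the sketch: the limiter-based setters introduced in this patch supersede the plain rate-limit values when non-null. The three-argument RateLimiter constructor (bytesPerSecond, refillPeriodMicros, fairness) is an assumption about the existing rocksjni class:

final RateLimiter backupLimiter = new RateLimiter(4L * 1024 * 1024, 100000, 10);
final RateLimiter restoreLimiter = new RateLimiter(8L * 1024 * 1024, 100000, 10);
backupOpts.setBackupRateLimiter(backupLimiter);   // backupRateLimit() is then ignored
backupOpts.setRestoreRateLimiter(restoreLimiter); // restoreRateLimit() is then ignored
// keep both limiters referenced for as long as backupOpts is in use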
* @@ -218,6 +310,35 @@ public class BackupableDBOptions extends RocksObject { return restoreRateLimit(nativeHandle_); } + /** + * Restore rate limiter. Used to control transfer speed during restore. If + * this is not null, {@link #restoreRateLimit()} is ignored. + * + * Default: null + * + * @param restoreRateLimiter The rate limiter to use during restore + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setRestoreRateLimiter(final RateLimiter restoreRateLimiter) { + assert(isOwningHandle()); + setRestoreRateLimiter(nativeHandle_, restoreRateLimiter.nativeHandle_); + this.restoreRateLimiter = restoreRateLimiter; + return this; + } + + /** + * Restore rate limiter. Used to control transfer speed during restore. If + * this is not null, {@link #restoreRateLimit()} is ignored. + * + * Default: null + * + * @return The rate limiter in use during restore + */ + public RateLimiter restoreRateLimiter() { + assert(isOwningHandle()); + return this.restoreRateLimiter; + } + /** *

Only used if share_table_files is set to true. If true, will consider * that backups can come from different databases, hence an sst is not uniquely * identified by its name, but by the triple (file name, crc32, file length) * @@ -252,10 +373,73 @@ public class BackupableDBOptions extends RocksObject { return shareFilesWithChecksum(nativeHandle_); } + /** + * Up to this many background threads will copy files for + * {@link BackupableDB#createNewBackup(boolean)} and + * {@link RestoreBackupableDB#restoreDBFromBackup(long, String, String, RestoreOptions)} + * + * Default: 1 + * + * @param maxBackgroundOperations The maximum number of background threads + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setMaxBackgroundOperations( + final int maxBackgroundOperations) { + assert(isOwningHandle()); + setMaxBackgroundOperations(nativeHandle_, maxBackgroundOperations); + return this; + } + + /** + * Up to this many background threads will copy files for + * {@link BackupableDB#createNewBackup(boolean)} and + * {@link RestoreBackupableDB#restoreDBFromBackup(long, String, String, RestoreOptions)} + * + * Default: 1 + * + * @return The maximum number of background threads + */ + public int maxBackgroundOperations() { + assert(isOwningHandle()); + return maxBackgroundOperations(nativeHandle_); + } + + /** + * During backup, the user can get a callback every time the next + * {@link #callbackTriggerIntervalSize()} bytes are copied. + * + * Default: 4194304 + * + * @param callbackTriggerIntervalSize The interval size for the + * callback trigger + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setCallbackTriggerIntervalSize( + final long callbackTriggerIntervalSize) { + assert(isOwningHandle()); + setCallbackTriggerIntervalSize(nativeHandle_, callbackTriggerIntervalSize); + return this; + } + + /** + * During backup, the user can get a callback every time the next + * {@link #callbackTriggerIntervalSize()} bytes are copied.
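Continuing the sketch: parallel file copies plus a progress callback roughly every 8MB; both values are illustrative:

backupOpts.setMaxBackgroundOperations(4);                    // 4 copy threads
backupOpts.setCallbackTriggerIntervalSize(8L * 1024 * 1024); // callback every ~8MB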
+ * + * Default: 4194304 + * + * @return The interval size for the callback trigger + */ + public long callbackTriggerIntervalSize() { + assert(isOwningHandle()); + return callbackTriggerIntervalSize(nativeHandle_); + } + private native static long newBackupableDBOptions(final String path); private native String backupDir(long handle); + private native void setBackupEnv(final long handle, final long envHandle); private native void setShareTableFiles(long handle, boolean flag); private native boolean shareTableFiles(long handle); + private native void setInfoLog(final long handle, final long infoLogHandle); private native void setSync(long handle, boolean flag); private native boolean sync(long handle); private native void setDestroyOldData(long handle, boolean flag); @@ -264,9 +448,18 @@ public class BackupableDBOptions extends RocksObject { private native boolean backupLogFiles(long handle); private native void setBackupRateLimit(long handle, long rateLimit); private native long backupRateLimit(long handle); + private native void setBackupRateLimiter(long handle, long rateLimiterHandle); private native void setRestoreRateLimit(long handle, long rateLimit); private native long restoreRateLimit(long handle); + private native void setRestoreRateLimiter(final long handle, + final long rateLimiterHandle); private native void setShareFilesWithChecksum(long handle, boolean flag); private native boolean shareFilesWithChecksum(long handle); + private native void setMaxBackgroundOperations(final long handle, + final int maxBackgroundOperations); + private native int maxBackgroundOperations(final long handle); + private native void setCallbackTriggerIntervalSize(final long handle, + long callbackTriggerIntervalSize); + private native long callbackTriggerIntervalSize(final long handle); @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java new file mode 100644 index 000000000..2b74b1546 --- /dev/null +++ b/java/src/main/java/org/rocksdb/Cache.java @@ -0,0 +1,13 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + + +public abstract class Cache extends RocksObject { + protected Cache(final long nativeHandle) { + super(nativeHandle); + } +} diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java new file mode 100644 index 000000000..7a2c8e7f8 --- /dev/null +++ b/java/src/main/java/org/rocksdb/ClockCache.java @@ -0,0 +1,59 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Similar to {@link LRUCache}, but based on the CLOCK algorithm with + * better concurrent performance in some cases + */ +public class ClockCache extends Cache { + + /** + * Create a new cache with a fixed size capacity. 
+ * + * @param capacity The fixed size capacity of the cache + */ + public ClockCache(final long capacity) { + super(newClockCache(capacity, -1, false)); + } + + /** + * Create a new cache with a fixed size capacity. The cache is sharded + * to 2^numShardBits shards, by hash of the key. The total capacity + * is divided and evenly assigned to each shard. + * numShardBits = -1 means it is automatically determined: every shard + * will be at least 512KB and the number of shard bits will not exceed 6. + * + * @param capacity The fixed size capacity of the cache + * @param numShardBits The cache is sharded to 2^numShardBits shards, + * by hash of the key + */ + public ClockCache(final long capacity, final int numShardBits) { + super(newClockCache(capacity, numShardBits, false)); + } + + /** + * Create a new cache with a fixed size capacity. The cache is sharded + * to 2^numShardBits shards, by hash of the key. The total capacity + * is divided and evenly assigned to each shard. If strictCapacityLimit + * is set, inserts into the cache will fail when the cache is full. + * numShardBits = -1 means it is automatically determined: every shard + * will be at least 512KB and the number of shard bits will not exceed 6. + * + * @param capacity The fixed size capacity of the cache + * @param numShardBits The cache is sharded to 2^numShardBits shards, + * by hash of the key + * @param strictCapacityLimit true if inserts into the cache should fail + * when the cache is full + */ + public ClockCache(final long capacity, final int numShardBits, + final boolean strictCapacityLimit) { + super(newClockCache(capacity, numShardBits, strictCapacityLimit)); + } + + private native static long newClockCache(final long capacity, + final int numShardBits, final boolean strictCapacityLimit); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index 9d2ed82cf..5528dca62 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -17,8 +17,8 @@ import java.util.Properties; * automatically and native resources will be released as part of the process.
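Before the ColumnFamilyOptions changes below, a quick sketch of the three ClockCache constructors just added; capacities are illustrative, and only construction is shown because wiring a Cache into a table config is not part of this hunk:

final Cache byCapacity = new ClockCache(64L * 1024 * 1024);          // auto shard bits
final Cache sharded = new ClockCache(64L * 1024 * 1024, 6);          // 2^6 = 64 shards
final Cache strict = new ClockCache(64L * 1024 * 1024, 6, true);     // inserts fail when full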
*/ public class ColumnFamilyOptions extends RocksObject - implements ColumnFamilyOptionsInterface, - MutableColumnFamilyOptionsInterface { + implements ColumnFamilyOptionsInterface, + MutableColumnFamilyOptionsInterface { static { RocksDB.loadLibrary(); } @@ -74,6 +74,12 @@ public class ColumnFamilyOptions extends RocksObject return columnFamilyOptions; } + @Override + public ColumnFamilyOptions optimizeForSmallDb() { + optimizeForSmallDb(nativeHandle_); + return this; + } + @Override public ColumnFamilyOptions optimizeForPointLookup( final long blockCacheSizeMb) { @@ -217,7 +223,7 @@ public class ColumnFamilyOptions extends RocksObject @Override public CompressionType compressionType() { - return CompressionType.values()[compressionType(nativeHandle_)]; + return CompressionType.getCompressionType(compressionType(nativeHandle_)); } @Override @@ -244,6 +250,33 @@ public class ColumnFamilyOptions extends RocksObject return compressionLevels; } + @Override + public ColumnFamilyOptions setBottommostCompressionType( + final CompressionType bottommostCompressionType) { + setBottommostCompressionType(nativeHandle_, + bottommostCompressionType.getValue()); + return this; + } + + @Override + public CompressionType bottommostCompressionType() { + return CompressionType.getCompressionType( + bottommostCompressionType(nativeHandle_)); + } + + @Override + public ColumnFamilyOptions setCompressionOptions( + final CompressionOptions compressionOptions) { + setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_); + this.compressionOptions_ = compressionOptions; + return this; + } + + @Override + public CompressionOptions compressionOptions() { + return this.compressionOptions_; + } + @Override public ColumnFamilyOptions setNumLevels(final int numLevels) { setNumLevels(nativeHandle_, numLevels); @@ -291,17 +324,6 @@ public class ColumnFamilyOptions extends RocksObject return levelZeroStopWritesTrigger(nativeHandle_); } - @Override - public ColumnFamilyOptions setMaxMemCompactionLevel( - final int maxMemCompactionLevel) { - return this; - } - - @Override - public int maxMemCompactionLevel() { - return 0; - } - @Override public ColumnFamilyOptions setTargetFileSizeBase( final long targetFileSizeBase) { @@ -373,43 +395,6 @@ public class ColumnFamilyOptions extends RocksObject return maxCompactionBytes(nativeHandle_); } - @Override - public ColumnFamilyOptions setSoftRateLimit( - final double softRateLimit) { - setSoftRateLimit(nativeHandle_, softRateLimit); - return this; - } - - @Override - public double softRateLimit() { - return softRateLimit(nativeHandle_); - } - - @Override - public ColumnFamilyOptions setHardRateLimit( - final double hardRateLimit) { - setHardRateLimit(nativeHandle_, hardRateLimit); - return this; - } - - @Override - public double hardRateLimit() { - return hardRateLimit(nativeHandle_); - } - - @Override - public ColumnFamilyOptions setRateLimitDelayMaxMilliseconds( - final int rateLimitDelayMaxMilliseconds) { - setRateLimitDelayMaxMilliseconds( - nativeHandle_, rateLimitDelayMaxMilliseconds); - return this; - } - - @Override - public int rateLimitDelayMaxMilliseconds() { - return rateLimitDelayMaxMilliseconds(nativeHandle_); - } - @Override public ColumnFamilyOptions setArenaBlockSize( final long arenaBlockSize) { @@ -434,19 +419,6 @@ public class ColumnFamilyOptions extends RocksObject return disableAutoCompactions(nativeHandle_); } - @Override - public ColumnFamilyOptions setPurgeRedundantKvsWhileFlush( - final boolean purgeRedundantKvsWhileFlush) { - 
setPurgeRedundantKvsWhileFlush( - nativeHandle_, purgeRedundantKvsWhileFlush); - return this; - } - - @Override - public boolean purgeRedundantKvsWhileFlush() { - return purgeRedundantKvsWhileFlush(nativeHandle_); - } - @Override public ColumnFamilyOptions setCompactionStyle( final CompactionStyle compactionStyle) { @@ -486,11 +458,17 @@ public class ColumnFamilyOptions extends RocksObject return maxSequentialSkipInIterations(nativeHandle_); } + @Override + public MemTableConfig memTableConfig() { + return this.memTableConfig_; + } + @Override public ColumnFamilyOptions setMemTableConfig( - final MemTableConfig config) { - memTableConfig_ = config; - setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle()); + final MemTableConfig memTableConfig) { + setMemTableFactory( + nativeHandle_, memTableConfig.newMemTableFactoryHandle()); + this.memTableConfig_ = memTableConfig; return this; } @@ -500,11 +478,16 @@ public class ColumnFamilyOptions extends RocksObject return memTableFactoryName(nativeHandle_); } + @Override + public TableFormatConfig tableFormatConfig() { + return this.tableFormatConfig_; + } + @Override public ColumnFamilyOptions setTableFormatConfig( - final TableFormatConfig config) { - tableFormatConfig_ = config; - setTableFactory(nativeHandle_, config.newTableFactoryHandle()); + final TableFormatConfig tableFormatConfig) { + setTableFactory(nativeHandle_, tableFormatConfig.newTableFactoryHandle()); + this.tableFormatConfig_ = tableFormatConfig; return this; } @@ -677,6 +660,81 @@ public class ColumnFamilyOptions extends RocksObject return paranoidFileChecks(nativeHandle_); } + @Override + public ColumnFamilyOptions setMaxWriteBufferNumberToMaintain( + final int maxWriteBufferNumberToMaintain) { + setMaxWriteBufferNumberToMaintain( + nativeHandle_, maxWriteBufferNumberToMaintain); + return this; + } + + @Override + public int maxWriteBufferNumberToMaintain() { + return maxWriteBufferNumberToMaintain(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setCompactionPriority( + final CompactionPriority compactionPriority) { + setCompactionPriority(nativeHandle_, compactionPriority.getValue()); + return this; + } + + @Override + public CompactionPriority compactionPriority() { + return CompactionPriority.getCompactionPriority( + compactionPriority(nativeHandle_)); + } + + @Override + public ColumnFamilyOptions setReportBgIoStats(final boolean reportBgIoStats) { + setReportBgIoStats(nativeHandle_, reportBgIoStats); + return this; + } + + @Override + public boolean reportBgIoStats() { + return reportBgIoStats(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setCompactionOptionsUniversal( + final CompactionOptionsUniversal compactionOptionsUniversal) { + setCompactionOptionsUniversal(nativeHandle_, + compactionOptionsUniversal.nativeHandle_); + this.compactionOptionsUniversal_ = compactionOptionsUniversal; + return this; + } + + @Override + public CompactionOptionsUniversal compactionOptionsUniversal() { + return this.compactionOptionsUniversal_; + } + + @Override + public ColumnFamilyOptions setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) { + setCompactionOptionsFIFO(nativeHandle_, + compactionOptionsFIFO.nativeHandle_); + this.compactionOptionsFIFO_ = compactionOptionsFIFO; + return this; + } + + @Override + public CompactionOptionsFIFO compactionOptionsFIFO() { + return this.compactionOptionsFIFO_; + } + + @Override + public ColumnFamilyOptions setForceConsistencyChecks(final boolean forceConsistencyChecks) { + 
setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks); + return this; + } + + @Override + public boolean forceConsistencyChecks() { + return forceConsistencyChecks(nativeHandle_); + } + /** *

Private constructor to be used by * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}

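A hedged sketch exercising several of the setters added to ColumnFamilyOptions in this patch; the enum constants are assumed from the accompanying CompressionType and CompactionPriority classes:

final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
    .optimizeForSmallDb()
    .setBottommostCompressionType(CompressionType.LZ4_COMPRESSION)
    .setCompactionPriority(CompactionPriority.MinOverlappingRatio)
    .setForceConsistencyChecks(true);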
@@ -693,6 +751,7 @@ public class ColumnFamilyOptions extends RocksObject private static native long newColumnFamilyOptions(); @Override protected final native void disposeInternal(final long handle); + private native void optimizeForSmallDb(final long handle); private native void optimizeForPointLookup(long handle, long blockCacheSizeMb); private native void optimizeLevelStyleCompaction(long handle, @@ -720,6 +779,11 @@ public class ColumnFamilyOptions extends RocksObject private native void setCompressionPerLevel(long handle, byte[] compressionLevels); private native byte[] compressionPerLevel(long handle); + private native void setBottommostCompressionType(long handle, + byte bottommostCompressionType); + private native byte bottommostCompressionType(long handle); + private native void setCompressionOptions(long handle, + long compressionOptionsHandle); private native void useFixedLengthPrefixExtractor( long handle, int prefixLength); private native void useCappedPrefixExtractor( @@ -753,15 +817,6 @@ public class ColumnFamilyOptions extends RocksObject private native double maxBytesForLevelMultiplier(long handle); private native void setMaxCompactionBytes(long handle, long maxCompactionBytes); private native long maxCompactionBytes(long handle); - private native void setSoftRateLimit( - long handle, double softRateLimit); - private native double softRateLimit(long handle); - private native void setHardRateLimit( - long handle, double hardRateLimit); - private native double hardRateLimit(long handle); - private native void setRateLimitDelayMaxMilliseconds( - long handle, int rateLimitDelayMaxMilliseconds); - private native int rateLimitDelayMaxMilliseconds(long handle); private native void setArenaBlockSize( long handle, long arenaBlockSize) throws IllegalArgumentException; @@ -774,9 +829,6 @@ public class ColumnFamilyOptions extends RocksObject private native void setMaxTableFilesSizeFIFO( long handle, long max_table_files_size); private native long maxTableFilesSizeFIFO(long handle); - private native void setPurgeRedundantKvsWhileFlush( - long handle, boolean purgeRedundantKvsWhileFlush); - private native boolean purgeRedundantKvsWhileFlush(long handle); private native void setMaxSequentialSkipInIterations( long handle, long maxSequentialSkipInIterations); private native long maxSequentialSkipInIterations(long handle); @@ -828,9 +880,30 @@ public class ColumnFamilyOptions extends RocksObject private native void setParanoidFileChecks(long handle, boolean paranoidFileChecks); private native boolean paranoidFileChecks(long handle); + private native void setMaxWriteBufferNumberToMaintain(final long handle, + final int maxWriteBufferNumberToMaintain); + private native int maxWriteBufferNumberToMaintain(final long handle); + private native void setCompactionPriority(final long handle, + final byte compactionPriority); + private native byte compactionPriority(final long handle); + private native void setReportBgIoStats(final long handle, + final boolean reportBgIoStats); + private native boolean reportBgIoStats(final long handle); + private native void setCompactionOptionsUniversal(final long handle, + final long compactionOptionsUniversalHandle); + private native void setCompactionOptionsFIFO(final long handle, + final long compactionOptionsFIFOHandle); + private native void setForceConsistencyChecks(final long handle, + final boolean forceConsistencyChecks); + private native boolean forceConsistencyChecks(final long handle); + + // instance variables + private MemTableConfig memTableConfig_; 
+ private TableFormatConfig tableFormatConfig_; + private AbstractComparator> comparator_; + private AbstractCompactionFilter> compactionFilter_; + private CompactionOptionsUniversal compactionOptionsUniversal_; + private CompactionOptionsFIFO compactionOptionsFIFO_; + private CompressionOptions compressionOptions_; - MemTableConfig memTableConfig_; - TableFormatConfig tableFormatConfig_; - AbstractComparator> comparator_; - AbstractCompactionFilter> compactionFilter_; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java index 4d429b0e7..789015179 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -5,18 +5,26 @@ package org.rocksdb; -import java.util.List; +public interface ColumnFamilyOptionsInterface + + extends AdvancedColumnFamilyOptionsInterface { -public interface ColumnFamilyOptionsInterface { + /** + * Use this if your DB is very small (like under 1GB) and you don't want to + * spend lots of memory for memtables. + * + * @return the instance of the current object. + */ + T optimizeForSmallDb(); /** * Use this if you don't need to keep the data sorted, i.e. you'll never use * an iterator, only Put() and Get() API calls * * @param blockCacheSizeMb Block cache size in MB - * @return the instance of the current Object. + * @return the instance of the current object. */ - ColumnFamilyOptionsInterface optimizeForPointLookup(long blockCacheSizeMb); + T optimizeForPointLookup(long blockCacheSizeMb); /** *

Default values for some parameters in ColumnFamilyOptions are not @@ -29,9 +37,9 @@ public interface ColumnFamilyOptionsInterface { *

Note: we might use more memory than memtable_memory_budget during high * write rate period

* - * @return the instance of the current Object. + * @return the instance of the current object. */ - ColumnFamilyOptionsInterface optimizeLevelStyleCompaction(); + T optimizeLevelStyleCompaction(); /** *

Default values for some parameters in ColumnFamilyOptions are not @@ -45,9 +53,10 @@ public interface ColumnFamilyOptionsInterface { * write rate period

* * @param memtableMemoryBudget memory budget in bytes - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object optimizeLevelStyleCompaction(long memtableMemoryBudget); + T optimizeLevelStyleCompaction( + long memtableMemoryBudget); /** *

Default values for some parameters in ColumnFamilyOptions are not @@ -64,9 +73,9 @@ public interface ColumnFamilyOptionsInterface { *

Note: we might use more memory than memtable_memory_budget during high * write rate period

* - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object optimizeUniversalStyleCompaction(); + T optimizeUniversalStyleCompaction(); /** *

Default values for some parameters in ColumnFamilyOptions are not @@ -84,9 +93,10 @@ public interface ColumnFamilyOptionsInterface { * write rate period

* * @param memtableMemoryBudget memory budget in bytes - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object optimizeUniversalStyleCompaction(long memtableMemoryBudget); + T optimizeUniversalStyleCompaction( + long memtableMemoryBudget); /** * Set {@link BuiltinComparator} to be used with RocksDB. @@ -95,9 +105,10 @@ public interface ColumnFamilyOptionsInterface { * * Default: BytewiseComparator. * @param builtinComparator a {@link BuiltinComparator} type. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setComparator(BuiltinComparator builtinComparator); + T setComparator( + BuiltinComparator builtinComparator); /** * Use the specified comparator for key ordering. @@ -109,9 +120,10 @@ public interface ColumnFamilyOptionsInterface { * Comparator instance can be re-used in multiple options instances. * * @param comparator java instance. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setComparator(AbstractComparator> comparator); + T setComparator( + AbstractComparator> comparator); /** *

Set the merge operator to be used for merging two merge operands @@ -124,9 +136,9 @@ public interface ColumnFamilyOptionsInterface { * The merge function is specified by name and must be one of the * standard merge operators provided by RocksDB. The available * operators are "put", "uint64add", "stringappend" and "stringappendtest". - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setMergeOperatorName(String name); + T setMergeOperatorName(String name); /** *

Set the merge operator to be used for merging two different key/value @@ -135,38 +147,9 @@ public interface ColumnFamilyOptionsInterface { * to the same key are found in the database.

* * @param mergeOperator {@link MergeOperator} instance. - * @return the instance of the current Object. - */ - Object setMergeOperator(MergeOperator mergeOperator); - - /** - * The minimum number of write buffers that will be merged together - * before writing to storage. If set to 1, then - * all write buffers are flushed to L0 as individual files and this increases - * read amplification because a get request has to check in all of these - * files. Also, an in-memory merge may result in writing lesser - * data to storage if there are duplicate records in each of these - * individual write buffers. Default: 1 - * - * @param minWriteBufferNumberToMerge the minimum number of write buffers - * that will be merged together. - * @return the reference to the current option. + * @return the instance of the current object. */ - Object setMinWriteBufferNumberToMerge( - int minWriteBufferNumberToMerge); - - /** - * The minimum number of write buffers that will be merged together - * before writing to storage. If set to 1, then - * all write buffers are flushed to L0 as individual files and this increases - * read amplification because a get request has to check in all of these - * files. Also, an in-memory merge may result in writing lesser - * data to storage if there are duplicate records in each of these - * individual write buffers. Default: 1 - * - * @return the minimum number of write buffers that will be merged together. - */ - int minWriteBufferNumberToMerge(); + T setMergeOperator(MergeOperator mergeOperator); /** * This prefix-extractor uses the first n bytes of a key as its prefix. @@ -179,8 +162,7 @@ public interface ColumnFamilyOptionsInterface { * @param n use the first n bytes of a key as its prefix. * @return the reference to the current option. */ - Object useFixedLengthPrefixExtractor(int n); - + T useFixedLengthPrefixExtractor(int n); /** * Same as fixed length prefix extractor, except that when slice is @@ -189,102 +171,7 @@ public interface ColumnFamilyOptionsInterface { * @param n use the first n bytes of a key as its prefix. * @return the reference to the current option. */ - Object useCappedPrefixExtractor(int n); - - /** - * Compress blocks using the specified compression algorithm. This - * parameter can be changed dynamically. - * - * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. - * - * @param compressionType Compression Type. - * @return the reference to the current option. - */ - Object setCompressionType(CompressionType compressionType); - - /** - * Compress blocks using the specified compression algorithm. This - * parameter can be changed dynamically. - * - * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. - * - * @return Compression type. - */ - CompressionType compressionType(); - - /** - *

Different levels can have different compression - * policies. There are cases where most lower levels - * would like to use quick compression algorithms while - * the higher levels (which have more data) use - * compression algorithms that have better compression - * but could be slower. This array, if non-empty, should - * have an entry for each level of the database; - * these override the value specified in the previous - * field 'compression'.

- * - * NOTICE - *

If {@code level_compaction_dynamic_level_bytes=true}, - * {@code compression_per_level[0]} still determines {@code L0}, - * but other elements of the array are based on base level - * (the level {@code L0} files are merged to), and may not - * match the level users see from info log for metadata. - *

- *

If {@code L0} files are merged to {@code level - n}, - * then, for {@code i>0}, {@code compression_per_level[i]} - * determines compaction type for level {@code n+i-1}.

- * - * Example - *

For example, if we have 5 levels, and we determine to - * merge {@code L0} data to {@code L4} (which means {@code L1..L3} - * will be empty), then the new files go to {@code L4} uses - * compression type {@code compression_per_level[1]}.

- * - *

If now {@code L0} is merged to {@code L2}. Data goes to - * {@code L2} will be compressed according to - * {@code compression_per_level[1]}, {@code L3} using - * {@code compression_per_level[2]}and {@code L4} using - * {@code compression_per_level[3]}. Compaction for each - * level can change when data grows.

- * - *

Default: empty

- * - * @param compressionLevels list of - * {@link org.rocksdb.CompressionType} instances. - * - * @return the reference to the current option. - */ - Object setCompressionPerLevel( - List compressionLevels); - - /** - *

Return the currently set {@link org.rocksdb.CompressionType} - * per instances.

- * - *

See: {@link #setCompressionPerLevel(java.util.List)}

- * - * @return list of {@link org.rocksdb.CompressionType} - * instances. - */ - List compressionPerLevel(); - - /** - * Set the number of levels for this database - * If level-styled compaction is used, then this number determines - * the total number of levels. - * - * @param numLevels the number of levels. - * @return the reference to the current option. - */ - Object setNumLevels(int numLevels); - - /** - * If level-styled compaction is used, then this number determines - * the total number of levels. - * - * @return the number of levels. - */ - int numLevels(); + T useCappedPrefixExtractor(int n); /** * Number of files to trigger level-0 compaction. A value < 0 means that @@ -294,7 +181,7 @@ public interface ColumnFamilyOptionsInterface { * @param numFiles the number of files in level-0 to trigger compaction. * @return the reference to the current option. */ - Object setLevelZeroFileNumCompactionTrigger( + T setLevelZeroFileNumCompactionTrigger( int numFiles); /** @@ -315,7 +202,7 @@ public interface ColumnFamilyOptionsInterface { * @param numFiles soft limit on number of level-0 files. * @return the reference to the current option. */ - Object setLevelZeroSlowdownWritesTrigger( + T setLevelZeroSlowdownWritesTrigger( int numFiles); /** @@ -333,7 +220,7 @@ public interface ColumnFamilyOptionsInterface { * @param numFiles the hard limit of the number of level-0 files. * @return the reference to the current option. */ - Object setLevelZeroStopWritesTrigger(int numFiles); + T setLevelZeroStopWritesTrigger(int numFiles); /** * Maximum number of level-0 files. We stop writes at this point. @@ -342,111 +229,6 @@ public interface ColumnFamilyOptionsInterface { */ int levelZeroStopWritesTrigger(); - /** - * This does nothing anymore. Deprecated. - * - * @param maxMemCompactionLevel Unused. - * - * @return the reference to the current option. - */ - @Deprecated - Object setMaxMemCompactionLevel( - int maxMemCompactionLevel); - - /** - * This does nothing anymore. Deprecated. - * - * @return Always returns 0. - */ - @Deprecated - int maxMemCompactionLevel(); - - /** - *

If {@code true}, RocksDB will pick target size of each level - * dynamically. We will pick a base level b >= 1. L0 will be - * directly merged into level b, instead of always into level 1. - * Level 1 to b-1 need to be empty. We try to pick b and its target - * size so that

- * - *
- * 1. target size is in the range of - * (max_bytes_for_level_base / max_bytes_for_level_multiplier, - * max_bytes_for_level_base] - * 2. target size of the last level (level num_levels-1) equals to extra size - * of the level. - * - *

At the same time max_bytes_for_level_multiplier and - * max_bytes_for_level_multiplier_additional are still satisfied.

- * - *

With this option on, from an empty DB, we make last level the base - * level, which means merging L0 data into the last level, until it exceeds - * max_bytes_for_level_base. And then we make the second last level to be - * base level, to start to merge L0 data to second last level, with its - * target size to be {@code 1/max_bytes_for_level_multiplier} of the last - * levels extra size. After the data accumulates more so that we need to - * move the base level to the third last one, and so on.

- * - *

Example

- *

For example, assume {@code max_bytes_for_level_multiplier=10}, - * {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.

- * - *

Target sizes of level 1 to 5 starts with:

- * {@code [- - - - 10MB]} - *

with base level is level. Target sizes of level 1 to 4 are not applicable - * because they will not be used. - * Until the size of Level 5 grows to more than 10MB, say 11MB, we make - * base target to level 4 and now the targets looks like:

- * {@code [- - - 1.1MB 11MB]} - *

While data are accumulated, size targets are tuned based on actual data - * of level 5. When level 5 has 50MB of data, the target is like:

- * {@code [- - - 5MB 50MB]} - *

Until level 5's actual size is more than 100MB, say 101MB. Now if we - * keep level 4 to be the base level, its target size needs to be 10.1MB, - * which doesn't satisfy the target size range. So now we make level 3 - * the target size and the target sizes of the levels look like:

- * {@code [- - 1.01MB 10.1MB 101MB]} - *

In the same way, while level 5 further grows, all levels' targets grow, - * like

- * {@code [- - 5MB 50MB 500MB]} - *

Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the - * base level and make levels' target sizes like this:

- * {@code [- 1.001MB 10.01MB 100.1MB 1001MB]} - *

and go on...

- * - *

By doing it, we give {@code max_bytes_for_level_multiplier} a priority - * against {@code max_bytes_for_level_base}, for a more predictable LSM tree - * shape. It is useful to limit worse case space amplification.

- * - *

{@code max_bytes_for_level_multiplier_additional} is ignored with - * this flag on.

- * - *

Turning this feature on or off for an existing DB can cause unexpected - * LSM tree structure so it's not recommended.

- * - *

Caution: this option is experimental

- * - *

Default: false

- * - * @param enableLevelCompactionDynamicLevelBytes boolean value indicating - * if {@code LevelCompactionDynamicLevelBytes} shall be enabled. - * @return the reference to the current option. - */ - Object setLevelCompactionDynamicLevelBytes( - boolean enableLevelCompactionDynamicLevelBytes); - - /** - *

Return if {@code LevelCompactionDynamicLevelBytes} is enabled. - *

- * - *

For further information see - * {@link #setLevelCompactionDynamicLevelBytes(boolean)}

- * - * @return boolean value indicating if - * {@code levelCompactionDynamicLevelBytes} is enabled. - */ - boolean levelCompactionDynamicLevelBytes(); - /** * The ratio between the total size of level-(L+1) files and the total * size of level-L files for all L. @@ -456,7 +238,8 @@ public interface ColumnFamilyOptionsInterface { * files and the total size of level-L files for all L. * @return the reference to the current option. */ - Object setMaxBytesForLevelMultiplier(double multiplier); + T setMaxBytesForLevelMultiplier( + double multiplier); /** * The ratio between the total size of level-(L+1) files and the total @@ -468,122 +251,6 @@ public interface ColumnFamilyOptionsInterface { */ double maxBytesForLevelMultiplier(); - /** - * Maximum size of each compaction (not guarantee) - * - * @param maxCompactionBytes the compaction size limit - * @return the reference to the current option. - */ - Object setMaxCompactionBytes(long maxCompactionBytes); - - /** - * Control maximum size of each compaction (not guaranteed) - * - * @return compaction size threshold - */ - long maxCompactionBytes(); - - /** - * Puts are delayed 0-1 ms when any level has a compaction score that exceeds - * soft_rate_limit. This is ignored when == 0.0. - * CONSTRAINT: soft_rate_limit ≤ hard_rate_limit. If this constraint does not - * hold, RocksDB will set soft_rate_limit = hard_rate_limit - * Default: 0 (disabled) - * - * @param softRateLimit the soft-rate-limit of a compaction score - * for put delay. - * @return the reference to the current option. - */ - Object setSoftRateLimit(double softRateLimit); - - /** - * Puts are delayed 0-1 ms when any level has a compaction score that exceeds - * soft_rate_limit. This is ignored when == 0.0. - * CONSTRAINT: soft_rate_limit ≤ hard_rate_limit. If this constraint does not - * hold, RocksDB will set soft_rate_limit = hard_rate_limit - * Default: 0 (disabled) - * - * @return soft-rate-limit for put delay. - */ - double softRateLimit(); - - /** - * Puts are delayed 1ms at a time when any level has a compaction score that - * exceeds hard_rate_limit. This is ignored when ≤ 1.0. - * Default: 0 (disabled) - * - * @param hardRateLimit the hard-rate-limit of a compaction score for put - * delay. - * @return the reference to the current option. - */ - Object setHardRateLimit(double hardRateLimit); - - /** - * Puts are delayed 1ms at a time when any level has a compaction score that - * exceeds hard_rate_limit. This is ignored when ≤ 1.0. - * Default: 0 (disabled) - * - * @return the hard-rate-limit of a compaction score for put delay. - */ - double hardRateLimit(); - - /** - * The maximum time interval a put will be stalled when hard_rate_limit - * is enforced. If 0, then there is no limit. - * Default: 1000 - * - * @param rateLimitDelayMaxMilliseconds the maximum time interval a put - * will be stalled. - * @return the reference to the current option. - */ - Object setRateLimitDelayMaxMilliseconds( - int rateLimitDelayMaxMilliseconds); - - /** - * The maximum time interval a put will be stalled when hard_rate_limit - * is enforced. If 0, then there is no limit. - * Default: 1000 - * - * @return the maximum time interval a put will be stalled when - * hard_rate_limit is enforced. - */ - int rateLimitDelayMaxMilliseconds(); - - /** - * Purge duplicate/deleted keys when a memtable is flushed to storage. - * Default: true - * - * @param purgeRedundantKvsWhileFlush true if purging keys is disabled. - * @return the reference to the current option. 
- */ - Object setPurgeRedundantKvsWhileFlush( - boolean purgeRedundantKvsWhileFlush); - - /** - * Purge duplicate/deleted keys when a memtable is flushed to storage. - * Default: true - * - * @return true if purging keys is disabled. - */ - boolean purgeRedundantKvsWhileFlush(); - - /** - * Set compaction style for DB. - * - * Default: LEVEL. - * - * @param compactionStyle Compaction style. - * @return the reference to the current option. - */ - Object setCompactionStyle(CompactionStyle compactionStyle); - - /** - * Compaction style for DB. - * - * @return Compaction style. - */ - CompactionStyle compactionStyle(); - /** * FIFO compaction option. * The oldest table file will be deleted @@ -591,9 +258,10 @@ public interface ColumnFamilyOptionsInterface { * The default value is 1GB (1 * 1024 * 1024 * 1024). * * @param maxTableFilesSize the size limit of the total sum of table files. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setMaxTableFilesSizeFIFO(long maxTableFilesSize); + T setMaxTableFilesSizeFIFO( + long maxTableFilesSize); /** * FIFO compaction option. @@ -605,15 +273,22 @@ public interface ColumnFamilyOptionsInterface { */ long maxTableFilesSizeFIFO(); + /** + * Get the config for mem-table. + * + * @return the mem-table config. + */ + MemTableConfig memTableConfig(); + /** * Set the config for mem-table. * - * @param config the mem-table config. - * @return the instance of the current Object. + * @param memTableConfig the mem-table config. + * @return the instance of the current object. * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms * while overflowing the underlying platform specific value. */ - Object setMemTableConfig(MemTableConfig config); + T setMemTableConfig(MemTableConfig memTableConfig); /** * Returns the name of the current mem table representation. @@ -624,13 +299,20 @@ public interface ColumnFamilyOptionsInterface { */ String memTableFactoryName(); + /** + * Get the config for table format. + * + * @return the table format config. + */ + TableFormatConfig tableFormatConfig(); + /** * Set the config for table format. * * @param config the table format config. - * @return the reference of the current Options. + * @return the reference of the current options. */ - Object setTableFormatConfig(TableFormatConfig config); + T setTableFormatConfig(TableFormatConfig config); /** * @return the name of the currently used table factory. @@ -638,98 +320,48 @@ public interface ColumnFamilyOptionsInterface { String tableFactoryName(); /** - * Allows thread-safe inplace updates. - * If inplace_callback function is not set, - * Put(key, new_value) will update inplace the existing_value iff - * * key exists in current memtable - * * new sizeof(new_value) ≤ sizeof(existing_value) - * * existing_value for that key is a put i.e. kTypeValue - * If inplace_callback function is set, check doc for inplace_callback. - * Default: false. + * Compression algorithm that will be used for the bottommost level that + * contain files. If level-compaction is used, this option will only affect + * levels after base level. * - * @param inplaceUpdateSupport true if thread-safe inplace updates - * are allowed. - * @return the reference to the current option. - */ - Object setInplaceUpdateSupport(boolean inplaceUpdateSupport); - - /** - * Allows thread-safe inplace updates. 
- * If inplace_callback function is not set, - * Put(key, new_value) will update inplace the existing_value iff - * * key exists in current memtable - * * new sizeof(new_value) ≤ sizeof(existing_value) - * * existing_value for that key is a put i.e. kTypeValue - * If inplace_callback function is set, check doc for inplace_callback. - * Default: false. + * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION} * - * @return true if thread-safe inplace updates are allowed. - */ - boolean inplaceUpdateSupport(); - - /** - * Control locality of bloom filter probes to improve cache miss rate. - * This option only applies to memtable prefix bloom and plaintable - * prefix bloom. It essentially limits the max number of cache lines each - * bloom filter check can touch. - * This optimization is turned off when set to 0. The number should never - * be greater than number of probes. This option can boost performance - * for in-memory workload but should use with care since it can cause - * higher false positive rate. - * Default: 0 + * @param bottommostCompressionType The compression type to use for the + * bottommost level * - * @param bloomLocality the level of locality of bloom-filter probes. - * @return the reference to the current option. + * @return the reference of the current options. */ - Object setBloomLocality(int bloomLocality); + T setBottommostCompressionType( + final CompressionType bottommostCompressionType); /** - * Control locality of bloom filter probes to improve cache miss rate. - * This option only applies to memtable prefix bloom and plaintable - * prefix bloom. It essentially limits the max number of cache lines each - * bloom filter check can touch. - * This optimization is turned off when set to 0. The number should never - * be greater than number of probes. This option can boost performance - * for in-memory workload but should use with care since it can cause - * higher false positive rate. - * Default: 0 + * Compression algorithm that will be used for the bottommost level that + * contain files. If level-compaction is used, this option will only affect + * levels after base level. + * + * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION} * - * @return the level of locality of bloom-filter probes. - * @see #setBloomLocality(int) + * @return The compression type used for the bottommost level */ - int bloomLocality(); + CompressionType bottommostCompressionType(); + /** - *

This flag specifies that the implementation should optimize the filters - * mainly for cases where keys are found rather than also optimize for keys - * missed. This would be used in cases where the application knows that - * there are very few misses or the performance in the case of misses is not - * important.

- * - *

For now, this flag allows us to not store filters for the last level i.e - * the largest level which contains data of the LSM store. For keys which - * are hits, the filters in this level are not useful because we will search - * for the data anyway.

+ * Set the different options for compression algorithms * - *

NOTE: the filters in other levels are still useful - * even for key hit because they tell us whether to look in that level or go - * to the higher level.

+ * @param compressionOptions The compression options * - *

Default: false

- * - * @param optimizeFiltersForHits boolean value indicating if this flag is set. - * @return the reference to the current option. + * @return the reference of the current options. */ - Object setOptimizeFiltersForHits(boolean optimizeFiltersForHits); + T setCompressionOptions( + CompressionOptions compressionOptions); /** - *

Returns the current state of the {@code optimize_filters_for_hits} - * setting.

+ * Get the different options for compression algorithms * - * @return boolean value indicating if the flag - * {@code optimize_filters_for_hits} was set. + * @return The compression options */ - boolean optimizeFiltersForHits(); + CompressionOptions compressionOptions(); /** * Default memtable memory budget used with the following methods: diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java new file mode 100644 index 000000000..f7925f58f --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -0,0 +1,50 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Options for FIFO Compaction + */ +public class CompactionOptionsFIFO extends RocksObject { + + public CompactionOptionsFIFO() { + super(newCompactionOptionsFIFO()); + } + + /** + * Once the total sum of table files reaches this, we will delete the oldest + * table file. + * + * Default: 1GB + * + * @param maxTableFilesSize The maximum size of the table files + * + * @return the reference to the current options. + */ + public CompactionOptionsFIFO setMaxTableFilesSize( + final long maxTableFilesSize) { + setMaxTableFilesSize(nativeHandle_, maxTableFilesSize); + return this; + } + + /** + * Once the total sum of table files reaches this, we will delete the oldest + * table file. + * + * Default: 1GB + * + * @return max table file size in bytes + */ + public long maxTableFilesSize() { + return maxTableFilesSize(nativeHandle_); + } + + private native void setMaxTableFilesSize(long handle, long maxTableFilesSize); + private native long maxTableFilesSize(long handle); + + private native static long newCompactionOptionsFIFO(); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java new file mode 100644 index 000000000..fdf3a4709 --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java @@ -0,0 +1,273 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Options for Universal Compaction + */ +public class CompactionOptionsUniversal extends RocksObject { + + public CompactionOptionsUniversal() { + super(newCompactionOptionsUniversal()); + } + + /** + * Percentage flexibility while comparing file size. If the candidate file(s) + * size is 1% smaller than the next file's size, then include the next file + * into this candidate set. + * + * Default: 1 + * + * @param sizeRatio The size ratio to use + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setSizeRatio(final int sizeRatio) { + setSizeRatio(nativeHandle_, sizeRatio); + return this; + } + + /** + * Percentage flexibility while comparing file size. If the candidate file(s) + * size is 1% smaller than the next file's size, then include the next file + * into this candidate set.
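 + *
 + * For example, with the default size ratio of 1 (1%), a candidate set
 + * totalling 100 bytes would also absorb a next file of up to 101 bytes,
 + * since the candidate set is within 1% of that file's size.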
+ * + * Default: 1 + * + * @return The size ratio in use + */ + public int sizeRatio() { + return sizeRatio(nativeHandle_); + } + + /** + * The minimum number of files in a single compaction run. + * + * Default: 2 + * + * @param minMergeWidth minimum number of files in a single compaction run + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setMinMergeWidth(final int minMergeWidth) { + setMinMergeWidth(nativeHandle_, minMergeWidth); + return this; + } + + /** + * The minimum number of files in a single compaction run. + * + * Default: 2 + * + * @return minimum number of files in a single compaction run + */ + public int minMergeWidth() { + return minMergeWidth(nativeHandle_); + } + + /** + * The maximum number of files in a single compaction run. + * + * Default: {@link Long#MAX_VALUE} + * + * @param maxMergeWidth maximum number of files in a single compaction run + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setMaxMergeWidth(final int maxMergeWidth) { + setMaxMergeWidth(nativeHandle_, maxMergeWidth); + return this; + } + + /** + * The maximum number of files in a single compaction run. + * + * Default: {@link Long#MAX_VALUE} + * + * @return maximum number of files in a single compaction run + */ + public int maxMergeWidth() { + return maxMergeWidth(nativeHandle_); + } + + /** + * The size amplification is defined as the amount (in percentage) of + * additional storage needed to store a single byte of data in the database. + * For example, a size amplification of 2% means that a database that + * contains 100 bytes of user-data may occupy up to 102 bytes of + * physical storage. By this definition, a fully compacted database has + * a size amplification of 0%. RocksDB uses the following heuristic + * to calculate size amplification: it assumes that all files excluding + * the earliest file contribute to the size amplification. + * + * Default: 200, which means that a 100 byte database could require up to + * 300 bytes of storage. + * + * @param maxSizeAmplificationPercent the amount of additional storage needed + * (as a percentage) to store a single byte in the database + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setMaxSizeAmplificationPercent( + final int maxSizeAmplificationPercent) { + setMaxSizeAmplificationPercent(nativeHandle_, maxSizeAmplificationPercent); + return this; + } + + /** + * The size amplification is defined as the amount (in percentage) of + * additional storage needed to store a single byte of data in the database. + * For example, a size amplification of 2% means that a database that + * contains 100 bytes of user-data may occupy up to 102 bytes of + * physical storage. By this definition, a fully compacted database has + * a size amplification of 0%. RocksDB uses the following heuristic + * to calculate size amplification: it assumes that all files excluding + * the earliest file contribute to the size amplification. + * + * Default: 200, which means that a 100 byte database could require up to + * 300 bytes of storage. + * + * @return the amount of additional storage needed (as a percentage) to store + * a single byte in the database + */ + public int maxSizeAmplificationPercent() { + return maxSizeAmplificationPercent(nativeHandle_); + } + + /** + * If this option is set to -1 (the default value), all the output files + * will follow the compression type specified.
+ * + * If this option is not negative, we will try to make sure compressed + * size is just above this value. In normal cases, at least this percentage + * of data will be compressed. + * + * When we are compacting to a new file, here is the criterion for whether + * it needs to be compressed: assume the following list of files sorted + * by generation time: + * A1...An B1...Bm C1...Ct + * where A1 is the newest and Ct is the oldest, and we are going to compact + * B1...Bm. We calculate the total size of all the files as total_size, as + * well as the total size of C1...Ct as total_C; the compaction output file + * will be compressed iff + * total_C / total_size < this percentage + * + * Default: -1 + * + * @param compressionSizePercent percentage of size for compression + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setCompressionSizePercent( + final int compressionSizePercent) { + setCompressionSizePercent(nativeHandle_, compressionSizePercent); + return this; + } + + /** + * If this option is set to -1 (the default value), all the output files + * will follow the compression type specified. + * + * If this option is not negative, we will try to make sure compressed + * size is just above this value. In normal cases, at least this percentage + * of data will be compressed. + * + * When we are compacting to a new file, here is the criterion for whether + * it needs to be compressed: assume the following list of files sorted + * by generation time: + * A1...An B1...Bm C1...Ct + * where A1 is the newest and Ct is the oldest, and we are going to compact + * B1...Bm. We calculate the total size of all the files as total_size, as + * well as the total size of C1...Ct as total_C; the compaction output file + * will be compressed iff + * total_C / total_size < this percentage + * + * Default: -1 + * + * @return percentage of size for compression + */ + public int compressionSizePercent() { + return compressionSizePercent(nativeHandle_); + } + + /** + * The algorithm used to stop picking files into a single compaction run + * + * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} + * + * @param compactionStopStyle The compaction algorithm + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setStopStyle( + final CompactionStopStyle compactionStopStyle) { + setStopStyle(nativeHandle_, compactionStopStyle.getValue()); + return this; + } + + /** + * The algorithm used to stop picking files into a single compaction run + * + * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} + * + * @return The compaction algorithm + */ + public CompactionStopStyle stopStyle() { + return CompactionStopStyle.getCompactionStopStyle(stopStyle(nativeHandle_)); + } + + /** + * Option to optimize the universal multi-level compaction by enabling + * trivial move for non-overlapping files. + * + * Default: false + * + * @param allowTrivialMove true if trivial move is allowed + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setAllowTrivialMove( + final boolean allowTrivialMove) { + setAllowTrivialMove(nativeHandle_, allowTrivialMove); + return this; + } + + /** + * Option to optimize the universal multi-level compaction by enabling + * trivial move for non-overlapping files.
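 + * A trivial move re-links an input file into the output level instead of
 + * rewriting it, which is only safe for files that do not overlap others
 + * in the key range.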
+ * + * Default: false + * + * @return true if trivial move is allowed + */ + public boolean allowTrivialMove() { + return allowTrivialMove(nativeHandle_); + } + + private native static long newCompactionOptionsUniversal(); + @Override protected final native void disposeInternal(final long handle); + + private native void setSizeRatio(final long handle, final int sizeRatio); + private native int sizeRatio(final long handle); + private native void setMinMergeWidth( + final long handle, final int minMergeWidth); + private native int minMergeWidth(final long handle); + private native void setMaxMergeWidth( + final long handle, final int maxMergeWidth); + private native int maxMergeWidth(final long handle); + private native void setMaxSizeAmplificationPercent( + final long handle, final int maxSizeAmplificationPercent); + private native int maxSizeAmplificationPercent(final long handle); + private native void setCompressionSizePercent( + final long handle, final int compressionSizePercent); + private native int compressionSizePercent(final long handle); + private native void setStopStyle( + final long handle, final byte stopStyle); + private native byte stopStyle(final long handle); + private native void setAllowTrivialMove( + final long handle, final boolean allowTrivialMove); + private native boolean allowTrivialMove(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/CompactionPriority.java b/java/src/main/java/org/rocksdb/CompactionPriority.java new file mode 100644 index 000000000..17dcb9935 --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompactionPriority.java @@ -0,0 +1,73 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Compaction Priorities + */ +public enum CompactionPriority { + + /** + * Slightly prioritize larger files by size compensated by #deletes + */ + ByCompensatedSize((byte)0x0), + + /** + * First compact files whose data's latest update time is oldest. + * Try this if you only update some hot keys in small ranges. + */ + OldestLargestSeqFirst((byte)0x1), + + /** + * First compact files whose range hasn't been compacted to the next level + * for the longest. If your updates are random across the key space, + * write amplification is slightly better with this option. + */ + OldestSmallestSeqFirst((byte)0x2), + + /** + * First compact files whose ratio between overlapping size in next level + * and its size is the smallest. In many cases it can reduce write + * amplification. + */ + MinOverlappingRatio((byte)0x3); + + + private final byte value; + + CompactionPriority(final byte value) { + this.value = value; + } + + /** + * Returns the byte value of the enumeration value + * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get CompactionPriority by byte value. + * + * @param value byte representation of CompactionPriority. + * + * @return {@link org.rocksdb.CompactionPriority} instance. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided.
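 + *
 + * For example, {@code getCompactionPriority((byte) 0x3)} returns
 + * {@link #MinOverlappingRatio}.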
+ */ + public static CompactionPriority getCompactionPriority(final byte value) { + for (final CompactionPriority compactionPriority : + CompactionPriority.values()) { + if (compactionPriority.getValue() == value) { + return compactionPriority; + } + } + throw new IllegalArgumentException( + "Illegal value provided for CompactionPriority."); + } +} diff --git a/java/src/main/java/org/rocksdb/CompactionStopStyle.java b/java/src/main/java/org/rocksdb/CompactionStopStyle.java new file mode 100644 index 000000000..13cc873c3 --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompactionStopStyle.java @@ -0,0 +1,54 @@ +package org.rocksdb; + +/** + * Algorithm used to make a compaction request stop picking new files + * into a single compaction run + */ +public enum CompactionStopStyle { + + /** + * Pick files of similar size + */ + CompactionStopStyleSimilarSize((byte)0x0), + + /** + * Total size of picked files > next file + */ + CompactionStopStyleTotalSize((byte)0x1); + + + private final byte value; + + CompactionStopStyle(final byte value) { + this.value = value; + } + + /** + * Returns the byte value of the enumeration value + * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get CompactionStopStyle by byte value. + * + * @param value byte representation of CompactionStopStyle. + * + * @return {@link org.rocksdb.CompactionStopStyle} instance. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static CompactionStopStyle getCompactionStopStyle(final byte value) { + for (final CompactionStopStyle compactionStopStyle : + CompactionStopStyle.values()) { + if (compactionStopStyle.getValue() == value) { + return compactionStopStyle; + } + } + throw new IllegalArgumentException( + "Illegal value provided for CompactionStopStyle."); + } +} diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java new file mode 100644 index 000000000..9fe21e3d3 --- /dev/null +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -0,0 +1,85 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Options for Compression + */ +public class CompressionOptions extends RocksObject { + + public CompressionOptions() { + super(newCompressionOptions()); + } + + public CompressionOptions setWindowBits(final int windowBits) { + setWindowBits(nativeHandle_, windowBits); + return this; + } + + public int windowBits() { + return windowBits(nativeHandle_); + } + + public CompressionOptions setLevel(final int level) { + setLevel(nativeHandle_, level); + return this; + } + + public int level() { + return level(nativeHandle_); + } + + public CompressionOptions setStrategy(final int strategy) { + setStrategy(nativeHandle_, strategy); + return this; + } + + public int strategy() { + return strategy(nativeHandle_); + } + + /** + * Maximum size of dictionary used to prime the compression library. Currently + * this dictionary will be constructed by sampling the first output file in a + * subcompaction when the target level is bottommost.
This dictionary will be + * loaded into the compression library before compressing/uncompressing each + * data block of subsequent files in the subcompaction. Effectively, this + * improves compression ratios when there are repetitions across data blocks. + * + * A value of 0 indicates the feature is disabled. + * + * Default: 0. + * + * @param maxDictBytes Maximum bytes to use for the dictionary + * + * @return the reference to the current options + */ + public CompressionOptions setMaxDictBytes(final int maxDictBytes) { + setMaxDictBytes(nativeHandle_, maxDictBytes); + return this; + } + + /** + * Maximum size of dictionary used to prime the compression library. + * + * @return The maximum bytes to use for the dictionary + */ + public int maxDictBytes() { + return maxDictBytes(nativeHandle_); + } + + private native static long newCompressionOptions(); + @Override protected final native void disposeInternal(final long handle); + + private native void setWindowBits(final long handle, final int windowBits); + private native int windowBits(final long handle); + private native void setLevel(final long handle, final int level); + private native int level(final long handle); + private native void setStrategy(final long handle, final int strategy); + private native int strategy(final long handle); + private native void setMaxDictBytes(final long handle, final int maxDictBytes); + private native int maxDictBytes(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java index b4d86166e..bcb2e8ba2 100644 --- a/java/src/main/java/org/rocksdb/CompressionType.java +++ b/java/src/main/java/org/rocksdb/CompressionType.java @@ -15,12 +15,15 @@ package org.rocksdb; */ public enum CompressionType { - NO_COMPRESSION((byte) 0, null), - SNAPPY_COMPRESSION((byte) 1, "snappy"), - ZLIB_COMPRESSION((byte) 2, "z"), - BZLIB2_COMPRESSION((byte) 3, "bzip2"), - LZ4_COMPRESSION((byte) 4, "lz4"), - LZ4HC_COMPRESSION((byte) 5, "lz4hc"); + NO_COMPRESSION((byte) 0x0, null), + SNAPPY_COMPRESSION((byte) 0x1, "snappy"), + ZLIB_COMPRESSION((byte) 0x2, "z"), + BZLIB2_COMPRESSION((byte) 0x3, "bzip2"), + LZ4_COMPRESSION((byte) 0x4, "lz4"), + LZ4HC_COMPRESSION((byte) 0x5, "lz4hc"), + XPRESS_COMPRESSION((byte) 0x6, "xpress"), + ZSTD_COMPRESSION((byte)0x7, "zstd"), + DISABLE_COMPRESSION_OPTION((byte)0x7F, null); /** *

Get the CompressionType enumeration value by @@ -49,20 +52,22 @@ public enum CompressionType { *

Get the CompressionType enumeration value by * passing the byte identifier to this method.

* - *

If library cannot be found the enumeration - * value {@code NO_COMPRESSION} will be returned.

- * * @param byteIdentifier of CompressionType. * * @return CompressionType instance. + * + * @throws IllegalArgumentException If CompressionType cannot be found for the + * provided byteIdentifier */ public static CompressionType getCompressionType(byte byteIdentifier) { - for (CompressionType compressionType : CompressionType.values()) { + for (final CompressionType compressionType : CompressionType.values()) { if (compressionType.getValue() == byteIdentifier) { return compressionType; } } - return CompressionType.NO_COMPRESSION; + + throw new IllegalArgumentException( + "Illegal value provided for CompressionType."); } /** @@ -84,9 +89,9 @@ public enum CompressionType { return libraryName_; } - private CompressionType(byte value, final String libraryName) { - value_ = value; - libraryName_ = libraryName; + CompressionType(final byte value, final String libraryName) { + value_ = value; + libraryName_ = libraryName; } private final byte value_; diff --git a/java/src/main/java/org/rocksdb/DBOptions.java b/java/src/main/java/org/rocksdb/DBOptions.java index 635867f7a..776d22309 100644 --- a/java/src/main/java/org/rocksdb/DBOptions.java +++ b/java/src/main/java/org/rocksdb/DBOptions.java @@ -5,7 +5,8 @@ package org.rocksdb; -import java.util.Properties; +import java.nio.file.Paths; +import java.util.*; /** * DBOptions to control the behavior of a database. It will be used @@ -14,7 +15,8 @@ import java.util.Properties; * If {@link #dispose()} function is not called, then it will be GC'd * automatically and native resources will be released as part of the process. */ -public class DBOptions extends RocksObject implements DBOptionsInterface { +public class DBOptions + extends RocksObject implements DBOptionsInterface<DBOptions> { static { RocksDB.loadLibrary(); } @@ -71,6 +73,12 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return dbOptions; } + @Override + public DBOptions optimizeForSmallDb() { + optimizeForSmallDb(nativeHandle_); + return this; + } + @Override public DBOptions setIncreaseParallelism( final int totalThreads) { @@ -106,6 +114,18 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return createMissingColumnFamilies(nativeHandle_); } + @Override + public DBOptions setEnv(final Env env) { + setEnv(nativeHandle_, env.nativeHandle_); + this.env_ = env; + return this; + } + + @Override + public Env getEnv() { + return env_; + } + @Override public DBOptions setErrorIfExists( final boolean errorIfExists) { @@ -178,6 +198,19 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return maxOpenFiles(nativeHandle_); } + @Override + public DBOptions setMaxFileOpeningThreads(final int maxFileOpeningThreads) { + assert(isOwningHandle()); + setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads); + return this; + } + + @Override + public int maxFileOpeningThreads() { + assert(isOwningHandle()); + return maxFileOpeningThreads(nativeHandle_); + } + @Override public DBOptions setMaxTotalWalSize( final long maxTotalWalSize) { @@ -226,6 +259,43 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return useFsync(nativeHandle_); } + @Override + public DBOptions setDbPaths(final Collection<DbPath> dbPaths) { + assert(isOwningHandle()); + + final int len = dbPaths.size(); + final String paths[] = new String[len]; + final long targetSizes[] = new long[len]; + + int i = 0; + for(final DbPath dbPath : dbPaths) { + paths[i] = dbPath.path.toString(); + targetSizes[i] = dbPath.targetSize; + i++; + } + 
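+ // hand the flattened parallel arrays (path, target size) over the JNI boundary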
setDbPaths(nativeHandle_, paths, targetSizes); + return this; + } + + @Override + public List<DbPath> dbPaths() { + final int len = (int)dbPathsLen(nativeHandle_); + if(len == 0) { + return Collections.emptyList(); + } else { + final String paths[] = new String[len]; + final long targetSizes[] = new long[len]; + + dbPaths(nativeHandle_, paths, targetSizes); + + final List<DbPath> dbPaths = new ArrayList<>(); + for(int i = 0; i < len; i++) { + dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i])); + } + return dbPaths; + } + } + @Override public DBOptions setDbLogDir( final String dbLogDir) { @@ -363,6 +433,19 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return keepLogFileNum(nativeHandle_); } + @Override + public DBOptions setRecycleLogFileNum(final long recycleLogFileNum) { + assert(isOwningHandle()); + setRecycleLogFileNum(nativeHandle_, recycleLogFileNum); + return this; + } + + @Override + public long recycleLogFileNum() { + assert(isOwningHandle()); + return recycleLogFileNum(nativeHandle_); + } + @Override public DBOptions setMaxManifestFileSize( final long maxManifestFileSize) { @@ -461,6 +544,19 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return useDirectWrites(nativeHandle_); } + @Override + public DBOptions setAllowFAllocate(final boolean allowFAllocate) { + assert(isOwningHandle()); + setAllowFAllocate(nativeHandle_, allowFAllocate); + return this; + } + + @Override + public boolean allowFAllocate() { + assert(isOwningHandle()); + return allowFAllocate(nativeHandle_); + } + @Override public DBOptions setAllowMmapReads( final boolean allowMmapReads) { @@ -530,6 +626,86 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return adviseRandomOnOpen(nativeHandle_); } + @Override + public DBOptions setDbWriteBufferSize(final long dbWriteBufferSize) { + assert(isOwningHandle()); + setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize); + return this; + } + + @Override + public long dbWriteBufferSize() { + assert(isOwningHandle()); + return dbWriteBufferSize(nativeHandle_); + } + + @Override + public DBOptions setAccessHintOnCompactionStart(final AccessHint accessHint) { + assert(isOwningHandle()); + setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue()); + return this; + } + + @Override + public AccessHint accessHintOnCompactionStart() { + assert(isOwningHandle()); + return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_)); + } + + @Override + public DBOptions setNewTableReaderForCompactionInputs( + final boolean newTableReaderForCompactionInputs) { + assert(isOwningHandle()); + setNewTableReaderForCompactionInputs(nativeHandle_, + newTableReaderForCompactionInputs); + return this; + } + + @Override + public boolean newTableReaderForCompactionInputs() { + assert(isOwningHandle()); + return newTableReaderForCompactionInputs(nativeHandle_); + } + + @Override + public DBOptions setCompactionReadaheadSize(final long compactionReadaheadSize) { + assert(isOwningHandle()); + setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize); + return this; + } + + @Override + public long compactionReadaheadSize() { + assert(isOwningHandle()); + return compactionReadaheadSize(nativeHandle_); + } + + @Override + public DBOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) { + assert(isOwningHandle()); + setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize); + return this; + } + + @Override + public long randomAccessMaxBufferSize() { + 
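+ // fail fast if the native handle has already been disposed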
assert(isOwningHandle()); + return randomAccessMaxBufferSize(nativeHandle_); + } + + @Override + public DBOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) { + assert(isOwningHandle()); + setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize); + return this; + } + + @Override + public long writableFileMaxBufferSize() { + assert(isOwningHandle()); + return writableFileMaxBufferSize(nativeHandle_); + } + @Override public DBOptions setUseAdaptiveMutex( final boolean useAdaptiveMutex) { @@ -558,10 +734,49 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { } @Override - public void setAllowConcurrentMemtableWrite( + public DBOptions setWalBytesPerSync(final long walBytesPerSync) { + assert(isOwningHandle()); + setWalBytesPerSync(nativeHandle_, walBytesPerSync); + return this; + } + + @Override + public long walBytesPerSync() { + assert(isOwningHandle()); + return walBytesPerSync(nativeHandle_); + } + + @Override + public DBOptions setEnableThreadTracking(final boolean enableThreadTracking) { + assert(isOwningHandle()); + setEnableThreadTracking(nativeHandle_, enableThreadTracking); + return this; + } + + @Override + public boolean enableThreadTracking() { + assert(isOwningHandle()); + return enableThreadTracking(nativeHandle_); + } + + @Override + public DBOptions setDelayedWriteRate(final long delayedWriteRate) { + assert(isOwningHandle()); + setDelayedWriteRate(nativeHandle_, delayedWriteRate); + return this; + } + + @Override + public long delayedWriteRate(){ + return delayedWriteRate(nativeHandle_); + } + + @Override + public DBOptions setAllowConcurrentMemtableWrite( final boolean allowConcurrentMemtableWrite) { setAllowConcurrentMemtableWrite(nativeHandle_, allowConcurrentMemtableWrite); + return this; } @Override @@ -570,10 +785,11 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { } @Override - public void setEnableWriteThreadAdaptiveYield( + public DBOptions setEnableWriteThreadAdaptiveYield( final boolean enableWriteThreadAdaptiveYield) { setEnableWriteThreadAdaptiveYield(nativeHandle_, enableWriteThreadAdaptiveYield); + return this; } @Override @@ -582,8 +798,9 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { } @Override - public void setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) { + public DBOptions setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) { setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec); + return this; } @Override @@ -592,8 +809,9 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { } @Override - public void setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) { + public DBOptions setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) { setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec); + return this; } @Override @@ -601,17 +819,114 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return writeThreadSlowYieldUsec(nativeHandle_); } + @Override + public DBOptions setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) { + assert(isOwningHandle()); + setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen); + return this; + } + + @Override + public boolean skipStatsUpdateOnDbOpen() { + assert(isOwningHandle()); + return skipStatsUpdateOnDbOpen(nativeHandle_); + } + + @Override + public DBOptions setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) { + assert(isOwningHandle()); + 
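+ // the WALRecoveryMode enum crosses the JNI boundary as its byte value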
setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue()); + return this; + } + + @Override + public WALRecoveryMode walRecoveryMode() { + assert(isOwningHandle()); + return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_)); + } + + @Override + public DBOptions setAllow2pc(final boolean allow2pc) { + assert(isOwningHandle()); + setAllow2pc(nativeHandle_, allow2pc); + return this; + } + + @Override + public boolean allow2pc() { + assert(isOwningHandle()); + return allow2pc(nativeHandle_); + } + + @Override + public DBOptions setRowCache(final Cache rowCache) { + assert(isOwningHandle()); + setRowCache(nativeHandle_, rowCache.nativeHandle_); + this.rowCache_ = rowCache; + return this; + } + + @Override + public Cache rowCache() { + assert(isOwningHandle()); + return this.rowCache_; + } + + @Override + public DBOptions setFailIfOptionsFileError(final boolean failIfOptionsFileError) { + assert(isOwningHandle()); + setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError); + return this; + } + + @Override + public boolean failIfOptionsFileError() { + assert(isOwningHandle()); + return failIfOptionsFileError(nativeHandle_); + } + + @Override + public DBOptions setDumpMallocStats(final boolean dumpMallocStats) { + assert(isOwningHandle()); + setDumpMallocStats(nativeHandle_, dumpMallocStats); + return this; + } + + @Override + public boolean dumpMallocStats() { + assert(isOwningHandle()); + return dumpMallocStats(nativeHandle_); + } + + @Override + public DBOptions setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) { + assert(isOwningHandle()); + setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery); + return this; + } + + @Override + public boolean avoidFlushDuringRecovery() { + assert(isOwningHandle()); + return avoidFlushDuringRecovery(nativeHandle_); + } + + @Override + public DBOptions setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) { + assert(isOwningHandle()); + setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown); + return this; + } + + @Override + public boolean avoidFlushDuringShutdown() { + assert(isOwningHandle()); + return avoidFlushDuringShutdown(nativeHandle_); + } + static final int DEFAULT_NUM_SHARD_BITS = -1; - public DBOptions setDelayedWriteRate(final long delayedWriteRate){ - assert(isOwningHandle()); - setDelayedWriteRate(nativeHandle_, delayedWriteRate); - return this; -} -public long delayedWriteRate(){ - return delayedWriteRate(nativeHandle_); -} /** @@ -630,12 +945,14 @@ public long delayedWriteRate(){ private native static long newDBOptions(); @Override protected final native void disposeInternal(final long handle); + private native void optimizeForSmallDb(final long handle); private native void setIncreaseParallelism(long handle, int totalThreads); private native void setCreateIfMissing(long handle, boolean flag); private native boolean createIfMissing(long handle); private native void setCreateMissingColumnFamilies( long handle, boolean flag); private native boolean createMissingColumnFamilies(long handle); + private native void setEnv(long handle, long envHandle); private native void setErrorIfExists(long handle, boolean errorIfExists); private native boolean errorIfExists(long handle); private native void setParanoidChecks( @@ -649,6 +966,9 @@ public long delayedWriteRate(){ private native byte infoLogLevel(long handle); private native void setMaxOpenFiles(long handle, int maxOpenFiles); private native int maxOpenFiles(long handle); + private native void 
setMaxFileOpeningThreads(final long handle, + final int maxFileOpeningThreads); + private native int maxFileOpeningThreads(final long handle); private native void setMaxTotalWalSize(long handle, long maxTotalWalSize); private native long maxTotalWalSize(long handle); @@ -656,6 +976,11 @@ public long delayedWriteRate(){ private native long statisticsPtr(long optHandle); private native boolean useFsync(long handle); private native void setUseFsync(long handle, boolean useFsync); + private native void setDbPaths(final long handle, final String[] paths, + final long[] targetSizes); + private native long dbPathsLen(final long handle); + private native void dbPaths(final long handle, final String[] paths, + final long[] targetSizes); private native void setDbLogDir(long handle, String dbLogDir); private native String dbLogDir(long handle); private native void setWalDir(long handle, String walDir); @@ -683,6 +1008,8 @@ public long delayedWriteRate(){ private native void setKeepLogFileNum(long handle, long keepLogFileNum) throws IllegalArgumentException; private native long keepLogFileNum(long handle); + private native void setRecycleLogFileNum(long handle, long recycleLogFileNum); + private native long recycleLogFileNum(long handle); private native void setMaxManifestFileSize( long handle, long maxManifestFileSize); private native long maxManifestFileSize(long handle); @@ -700,6 +1027,9 @@ public long delayedWriteRate(){ private native boolean useDirectReads(long handle); private native void setUseDirectWrites(long handle, boolean useDirectWrites); private native boolean useDirectWrites(long handle); + private native void setAllowFAllocate(final long handle, + final boolean allowFAllocate); + private native boolean allowFAllocate(final long handle); private native void setAllowMmapReads( long handle, boolean allowMmapReads); private native boolean allowMmapReads(long handle); @@ -715,12 +1045,37 @@ public long delayedWriteRate(){ private native void setAdviseRandomOnOpen( long handle, boolean adviseRandomOnOpen); private native boolean adviseRandomOnOpen(long handle); + private native void setDbWriteBufferSize(final long handle, + final long dbWriteBufferSize); + private native long dbWriteBufferSize(final long handle); + private native void setAccessHintOnCompactionStart(final long handle, + final byte accessHintOnCompactionStart); + private native byte accessHintOnCompactionStart(final long handle); + private native void setNewTableReaderForCompactionInputs(final long handle, + final boolean newTableReaderForCompactionInputs); + private native boolean newTableReaderForCompactionInputs(final long handle); + private native void setCompactionReadaheadSize(final long handle, + final long compactionReadaheadSize); + private native long compactionReadaheadSize(final long handle); + private native void setRandomAccessMaxBufferSize(final long handle, + final long randomAccessMaxBufferSize); + private native long randomAccessMaxBufferSize(final long handle); + private native void setWritableFileMaxBufferSize(final long handle, + final long writableFileMaxBufferSize); + private native long writableFileMaxBufferSize(final long handle); private native void setUseAdaptiveMutex( long handle, boolean useAdaptiveMutex); private native boolean useAdaptiveMutex(long handle); private native void setBytesPerSync( long handle, long bytesPerSync); private native long bytesPerSync(long handle); + private native void setWalBytesPerSync(long handle, long walBytesPerSync); + private native long walBytesPerSync(long 
handle); + private native void setEnableThreadTracking(long handle, + boolean enableThreadTracking); + private native boolean enableThreadTracking(long handle); + private native void setDelayedWriteRate(long handle, long delayedWriteRate); + private native long delayedWriteRate(long handle); private native void setAllowConcurrentMemtableWrite(long handle, boolean allowConcurrentMemtableWrite); private native boolean allowConcurrentMemtableWrite(long handle); @@ -733,10 +1088,33 @@ public long delayedWriteRate(){ private native void setWriteThreadSlowYieldUsec(long handle, long writeThreadSlowYieldUsec); private native long writeThreadSlowYieldUsec(long handle); - - private native void setDelayedWriteRate(long handle, long delayedWriteRate); - private native long delayedWriteRate(long handle); - - int numShardBits_; - RateLimiter rateLimiter_; + private native void setSkipStatsUpdateOnDbOpen(final long handle, + final boolean skipStatsUpdateOnDbOpen); + private native boolean skipStatsUpdateOnDbOpen(final long handle); + private native void setWalRecoveryMode(final long handle, + final byte walRecoveryMode); + private native byte walRecoveryMode(final long handle); + private native void setAllow2pc(final long handle, + final boolean allow2pc); + private native boolean allow2pc(final long handle); + private native void setRowCache(final long handle, + final long row_cache_handle); + private native void setFailIfOptionsFileError(final long handle, + final boolean failIfOptionsFileError); + private native boolean failIfOptionsFileError(final long handle); + private native void setDumpMallocStats(final long handle, + final boolean dumpMallocStats); + private native boolean dumpMallocStats(final long handle); + private native void setAvoidFlushDuringRecovery(final long handle, + final boolean avoidFlushDuringRecovery); + private native boolean avoidFlushDuringRecovery(final long handle); + private native void setAvoidFlushDuringShutdown(final long handle, + final boolean avoidFlushDuringShutdown); + private native boolean avoidFlushDuringShutdown(final long handle); + + // instance variables + private Env env_; + private int numShardBits_; + private RateLimiter rateLimiter_; + private Cache rowCache_; } diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java index 1af9b7ced..2e0e295ed 100644 --- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -5,7 +5,35 @@ package org.rocksdb; -public interface DBOptionsInterface { +import java.util.Collection; +import java.util.List; + +public interface DBOptionsInterface<T extends DBOptionsInterface<T>> { + + /** + * Use this if your DB is very small (like under 1GB) and you don't want to + * spend lots of memory for memtables. + * + * @return the instance of the current object. + */ + T optimizeForSmallDb(); + + /** + * Use the specified object to interact with the environment, + * e.g. to read/write files, schedule background work, etc. + * Default: {@link Env#getDefault()} + * + * @param env {@link Env} instance. + * @return the instance of the current Options. + */ + T setEnv(final Env env); + + /** + * Returns the {@link Env} instance set in the options. + * + * @return {@link Env} instance set in the options. + */ + Env getEnv(); /** *

By default, RocksDB uses only one background thread for flush and @@ -20,7 +48,7 @@ public interface DBOptionsInterface { * * @return the instance of the current Options */ - Object setIncreaseParallelism(int totalThreads); + T setIncreaseParallelism(int totalThreads); /** * If this value is set to true, then the database will be created @@ -33,7 +61,7 @@ public interface DBOptionsInterface { * @return the instance of the current Options * @see RocksDB#open(org.rocksdb.Options, String) */ - Object setCreateIfMissing(boolean flag); + T setCreateIfMissing(boolean flag); /** * Return true if the create_if_missing flag is set to true. @@ -54,7 +82,7 @@ public interface DBOptionsInterface { * @return true if missing column families shall be created automatically * on open. */ - Object setCreateMissingColumnFamilies(boolean flag); + T setCreateMissingColumnFamilies(boolean flag); /** * Return true if the create_missing_column_families flag is set @@ -76,7 +104,7 @@ public interface DBOptionsInterface { * @return the reference to the current option. * @see RocksDB#open(org.rocksdb.Options, String) */ - Object setErrorIfExists(boolean errorIfExists); + T setErrorIfExists(boolean errorIfExists); /** * If true, an error will be thrown during RocksDB.open() if the @@ -102,7 +130,7 @@ public interface DBOptionsInterface { * is on. * @return the reference to the current option. */ - Object setParanoidChecks(boolean paranoidChecks); + T setParanoidChecks(boolean paranoidChecks); /** * If true, the implementation will do aggressive checking of the @@ -124,11 +152,11 @@ public interface DBOptionsInterface { * Default: nullptr * * @param rateLimiter {@link org.rocksdb.RateLimiter} instance. - * @return the instance of the current Object. + * @return the instance of the current object. * * @since 3.10.0 */ - Object setRateLimiter(RateLimiter rateLimiter); + T setRateLimiter(RateLimiter rateLimiter); /** *

Any internal progress/error information generated by @@ -139,17 +167,17 @@ public interface DBOptionsInterface { *

Default: nullptr
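 *
 * A minimal sketch of wiring in a custom logger (the {@code options} instance
 * and handler body are illustrative only):
 * <pre>{@code
 * try (final Options options = new Options()) {
 *   options.setLogger(new Logger(options) {
 *     @Override
 *     protected void log(InfoLogLevel infoLogLevel, String logMsg) {
 *       System.out.println(infoLogLevel + ": " + logMsg);
 *     }
 *   });
 * }
 * }</pre>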

* * @param logger {@link Logger} instance. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setLogger(Logger logger); + T setLogger(Logger logger); /** *

Sets the RocksDB log level. Default level is INFO
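 *
 * For example, given an {@code options} instance:
 * <pre>{@code options.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);}</pre>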

* * @param infoLogLevel log level to set. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setInfoLogLevel(InfoLogLevel infoLogLevel); + T setInfoLogLevel(InfoLogLevel infoLogLevel); /** *

Returns currently set log level.

@@ -167,9 +195,9 @@ public interface DBOptionsInterface { * Default: 5000 * * @param maxOpenFiles the maximum number of open files. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setMaxOpenFiles(int maxOpenFiles); + T setMaxOpenFiles(int maxOpenFiles); /** * Number of open files that can be used by the DB. You may need to @@ -183,6 +211,31 @@ public interface DBOptionsInterface { */ int maxOpenFiles(); + /** + * If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You + * can use this option to increase the number of threads used to open the + * files. + * + * Default: 16 + * + * @param maxFileOpeningThreads the maximum number of threads to use to + * open files + * + * @return the reference to the current options. + */ + T setMaxFileOpeningThreads(int maxFileOpeningThreads); + + /** + * If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You + * can use this option to increase the number of threads used to open the + * files. + * + * Default: 16 + * + * @return the maximum number of threads to use to open files + */ + int maxFileOpeningThreads(); + /** *

Once write-ahead logs exceed this size, we will start forcing the * flush of column families whose memtables are backed by the oldest live @@ -193,9 +246,9 @@ public interface DBOptionsInterface { *

Default: 0
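 *
 * For example, to start forcing flushes once the WALs exceed 1GB (given an
 * {@code options} instance):
 * <pre>{@code options.setMaxTotalWalSize(1024L * 1024 * 1024);}</pre>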

* * @param maxTotalWalSize max total wal size. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setMaxTotalWalSize(long maxTotalWalSize); + T setMaxTotalWalSize(long maxTotalWalSize); /** *

Returns the max total wal size. Once write-ahead logs exceed this size, @@ -216,10 +269,10 @@ public interface DBOptionsInterface { * Statistics objects should not be shared between DB instances as * it does not use any locks to prevent concurrent updates.

* - * @return the instance of the current Object. + * @return the instance of the current object. * @see RocksDB#open(org.rocksdb.Options, String) */ - Object createStatistics(); + T createStatistics(); /** *

Returns statistics object. Calls {@link #createStatistics()} if @@ -238,9 +291,9 @@ public interface DBOptionsInterface { *

Default: false

* * @param useFsync a boolean flag to specify whether to use fsync - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setUseFsync(boolean useFsync); + T setUseFsync(boolean useFsync); /** *

If true, then every store to stable storage will issue a fsync.

@@ -252,6 +305,70 @@ */ boolean useFsync(); + /** + * A list of paths where SST files can be put into, with their target sizes. + * Newer data is placed into paths specified earlier in the vector while + * older data gradually moves to paths specified later in the vector. + * + * For example, if you have a flash device with 10GB allocated for the DB, + * as well as a hard drive of 2TB, you should configure it as: + * [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] + * + * The system will try to guarantee data under each path is close to but + * not larger than the target size. But current and future file sizes used + * in determining where to place a file are based on best-effort estimation, + * which means there is a chance that the actual size under the directory + * is slightly more than target size under some workloads. Users should give + * some buffer room for those cases. + * + * If none of the paths has sufficient room to place a file, the file will + * be placed in the last path anyway, regardless of the target size. + * + * Placing newer data in earlier paths is also best-effort. Users should + * expect user files to be placed in higher levels in some extreme cases. + * + * If left empty, only one path will be used, which is db_name passed when + * opening the DB. + * + * Default: empty + * + * @param dbPaths the paths and target sizes + * + * @return the reference to the current options + */ + T setDbPaths(final Collection<DbPath> dbPaths); + + /** + * A list of paths where SST files can be put into, with their target sizes. + * Newer data is placed into paths specified earlier in the vector while + * older data gradually moves to paths specified later in the vector. + * + * For example, if you have a flash device with 10GB allocated for the DB, + * as well as a hard drive of 2TB, you should configure it as: + * [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] + * + * The system will try to guarantee data under each path is close to but + * not larger than the target size. But current and future file sizes used + * in determining where to place a file are based on best-effort estimation, + * which means there is a chance that the actual size under the directory + * is slightly more than target size under some workloads. Users should give + * some buffer room for those cases. + * + * If none of the paths has sufficient room to place a file, the file will + * be placed in the last path anyway, regardless of the target size. + * + * Placing newer data in earlier paths is also best-effort. Users should + * expect user files to be placed in higher levels in some extreme cases. + * + * If left empty, only one path will be used, which is db_name passed when + * opening the DB. + * + * Default: {@link java.util.Collections#emptyList()} + * + * @return the paths and target sizes + */ + List<DbPath> dbPaths(); + /** * This specifies the info LOG dir. * If it is empty, the log files will be in the same dir as data. @@ -260,9 +377,9 @@ public interface DBOptionsInterface { * name's prefix. * * @param dbLogDir the path to the info log directory - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setDbLogDir(String dbLogDir); + T setDbLogDir(String dbLogDir); /** * Returns the directory of info log. @@ -285,9 +402,9 @@ public interface DBOptionsInterface { * all log files in wal_dir and the dir itself is deleted * * @param walDir the path to the write-ahead-log directory. - * @return the instance of the current Object. 
+ * @return the instance of the current object. */ - Object setWalDir(String walDir); + T setWalDir(String walDir); /** * Returns the path to the write-ahead-logs (WAL) directory. @@ -309,9 +426,9 @@ public interface DBOptionsInterface { * regardless of this setting * * @param micros the time interval in micros - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setDeleteObsoleteFilesPeriodMicros(long micros); + T setDeleteObsoleteFilesPeriodMicros(long micros); /** * The periodicity when obsolete files get deleted. The default @@ -351,13 +468,13 @@ public interface DBOptionsInterface { * * @param maxBackgroundCompactions the maximum number of background * compaction jobs. - * @return the instance of the current Object. + * @return the instance of the current object. * * @see RocksEnv#setBackgroundThreads(int) * @see RocksEnv#setBackgroundThreads(int, int) * @see #maxBackgroundFlushes() */ - Object setMaxBackgroundCompactions(int maxBackgroundCompactions); + T setMaxBackgroundCompactions(int maxBackgroundCompactions); /** * Returns the maximum number of concurrent background compaction jobs, @@ -401,13 +518,13 @@ public interface DBOptionsInterface { * Default: 1 * * @param maxBackgroundFlushes number of max concurrent flush jobs - * @return the instance of the current Object. + * @return the instance of the current object. * * @see RocksEnv#setBackgroundThreads(int) * @see RocksEnv#setBackgroundThreads(int, int) * @see #maxBackgroundCompactions() */ - Object setMaxBackgroundFlushes(int maxBackgroundFlushes); + T setMaxBackgroundFlushes(int maxBackgroundFlushes); /** * Returns the maximum number of concurrent background flush jobs. @@ -428,11 +545,11 @@ public interface DBOptionsInterface { * If 0, all logs will be written to one log file. * * @param maxLogFileSize the maximum size of a info log file. - * @return the instance of the current Object. + * @return the instance of the current object. * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms * while overflowing the underlying platform specific value. */ - Object setMaxLogFileSize(long maxLogFileSize); + T setMaxLogFileSize(long maxLogFileSize); /** * Returns the maximum size of a info log file. If the current log file @@ -450,11 +567,11 @@ public interface DBOptionsInterface { * Default: 0 (disabled) * * @param logFileTimeToRoll the time interval in seconds. - * @return the instance of the current Object. + * @return the instance of the current object. * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms * while overflowing the underlying platform specific value. */ - Object setLogFileTimeToRoll(long logFileTimeToRoll); + T setLogFileTimeToRoll(long logFileTimeToRoll); /** * Returns the time interval for the info log file to roll (in seconds). @@ -471,11 +588,11 @@ public interface DBOptionsInterface { * Default: 1000 * * @param keepLogFileNum the maximum number of info log files to be kept. - * @return the instance of the current Object. + * @return the instance of the current object. * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms * while overflowing the underlying platform specific value. */ - Object setKeepLogFileNum(long keepLogFileNum); + T setKeepLogFileNum(long keepLogFileNum); /** * Returns the maximum number of info log files to be kept. @@ -485,15 +602,53 @@ public interface DBOptionsInterface { */ long keepLogFileNum(); + /** + * Recycle log files. 
+ * + * If non-zero, we will reuse previously written log files for new + * logs, overwriting the old data. The value indicates how many + * such files we will keep around at any point in time for later + * use. + * + * This is more efficient because the blocks are already + * allocated and fdatasync does not need to update the inode after + * each write. + * + * Default: 0 + * + * @param recycleLogFileNum the number of log files to keep for recycling + * + * @return the reference to the current options + */ + T setRecycleLogFileNum(long recycleLogFileNum); + + /** + * Recycle log files. + * + * If non-zero, we will reuse previously written log files for new + * logs, overwriting the old data. The value indicates how many + * such files we will keep around at any point in time for later + * use. + * + * This is more efficient because the blocks are already + * allocated and fdatasync does not need to update the inode after + * each write. + * + * Default: 0 + * + * @return the number of log files kept for recycling + */ + long recycleLogFileNum(); + /** * Manifest file is rolled over on reaching this limit. * The older manifest file be deleted. * The default value is MAX_INT so that roll-over does not take place. * * @param maxManifestFileSize the size limit of a manifest file. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setMaxManifestFileSize(long maxManifestFileSize); + T setMaxManifestFileSize(long maxManifestFileSize); /** * Manifest file is rolled over on reaching this limit. @@ -508,9 +663,9 @@ public interface DBOptionsInterface { * Number of shards used for table cache. * * @param tableCacheNumshardbits the number of chards - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setTableCacheNumshardbits(int tableCacheNumshardbits); + T setTableCacheNumshardbits(int tableCacheNumshardbits); /** * Number of shards used for table cache. @@ -537,10 +692,10 @@ public interface DBOptionsInterface { * * * @param walTtlSeconds the ttl seconds - * @return the instance of the current Object. + * @return the instance of the current object. * @see #setWalSizeLimitMB(long) */ - Object setWalTtlSeconds(long walTtlSeconds); + T setWalTtlSeconds(long walTtlSeconds); /** * WalTtlSeconds() and walSizeLimitMB() affect how archived logs @@ -582,10 +737,10 @@ public interface DBOptionsInterface { * * * @param sizeLimitMB size limit in mega-bytes. - * @return the instance of the current Object. + * @return the instance of the current object. * @see #setWalSizeLimitMB(long) */ - Object setWalSizeLimitMB(long sizeLimitMB); + T setWalSizeLimitMB(long sizeLimitMB); /** * {@link #walTtlSeconds()} and {@code #walSizeLimitMB()} affect how archived logs @@ -615,11 +770,11 @@ public interface DBOptionsInterface { * large amounts of data (such as xfs's allocsize option). * * @param size the size in byte - * @return the instance of the current Object. + * @return the instance of the current object. * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms * while overflowing the underlying platform specific value. */ - Object setManifestPreallocationSize(long size); + T setManifestPreallocationSize(long size); /** * Number of bytes to preallocate (via fallocate) the manifest @@ -636,9 +791,9 @@ public interface DBOptionsInterface { * Default: false * * @param useDirectReads if true, then direct read is enabled - * @return the instance of the current Object. 
+ * @return the instance of the current object. */ - Object setUseDirectReads(boolean useDirectReads); + T setUseDirectReads(boolean useDirectReads); /** * Enable the OS to use direct I/O for reading sst tables. @@ -653,9 +808,9 @@ public interface DBOptionsInterface { * Default: false * * @param useDirectWrites if true, then direct write is enabled - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setUseDirectWrites(boolean useDirectWrites); + T setUseDirectWrites(boolean useDirectWrites); /** * Enable the OS to use direct I/O for writing sst tables. @@ -665,14 +820,30 @@ public interface DBOptionsInterface { */ boolean useDirectWrites(); + /** + * Whether fallocate calls are allowed + * + * @param allowFAllocate false if fallocate() calls are bypassed + * + * @return the reference to the current options. + */ + T setAllowFAllocate(boolean allowFAllocate); + + /** + * Whether fallocate calls are allowed + * + * @return false if fallocate() calls are bypassed + */ + boolean allowFAllocate(); + /** * Allow the OS to mmap file for reading sst tables. * Default: false * * @param allowMmapReads true if mmap reads are allowed. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setAllowMmapReads(boolean allowMmapReads); + T setAllowMmapReads(boolean allowMmapReads); /** * Allow the OS to mmap file for reading sst tables. @@ -686,9 +857,9 @@ public interface DBOptionsInterface { * Allow the OS to mmap file for writing. Default: false * * @param allowMmapWrites true if mmap writes are allowd. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setAllowMmapWrites(boolean allowMmapWrites); + T setAllowMmapWrites(boolean allowMmapWrites); /** * Allow the OS to mmap file for writing. Default: false @@ -702,9 +873,9 @@ public interface DBOptionsInterface { * * @param isFdCloseOnExec true if child process inheriting open * files is disabled. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setIsFdCloseOnExec(boolean isFdCloseOnExec); + T setIsFdCloseOnExec(boolean isFdCloseOnExec); /** * Disable child process inherit open files. Default: true @@ -718,9 +889,9 @@ public interface DBOptionsInterface { * Default: 600 (10 minutes) * * @param statsDumpPeriodSec time interval in seconds. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setStatsDumpPeriodSec(int statsDumpPeriodSec); + T setStatsDumpPeriodSec(int statsDumpPeriodSec); /** * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec @@ -736,9 +907,9 @@ public interface DBOptionsInterface { * Default: true * * @param adviseRandomOnOpen true if hinting random access is on. - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setAdviseRandomOnOpen(boolean adviseRandomOnOpen); + T setAdviseRandomOnOpen(boolean adviseRandomOnOpen); /** * If set true, will hint the underlying file system that the file @@ -749,6 +920,200 @@ public interface DBOptionsInterface { */ boolean adviseRandomOnOpen(); + /** + * Amount of data to build up in memtables across all column + * families before writing to disk. + * + * This is distinct from {@link ColumnFamilyOptions#writeBufferSize()}, + * which enforces a limit for a single memtable. + * + * This feature is disabled by default. Specify a non-zero value + * to enable it. 
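+ *
+ * For example, to cap memtable memory across all column families at 128MB
+ * (given an {@code options} instance):
+ * <pre>{@code options.setDbWriteBufferSize(128 * 1024 * 1024);}</pre>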
+ * + * Default: 0 (disabled) + * + * @param dbWriteBufferSize the size of the write buffer + * + * @return the reference to the current options. + */ + T setDbWriteBufferSize(long dbWriteBufferSize); + + /** + * Amount of data to build up in memtables across all column + * families before writing to disk. + * + * This is distinct from {@link ColumnFamilyOptions#writeBufferSize()}, + * which enforces a limit for a single memtable. + * + * This feature is disabled by default. Specify a non-zero value + * to enable it. + * + * Default: 0 (disabled) + * + * @return the size of the write buffer + */ + long dbWriteBufferSize(); + + /** + * Specify the file access pattern once a compaction is started. + * It will be applied to all input files of a compaction. + * + * Default: {@link AccessHint#NORMAL} + * + * @param accessHint The access hint + * + * @return the reference to the current options. + */ + T setAccessHintOnCompactionStart(final AccessHint accessHint); + + /** + * Specify the file access pattern once a compaction is started. + * It will be applied to all input files of a compaction. + * + * Default: {@link AccessHint#NORMAL} + * + * @return The access hint + */ + AccessHint accessHintOnCompactionStart(); + + /** + * If true, always create a new file descriptor and new table reader + * for compaction inputs. Turning this parameter on may introduce extra + * memory usage in the table reader, if it allocates extra memory + * for indexes. This will allow file descriptor prefetch options + * to be set for compaction input files and not to impact file + * descriptors for the same file used by user queries. + * It is suggested to enable {@link BlockBasedTableConfig#cacheIndexAndFilterBlocks()} + * for this mode if using block-based table. + * + * Default: false + * + * @param newTableReaderForCompactionInputs true if a new file descriptor and + * table reader should be created for compaction inputs + * + * @return the reference to the current options. + */ + T setNewTableReaderForCompactionInputs( + boolean newTableReaderForCompactionInputs); + + /** + * If true, always create a new file descriptor and new table reader + * for compaction inputs. Turning this parameter on may introduce extra + * memory usage in the table reader, if it allocates extra memory + * for indexes. This will allow file descriptor prefetch options + * to be set for compaction input files and not to impact file + * descriptors for the same file used by user queries. + * It is suggested to enable {@link BlockBasedTableConfig#cacheIndexAndFilterBlocks()} + * for this mode if using block-based table. + * + * Default: false + * + * @return true if a new file descriptor and table reader are created for + * compaction inputs + */ + boolean newTableReaderForCompactionInputs(); + + /** + * If non-zero, we perform bigger reads when doing compaction. If you're + * running RocksDB on spinning disks, you should set this to at least 2MB. + * + * That way RocksDB's compaction is doing sequential instead of random reads. + * When non-zero, we also force {@link #newTableReaderForCompactionInputs()} + * to true. + * + * Default: 0 + * + * @param compactionReadaheadSize The compaction read-ahead size + * + * @return the reference to the current options. + */ + T setCompactionReadaheadSize(final long compactionReadaheadSize); + + /** + * If non-zero, we perform bigger reads when doing compaction. If you're + * running RocksDB on spinning disks, you should set this to at least 2MB.
+ * + * That way RocksDB's compaction is doing sequential instead of random reads. + * When non-zero, we also force {@link #newTableReaderForCompactionInputs()} + * to true. + * + * Default: 0 + * + * @return The compaction read-ahead size + */ + long compactionReadaheadSize(); + + /** + * This is a maximum buffer size that is used by WinMmapReadableFile in + * unbuffered disk I/O mode. We need to maintain an aligned buffer for + * reads. We allow the buffer to grow until the specified value, and then + * allocate one-shot buffers for bigger requests. In unbuffered mode we + * always bypass the read-ahead buffer at ReadaheadRandomAccessFile. + * When read-ahead is required we then make use of the + * {@link #compactionReadaheadSize()} value and always try to read ahead. + * With read-ahead we always pre-allocate buffer to the size instead of + * growing it up to a limit. + * + * This option is currently honored only on Windows. + * + * Default: 1 MB + * + * Special value: 0 - means do not maintain per instance buffer. Allocate + * per request buffer and avoid locking. + * + * @param randomAccessMaxBufferSize the maximum size of the random access + * buffer + * + * @return the reference to the current options. + */ + T setRandomAccessMaxBufferSize(long randomAccessMaxBufferSize); + + /** + * This is a maximum buffer size that is used by WinMmapReadableFile in + * unbuffered disk I/O mode. We need to maintain an aligned buffer for + * reads. We allow the buffer to grow until the specified value, and then + * allocate one-shot buffers for bigger requests. In unbuffered mode we + * always bypass the read-ahead buffer at ReadaheadRandomAccessFile. + * When read-ahead is required we then make use of the + * {@link #compactionReadaheadSize()} value and always try to read ahead. + * With read-ahead we always pre-allocate buffer to the size instead of + * growing it up to a limit. + * + * This option is currently honored only on Windows. + * + * Default: 1 MB + * + * Special value: 0 - means do not maintain per instance buffer. Allocate + * per request buffer and avoid locking. + * + * @return the maximum size of the random access buffer + */ + long randomAccessMaxBufferSize(); + + /** + * This is the maximum buffer size that is used by WritableFileWriter. + * On Windows, we need to maintain an aligned buffer for writes. + * We allow the buffer to grow until its size hits the limit. + * + * Default: 1024 * 1024 (1 MB) + * + * @param writableFileMaxBufferSize the maximum buffer size + * + * @return the reference to the current options. + */ + T setWritableFileMaxBufferSize(long writableFileMaxBufferSize); + + /** + * This is the maximum buffer size that is used by WritableFileWriter. + * On Windows, we need to maintain an aligned buffer for writes. + * We allow the buffer to grow until its size hits the limit. + * + * Default: 1024 * 1024 (1 MB) + * + * @return the maximum buffer size + */ + long writableFileMaxBufferSize(); + /** * Use adaptive mutex, which spins in the user space before resorting * to kernel. This could reduce context switch when the mutex is not @@ -757,9 +1122,9 @@ public interface DBOptionsInterface { * Default: false * * @param useAdaptiveMutex true if adaptive mutex is used. - * @return the instance of the current Object. + * @return the instance of the current object.
*/ - Object setUseAdaptiveMutex(boolean useAdaptiveMutex); + T setUseAdaptiveMutex(boolean useAdaptiveMutex); /** * Use adaptive mutex, which spins in the user space before resorting @@ -779,9 +1144,9 @@ public interface DBOptionsInterface { * Default: 0 * * @param bytesPerSync size in bytes - * @return the instance of the current Object. + * @return the instance of the current object. */ - Object setBytesPerSync(long bytesPerSync); + T setBytesPerSync(long bytesPerSync); /** * Allows OS to incrementally sync files to disk while they are being @@ -793,6 +1158,83 @@ public interface DBOptionsInterface { */ long bytesPerSync(); + /** + * Same as {@link #setBytesPerSync(long)} , but applies to WAL files + * + * Default: 0, turned off + * + * @param walBytesPerSync size in bytes + * @return the instance of the current object. + */ + T setWalBytesPerSync(long walBytesPerSync); + + /** + * Same as {@link #bytesPerSync()} , but applies to WAL files + * + * Default: 0, turned off + * + * @return size in bytes + */ + long walBytesPerSync(); + + /** + * If true, then the status of the threads involved in this DB will + * be tracked and available via GetThreadList() API. + * + * Default: false + * + * @param enableThreadTracking true to enable tracking + * + * @return the reference to the current options. + */ + T setEnableThreadTracking(boolean enableThreadTracking); + + /** + * If true, then the status of the threads involved in this DB will + * be tracked and available via GetThreadList() API. + * + * Default: false + * + * @return true if tracking is enabled + */ + boolean enableThreadTracking(); + + /** + * The limited write rate to DB if + * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or + * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, + * or we are writing to the last mem table allowed and we allow more than 3 + * mem tables. It is calculated using size of user write requests before + * compression. RocksDB may decide to slow down more if the compaction still + * gets behind further. + * + * Unit: bytes per second. + * + * Default: 16MB/s + * + * @param delayedWriteRate the rate in bytes per second + * + * @return the reference to the current options. + */ + T setDelayedWriteRate(long delayedWriteRate); + + /** + * The limited write rate to DB if + * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or + * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, + * or we are writing to the last mem table allowed and we allow more than 3 + * mem tables. It is calculated using size of user write requests before + * compression. RocksDB may decide to slow down more if the compaction still + * gets behind further. + * + * Unit: bytes per second. + * + * Default: 16MB/s + * + * @return the rate in bytes per second + */ + long delayedWriteRate(); + /** * If true, allow multi-writers to update mem tables in parallel. * Only some memtable factorys support concurrent writes; currently it @@ -805,8 +1247,10 @@ public interface DBOptionsInterface { * * @param allowConcurrentMemtableWrite true to enable concurrent writes * for the memtable + * + * @return the reference to the current options. */ - void setAllowConcurrentMemtableWrite(boolean allowConcurrentMemtableWrite); + T setAllowConcurrentMemtableWrite(boolean allowConcurrentMemtableWrite); /** * If true, allow multi-writers to update mem tables in parallel. 
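The switch from `Object`/`void` return types to the self-type `T` is what makes every setter on this interface chainable from `Options` and `DBOptions` alike. A minimal sketch of the resulting fluent style (the option values are illustrative only, not tuning advice):

```java
import org.rocksdb.DBOptions;

// Minimal sketch: DBOptions implements DBOptionsInterface<DBOptions>,
// so each self-typed setter above returns the concrete options object
// and the calls chain. Values are illustrative, not recommendations.
final DBOptions dbOptions = new DBOptions()
    .setBytesPerSync(1024 * 1024)           // sync data files every 1MB
    .setWalBytesPerSync(1024 * 1024)        // apply the same policy to WAL files
    .setDelayedWriteRate(16 * 1024 * 1024)  // 16MB/s, the documented default
    .setEnableThreadTracking(true)          // expose threads via GetThreadList()
    .setAllowConcurrentMemtableWrite(true); // returned void before this patch
```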
@@ -831,8 +1275,10 @@ public interface DBOptionsInterface { * * @param enableWriteThreadAdaptiveYield true to enable adaptive yield for the * write threads + * + * @return the reference to the current options. */ - void setEnableWriteThreadAdaptiveYield( + T setEnableWriteThreadAdaptiveYield( boolean enableWriteThreadAdaptiveYield); /** @@ -856,8 +1302,10 @@ public interface DBOptionsInterface { * Default: 100 * * @param writeThreadMaxYieldUsec maximum number of microseconds + * + * @return the reference to the current options. */ - void setWriteThreadMaxYieldUsec(long writeThreadMaxYieldUsec); + T setWriteThreadMaxYieldUsec(long writeThreadMaxYieldUsec); /** * The maximum number of microseconds that a write operation will use @@ -881,8 +1329,10 @@ public interface DBOptionsInterface { * Default: 3 * * @param writeThreadSlowYieldUsec the latency in microseconds + * + * @return the reference to the current options. */ - void setWriteThreadSlowYieldUsec(long writeThreadSlowYieldUsec); + T setWriteThreadSlowYieldUsec(long writeThreadSlowYieldUsec); /** * The latency in microseconds after which a std::this_thread::yield @@ -896,4 +1346,202 @@ public interface DBOptionsInterface { * @return writeThreadSlowYieldUsec the latency in microseconds */ long writeThreadSlowYieldUsec(); + + /** + * If true, then DB::Open() will not update the statistics used to optimize + * compaction decision by loading table properties from many files. + * Turning off this feature will improve DBOpen time especially in + * a disk environment. + * + * Default: false + * + * @param skipStatsUpdateOnDbOpen true if updating stats will be skipped + * + * @return the reference to the current options. + */ + T setSkipStatsUpdateOnDbOpen(boolean skipStatsUpdateOnDbOpen); + + /** + * If true, then DB::Open() will not update the statistics used to optimize + * compaction decision by loading table properties from many files. + * Turning off this feature will improve DBOpen time especially in + * a disk environment. + * + * Default: false + * + * @return true if updating stats will be skipped + */ + boolean skipStatsUpdateOnDbOpen(); + + /** + * Recovery mode to control the consistency while replaying WAL + * + * Default: {@link WALRecoveryMode#PointInTimeRecovery} + * + * @param walRecoveryMode The WAL recovery mode + * + * @return the reference to the current options. + */ + T setWalRecoveryMode(WALRecoveryMode walRecoveryMode); + + /** + * Recovery mode to control the consistency while replaying WAL + * + * Default: {@link WALRecoveryMode#PointInTimeRecovery} + * + * @return The WAL recovery mode + */ + WALRecoveryMode walRecoveryMode(); + + /** + * If set to false, then recovery will fail when a prepared + * transaction is encountered in the WAL + * + * Default: false + * + * @param allow2pc true if two-phase-commit is enabled + * + * @return the reference to the current options. + */ + T setAllow2pc(boolean allow2pc); + + /** + * If set to false, then recovery will fail when a prepared + * transaction is encountered in the WAL + * + * Default: false + * + * @return true if two-phase-commit is enabled + */ + boolean allow2pc(); + + /** + * A global cache for table-level rows. + * + * Default: null (disabled) + * + * @param rowCache The global row cache + * + * @return the reference to the current options. + */ + T setRowCache(final Cache rowCache); + + /** + * A global cache for table-level rows.
+ * + * Default: null (disabled) + * + * @return The global row cache + */ + Cache rowCache(); + + /** + * If true, then DB::Open / CreateColumnFamily / DropColumnFamily + * / SetOptions will fail if the options file is not detected or not + * properly persisted. + * + * DEFAULT: false + * + * @param failIfOptionsFileError true if we should fail if there is an error + * in the options file + * + * @return the reference to the current options. + */ + T setFailIfOptionsFileError(boolean failIfOptionsFileError); + + /** + * If true, then DB::Open / CreateColumnFamily / DropColumnFamily + * / SetOptions will fail if the options file is not detected or not + * properly persisted. + * + * DEFAULT: false + * + * @return true if we should fail if there is an error in the options file + */ + boolean failIfOptionsFileError(); + + /** + * If true, then print malloc stats together with rocksdb.stats + * when printing to LOG. + * + * DEFAULT: false + * + * @param dumpMallocStats true if malloc stats should be printed to LOG + * + * @return the reference to the current options. + */ + T setDumpMallocStats(boolean dumpMallocStats); + + /** + * If true, then print malloc stats together with rocksdb.stats + * when printing to LOG. + * + * DEFAULT: false + * + * @return true if malloc stats should be printed to LOG + */ + boolean dumpMallocStats(); + + /** + * By default RocksDB replays WAL logs and flushes them on DB open, which may + * create very small SST files. If this option is enabled, RocksDB will try + * to avoid (but not guarantee not to) flush during recovery. Also, existing + * WAL logs will be kept, so that if a crash happened before flush, we still + * have logs to recover from. + * + * DEFAULT: false + * + * @param avoidFlushDuringRecovery true to try to avoid (but not guarantee + * not to) flush during recovery + * + * @return the reference to the current options. + */ + T setAvoidFlushDuringRecovery(boolean avoidFlushDuringRecovery); + + /** + * By default RocksDB replays WAL logs and flushes them on DB open, which may + * create very small SST files. If this option is enabled, RocksDB will try + * to avoid (but not guarantee not to) flush during recovery. Also, existing + * WAL logs will be kept, so that if a crash happened before flush, we still + * have logs to recover from. + * + * DEFAULT: false + * + * @return true to try to avoid (but not guarantee not to) flush during + * recovery + */ + boolean avoidFlushDuringRecovery(); + + /** + * By default RocksDB will flush all memtables on DB close if there is + * unpersisted data (i.e. with WAL disabled). The flush can be skipped to + * speed up DB close. Unpersisted data WILL BE LOST. + * + * DEFAULT: false + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} + * API. + * + * @param avoidFlushDuringShutdown true if we should avoid flush during + * shutdown + * + * @return the reference to the current options. + */ + T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown); + + /** + * By default RocksDB will flush all memtables on DB close if there is + * unpersisted data (i.e. with WAL disabled). The flush can be skipped to + * speed up DB close. Unpersisted data WILL BE LOST. + * + * DEFAULT: false + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} + * API.
+ * + * @return true if we should avoid flush during shutdown + */ + boolean avoidFlushDuringShutdown(); } diff --git a/java/src/main/java/org/rocksdb/DbPath.java b/java/src/main/java/org/rocksdb/DbPath.java new file mode 100644 index 000000000..2a48b63d2 --- /dev/null +++ b/java/src/main/java/org/rocksdb/DbPath.java @@ -0,0 +1,47 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.nio.file.Path; + +/** + * Tuple of database path and target size + */ +public class DbPath { + final Path path; + final long targetSize; + + public DbPath(final Path path, final long targetSize) { + this.path = path; + this.targetSize = targetSize; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + final DbPath dbPath = (DbPath) o; + + if (targetSize != dbPath.targetSize) { + return false; + } + + return path != null ? path.equals(dbPath.path) : dbPath.path == null; + } + + @Override + public int hashCode() { + int result = path != null ? path.hashCode() : 0; + result = 31 * result + (int) (targetSize ^ (targetSize >>> 32)); + return result; + } +} diff --git a/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java index 971c0b2ec..35a206250 100644 --- a/java/src/main/java/org/rocksdb/InfoLogLevel.java +++ b/java/src/main/java/org/rocksdb/InfoLogLevel.java @@ -14,7 +14,7 @@ public enum InfoLogLevel { private final byte value_; - private InfoLogLevel(byte value) { + private InfoLogLevel(final byte value) { value_ = value; } @@ -36,8 +36,8 @@ public enum InfoLogLevel { * @throws java.lang.IllegalArgumentException if an invalid * value is provided. */ - public static InfoLogLevel getInfoLogLevel(byte value) { - for (InfoLogLevel infoLogLevel : InfoLogLevel.values()) { + public static InfoLogLevel getInfoLogLevel(final byte value) { + for (final InfoLogLevel infoLogLevel : InfoLogLevel.values()) { if (infoLogLevel.getValue() == value){ return infoLogLevel; } diff --git a/java/src/main/java/org/rocksdb/LRUCache.java b/java/src/main/java/org/rocksdb/LRUCache.java new file mode 100644 index 000000000..f4a509dce --- /dev/null +++ b/java/src/main/java/org/rocksdb/LRUCache.java @@ -0,0 +1,82 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Least Recently Used Cache + */ +public class LRUCache extends Cache { + + /** + * Create a new cache with a fixed size capacity + * + * @param capacity The fixed size capacity of the cache + */ + public LRUCache(final long capacity) { + this(capacity, -1, false, 0.0); + } + + /** + * Create a new cache with a fixed size capacity. The cache is sharded + * to 2^numShardBits shards, by hash of the key. The total capacity + * is divided and evenly assigned to each shard. + * numShardBits = -1 means it is automatically determined: every shard + * will be at least 512KB and number of shard bits will not exceed 6. 
+ * + * @param capacity The fixed size capacity of the cache + * @param numShardBits The cache is sharded to 2^numShardBits shards, + * by hash of the key + */ + public LRUCache(final long capacity, final int numShardBits) { + super(newLRUCache(capacity, numShardBits, false, 0.0)); + } + + /** + * Create a new cache with a fixed size capacity. The cache is sharded + * to 2^numShardBits shards, by hash of the key. The total capacity + * is divided and evenly assigned to each shard. If strictCapacityLimit + * is set, inserts to the cache will fail when the cache is full. + * numShardBits = -1 means it is automatically determined: every shard + * will be at least 512KB and number of shard bits will not exceed 6. + * + * @param capacity The fixed size capacity of the cache + * @param numShardBits The cache is sharded to 2^numShardBits shards, + * by hash of the key + * @param strictCapacityLimit inserts to the cache will fail when the cache is full + */ + public LRUCache(final long capacity, final int numShardBits, + final boolean strictCapacityLimit) { + super(newLRUCache(capacity, numShardBits, strictCapacityLimit, 0.0)); + } + + /** + * Create a new cache with a fixed size capacity. The cache is sharded + * to 2^numShardBits shards, by hash of the key. The total capacity + * is divided and evenly assigned to each shard. If strictCapacityLimit + * is set, inserts to the cache will fail when the cache is full. User can also + * set the percentage of the cache reserved for high priority entries via + * highPriPoolRatio. + * numShardBits = -1 means it is automatically determined: every shard + * will be at least 512KB and number of shard bits will not exceed 6. + * + * @param capacity The fixed size capacity of the cache + * @param numShardBits The cache is sharded to 2^numShardBits shards, + * by hash of the key + * @param strictCapacityLimit inserts to the cache will fail when the cache is full + * @param highPriPoolRatio percentage of the cache reserved for high priority + * entries + */ + public LRUCache(final long capacity, final int numShardBits, + final boolean strictCapacityLimit, final double highPriPoolRatio) { + super(newLRUCache(capacity, numShardBits, strictCapacityLimit, + highPriPoolRatio)); + } + + private static native long newLRUCache(final long capacity, + final int numShardBits, final boolean strictCapacityLimit, + final double highPriPoolRatio); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java index 534f57697..959d5b79d 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java @@ -112,7 +112,8 @@ public class MutableColumnFamilyOptions { LONG, INT, BOOLEAN, - INT_ARRAY + INT_ARRAY, + ENUM } public enum MemtableOption implements MutableColumnFamilyOptionKey { @@ -167,7 +168,9 @@ public class MutableColumnFamilyOptions { public enum MiscOption implements MutableColumnFamilyOptionKey { max_sequential_skip_in_iterations(ValueType.LONG), - paranoid_file_checks(ValueType.BOOLEAN); + paranoid_file_checks(ValueType.BOOLEAN), + report_bg_io_stats(ValueType.BOOLEAN), + compression_type(ValueType.ENUM); private final ValueType valueType; MiscOption(final ValueType valueType) { @@ -198,6 +201,7 @@ public class MutableColumnFamilyOptions { abstract boolean asBoolean() throws IllegalStateException; abstract int[] asIntArray() throws IllegalStateException;
abstract String asString(); + abstract T asObject(); } private static class MutableColumnFamilyOptionStringValue @@ -235,6 +239,11 @@ public class MutableColumnFamilyOptions { String asString() { return value; } + + @Override + String asObject() { + return value; + } } private static class MutableColumnFamilyOptionDoubleValue @@ -281,6 +290,11 @@ public class MutableColumnFamilyOptions { String asString() { return Double.toString(value); } + + @Override + Double asObject() { + return value; + } } private static class MutableColumnFamilyOptionLongValue @@ -331,6 +345,11 @@ public class MutableColumnFamilyOptions { String asString() { return Long.toString(value); } + + @Override + Long asObject() { + return value; + } } private static class MutableColumnFamilyOptionIntValue @@ -371,6 +390,11 @@ public class MutableColumnFamilyOptions { String asString() { return Integer.toString(value); } + + @Override + Integer asObject() { + return value; + } } private static class MutableColumnFamilyOptionBooleanValue @@ -408,6 +432,11 @@ public class MutableColumnFamilyOptions { String asString() { return Boolean.toString(value); } + + @Override + Boolean asObject() { + return value; + } } private static class MutableColumnFamilyOptionIntArrayValue @@ -452,6 +481,54 @@ public class MutableColumnFamilyOptions { } return builder.toString(); } + + @Override + int[] asObject() { + return value; + } + } + + private static class MutableColumnFamilyOptionEnumValue<T extends Enum<T>> + extends MutableColumnFamilyOptionValue<T> { + + MutableColumnFamilyOptionEnumValue(final T value) { + super(value); + } + + @Override + double asDouble() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as double"); + } + + @Override + long asLong() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as long"); + } + + @Override + int asInt() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as int"); + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new IllegalStateException("Enum is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + throw new IllegalStateException("Enum is not applicable as int[]"); + } + + @Override + String asString() { + return value.name(); + } + + @Override + T asObject() { + return value; + } } public static class MutableColumnFamilyOptionsBuilder @@ -583,6 +660,31 @@ public class MutableColumnFamilyOptions { return value.asIntArray(); } + private <T extends Enum<T>> MutableColumnFamilyOptionsBuilder setEnum( + final MutableColumnFamilyOptionKey key, final T value) { + if(key.getValueType() != ValueType.ENUM) { + throw new IllegalArgumentException( + key + " does not accept an Enum value"); + } + options.put(key, new MutableColumnFamilyOptionEnumValue<>(value)); + return this; + } + + private <T extends Enum<T>> T getEnum(final MutableColumnFamilyOptionKey key) + throws NoSuchElementException, NumberFormatException { + final MutableColumnFamilyOptionValue<?> value = options.get(key); + if(value == null) { + throw new NoSuchElementException(key.name() + " has not been set"); + } + + if(!(value instanceof MutableColumnFamilyOptionEnumValue)) { + throw new NoSuchElementException(key.name() + " is not of Enum type"); + } + + return ((MutableColumnFamilyOptionEnumValue<T>)value).asObject(); + } + public MutableColumnFamilyOptionsBuilder fromString(final String keyStr, final String valueStr) throws IllegalArgumentException { Objects.requireNonNull(keyStr); @@ -715,17 +817,6 @@ public
class MutableColumnFamilyOptions { return getBoolean(CompactionOption.disable_auto_compactions); } - @Override - public MutableColumnFamilyOptionsBuilder setSoftRateLimit( - final double softRateLimit) { - return setDouble(CompactionOption.soft_rate_limit, softRateLimit); - } - - @Override - public double softRateLimit() { - return getDouble(CompactionOption.soft_rate_limit); - } - @Override public MutableColumnFamilyOptionsBuilder setSoftPendingCompactionBytesLimit( final long softPendingCompactionBytesLimit) { @@ -738,17 +829,6 @@ public class MutableColumnFamilyOptions { return getLong(CompactionOption.soft_pending_compaction_bytes_limit); } - @Override - public MutableColumnFamilyOptionsBuilder setHardRateLimit( - final double hardRateLimit) { - return setDouble(CompactionOption.hard_rate_limit, hardRateLimit); - } - - @Override - public double hardRateLimit() { - return getDouble(CompactionOption.hard_rate_limit); - } - @Override public MutableColumnFamilyOptionsBuilder setHardPendingCompactionBytesLimit( final long hardPendingCompactionBytesLimit) { @@ -891,5 +971,27 @@ public class MutableColumnFamilyOptions { public boolean paranoidFileChecks() { return getBoolean(MiscOption.paranoid_file_checks); } + + @Override + public MutableColumnFamilyOptionsBuilder setCompressionType( + final CompressionType compressionType) { + return setEnum(MiscOption.compression_type, compressionType); + } + + @Override + public CompressionType compressionType() { + return (CompressionType)getEnum(MiscOption.compression_type); + } + + @Override + public MutableColumnFamilyOptionsBuilder setReportBgIoStats( + final boolean reportBgIoStats) { + return setBoolean(MiscOption.report_bg_io_stats, reportBgIoStats); + } + + @Override + public boolean reportBgIoStats() { + return getBoolean(MiscOption.report_bg_io_stats); + } } } diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java index 7a128c7eb..e6f30a718 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java @@ -5,7 +5,9 @@ package org.rocksdb; -public interface MutableColumnFamilyOptionsInterface { +public interface MutableColumnFamilyOptionsInterface<T extends MutableColumnFamilyOptionsInterface<T>> + extends AdvancedMutableColumnFamilyOptionsInterface<T> { /** * Amount of data to build up in memory (backed by an unsorted log @@ -21,7 +23,7 @@ public interface MutableColumnFamilyOptionsInterface { * * Default: 4MB * @param writeBufferSize the size of write buffer. - * @return the instance of the current Object. + * @return the instance of the current object. * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms * while overflowing the underlying platform specific value. */ @@ -35,171 +37,6 @@ public interface MutableColumnFamilyOptionsInterface { * */ long writeBufferSize(); - /** - * The size of one block in arena memory allocation. - * If ≤ 0, a proper value is automatically calculated (usually 1/10 of - * writer_buffer_size). - * - * There are two additional restriction of the The specified size: - * (1) size should be in the range of [4096, 2 << 30] and - * (2) be the multiple of the CPU word (which helps with the memory - * alignment). - * - * We'll automatically check and adjust the size number to make sure it - * conforms to the restrictions. - * Default: 0 - * - * @param arenaBlockSize the size of an arena block - * @return the reference to the current option.
- * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms - * while overflowing the underlying platform specific value. - */ - MutableColumnFamilyOptionsInterface setArenaBlockSize(long arenaBlockSize); - - /** - * The size of one block in arena memory allocation. - * If ≤ 0, a proper value is automatically calculated (usually 1/10 of - * writer_buffer_size). - * - * There are two additional restriction of the The specified size: - * (1) size should be in the range of [4096, 2 << 30] and - * (2) be the multiple of the CPU word (which helps with the memory - * alignment). - * - * We'll automatically check and adjust the size number to make sure it - * conforms to the restrictions. - * Default: 0 - * - * @return the size of an arena block - */ - long arenaBlockSize(); - - /** - * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, - * create prefix bloom for memtable with the size of - * write_buffer_size * memtable_prefix_bloom_size_ratio. - * If it is larger than 0.25, it is santinized to 0.25. - * - * Default: 0 (disable) - * - * @param memtablePrefixBloomSizeRatio The ratio - * @return the reference to the current option. - */ - MutableColumnFamilyOptionsInterface setMemtablePrefixBloomSizeRatio( - double memtablePrefixBloomSizeRatio); - - /** - * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, - * create prefix bloom for memtable with the size of - * write_buffer_size * memtable_prefix_bloom_size_ratio. - * If it is larger than 0.25, it is santinized to 0.25. - * - * Default: 0 (disable) - * - * @return the ratio - */ - double memtablePrefixBloomSizeRatio(); - - /** - * Page size for huge page TLB for bloom in memtable. If ≤ 0, not allocate - * from huge page TLB but from malloc. - * Need to reserve huge pages for it to be allocated. For example: - * sysctl -w vm.nr_hugepages=20 - * See linux doc Documentation/vm/hugetlbpage.txt - * - * @param memtableHugePageSize The page size of the huge - * page tlb - * @return the reference to the current option. - */ - MutableColumnFamilyOptionsInterface setMemtableHugePageSize( - long memtableHugePageSize); - - /** - * Page size for huge page TLB for bloom in memtable. If ≤ 0, not allocate - * from huge page TLB but from malloc. - * Need to reserve huge pages for it to be allocated. For example: - * sysctl -w vm.nr_hugepages=20 - * See linux doc Documentation/vm/hugetlbpage.txt - * - * @return The page size of the huge page tlb - */ - long memtableHugePageSize(); - - /** - * Maximum number of successive merge operations on a key in the memtable. - * - * When a merge operation is added to the memtable and the maximum number of - * successive merges is reached, the value of the key will be calculated and - * inserted into the memtable instead of the merge operation. This will - * ensure that there are never more than max_successive_merges merge - * operations in the memtable. - * - * Default: 0 (disabled) - * - * @param maxSuccessiveMerges the maximum number of successive merges. - * @return the reference to the current option. - * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms - * while overflowing the underlying platform specific value. - */ - MutableColumnFamilyOptionsInterface setMaxSuccessiveMerges( - long maxSuccessiveMerges); - - /** - * Maximum number of successive merge operations on a key in the memtable. 
- * - * When a merge operation is added to the memtable and the maximum number of - * successive merges is reached, the value of the key will be calculated and - * inserted into the memtable instead of the merge operation. This will - * ensure that there are never more than max_successive_merges merge - * operations in the memtable. - * - * Default: 0 (disabled) - * - * @return the maximum number of successive merges. - */ - long maxSuccessiveMerges(); - - /** - * The maximum number of write buffers that are built up in memory. - * The default is 2, so that when 1 write buffer is being flushed to - * storage, new writes can continue to the other write buffer. - * Default: 2 - * - * @param maxWriteBufferNumber maximum number of write buffers. - * @return the instance of the current Object. - */ - MutableColumnFamilyOptionsInterface setMaxWriteBufferNumber( - int maxWriteBufferNumber); - - /** - * Returns maximum number of write buffers. - * - * @return maximum number of write buffers. - * @see #setMaxWriteBufferNumber(int) - */ - int maxWriteBufferNumber(); - - /** - * Number of locks used for inplace update - * Default: 10000, if inplace_update_support = true, else 0. - * - * @param inplaceUpdateNumLocks the number of locks used for - * inplace updates. - * @return the reference to the current option. - * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms - * while overflowing the underlying platform specific value. - */ - MutableColumnFamilyOptionsInterface setInplaceUpdateNumLocks( - long inplaceUpdateNumLocks); - - /** - * Number of locks used for inplace update - * Default: 10000, if inplace_update_support = true, else 0. - * - * @return the number of locks used for inplace update. - */ - long inplaceUpdateNumLocks(); - /** * Disable automatic compactions. Manual compactions can still * be issued on this column family @@ -218,108 +55,6 @@ public interface MutableColumnFamilyOptionsInterface { */ boolean disableAutoCompactions(); - /** - * Puts are delayed 0-1 ms when any level has a compaction score that exceeds - * soft_rate_limit. This is ignored when == 0.0. - * CONSTRAINT: soft_rate_limit ≤ hard_rate_limit. If this constraint does - * not hold, RocksDB will set soft_rate_limit = hard_rate_limit - * Default: 0 (disabled) - * - * @param softRateLimit the soft-rate-limit of a compaction score - * for put delay. - * @return the reference to the current option. - * - * @deprecated Instead use {@link #setSoftPendingCompactionBytesLimit(long)} - */ - @Deprecated - MutableColumnFamilyOptionsInterface setSoftRateLimit(double softRateLimit); - - /** - * Puts are delayed 0-1 ms when any level has a compaction score that exceeds - * soft_rate_limit. This is ignored when == 0.0. - * CONSTRAINT: soft_rate_limit ≤ hard_rate_limit. If this constraint does - * not hold, RocksDB will set soft_rate_limit = hard_rate_limit - * Default: 0 (disabled) - * - * @return soft-rate-limit for put delay. - * - * @deprecated Instead use {@link #softPendingCompactionBytesLimit()} - */ - @Deprecated - double softRateLimit(); - - /** - * All writes will be slowed down to at least delayed_write_rate if estimated - * bytes needed to be compaction exceed this threshold. - * - * Default: 64GB - * - * @param softPendingCompactionBytesLimit The soft limit to impose on - * compaction - * @return the reference to the current option. 
- */ - MutableColumnFamilyOptionsInterface setSoftPendingCompactionBytesLimit( - long softPendingCompactionBytesLimit); - - /** - * All writes will be slowed down to at least delayed_write_rate if estimated - * bytes needed to be compaction exceed this threshold. - * - * Default: 64GB - * - * @return The soft limit to impose on compaction - */ - long softPendingCompactionBytesLimit(); - - /** - * Puts are delayed 1ms at a time when any level has a compaction score that - * exceeds hard_rate_limit. This is ignored when ≤ 1.0. - * Default: 0 (disabled) - * - * @param hardRateLimit the hard-rate-limit of a compaction score for put - * delay. - * @return the reference to the current option. - * - * @deprecated Instead use {@link #setHardPendingCompactionBytesLimit(long)} - */ - @Deprecated - MutableColumnFamilyOptionsInterface setHardRateLimit(double hardRateLimit); - - /** - * Puts are delayed 1ms at a time when any level has a compaction score that - * exceeds hard_rate_limit. This is ignored when ≤ 1.0. - * Default: 0 (disabled) - * - * @return the hard-rate-limit of a compaction score for put delay. - * - * @deprecated Instead use {@link #hardPendingCompactionBytesLimit()} - */ - @Deprecated - double hardRateLimit(); - - /** - * All writes are stopped if estimated bytes needed to be compaction exceed - * this threshold. - * - * Default: 256GB - * - * @param hardPendingCompactionBytesLimit The hard limit to impose on - * compaction - * @return the reference to the current option. - */ - MutableColumnFamilyOptionsInterface setHardPendingCompactionBytesLimit( - long hardPendingCompactionBytesLimit); - - /** - * All writes are stopped if estimated bytes needed to be compaction exceed - * this threshold. - * - * Default: 256GB - * - * @return The hard limit to impose on compaction - */ - long hardPendingCompactionBytesLimit(); - /** * Number of files to trigger level-0 compaction. A value < 0 means that * level-0 compaction will not be triggered by number of files at all. @@ -343,44 +78,6 @@ public interface MutableColumnFamilyOptionsInterface { */ int level0FileNumCompactionTrigger(); - /** - * Soft limit on number of level-0 files. We start slowing down writes at this - * point. A value < 0 means that no writing slow down will be triggered by - * number of files in level-0. - * - * @param level0SlowdownWritesTrigger The soft limit on the number of - * level-0 files - * @return the reference to the current option. - */ - MutableColumnFamilyOptionsInterface setLevel0SlowdownWritesTrigger( - int level0SlowdownWritesTrigger); - - /** - * Soft limit on number of level-0 files. We start slowing down writes at this - * point. A value < 0 means that no writing slow down will be triggered by - * number of files in level-0. - * - * @return The soft limit on the number of - * level-0 files - */ - int level0SlowdownWritesTrigger(); - - /** - * Maximum number of level-0 files. We stop writes at this point. - * - * @param level0StopWritesTrigger The maximum number of level-0 files - * @return the reference to the current option. - */ - MutableColumnFamilyOptionsInterface setLevel0StopWritesTrigger( - int level0StopWritesTrigger); - - /** - * Maximum number of level-0 files. We stop writes at this point. - * - * @return The maximum number of level-0 files - */ - int level0StopWritesTrigger(); - /** * We try to limit number of bytes in one compaction to be lower than this * threshold. But it's not guaranteed. 
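With the deprecated soft/hard rate limits removed above, write back-pressure is now expressed through the pending-compaction-bytes limits, which remain runtime-mutable. A short sketch of retuning them on a live column family via `RocksDB#setOptions`, the API this patch's javadoc itself points to (assumes an already-open `db` and `cfHandle`; the byte limits shown are simply the documented defaults):

```java
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.CompressionType;
import org.rocksdb.MutableColumnFamilyOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

// Sketch: retune write back-pressure on a live column family.
// `db` and `cfHandle` are assumed to be open already.
void retuneBackPressure(final RocksDB db, final ColumnFamilyHandle cfHandle)
    throws RocksDBException {
  db.setOptions(cfHandle, MutableColumnFamilyOptions.builder()
      .setSoftPendingCompactionBytesLimit(64L << 30)   // slow writes at 64GB of compaction debt
      .setHardPendingCompactionBytesLimit(256L << 30)  // stop writes at 256GB of compaction debt
      .setCompressionType(CompressionType.LZ4_COMPRESSION) // new ENUM-typed mutable option
      .build());
}
```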
@@ -402,65 +99,6 @@ public interface MutableColumnFamilyOptionsInterface { */ long maxCompactionBytes(); - /** - * The target file size for compaction. - * This targetFileSizeBase determines a level-1 file size. - * Target file size for level L can be calculated by - * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) - * For example, if targetFileSizeBase is 2MB and - * target_file_size_multiplier is 10, then each file on level-1 will - * be 2MB, and each file on level 2 will be 20MB, - * and each file on level-3 will be 200MB. - * by default targetFileSizeBase is 2MB. - * - * @param targetFileSizeBase the target size of a level-0 file. - * @return the reference to the current option. - * - * @see #setTargetFileSizeMultiplier(int) - */ - MutableColumnFamilyOptionsInterface setTargetFileSizeBase( - long targetFileSizeBase); - - /** - * The target file size for compaction. - * This targetFileSizeBase determines a level-1 file size. - * Target file size for level L can be calculated by - * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) - * For example, if targetFileSizeBase is 2MB and - * target_file_size_multiplier is 10, then each file on level-1 will - * be 2MB, and each file on level 2 will be 20MB, - * and each file on level-3 will be 200MB. - * by default targetFileSizeBase is 2MB. - * - * @return the target size of a level-0 file. - * - * @see #targetFileSizeMultiplier() - */ - long targetFileSizeBase(); - - /** - * targetFileSizeMultiplier defines the size ratio between a - * level-L file and level-(L+1) file. - * By default target_file_size_multiplier is 1, meaning - * files in different levels have the same target. - * - * @param multiplier the size ratio between a level-(L+1) file - * and level-L file. - * @return the reference to the current option. - */ - MutableColumnFamilyOptionsInterface setTargetFileSizeMultiplier( - int multiplier); - - /** - * targetFileSizeMultiplier defines the size ratio between a - * level-(L+1) file and level-L file. - * By default targetFileSizeMultiplier is 1, meaning - * files in different levels have the same target. - * - * @return the size ratio between a level-(L+1) file and level-L file. - */ - int targetFileSizeMultiplier(); - /** * The upper-bound of the total size of level-1 files in bytes. * Maximum number of bytes for level L can be calculated as @@ -474,9 +112,10 @@ public interface MutableColumnFamilyOptionsInterface { * @param maxBytesForLevelBase maximum bytes for level base. * * @return the reference to the current option. - * @see #setMaxBytesForLevelMultiplier(double) + * + * See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)} */ - MutableColumnFamilyOptionsInterface setMaxBytesForLevelBase( + T setMaxBytesForLevelBase( long maxBytesForLevelBase); /** @@ -491,101 +130,30 @@ public interface MutableColumnFamilyOptionsInterface { * * @return the upper-bound of the total size of level-1 files * in bytes. - * @see #maxBytesForLevelMultiplier() - */ - long maxBytesForLevelBase(); - - /** - * The ratio between the total size of level-(L+1) files and the total - * size of level-L files for all L. - * DEFAULT: 10 - * - * @param multiplier the ratio between the total size of level-(L+1) - * files and the total size of level-L files for all L. - * @return the reference to the current option. 
- * @see #setMaxBytesForLevelBase(long) - */ - MutableColumnFamilyOptionsInterface setMaxBytesForLevelMultiplier(double multiplier); - - /** - * The ratio between the total size of level-(L+1) files and the total - * size of level-L files for all L. - * DEFAULT: 10 * - * @return the ratio between the total size of level-(L+1) files and - * the total size of level-L files for all L. - * @see #maxBytesForLevelBase() + * See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()} */ - double maxBytesForLevelMultiplier(); - - /** - * Different max-size multipliers for different levels. - * These are multiplied by max_bytes_for_level_multiplier to arrive - * at the max-size of each level. - * - * Default: 1 - * - * @param maxBytesForLevelMultiplierAdditional The max-size multipliers - * for each level - * @return the reference to the current option. - */ - MutableColumnFamilyOptionsInterface setMaxBytesForLevelMultiplierAdditional( - int[] maxBytesForLevelMultiplierAdditional); - - /** - * Different max-size multipliers for different levels. - * These are multiplied by max_bytes_for_level_multiplier to arrive - * at the max-size of each level. - * - * Default: 1 - * - * @return The max-size multipliers for each level - */ - int[] maxBytesForLevelMultiplierAdditional(); - - - /** - * An iteration->Next() sequentially skips over keys with the same - * user-key unless this option is set. This number specifies the number - * of keys (with the same userkey) that will be sequentially - * skipped before a reseek is issued. - * Default: 8 - * - * @param maxSequentialSkipInIterations the number of keys could - * be skipped in a iteration. - * @return the reference to the current option. - */ - MutableColumnFamilyOptionsInterface setMaxSequentialSkipInIterations( - long maxSequentialSkipInIterations); - - /** - * An iteration->Next() sequentially skips over keys with the same - * user-key unless this option is set. This number specifies the number - * of keys (with the same userkey) that will be sequentially - * skipped before a reseek is issued. - * Default: 8 - * - * @return the number of keys could be skipped in a iteration. - */ - long maxSequentialSkipInIterations(); + long maxBytesForLevelBase(); /** - * After writing every SST file, reopen it and read all the keys. + * Compress blocks using the specified compression algorithm. This + * parameter can be changed dynamically. * - * Default: false + * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. * - * @param paranoidFileChecks true to enable paranoid file checks + * @param compressionType Compression Type. * @return the reference to the current option. */ - MutableColumnFamilyOptionsInterface setParanoidFileChecks( - boolean paranoidFileChecks); + T setCompressionType( + CompressionType compressionType); /** - * After writing every SST file, reopen it and read all the keys. + * Compress blocks using the specified compression algorithm. This + * parameter can be changed dynamically. * - * Default: false + * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. * - * @return true if paranoid file checks are enabled + * @return Compression type. 
*/ - boolean paranoidFileChecks(); + CompressionType compressionType(); } diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index 05699837d..fce34c2e1 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -5,7 +5,10 @@ package org.rocksdb; +import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; /** @@ -16,11 +19,12 @@ import java.util.List; * automatically and native resources will be released as part of the process. */ public class Options extends RocksObject - implements DBOptionsInterface, ColumnFamilyOptionsInterface, - MutableColumnFamilyOptionsInterface { + implements DBOptionsInterface<Options>, ColumnFamilyOptionsInterface<Options>, + MutableColumnFamilyOptionsInterface<Options> { static { RocksDB.loadLibrary(); } + /** * Construct options for opening a RocksDB. * @@ -68,14 +72,7 @@ public class Options extends RocksObject return this; } - /** - * Use the specified object to interact with the environment, - * e.g. to read/write files, schedule background work, etc. - * Default: {@link Env#getDefault()} - * - * @param env {@link Env} instance. - * @return the instance of the current Options. - */ + @Override public Options setEnv(final Env env) { assert(isOwningHandle()); setEnv(nativeHandle_, env.nativeHandle_); @@ -83,11 +80,7 @@ public class Options extends RocksObject return this; } - /** - * Returns the set RocksEnv instance. - * - * @return {@link RocksEnv} instance set in the Options. - */ + @Override public Env getEnv() { return env_; } @@ -121,6 +114,12 @@ public class Options extends RocksObject return createMissingColumnFamilies(nativeHandle_); } + @Override + public Options optimizeForSmallDb() { + optimizeForSmallDb(nativeHandle_); + return this; + } + @Override public Options optimizeForPointLookup( long blockCacheSizeMb) { @@ -250,6 +249,19 @@ public class Options extends RocksObject return maxOpenFiles(nativeHandle_); } + @Override + public Options setMaxFileOpeningThreads(final int maxFileOpeningThreads) { + assert(isOwningHandle()); + setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads); + return this; + } + + @Override + public int maxFileOpeningThreads() { + assert(isOwningHandle()); + return maxFileOpeningThreads(nativeHandle_); + } + @Override public Options setMaxTotalWalSize(final long maxTotalWalSize) { assert(isOwningHandle()); @@ -283,6 +295,43 @@ public class Options extends RocksObject return this; } + @Override + public Options setDbPaths(final Collection<DbPath> dbPaths) { + assert(isOwningHandle()); + + final int len = dbPaths.size(); + final String paths[] = new String[len]; + final long targetSizes[] = new long[len]; + + int i = 0; + for(final DbPath dbPath : dbPaths) { + paths[i] = dbPath.path.toString(); + targetSizes[i] = dbPath.targetSize; + i++; + } + setDbPaths(nativeHandle_, paths, targetSizes); + return this; + } + + @Override + public List<DbPath> dbPaths() { + final int len = (int)dbPathsLen(nativeHandle_); + if(len == 0) { + return Collections.emptyList(); + } else { + final String paths[] = new String[len]; + final long targetSizes[] = new long[len]; + + dbPaths(nativeHandle_, paths, targetSizes); + + final List<DbPath> dbPaths = new ArrayList<>(); + for(int i = 0; i < len; i++) { + dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i])); + } + return dbPaths; + } + } + @Override public String dbLogDir() { assert(isOwningHandle()); @@ -435,6 +484,20 @@ public class Options extends
RocksObject return this; } + + @Override + public Options setRecycleLogFileNum(final long recycleLogFileNum) { + assert(isOwningHandle()); + setRecycleLogFileNum(nativeHandle_, recycleLogFileNum); + return this; + } + + @Override + public long recycleLogFileNum() { + assert(isOwningHandle()); + return recycleLogFileNum(nativeHandle_); + } + @Override public long maxManifestFileSize() { assert(isOwningHandle()); @@ -542,6 +605,18 @@ public class Options extends RocksObject return useDirectWrites(nativeHandle_); } + @Override + public Options setAllowFAllocate(final boolean allowFAllocate) { + assert(isOwningHandle()); + setAllowFAllocate(nativeHandle_, allowFAllocate); + return this; + } + + @Override + public boolean allowFAllocate() { + assert(isOwningHandle()); + return allowFAllocate(nativeHandle_); + } @Override public boolean allowMmapReads() { @@ -607,6 +682,86 @@ public class Options extends RocksObject return this; } + @Override + public Options setDbWriteBufferSize(final long dbWriteBufferSize) { + assert(isOwningHandle()); + setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize); + return this; + } + + @Override + public long dbWriteBufferSize() { + assert(isOwningHandle()); + return dbWriteBufferSize(nativeHandle_); + } + + @Override + public Options setAccessHintOnCompactionStart(final AccessHint accessHint) { + assert(isOwningHandle()); + setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue()); + return this; + } + + @Override + public AccessHint accessHintOnCompactionStart() { + assert(isOwningHandle()); + return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_)); + } + + @Override + public Options setNewTableReaderForCompactionInputs( + final boolean newTableReaderForCompactionInputs) { + assert(isOwningHandle()); + setNewTableReaderForCompactionInputs(nativeHandle_, + newTableReaderForCompactionInputs); + return this; + } + + @Override + public boolean newTableReaderForCompactionInputs() { + assert(isOwningHandle()); + return newTableReaderForCompactionInputs(nativeHandle_); + } + + @Override + public Options setCompactionReadaheadSize(final long compactionReadaheadSize) { + assert(isOwningHandle()); + setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize); + return this; + } + + @Override + public long compactionReadaheadSize() { + assert(isOwningHandle()); + return compactionReadaheadSize(nativeHandle_); + } + + @Override + public Options setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) { + assert(isOwningHandle()); + setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize); + return this; + } + + @Override + public long randomAccessMaxBufferSize() { + assert(isOwningHandle()); + return randomAccessMaxBufferSize(nativeHandle_); + } + + @Override + public Options setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) { + assert(isOwningHandle()); + setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize); + return this; + } + + @Override + public long writableFileMaxBufferSize() { + assert(isOwningHandle()); + return writableFileMaxBufferSize(nativeHandle_); + } + @Override public boolean useAdaptiveMutex() { assert(isOwningHandle()); @@ -633,10 +788,49 @@ public class Options extends RocksObject } @Override - public void setAllowConcurrentMemtableWrite( + public Options setWalBytesPerSync(final long walBytesPerSync) { + assert(isOwningHandle()); + setWalBytesPerSync(nativeHandle_, walBytesPerSync); + return this; + } + + @Override + public long walBytesPerSync() { + 
assert(isOwningHandle()); + return walBytesPerSync(nativeHandle_); + } + + @Override + public Options setEnableThreadTracking(final boolean enableThreadTracking) { + assert(isOwningHandle()); + setEnableThreadTracking(nativeHandle_, enableThreadTracking); + return this; + } + + @Override + public boolean enableThreadTracking() { + assert(isOwningHandle()); + return enableThreadTracking(nativeHandle_); + } + + @Override + public Options setDelayedWriteRate(final long delayedWriteRate) { + assert(isOwningHandle()); + setDelayedWriteRate(nativeHandle_, delayedWriteRate); + return this; + } + + @Override + public long delayedWriteRate(){ + return delayedWriteRate(nativeHandle_); + } + + @Override + public Options setAllowConcurrentMemtableWrite( final boolean allowConcurrentMemtableWrite) { setAllowConcurrentMemtableWrite(nativeHandle_, allowConcurrentMemtableWrite); + return this; } @Override @@ -645,10 +839,11 @@ public class Options extends RocksObject } @Override - public void setEnableWriteThreadAdaptiveYield( + public Options setEnableWriteThreadAdaptiveYield( final boolean enableWriteThreadAdaptiveYield) { setEnableWriteThreadAdaptiveYield(nativeHandle_, enableWriteThreadAdaptiveYield); + return this; } @Override @@ -657,8 +852,9 @@ public class Options extends RocksObject } @Override - public void setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) { + public Options setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) { setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec); + return this; } @Override @@ -667,8 +863,9 @@ public class Options extends RocksObject } @Override - public void setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) { + public Options setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) { setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec); + return this; } @Override @@ -676,6 +873,116 @@ public class Options extends RocksObject return writeThreadSlowYieldUsec(nativeHandle_); } + @Override + public Options setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) { + assert(isOwningHandle()); + setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen); + return this; + } + + @Override + public boolean skipStatsUpdateOnDbOpen() { + assert(isOwningHandle()); + return skipStatsUpdateOnDbOpen(nativeHandle_); + } + + @Override + public Options setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) { + assert(isOwningHandle()); + setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue()); + return this; + } + + @Override + public WALRecoveryMode walRecoveryMode() { + assert(isOwningHandle()); + return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_)); + } + + @Override + public Options setAllow2pc(final boolean allow2pc) { + assert(isOwningHandle()); + setAllow2pc(nativeHandle_, allow2pc); + return this; + } + + @Override + public boolean allow2pc() { + assert(isOwningHandle()); + return allow2pc(nativeHandle_); + } + + @Override + public Options setRowCache(final Cache rowCache) { + assert(isOwningHandle()); + setRowCache(nativeHandle_, rowCache.nativeHandle_); + this.rowCache_ = rowCache; + return this; + } + + @Override + public Cache rowCache() { + assert(isOwningHandle()); + return this.rowCache_; + } + + @Override + public Options setFailIfOptionsFileError(final boolean failIfOptionsFileError) { + assert(isOwningHandle()); + setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError); + return this; + } + + @Override + public boolean 
failIfOptionsFileError() { + assert(isOwningHandle()); + return failIfOptionsFileError(nativeHandle_); + } + + @Override + public Options setDumpMallocStats(final boolean dumpMallocStats) { + assert(isOwningHandle()); + setDumpMallocStats(nativeHandle_, dumpMallocStats); + return this; + } + + @Override + public boolean dumpMallocStats() { + assert(isOwningHandle()); + return dumpMallocStats(nativeHandle_); + } + + @Override + public Options setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) { + assert(isOwningHandle()); + setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery); + return this; + } + + @Override + public boolean avoidFlushDuringRecovery() { + assert(isOwningHandle()); + return avoidFlushDuringRecovery(nativeHandle_); + } + + @Override + public Options setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) { + assert(isOwningHandle()); + setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown); + return this; + } + + @Override + public boolean avoidFlushDuringShutdown() { + assert(isOwningHandle()); + return avoidFlushDuringShutdown(nativeHandle_); + } + + @Override + public MemTableConfig memTableConfig() { + return this.memTableConfig_; + } + @Override public Options setMemTableConfig(final MemTableConfig config) { memTableConfig_ = config; @@ -718,6 +1025,11 @@ public class Options extends RocksObject return memTableFactoryName(nativeHandle_); } + @Override + public TableFormatConfig tableFormatConfig() { + return this.tableFormatConfig_; + } + @Override public Options setTableFormatConfig(final TableFormatConfig config) { tableFormatConfig_ = config; @@ -747,7 +1059,7 @@ public class Options extends RocksObject @Override public CompressionType compressionType() { - return CompressionType.values()[compressionType(nativeHandle_)]; + return CompressionType.getCompressionType(compressionType(nativeHandle_)); } @Override @@ -780,6 +1092,34 @@ public class Options extends RocksObject return this; } + + @Override + public Options setBottommostCompressionType( + final CompressionType bottommostCompressionType) { + setBottommostCompressionType(nativeHandle_, + bottommostCompressionType.getValue()); + return this; + } + + @Override + public CompressionType bottommostCompressionType() { + return CompressionType.getCompressionType( + bottommostCompressionType(nativeHandle_)); + } + + @Override + public Options setCompressionOptions( + final CompressionOptions compressionOptions) { + setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_); + this.compressionOptions_ = compressionOptions; + return this; + } + + @Override + public CompressionOptions compressionOptions() { + return this.compressionOptions_; + } + @Override public CompactionStyle compactionStyle() { return CompactionStyle.values()[compactionStyle(nativeHandle_)]; @@ -840,17 +1180,6 @@ public class Options extends RocksObject return this; } - @Override - public int maxMemCompactionLevel() { - return 0; - } - - @Override - public Options setMaxMemCompactionLevel( - final int maxMemCompactionLevel) { - return this; - } - @Override public long targetFileSizeBase() { return targetFileSizeBase(nativeHandle_); @@ -919,41 +1248,6 @@ public class Options extends RocksObject return this; } - @Override - public double softRateLimit() { - return softRateLimit(nativeHandle_); - } - - @Override - public Options setSoftRateLimit(final double softRateLimit) { - setSoftRateLimit(nativeHandle_, softRateLimit); - return this; - } - - @Override - public double 
hardRateLimit() { - return hardRateLimit(nativeHandle_); - } - - @Override - public Options setHardRateLimit(double hardRateLimit) { - setHardRateLimit(nativeHandle_, hardRateLimit); - return this; - } - - @Override - public int rateLimitDelayMaxMilliseconds() { - return rateLimitDelayMaxMilliseconds(nativeHandle_); - } - - @Override - public Options setRateLimitDelayMaxMilliseconds( - final int rateLimitDelayMaxMilliseconds) { - setRateLimitDelayMaxMilliseconds( - nativeHandle_, rateLimitDelayMaxMilliseconds); - return this; - } - @Override public long arenaBlockSize() { return arenaBlockSize(nativeHandle_); @@ -977,19 +1271,6 @@ public class Options extends RocksObject return this; } - @Override - public boolean purgeRedundantKvsWhileFlush() { - return purgeRedundantKvsWhileFlush(nativeHandle_); - } - - @Override - public Options setPurgeRedundantKvsWhileFlush( - final boolean purgeRedundantKvsWhileFlush) { - setPurgeRedundantKvsWhileFlush( - nativeHandle_, purgeRedundantKvsWhileFlush); - return this; - } - @Override public long maxSequentialSkipInIterations() { return maxSequentialSkipInIterations(nativeHandle_); @@ -1144,7 +1425,7 @@ public class Options extends RocksObject } @Override - public MutableColumnFamilyOptionsInterface setLevel0StopWritesTrigger(int level0StopWritesTrigger) { + public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) { setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger); return this; } @@ -1176,6 +1457,81 @@ public class Options extends RocksObject return paranoidFileChecks(nativeHandle_); } + @Override + public Options setMaxWriteBufferNumberToMaintain( + final int maxWriteBufferNumberToMaintain) { + setMaxWriteBufferNumberToMaintain( + nativeHandle_, maxWriteBufferNumberToMaintain); + return this; + } + + @Override + public int maxWriteBufferNumberToMaintain() { + return maxWriteBufferNumberToMaintain(nativeHandle_); + } + + @Override + public Options setCompactionPriority( + final CompactionPriority compactionPriority) { + setCompactionPriority(nativeHandle_, compactionPriority.getValue()); + return this; + } + + @Override + public CompactionPriority compactionPriority() { + return CompactionPriority.getCompactionPriority( + compactionPriority(nativeHandle_)); + } + + @Override + public Options setReportBgIoStats(final boolean reportBgIoStats) { + setReportBgIoStats(nativeHandle_, reportBgIoStats); + return this; + } + + @Override + public boolean reportBgIoStats() { + return reportBgIoStats(nativeHandle_); + } + + @Override + public Options setCompactionOptionsUniversal( + final CompactionOptionsUniversal compactionOptionsUniversal) { + setCompactionOptionsUniversal(nativeHandle_, + compactionOptionsUniversal.nativeHandle_); + this.compactionOptionsUniversal_ = compactionOptionsUniversal; + return this; + } + + @Override + public CompactionOptionsUniversal compactionOptionsUniversal() { + return this.compactionOptionsUniversal_; + } + + @Override + public Options setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) { + setCompactionOptionsFIFO(nativeHandle_, + compactionOptionsFIFO.nativeHandle_); + this.compactionOptionsFIFO_ = compactionOptionsFIFO; + return this; + } + + @Override + public CompactionOptionsFIFO compactionOptionsFIFO() { + return this.compactionOptionsFIFO_; + } + + @Override + public Options setForceConsistencyChecks(final boolean forceConsistencyChecks) { + setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks); + return this; + } + + @Override + public boolean 
forceConsistencyChecks() { + return forceConsistencyChecks(nativeHandle_); + } + private native static long newOptions(); private native static long newOptions(long dbOptHandle, long cfOptHandle); @@ -1205,11 +1561,19 @@ public class Options extends RocksObject private native int maxOpenFiles(long handle); private native void setMaxTotalWalSize(long handle, long maxTotalWalSize); + private native void setMaxFileOpeningThreads(final long handle, + final int maxFileOpeningThreads); + private native int maxFileOpeningThreads(final long handle); private native long maxTotalWalSize(long handle); private native void createStatistics(long optHandle); private native long statisticsPtr(long optHandle); private native boolean useFsync(long handle); private native void setUseFsync(long handle, boolean useFsync); + private native void setDbPaths(final long handle, final String[] paths, + final long[] targetSizes); + private native long dbPathsLen(final long handle); + private native void dbPaths(final long handle, final String[] paths, + final long[] targetSizes); private native void setDbLogDir(long handle, String dbLogDir); private native String dbLogDir(long handle); private native void setWalDir(long handle, String walDir); @@ -1237,6 +1601,8 @@ public class Options extends RocksObject private native void setKeepLogFileNum(long handle, long keepLogFileNum) throws IllegalArgumentException; private native long keepLogFileNum(long handle); + private native void setRecycleLogFileNum(long handle, long recycleLogFileNum); + private native long recycleLogFileNum(long handle); private native void setMaxManifestFileSize( long handle, long maxManifestFileSize); private native long maxManifestFileSize(long handle); @@ -1257,6 +1623,9 @@ public class Options extends RocksObject private native boolean useDirectReads(long handle); private native void setUseDirectWrites(long handle, boolean useDirectWrites); private native boolean useDirectWrites(long handle); + private native void setAllowFAllocate(final long handle, + final boolean allowFAllocate); + private native boolean allowFAllocate(final long handle); private native void setAllowMmapReads( long handle, boolean allowMmapReads); private native boolean allowMmapReads(long handle); @@ -1272,12 +1641,37 @@ public class Options extends RocksObject private native void setAdviseRandomOnOpen( long handle, boolean adviseRandomOnOpen); private native boolean adviseRandomOnOpen(long handle); + private native void setDbWriteBufferSize(final long handle, + final long dbWriteBufferSize); + private native long dbWriteBufferSize(final long handle); + private native void setAccessHintOnCompactionStart(final long handle, + final byte accessHintOnCompactionStart); + private native byte accessHintOnCompactionStart(final long handle); + private native void setNewTableReaderForCompactionInputs(final long handle, + final boolean newTableReaderForCompactionInputs); + private native boolean newTableReaderForCompactionInputs(final long handle); + private native void setCompactionReadaheadSize(final long handle, + final long compactionReadaheadSize); + private native long compactionReadaheadSize(final long handle); + private native void setRandomAccessMaxBufferSize(final long handle, + final long randomAccessMaxBufferSize); + private native long randomAccessMaxBufferSize(final long handle); + private native void setWritableFileMaxBufferSize(final long handle, + final long writableFileMaxBufferSize); + private native long writableFileMaxBufferSize(final long handle); private native 
void setUseAdaptiveMutex( long handle, boolean useAdaptiveMutex); private native boolean useAdaptiveMutex(long handle); private native void setBytesPerSync( long handle, long bytesPerSync); private native long bytesPerSync(long handle); + private native void setWalBytesPerSync(long handle, long walBytesPerSync); + private native long walBytesPerSync(long handle); + private native void setEnableThreadTracking(long handle, + boolean enableThreadTracking); + private native boolean enableThreadTracking(long handle); + private native void setDelayedWriteRate(long handle, long delayedWriteRate); + private native long delayedWriteRate(long handle); private native void setAllowConcurrentMemtableWrite(long handle, boolean allowConcurrentMemtableWrite); private native boolean allowConcurrentMemtableWrite(long handle); @@ -1290,7 +1684,32 @@ public class Options extends RocksObject private native void setWriteThreadSlowYieldUsec(long handle, long writeThreadSlowYieldUsec); private native long writeThreadSlowYieldUsec(long handle); + private native void setSkipStatsUpdateOnDbOpen(final long handle, + final boolean skipStatsUpdateOnDbOpen); + private native boolean skipStatsUpdateOnDbOpen(final long handle); + private native void setWalRecoveryMode(final long handle, + final byte walRecoveryMode); + private native byte walRecoveryMode(final long handle); + private native void setAllow2pc(final long handle, + final boolean allow2pc); + private native boolean allow2pc(final long handle); + private native void setRowCache(final long handle, + final long row_cache_handle); + private native void setFailIfOptionsFileError(final long handle, + final boolean failIfOptionsFileError); + private native boolean failIfOptionsFileError(final long handle); + private native void setDumpMallocStats(final long handle, + final boolean dumpMallocStats); + private native boolean dumpMallocStats(final long handle); + private native void setAvoidFlushDuringRecovery(final long handle, + final boolean avoidFlushDuringRecovery); + private native boolean avoidFlushDuringRecovery(final long handle); + private native void setAvoidFlushDuringShutdown(final long handle, + final boolean avoidFlushDuringShutdown); + private native boolean avoidFlushDuringShutdown(final long handle); + // CF native handles + private native void optimizeForSmallDb(final long handle); private native void optimizeForPointLookup(long handle, long blockCacheSizeMb); private native void optimizeLevelStyleCompaction(long handle, @@ -1318,6 +1737,11 @@ public class Options extends RocksObject private native void setCompressionPerLevel(long handle, byte[] compressionLevels); private native byte[] compressionPerLevel(long handle); + private native void setBottommostCompressionType(long handle, + byte bottommostCompressionType); + private native byte bottommostCompressionType(long handle); + private native void setCompressionOptions(long handle, + long compressionOptionsHandle); private native void useFixedLengthPrefixExtractor( long handle, int prefixLength); private native void useCappedPrefixExtractor( @@ -1351,15 +1775,6 @@ public class Options extends RocksObject private native double maxBytesForLevelMultiplier(long handle); private native void setMaxCompactionBytes(long handle, long maxCompactionBytes); private native long maxCompactionBytes(long handle); - private native void setSoftRateLimit( - long handle, double softRateLimit); - private native double softRateLimit(long handle); - private native void setHardRateLimit( - long handle, double 
hardRateLimit); - private native double hardRateLimit(long handle); - private native void setRateLimitDelayMaxMilliseconds( - long handle, int rateLimitDelayMaxMilliseconds); - private native int rateLimitDelayMaxMilliseconds(long handle); private native void setArenaBlockSize( long handle, long arenaBlockSize) throws IllegalArgumentException; private native long arenaBlockSize(long handle); @@ -1368,9 +1783,6 @@ public class Options extends RocksObject private native boolean disableAutoCompactions(long handle); private native void setCompactionStyle(long handle, byte compactionStyle); private native byte compactionStyle(long handle); - private native void setPurgeRedundantKvsWhileFlush( - long handle, boolean purgeRedundantKvsWhileFlush); - private native boolean purgeRedundantKvsWhileFlush(long handle); private native void setMaxSequentialSkipInIterations( long handle, long maxSequentialSkipInIterations); private native long maxSequentialSkipInIterations(long handle); @@ -1422,10 +1834,31 @@ public class Options extends RocksObject private native void setParanoidFileChecks(long handle, boolean paranoidFileChecks); private native boolean paranoidFileChecks(long handle); + private native void setMaxWriteBufferNumberToMaintain(final long handle, + final int maxWriteBufferNumberToMaintain); + private native int maxWriteBufferNumberToMaintain(final long handle); + private native void setCompactionPriority(final long handle, + final byte compactionPriority); + private native byte compactionPriority(final long handle); + private native void setReportBgIoStats(final long handle, + final boolean reportBgIoStats); + private native boolean reportBgIoStats(final long handle); + private native void setCompactionOptionsUniversal(final long handle, + final long compactionOptionsUniversalHandle); + private native void setCompactionOptionsFIFO(final long handle, + final long compactionOptionsFIFOHandle); + private native void setForceConsistencyChecks(final long handle, + final boolean forceConsistencyChecks); + private native boolean forceConsistencyChecks(final long handle); + // instance variables - Env env_; - MemTableConfig memTableConfig_; - TableFormatConfig tableFormatConfig_; - RateLimiter rateLimiter_; - AbstractComparator<? extends AbstractSlice<?>> comparator_; + private Env env_; + private MemTableConfig memTableConfig_; + private TableFormatConfig tableFormatConfig_; + private RateLimiter rateLimiter_; + private AbstractComparator<? extends AbstractSlice<?>> comparator_; + private CompactionOptionsUniversal compactionOptionsUniversal_; + private CompactionOptionsFIFO compactionOptionsFIFO_; + private CompressionOptions compressionOptions_; + private Cache rowCache_; } diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java index 9bb23d013..ccdea2964 100644 --- a/java/src/main/java/org/rocksdb/ReadOptions.java +++ b/java/src/main/java/org/rocksdb/ReadOptions.java @@ -269,6 +269,100 @@ public class ReadOptions extends RocksObject { return this; } + /** + * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we + * schedule a background job in the flush job queue and delete obsolete files + * in the background.
+ * + * Default: false + * + * @return true when PurgeObsoleteFile is called in CleanupIteratorState + */ + public boolean backgroundPurgeOnIteratorCleanup() { + assert(isOwningHandle()); + return backgroundPurgeOnIteratorCleanup(nativeHandle_); + } + + /** + * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we + * schedule a background job in the flush job queue and delete obsolete files + * in the background. + * + * Default: false + * + * @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is + * called in CleanupIteratorState + * @return the reference to the current ReadOptions. + */ + public ReadOptions setBackgroundPurgeOnIteratorCleanup( + final boolean backgroundPurgeOnIteratorCleanup) { + assert(isOwningHandle()); + setBackgroundPurgeOnIteratorCleanup(nativeHandle_, + backgroundPurgeOnIteratorCleanup); + return this; + } + + /** + * If non-zero, NewIterator will create a new table reader which + * performs reads of the given size. Using a large size (> 2MB) can + * improve the performance of forward iteration on spinning disks. + * + * Default: 0 + * + * @return The readahead size in bytes + */ + public long readaheadSize() { + assert(isOwningHandle()); + return readaheadSize(nativeHandle_); + } + + /** + * If non-zero, NewIterator will create a new table reader which + * performs reads of the given size. Using a large size (> 2MB) can + * improve the performance of forward iteration on spinning disks. + * + * Default: 0 + * + * @param readaheadSize The readahead size in bytes + * @return the reference to the current ReadOptions. + */ + public ReadOptions setReadaheadSize(final long readaheadSize) { + assert(isOwningHandle()); + setReadaheadSize(nativeHandle_, readaheadSize); + return this; + } + + /** + * If true, keys deleted using the DeleteRange() API will be visible to + * readers until they are naturally deleted during compaction. This improves + * read performance in DBs with many range deletions. + * + * Default: false + * + * @return true if keys deleted using the DeleteRange() API will be visible + */ + public boolean ignoreRangeDeletions() { + assert(isOwningHandle()); + return ignoreRangeDeletions(nativeHandle_); + } + + /** + * If true, keys deleted using the DeleteRange() API will be visible to + * readers until they are naturally deleted during compaction. This improves + * read performance in DBs with many range deletions. + * + * Default: false + * + * @param ignoreRangeDeletions true if keys deleted using the DeleteRange() + * API should be visible + * @return the reference to the current ReadOptions.
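
A brief usage sketch of the three ReadOptions knobs added here (illustration only, not part of the patch); `db` stands for an assumed, already-open RocksDB instance:

    // Sketch: purge obsolete files in a background job on iterator cleanup,
    // use 2 MB of readahead for forward scans on spinning disks, and skip
    // range-deletion filtering for faster reads.
    try (final ReadOptions readOptions = new ReadOptions()
             .setBackgroundPurgeOnIteratorCleanup(true)
             .setReadaheadSize(2 * 1024 * 1024)
             .setIgnoreRangeDeletions(true);
         final RocksIterator it = db.newIterator(readOptions)) {
      for (it.seekToFirst(); it.isValid(); it.next()) {
        // process it.key() / it.value()
      }
    }
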
+ */ + public ReadOptions setIgnoreRangeDeletions(final boolean ignoreRangeDeletions) { + assert(isOwningHandle()); + setIgnoreRangeDeletions(nativeHandle_, ignoreRangeDeletions); + return this; + } + private native static long newReadOptions(); private native boolean verifyChecksums(long handle); private native void setVerifyChecksums(long handle, boolean verifyChecksums); @@ -288,6 +382,15 @@ public class ReadOptions extends RocksObject { private native void setPrefixSameAsStart(long handle, boolean prefixSameAsStart); private native boolean pinData(long handle); private native void setPinData(long handle, boolean pinData); + private native boolean backgroundPurgeOnIteratorCleanup(final long handle); + private native void setBackgroundPurgeOnIteratorCleanup(final long handle, + final boolean backgroundPurgeOnIteratorCleanup); + private native long readaheadSize(final long handle); + private native void setReadaheadSize(final long handle, + final long readaheadSize); + private native boolean ignoreRangeDeletions(final long handle); + private native void setIgnoreRangeDeletions(final long handle, + final boolean ignoreRangeDeletions); @Override protected final native void disposeInternal(final long handle); diff --git a/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/java/src/main/java/org/rocksdb/WALRecoveryMode.java new file mode 100644 index 000000000..c5470da9c --- /dev/null +++ b/java/src/main/java/org/rocksdb/WALRecoveryMode.java @@ -0,0 +1,83 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * The WAL Recovery Mode + */ +public enum WALRecoveryMode { + + /** + * Original LevelDB recovery + * + * We tolerate incomplete records in trailing data on all logs + * Use case: This is legacy behavior (default) + */ + TolerateCorruptedTailRecords((byte)0x00), + + /** + * Recover from clean shutdown + * + * We don't expect to find any corruption in the WAL + * Use case: This is ideal for unit tests and rare applications that + * require high consistency guarantees + */ + AbsoluteConsistency((byte)0x01), + + /** + * Recover to point-in-time consistency + * We stop WAL playback on discovering a WAL inconsistency + * Use case: Ideal for systems with a disk controller cache, such as a + * hard disk or an SSD without a super capacitor, that may buffer related data + */ + PointInTimeRecovery((byte)0x02), + + /** + * Recovery after a disaster + * We ignore any corruption in the WAL and try to salvage as much data as + * possible + * Use case: Ideal for a last-ditch effort to recover data, or for systems + * that operate with low-grade, unrelated data + */ + SkipAnyCorruptedRecords((byte)0x03); + + private byte value; + + WALRecoveryMode(final byte value) { + this.value = value; + } + + /** +

* Returns the byte value of the enumeration's value.

+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + *

* Get the WALRecoveryMode enumeration value by + * passing the byte identifier to this method.
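
The byte value is intended to round-trip through this lookup; a tiny sketch (illustration only, not part of the patch):

    // TolerateCorruptedTailRecords <-> 0x00, PointInTimeRecovery <-> 0x02, etc.
    final byte encoded = WALRecoveryMode.PointInTimeRecovery.getValue();
    final WALRecoveryMode decoded = WALRecoveryMode.getWALRecoveryMode(encoded);
    assert decoded == WALRecoveryMode.PointInTimeRecovery;
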

+ * + * @param byteIdentifier of WALRecoveryMode. + * + * @return WALRecoveryMode instance. + * + * @throws IllegalArgumentException If WALRecoveryMode cannot be found for the + * provided byteIdentifier + */ + public static WALRecoveryMode getWALRecoveryMode(final byte byteIdentifier) { + for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) { + if (walRecoveryMode.getValue() == byteIdentifier) { + return walRecoveryMode; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for WALRecoveryMode."); + } +} diff --git a/java/src/main/java/org/rocksdb/WriteOptions.java b/java/src/main/java/org/rocksdb/WriteOptions.java index 4e7abd873..6055d1761 100644 --- a/java/src/main/java/org/rocksdb/WriteOptions.java +++ b/java/src/main/java/org/rocksdb/WriteOptions.java @@ -92,10 +92,68 @@ public class WriteOptions extends RocksObject { return disableWAL(nativeHandle_); } + /** + * If true and the user is trying to write to column families that don't exist + * (they were dropped), ignore the write (don't return an error). If there + * are multiple writes in a WriteBatch, other writes will succeed. + * + * Default: false + * + * @param ignoreMissingColumnFamilies true to ignore writes to column families + * which don't exist + * @return the instance of the current WriteOptions. + */ + public WriteOptions setIgnoreMissingColumnFamilies( + final boolean ignoreMissingColumnFamilies) { + setIgnoreMissingColumnFamilies(nativeHandle_, ignoreMissingColumnFamilies); + return this; + } + + /** + * If true and the user is trying to write to column families that don't exist + * (they were dropped), ignore the write (don't return an error). If there + * are multiple writes in a WriteBatch, other writes will succeed. + * + * Default: false + * + * @return true if writes to column families which don't exist are ignored + */ + public boolean ignoreMissingColumnFamilies() { + return ignoreMissingColumnFamilies(nativeHandle_); + } + + /** + * If true and we need to wait or sleep for the write request, it fails + * immediately with {@link Status.Code#Incomplete}. + * + * @param noSlowdown true to fail write requests if we need to wait or sleep + * @return the instance of the current WriteOptions. + */ + public WriteOptions setNoSlowdown(final boolean noSlowdown) { + setNoSlowdown(nativeHandle_, noSlowdown); + return this; + } + + /** + * If true and we need to wait or sleep for the write request, it fails + * immediately with {@link Status.Code#Incomplete}.
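
A combined usage sketch for the two new WriteOptions flags (illustration only, not part of the patch); `db`, `cfHandle`, `key` and `value` are assumed to exist, and `db.put(...)` throws RocksDBException:

    // Sketch: silently drop writes aimed at dropped column families, and fail
    // fast with an Incomplete status instead of stalling on write slowdowns.
    try (final WriteOptions writeOptions = new WriteOptions()
             .setIgnoreMissingColumnFamilies(true)
             .setNoSlowdown(true)) {
      db.put(cfHandle, writeOptions, key, value);
    }
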
+ * + * @return true when write requests fail if we would need to wait or sleep + */ + public boolean noSlowdown() { + return noSlowdown(nativeHandle_); + } + private native static long newWriteOptions(); private native void setSync(long handle, boolean flag); private native boolean sync(long handle); private native void setDisableWAL(long handle, boolean flag); private native boolean disableWAL(long handle); + private native void setIgnoreMissingColumnFamilies(final long handle, + final boolean ignoreMissingColumnFamilies); + private native boolean ignoreMissingColumnFamilies(final long handle); + private native void setNoSlowdown(final long handle, + final boolean noSlowdown); + private native boolean noSlowdown(final long handle); @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java b/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java index c3836ac9b..597d9723f 100644 --- a/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java +++ b/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java @@ -38,6 +38,21 @@ public class BackupableDBOptionsTest { } } + @Test + public void env() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + assertThat(backupableDBOptions.backupEnv()). + isNull(); + + try(final Env env = new RocksMemEnv()) { + backupableDBOptions.setBackupEnv(env); + assertThat(backupableDBOptions.backupEnv()) + .isEqualTo(env); + } + } + } + @Test public void shareTableFiles() { try (final BackupableDBOptions backupableDBOptions = @@ -49,6 +64,27 @@ public class BackupableDBOptionsTest { } } + @Test + public void infoLog() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + assertThat(backupableDBOptions.infoLog()). + isNull(); + + try(final Options options = new Options(); + final Logger logger = new Logger(options){ + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + + } + }) { + backupableDBOptions.setInfoLog(logger); + assertThat(backupableDBOptions.infoLog()) + .isEqualTo(logger); + } + } + } + @Test public void sync() { try (final BackupableDBOptions backupableDBOptions = @@ -96,6 +132,22 @@ public class BackupableDBOptionsTest { } } + @Test + public void backupRateLimiter() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + assertThat(backupableDBOptions.backupRateLimiter()). + isNull(); + + try(final RateLimiter backupRateLimiter = + new RateLimiter(999)) { + backupableDBOptions.setBackupRateLimiter(backupRateLimiter); + assertThat(backupableDBOptions.backupRateLimiter()) + .isEqualTo(backupRateLimiter); + } + } + } + @Test public void restoreRateLimit() { try (final BackupableDBOptions backupableDBOptions = @@ -111,6 +163,22 @@ public class BackupableDBOptionsTest { } } + @Test + public void restoreRateLimiter() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + assertThat(backupableDBOptions.restoreRateLimiter()).
+ isNull(); + + try(final RateLimiter restoreRateLimiter = + new RateLimiter(911)) { + backupableDBOptions.setRestoreRateLimiter(restoreRateLimiter); + assertThat(backupableDBOptions.restoreRateLimiter()) + .isEqualTo(restoreRateLimiter); + } + } + } + @Test public void shareFilesWithChecksum() { try (final BackupableDBOptions backupableDBOptions = @@ -122,6 +190,28 @@ public class BackupableDBOptionsTest { } } + @Test + public void maxBackgroundOperations() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final int value = rand.nextInt(); + backupableDBOptions.setMaxBackgroundOperations(value); + assertThat(backupableDBOptions.maxBackgroundOperations()). + isEqualTo(value); + } + } + + @Test + public void callbackTriggerIntervalSize() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final long value = rand.nextLong(); + backupableDBOptions.setCallbackTriggerIntervalSize(value); + assertThat(backupableDBOptions.callbackTriggerIntervalSize()). + isEqualTo(value); + } + } + @Test public void failBackupDirIsNull() { exception.expect(IllegalArgumentException.class); diff --git a/java/src/test/java/org/rocksdb/ClockCacheTest.java b/java/src/test/java/org/rocksdb/ClockCacheTest.java new file mode 100644 index 000000000..5fc54df60 --- /dev/null +++ b/java/src/test/java/org/rocksdb/ClockCacheTest.java @@ -0,0 +1,26 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.Test; + +public class ClockCacheTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void newClockCache() { + final long capacity = 1000; + final int numShardBits = 16; + final boolean strictCapacityLimit = true; + try(final Cache clockCache = new ClockCache(capacity, + numShardBits, strictCapacityLimit)) { + //no op + } + } +} diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java index 59962693a..f5438b396 100644 --- a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java +++ b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java @@ -198,15 +198,6 @@ public class ColumnFamilyOptionsTest { } } - @Test - public void softRateLimit() { - try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { - final double doubleValue = rand.nextDouble(); - opt.setSoftRateLimit(doubleValue); - assertThat(opt.softRateLimit()).isEqualTo(doubleValue); - } - } - @Test public void softPendingCompactionBytesLimit() { try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { @@ -216,15 +207,6 @@ public class ColumnFamilyOptionsTest { } } - @Test - public void hardRateLimit() { - try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { - final double doubleValue = rand.nextDouble(); - opt.setHardRateLimit(doubleValue); - assertThat(opt.hardRateLimit()).isEqualTo(doubleValue); - } - } - @Test public void hardPendingCompactionBytesLimit() { try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { @@ -261,15 +243,6 @@ public class ColumnFamilyOptionsTest { } } - @Test - public void rateLimitDelayMaxMilliseconds() { - try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { - final int intValue = rand.nextInt(); - 
opt.setRateLimitDelayMaxMilliseconds(intValue); - assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue); - } - } - @Test public void arenaBlockSize() throws RocksDBException { try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { @@ -288,15 +261,6 @@ public class ColumnFamilyOptionsTest { } } - @Test - public void purgeRedundantKvsWhileFlush() { - try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { - final boolean boolValue = rand.nextBoolean(); - opt.setPurgeRedundantKvsWhileFlush(boolValue); - assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue); - } - } - @Test public void maxSequentialSkipInIterations() { try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { @@ -393,6 +357,7 @@ public class ColumnFamilyOptionsTest { options.optimizeLevelStyleCompaction(); options.optimizeLevelStyleCompaction(3000); options.optimizeForPointLookup(10); + options.optimizeForSmallDb(); } } @@ -471,6 +436,36 @@ public class ColumnFamilyOptionsTest { } } + @Test + public void bottommostCompressionType() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { + assertThat(columnFamilyOptions.bottommostCompressionType()) + .isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION); + + for (final CompressionType compressionType : CompressionType.values()) { + columnFamilyOptions.setBottommostCompressionType(compressionType); + assertThat(columnFamilyOptions.bottommostCompressionType()) + .isEqualTo(compressionType); + } + } + } + + @Test + public void compressionOptions() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions(); + final CompressionOptions compressionOptions = new CompressionOptions() + .setMaxDictBytes(123)) { + + columnFamilyOptions.setCompressionOptions(compressionOptions); + assertThat(columnFamilyOptions.compressionOptions()) + .isEqualTo(compressionOptions); + assertThat(columnFamilyOptions.compressionOptions().maxDictBytes()) + .isEqualTo(123); + } + } + @Test public void compactionStyles() { try (final ColumnFamilyOptions columnFamilyOptions @@ -498,4 +493,75 @@ public class ColumnFamilyOptionsTest { isEqualTo(longValue); } } + + @Test + public void maxWriteBufferNumberToMaintain() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + int intValue = rand.nextInt(); + // Size has to be positive + intValue = (intValue < 0) ? -intValue : intValue; + intValue = (intValue == 0) ? intValue + 1 : intValue; + opt.setMaxWriteBufferNumberToMaintain(intValue); + assertThat(opt.maxWriteBufferNumberToMaintain()). + isEqualTo(intValue); + } + } + + @Test + public void compactionPriorities() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + for (final CompactionPriority compactionPriority : + CompactionPriority.values()) { + opt.setCompactionPriority(compactionPriority); + assertThat(opt.compactionPriority()). + isEqualTo(compactionPriority); + } + } + } + + @Test + public void reportBgIoStats() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean booleanValue = true; + opt.setReportBgIoStats(booleanValue); + assertThat(opt.reportBgIoStats()). + isEqualTo(booleanValue); + } + } + + @Test + public void compactionOptionsUniversal() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions(); + final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal() + .setCompressionSizePercent(7)) { + opt.setCompactionOptionsUniversal(optUni); + assertThat(opt.compactionOptionsUniversal()). 
+ isEqualTo(optUni); + assertThat(opt.compactionOptionsUniversal().compressionSizePercent()) + .isEqualTo(7); + } + } + + @Test + public void compactionOptionsFIFO() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions(); + final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO() + .setMaxTableFilesSize(2000)) { + opt.setCompactionOptionsFIFO(optFifo); + assertThat(opt.compactionOptionsFIFO()). + isEqualTo(optFifo); + assertThat(opt.compactionOptionsFIFO().maxTableFilesSize()) + .isEqualTo(2000); + } + } + + @Test + public void forceConsistencyChecks() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean booleanValue = true; + opt.setForceConsistencyChecks(booleanValue); + assertThat(opt.forceConsistencyChecks()). + isEqualTo(booleanValue); + } + } } diff --git a/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java b/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java new file mode 100644 index 000000000..90db27274 --- /dev/null +++ b/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java @@ -0,0 +1,26 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionOptionsFIFOTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void maxTableFilesSize() { + final long size = 500 * 1024 * 1026; + try(final CompactionOptionsFIFO opt = new CompactionOptionsFIFO()) { + opt.setMaxTableFilesSize(size); + assertThat(opt.maxTableFilesSize()).isEqualTo(size); + } + } +} diff --git a/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java b/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java new file mode 100644 index 000000000..6bc6be544 --- /dev/null +++ b/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java @@ -0,0 +1,80 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
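
One combined sketch of how the new holder objects exercised by these tests plug into Options (illustration only, not part of the patch; pairing CompactionOptionsUniversal with the UNIVERSAL compaction style is my assumption, not something the diff itself enforces):

    // Sketch: wire CompressionOptions and CompactionOptionsUniversal into an
    // Options instance before opening a database.
    try (final CompressionOptions compressionOptions = new CompressionOptions();
         final CompactionOptionsUniversal compactionOptions =
             new CompactionOptionsUniversal();
         final Options options = new Options()) {
      compressionOptions.setMaxDictBytes(16 * 1024);
      compactionOptions.setSizeRatio(4);
      compactionOptions.setStopStyle(
          CompactionStopStyle.CompactionStopStyleSimilarSize);
      options.setCompressionType(CompressionType.SNAPPY_COMPRESSION);
      options.setBottommostCompressionType(CompressionType.ZLIB_COMPRESSION);
      options.setCompressionOptions(compressionOptions);
      options.setCompactionStyle(CompactionStyle.UNIVERSAL);
      options.setCompactionOptionsUniversal(compactionOptions);
      // hand options to RocksDB.open(...)
    }
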
+ +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionOptionsUniversalTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void sizeRatio() { + final int sizeRatio = 4; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setSizeRatio(sizeRatio); + assertThat(opt.sizeRatio()).isEqualTo(sizeRatio); + } + } + + @Test + public void minMergeWidth() { + final int minMergeWidth = 3; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setMinMergeWidth(minMergeWidth); + assertThat(opt.minMergeWidth()).isEqualTo(minMergeWidth); + } + } + + @Test + public void maxMergeWidth() { + final int maxMergeWidth = Integer.MAX_VALUE - 1234; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setMaxMergeWidth(maxMergeWidth); + assertThat(opt.maxMergeWidth()).isEqualTo(maxMergeWidth); + } + } + + @Test + public void maxSizeAmplificationPercent() { + final int maxSizeAmplificationPercent = 150; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setMaxSizeAmplificationPercent(maxSizeAmplificationPercent); + assertThat(opt.maxSizeAmplificationPercent()).isEqualTo(maxSizeAmplificationPercent); + } + } + + @Test + public void compressionSizePercent() { + final int compressionSizePercent = 500; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setCompressionSizePercent(compressionSizePercent); + assertThat(opt.compressionSizePercent()).isEqualTo(compressionSizePercent); + } + } + + @Test + public void stopStyle() { + final CompactionStopStyle stopStyle = CompactionStopStyle.CompactionStopStyleSimilarSize; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setStopStyle(stopStyle); + assertThat(opt.stopStyle()).isEqualTo(stopStyle); + } + } + + @Test + public void allowTrivialMove() { + final boolean allowTrivialMove = true; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setAllowTrivialMove(allowTrivialMove); + assertThat(opt.allowTrivialMove()).isEqualTo(allowTrivialMove); + } + } +} diff --git a/java/src/test/java/org/rocksdb/CompactionPriorityTest.java b/java/src/test/java/org/rocksdb/CompactionPriorityTest.java new file mode 100644 index 000000000..a92991f39 --- /dev/null +++ b/java/src/test/java/org/rocksdb/CompactionPriorityTest.java @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionPriorityTest { + + @Test(expected = IllegalArgumentException.class) + public void failIfIllegalByteValueProvided() { + CompactionPriority.getCompactionPriority((byte) -1); + } + + @Test + public void getCompactionPriority() { + assertThat(CompactionPriority.getCompactionPriority( + CompactionPriority.OldestLargestSeqFirst.getValue())) + .isEqualTo(CompactionPriority.OldestLargestSeqFirst); + } + + @Test + public void valueOf() { + assertThat(CompactionPriority.valueOf("OldestSmallestSeqFirst")). 
+ isEqualTo(CompactionPriority.OldestSmallestSeqFirst); + } +} diff --git a/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java b/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java new file mode 100644 index 000000000..41ebeb8d5 --- /dev/null +++ b/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionStopStyleTest { + + @Test(expected = IllegalArgumentException.class) + public void failIfIllegalByteValueProvided() { + CompactionStopStyle.getCompactionStopStyle((byte) -1); + } + + @Test + public void getCompactionStopStyle() { + assertThat(CompactionStopStyle.getCompactionStopStyle( + CompactionStopStyle.CompactionStopStyleTotalSize.getValue())) + .isEqualTo(CompactionStopStyle.CompactionStopStyleTotalSize); + } + + @Test + public void valueOf() { + assertThat(CompactionStopStyle.valueOf("CompactionStopStyleSimilarSize")). + isEqualTo(CompactionStopStyle.CompactionStopStyleSimilarSize); + } +} diff --git a/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java index 51b7259f6..a49a70677 100644 --- a/java/src/test/java/org/rocksdb/CompressionOptionsTest.java +++ b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java @@ -7,14 +7,47 @@ package org.rocksdb; import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; public class CompressionOptionsTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void windowBits() { + final int windowBits = 7; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setWindowBits(windowBits); + assertThat(opt.windowBits()).isEqualTo(windowBits); + } + } + + @Test + public void level() { + final int level = 6; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setLevel(level); + assertThat(opt.level()).isEqualTo(level); + } + } + + @Test + public void strategy() { + final int strategy = 2; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setStrategy(strategy); + assertThat(opt.strategy()).isEqualTo(strategy); + } + } + @Test - public void getCompressionType() { - for (final CompressionType compressionType : CompressionType.values()) { - String libraryName = compressionType.getLibraryName(); - compressionType.equals(CompressionType.getCompressionType( - libraryName)); + public void maxDictBytes() { + final int maxDictBytes = 999; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setMaxDictBytes(maxDictBytes); + assertThat(opt.maxDictBytes()).isEqualTo(maxDictBytes); } } } diff --git a/java/src/test/java/org/rocksdb/CompressionTypesTest.java b/java/src/test/java/org/rocksdb/CompressionTypesTest.java new file mode 100644 index 000000000..a3475b41e --- /dev/null +++ b/java/src/test/java/org/rocksdb/CompressionTypesTest.java @@ -0,0 +1,20 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.Test; + + +public class CompressionTypesTest { + @Test + public void getCompressionType() { + for (final CompressionType compressionType : CompressionType.values()) { + String libraryName = compressionType.getLibraryName(); + compressionType.equals(CompressionType.getCompressionType( + libraryName)); + } + } +} diff --git a/java/src/test/java/org/rocksdb/DBOptionsTest.java b/java/src/test/java/org/rocksdb/DBOptionsTest.java index 157f267ff..be2083243 100644 --- a/java/src/test/java/org/rocksdb/DBOptionsTest.java +++ b/java/src/test/java/org/rocksdb/DBOptionsTest.java @@ -8,8 +8,8 @@ package org.rocksdb; import org.junit.ClassRule; import org.junit.Test; -import java.util.Properties; -import java.util.Random; +import java.nio.file.Paths; +import java.util.*; import static org.assertj.core.api.Assertions.assertThat; @@ -63,6 +63,22 @@ public class DBOptionsTest { } } + @Test + public void linkageOfPrepMethods() { + try (final DBOptions opt = new DBOptions()) { + opt.optimizeForSmallDb(); + } + } + + @Test + public void env() { + try (final DBOptions opt = new DBOptions(); + final Env env = Env.getDefault()) { + opt.setEnv(env); + assertThat(opt.getEnv()).isSameAs(env); + } + } + @Test public void setIncreaseParallelism() { try(final DBOptions opt = new DBOptions()) { @@ -125,6 +141,15 @@ public class DBOptionsTest { } } + @Test + public void maxFileOpeningThreads() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxFileOpeningThreads(intValue); + assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue); + } + } + @Test public void useFsync() { try(final DBOptions opt = new DBOptions()) { @@ -134,6 +159,22 @@ public class DBOptionsTest { } } + @Test + public void dbPaths() { + final List<DbPath> dbPaths = new ArrayList<>(); + dbPaths.add(new DbPath(Paths.get("/a"), 10)); + dbPaths.add(new DbPath(Paths.get("/b"), 100)); + dbPaths.add(new DbPath(Paths.get("/c"), 1000)); + + try(final DBOptions opt = new DBOptions()) { + assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList()); + + opt.setDbPaths(dbPaths); + + assertThat(opt.dbPaths()).isEqualTo(dbPaths); + } + } + @Test public void dbLogDir() { try(final DBOptions opt = new DBOptions()) { @@ -226,6 +267,15 @@ public class DBOptionsTest { } } + @Test + public void recycleLogFileNum() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setRecycleLogFileNum(longValue); + assertThat(opt.recycleLogFileNum()).isEqualTo(longValue); + } + } + @Test public void maxManifestFileSize() { try(final DBOptions opt = new DBOptions()) { @@ -289,6 +339,15 @@ public class DBOptionsTest { } } + @Test + public void allowFAllocate() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowFAllocate(boolValue); + assertThat(opt.allowFAllocate()).isEqualTo(boolValue); + } + } + @Test public void allowMmapReads() { try(final DBOptions opt = new DBOptions()) { @@ -334,6 +393,60 @@ public class DBOptionsTest { } } + @Test + public void dbWriteBufferSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setDbWriteBufferSize(longValue); + assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void accessHintOnCompactionStart() { + try(final DBOptions opt = new DBOptions()) { + final
AccessHint accessHint = AccessHint.SEQUENTIAL; + opt.setAccessHintOnCompactionStart(accessHint); + assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint); + } + } + + @Test + public void newTableReaderForCompactionInputs() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setNewTableReaderForCompactionInputs(boolValue); + assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue); + } + } + + @Test + public void compactionReadaheadSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setCompactionReadaheadSize(longValue); + assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue); + } + } + + @Test + public void randomAccessMaxBufferSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setRandomAccessMaxBufferSize(longValue); + assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void writableFileMaxBufferSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWritableFileMaxBufferSize(longValue); + assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue); + } + } + @Test public void useAdaptiveMutex() { try(final DBOptions opt = new DBOptions()) { @@ -352,6 +465,33 @@ public class DBOptionsTest { } } + @Test + public void walBytesPerSync() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWalBytesPerSync(longValue); + assertThat(opt.walBytesPerSync()).isEqualTo(longValue); + } + } + + @Test + public void enableThreadTracking() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setEnableThreadTracking(boolValue); + assertThat(opt.enableThreadTracking()).isEqualTo(boolValue); + } + } + + @Test + public void delayedWriteRate() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setDelayedWriteRate(longValue); + assertThat(opt.delayedWriteRate()).isEqualTo(longValue); + } + } + @Test public void allowConcurrentMemtableWrite() { try (final DBOptions opt = new DBOptions()) { @@ -388,6 +528,87 @@ public class DBOptionsTest { } } + @Test + public void skipStatsUpdateOnDbOpen() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setSkipStatsUpdateOnDbOpen(boolValue); + assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue); + } + } + + @Test + public void walRecoveryMode() { + try (final DBOptions opt = new DBOptions()) { + for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) { + opt.setWalRecoveryMode(walRecoveryMode); + assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode); + } + } + } + + @Test + public void allow2pc() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllow2pc(boolValue); + assertThat(opt.allow2pc()).isEqualTo(boolValue); + } + } + + @Test + public void rowCache() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.rowCache()).isNull(); + + try(final Cache lruCache = new LRUCache(1000)) { + opt.setRowCache(lruCache); + assertThat(opt.rowCache()).isEqualTo(lruCache); + } + + try(final Cache clockCache = new ClockCache(1000)) { + opt.setRowCache(clockCache); + assertThat(opt.rowCache()).isEqualTo(clockCache); + } + } + } + + @Test + public void failIfOptionsFileError() { + try (final DBOptions opt = 
new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setFailIfOptionsFileError(boolValue); + assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue); + } + } + + @Test + public void dumpMallocStats() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDumpMallocStats(boolValue); + assertThat(opt.dumpMallocStats()).isEqualTo(boolValue); + } + } + + @Test + public void avoidFlushDuringRecovery() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAvoidFlushDuringRecovery(boolValue); + assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue); + } + } + + @Test + public void avoidFlushDuringShutdown() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAvoidFlushDuringShutdown(boolValue); + assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue); + } + } + @Test public void rateLimiter() { try(final DBOptions options = new DBOptions(); diff --git a/java/src/test/java/org/rocksdb/LRUCacheTest.java b/java/src/test/java/org/rocksdb/LRUCacheTest.java new file mode 100644 index 000000000..e9d860baa --- /dev/null +++ b/java/src/test/java/org/rocksdb/LRUCacheTest.java @@ -0,0 +1,27 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.Test; + +public class LRUCacheTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void newLRUCache() { + final long capacity = 1000; + final int numShardBits = 16; + final boolean strictCapacityLimit = true; + final double highPriPoolRatio = 5; + try(final Cache lruCache = new LRUCache(capacity, + numShardBits, strictCapacityLimit, highPriPoolRatio)) { + //no op + } + } +} diff --git a/java/src/test/java/org/rocksdb/OptionsTest.java b/java/src/test/java/org/rocksdb/OptionsTest.java index f13356826..67f3a9f00 100644 --- a/java/src/test/java/org/rocksdb/OptionsTest.java +++ b/java/src/test/java/org/rocksdb/OptionsTest.java @@ -5,7 +5,9 @@ package org.rocksdb; +import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Random; @@ -161,15 +163,6 @@ public class OptionsTest { } } - @Test - public void softRateLimit() { - try (final Options opt = new Options()) { - final double doubleValue = rand.nextDouble(); - opt.setSoftRateLimit(doubleValue); - assertThat(opt.softRateLimit()).isEqualTo(doubleValue); - } - } - @Test public void softPendingCompactionBytesLimit() { try (final Options opt = new Options()) { @@ -179,15 +172,6 @@ public class OptionsTest { } } - @Test - public void hardRateLimit() { - try (final Options opt = new Options()) { - final double doubleValue = rand.nextDouble(); - opt.setHardRateLimit(doubleValue); - assertThat(opt.hardRateLimit()).isEqualTo(doubleValue); - } - } - @Test public void hardPendingCompactionBytesLimit() { try (final Options opt = new Options()) { @@ -224,15 +208,6 @@ public class OptionsTest { } } - @Test - public void rateLimitDelayMaxMilliseconds() { - try (final Options opt = new Options()) { - final int intValue = rand.nextInt(); - opt.setRateLimitDelayMaxMilliseconds(intValue); - assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue); - } - } - 
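
For orientation (not part of the patch): the removed double-valued soft/hard rate limits and the delay cap were deprecated upstream; the throttling knobs this patch leaves in place are the pending-compaction byte limits and the new delayed write rate. A sketch with arbitrary values:

    // Sketch: the surviving write-throttling knobs.
    try (final Options options = new Options()) {
      options.setDelayedWriteRate(16L * 1024 * 1024);         // write rate once stalled, bytes/sec
      options.setSoftPendingCompactionBytesLimit(64L << 30);  // begin slowing writes
      options.setHardPendingCompactionBytesLimit(256L << 30); // stop writes entirely
    }
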
@Test public void arenaBlockSize() throws RocksDBException { try (final Options opt = new Options()) { @@ -251,15 +226,6 @@ public class OptionsTest { } } - @Test - public void purgeRedundantKvsWhileFlush() { - try (final Options opt = new Options()) { - final boolean boolValue = rand.nextBoolean(); - opt.setPurgeRedundantKvsWhileFlush(boolValue); - assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue); - } - } - @Test public void maxSequentialSkipInIterations() { try (final Options opt = new Options()) { @@ -390,6 +356,15 @@ public class OptionsTest { } } + @Test + public void maxFileOpeningThreads() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxFileOpeningThreads(intValue); + assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue); + } + } + @Test public void useFsync() { try (final Options opt = new Options()) { @@ -399,6 +374,22 @@ public class OptionsTest { } } + @Test + public void dbPaths() { + final List<DbPath> dbPaths = new ArrayList<>(); + dbPaths.add(new DbPath(Paths.get("/a"), 10)); + dbPaths.add(new DbPath(Paths.get("/b"), 100)); + dbPaths.add(new DbPath(Paths.get("/c"), 1000)); + + try (final Options opt = new Options()) { + assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList()); + + opt.setDbPaths(dbPaths); + + assertThat(opt.dbPaths()).isEqualTo(dbPaths); + } + } + @Test public void dbLogDir() { try (final Options opt = new Options()) { @@ -495,6 +486,15 @@ public class OptionsTest { } } + @Test + public void recycleLogFileNum() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setRecycleLogFileNum(longValue); + assertThat(opt.recycleLogFileNum()).isEqualTo(longValue); + } + } + @Test public void maxManifestFileSize() { try (final Options opt = new Options()) { @@ -561,6 +561,15 @@ public class OptionsTest { } } + @Test + public void allowFAllocate() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowFAllocate(boolValue); + assertThat(opt.allowFAllocate()).isEqualTo(boolValue); + } + } + @Test public void allowMmapReads() { try (final Options opt = new Options()) { @@ -606,6 +615,60 @@ public class OptionsTest { } } + @Test + public void dbWriteBufferSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setDbWriteBufferSize(longValue); + assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void accessHintOnCompactionStart() { + try (final Options opt = new Options()) { + final AccessHint accessHint = AccessHint.SEQUENTIAL; + opt.setAccessHintOnCompactionStart(accessHint); + assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint); + } + } + + @Test + public void newTableReaderForCompactionInputs() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setNewTableReaderForCompactionInputs(boolValue); + assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue); + } + } + + @Test + public void compactionReadaheadSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setCompactionReadaheadSize(longValue); + assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue); + } + } + + @Test + public void randomAccessMaxBufferSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setRandomAccessMaxBufferSize(longValue); +
diff --git a/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/java/src/test/java/org/rocksdb/ReadOptionsTest.java
index 58ed2ecc6..13d795f55 100644
--- a/java/src/test/java/org/rocksdb/ReadOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/ReadOptionsTest.java
@@ -101,6 +101,32 @@ public class ReadOptionsTest {
     }
   }
 
+  @Test
+  public void backgroundPurgeOnIteratorCleanup() {
+    try (final ReadOptions opt = new ReadOptions()) {
+      opt.setBackgroundPurgeOnIteratorCleanup(true);
+      assertThat(opt.backgroundPurgeOnIteratorCleanup()).isTrue();
+    }
+  }
+
+  @Test
+  public void readaheadSize() {
+    try (final ReadOptions opt = new ReadOptions()) {
+      final Random rand = new Random();
+      final long longValue = rand.nextLong();
+      opt.setReadaheadSize(longValue);
+      assertThat(opt.readaheadSize()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void ignoreRangeDeletions() {
+    try (final ReadOptions opt = new ReadOptions()) {
+      opt.setIgnoreRangeDeletions(true);
+      assertThat(opt.ignoreRangeDeletions()).isTrue();
+    }
+  }
+
   @Test
   public void failSetVerifyChecksumUninitialized() {
     try (final ReadOptions readOptions =
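
A sketch of the three new read-path knobs applied to a long scan (not part of the patch; `db` stands for an already-open RocksDB handle and the readahead size is illustrative):

import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksIterator;

public class ScanSketch {
  public static long countEntries(final RocksDB db) {
    long count = 0;
    try (final ReadOptions readOptions = new ReadOptions()
             .setReadaheadSize(4 * 1024 * 1024)           // larger sequential reads
             .setIgnoreRangeDeletions(true)               // range-deleted keys may surface
             .setBackgroundPurgeOnIteratorCleanup(true);  // defer iterator cleanup work
         final RocksIterator it = db.newIterator(readOptions)) {
      for (it.seekToFirst(); it.isValid(); it.next()) {
        count++;
      }
    }
    return count;
  }
}
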
diff --git a/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java b/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
new file mode 100644
index 000000000..b1a3655c3
--- /dev/null
+++ b/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
@@ -0,0 +1,21 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class WALRecoveryModeTest {
+
+  @Test
+  public void getWALRecoveryMode() {
+    for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
+      assertThat(WALRecoveryMode.getWALRecoveryMode(walRecoveryMode.getValue()))
+          .isEqualTo(walRecoveryMode);
+    }
+  }
+}
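
The byte round-trip verified above is what makes the enum safe to hand across the JNI boundary; a short sketch of that mapping and of selecting a mode (not part of the patch; it assumes DBOptions mirrors the Options setter tested earlier):

import org.rocksdb.DBOptions;
import org.rocksdb.WALRecoveryMode;

public class WalRecoveryModeSketch {
  public static void main(final String[] args) {
    // Each mode is encoded as a stable byte value for the native side.
    final WALRecoveryMode mode = WALRecoveryMode.PointInTimeRecovery;
    assert WALRecoveryMode.getWALRecoveryMode(mode.getValue()) == mode;

    // Pick the WAL recovery behaviour used when the database is opened.
    try (final DBOptions dbOptions = new DBOptions()
             .setWalRecoveryMode(WALRecoveryMode.AbsoluteConsistency)) {
      // pass dbOptions to RocksDB.open(...) along with column family options
    }
  }
}
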
diff --git a/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/java/src/test/java/org/rocksdb/WriteOptionsTest.java
index c6af5c818..733f24b8a 100644
--- a/java/src/test/java/org/rocksdb/WriteOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/WriteOptionsTest.java
@@ -19,14 +19,25 @@
   @Test
   public void writeOptions() {
     try (final WriteOptions writeOptions = new WriteOptions()) {
-      writeOptions.setDisableWAL(true);
-      assertThat(writeOptions.disableWAL()).isTrue();
-      writeOptions.setDisableWAL(false);
-      assertThat(writeOptions.disableWAL()).isFalse();
       writeOptions.setSync(true);
       assertThat(writeOptions.sync()).isTrue();
       writeOptions.setSync(false);
       assertThat(writeOptions.sync()).isFalse();
+
+      writeOptions.setDisableWAL(true);
+      assertThat(writeOptions.disableWAL()).isTrue();
+      writeOptions.setDisableWAL(false);
+      assertThat(writeOptions.disableWAL()).isFalse();
+
+      writeOptions.setIgnoreMissingColumnFamilies(true);
+      assertThat(writeOptions.ignoreMissingColumnFamilies()).isTrue();
+      writeOptions.setIgnoreMissingColumnFamilies(false);
+      assertThat(writeOptions.ignoreMissingColumnFamilies()).isFalse();
+
+      writeOptions.setNoSlowdown(true);
+      assertThat(writeOptions.noSlowdown()).isTrue();
+      writeOptions.setNoSlowdown(false);
+      assertThat(writeOptions.noSlowdown()).isFalse();
     }
   }
 }
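
A sketch of the new write flags working together (not part of the patch; `db`, `key`, and `value` are assumed inputs). With noSlowdown set, a write that would otherwise stall is expected to fail fast, so the exception path is part of normal operation:

import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteOptions;

public class WriteSketch {
  public static boolean tryPut(final RocksDB db, final byte[] key,
      final byte[] value) {
    try (final WriteOptions writeOptions = new WriteOptions()
             .setSync(false)                       // no fsync per write
             .setDisableWAL(false)                 // keep the write-ahead log
             .setIgnoreMissingColumnFamilies(true) // skip writes to dropped CFs
             .setNoSlowdown(true)) {               // fail fast instead of stalling
      try {
        db.put(writeOptions, key, value);
        return true;
      } catch (final RocksDBException e) {
        // Rejected rather than stalled; the caller can back off and retry.
        return false;
      }
    }
  }
}
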
diff --git a/src.mk b/src.mk
index b1f1aabbf..99699c6b6 100644
--- a/src.mk
+++ b/src.mk
@@ -332,16 +332,21 @@ JNI_NATIVE_SOURCES = \
   java/rocksjni/backupenginejni.cc \
   java/rocksjni/backupablejni.cc \
   java/rocksjni/checkpoint.cc \
+  java/rocksjni/clock_cache.cc \
   java/rocksjni/columnfamilyhandle.cc \
   java/rocksjni/compaction_filter.cc \
+  java/rocksjni/compaction_options_fifo.cc \
+  java/rocksjni/compaction_options_universal.cc \
   java/rocksjni/comparator.cc \
   java/rocksjni/comparatorjnicallback.cc \
+  java/rocksjni/compression_options.cc \
   java/rocksjni/env.cc \
   java/rocksjni/env_options.cc \
   java/rocksjni/external_sst_file_info.cc \
   java/rocksjni/filter.cc \
   java/rocksjni/iterator.cc \
   java/rocksjni/loggerjnicallback.cc \
+  java/rocksjni/lru_cache.cc \
   java/rocksjni/memtablejni.cc \
   java/rocksjni/merge_operator.cc \
   java/rocksjni/options.cc \
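
The clock_cache.cc and lru_cache.cc entries registered above back the two Cache implementations. For completeness, a sketch of ClockCache as a drop-in alternative to LRUCache behind the same Cache interface (not part of the patch; whether the native build actually provides a clock cache can depend on how RocksDB was compiled, so treat that as an assumption to verify):

import org.rocksdb.Cache;
import org.rocksdb.ClockCache;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class ClockCacheSketch {
  static {
    RocksDB.loadLibrary();
  }

  public static void main(final String[] args) {
    try (final Cache cache = new ClockCache(64 * 1024 * 1024);  // 64 MB
         final Options options = new Options().setRowCache(cache)) {
      // open and use the database with `options` as usual
    }
  }
}
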