Add missing functionality to RocksJava (#4833)

Summary:
This is my latest round of changes to add missing items to RocksJava. More to come in future PRs.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4833

Differential Revision: D14152266

Pulled By: sagar0

fbshipit-source-id: d6cff67e26da06c131491b5cf6911a8cd0db0775
main
Adam Retter 5 years ago committed by Facebook Github Bot
parent 06f378d75e
commit bb474e9a02
  1. 2
      Makefile
  2. 4
      include/rocksdb/options.h
  3. 1
      include/rocksdb/statistics.h
  4. 2
      include/rocksdb/table.h
  5. 59
      java/CMakeLists.txt
  6. 22
      java/Makefile
  7. 2
      java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
  8. 3
      java/rocksjni/compaction_filter_factory.cc
  9. 222
      java/rocksjni/compaction_job_info.cc
  10. 361
      java/rocksjni/compaction_job_stats.cc
  11. 116
      java/rocksjni/compaction_options.cc
  12. 20
      java/rocksjni/compaction_options_fifo.cc
  13. 46
      java/rocksjni/compaction_options_universal.cc
  14. 93
      java/rocksjni/compression_options.cc
  15. 216
      java/rocksjni/env.cc
  16. 167
      java/rocksjni/env_options.cc
  17. 77
      java/rocksjni/ingest_external_file_options.cc
  18. 2
      java/rocksjni/memory_util.cc
  19. 10
      java/rocksjni/memtablejni.cc
  20. 59
      java/rocksjni/optimistic_transaction_db.cc
  21. 2618
      java/rocksjni/options.cc
  22. 52
      java/rocksjni/options_util.cc
  23. 53
      java/rocksjni/persistent_cache.cc
  24. 4851
      java/rocksjni/portal.h
  25. 3254
      java/rocksjni/rocksjni.cc
  26. 4
      java/rocksjni/sst_file_manager.cc
  27. 73
      java/rocksjni/statistics.cc
  28. 134
      java/rocksjni/table.cc
  29. 25
      java/rocksjni/table_filter.cc
  30. 62
      java/rocksjni/table_filter_jnicallback.cc
  31. 34
      java/rocksjni/table_filter_jnicallback.h
  32. 121
      java/rocksjni/thread_status.cc
  33. 23
      java/rocksjni/trace_writer.cc
  34. 115
      java/rocksjni/trace_writer_jnicallback.cc
  35. 36
      java/rocksjni/trace_writer_jnicallback.h
  36. 74
      java/rocksjni/transaction_db.cc
  37. 44
      java/rocksjni/ttl.cc
  38. 23
      java/rocksjni/wal_filter.cc
  39. 144
      java/rocksjni/wal_filter_jnicallback.cc
  40. 42
      java/rocksjni/wal_filter_jnicallback.h
  41. 2
      java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
  42. 254
      java/src/main/java/org/rocksdb/AbstractMutableOptions.java
  43. 19
      java/src/main/java/org/rocksdb/AbstractTableFilter.java
  44. 70
      java/src/main/java/org/rocksdb/AbstractTraceWriter.java
  45. 49
      java/src/main/java/org/rocksdb/AbstractWalFilter.java
  46. 28
      java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
  47. 975
      java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
  48. 70
      java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java
  49. 46
      java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
  50. 22
      java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
  51. 68
      java/src/main/java/org/rocksdb/CompactRangeOptions.java
  52. 159
      java/src/main/java/org/rocksdb/CompactionJobInfo.java
  53. 295
      java/src/main/java/org/rocksdb/CompactionJobStats.java
  54. 121
      java/src/main/java/org/rocksdb/CompactionOptions.java
  55. 27
      java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
  56. 115
      java/src/main/java/org/rocksdb/CompactionReason.java
  57. 46
      java/src/main/java/org/rocksdb/CompactionStyle.java
  58. 66
      java/src/main/java/org/rocksdb/CompressionOptions.java
  59. 273
      java/src/main/java/org/rocksdb/DBOptions.java
  60. 478
      java/src/main/java/org/rocksdb/DBOptionsInterface.java
  61. 32
      java/src/main/java/org/rocksdb/DataBlockIndexType.java
  62. 114
      java/src/main/java/org/rocksdb/Env.java
  63. 255
      java/src/main/java/org/rocksdb/EnvOptions.java
  64. 1
      java/src/main/java/org/rocksdb/Filter.java
  65. 47
      java/src/main/java/org/rocksdb/FlushOptions.java
  66. 27
      java/src/main/java/org/rocksdb/HdfsEnv.java
  67. 2
      java/src/main/java/org/rocksdb/IndexType.java
  68. 114
      java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
  69. 56
      java/src/main/java/org/rocksdb/LevelMetaData.java
  70. 55
      java/src/main/java/org/rocksdb/LiveFileMetaData.java
  71. 75
      java/src/main/java/org/rocksdb/LogFile.java
  72. 612
      java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
  73. 286
      java/src/main/java/org/rocksdb/MutableDBOptions.java
  74. 336
      java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java
  75. 15
      java/src/main/java/org/rocksdb/MutableOptionKey.java
  76. 375
      java/src/main/java/org/rocksdb/MutableOptionValue.java
  77. 59
      java/src/main/java/org/rocksdb/OperationStage.java
  78. 54
      java/src/main/java/org/rocksdb/OperationType.java
  79. 53
      java/src/main/java/org/rocksdb/OptimisticTransactionDB.java
  80. 151
      java/src/main/java/org/rocksdb/Options.java
  81. 26
      java/src/main/java/org/rocksdb/PersistentCache.java
  82. 49
      java/src/main/java/org/rocksdb/Priority.java
  83. 19
      java/src/main/java/org/rocksdb/Range.java
  84. 223
      java/src/main/java/org/rocksdb/ReadOptions.java
  85. 3414
      java/src/main/java/org/rocksdb/RocksDB.java
  86. 13
      java/src/main/java/org/rocksdb/RocksEnv.java
  87. 24
      java/src/main/java/org/rocksdb/RocksMemEnv.java
  88. 30
      java/src/main/java/org/rocksdb/SizeApproximationFlag.java
  89. 3
      java/src/main/java/org/rocksdb/Slice.java
  90. 150
      java/src/main/java/org/rocksdb/SstFileMetaData.java
  91. 53
      java/src/main/java/org/rocksdb/StateType.java
  92. 2
      java/src/main/java/org/rocksdb/StatsLevel.java
  93. 20
      java/src/main/java/org/rocksdb/TableFilter.java
  94. 365
      java/src/main/java/org/rocksdb/TableProperties.java
  95. 224
      java/src/main/java/org/rocksdb/ThreadStatus.java
  96. 65
      java/src/main/java/org/rocksdb/ThreadType.java
  97. 7
      java/src/main/java/org/rocksdb/TickerType.java
  98. 30
      java/src/main/java/org/rocksdb/TimedEnv.java
  99. 32
      java/src/main/java/org/rocksdb/TraceOptions.java
  100. 36
      java/src/main/java/org/rocksdb/TraceWriter.java
  101. Some files were not shown because too many files have changed in this diff Show More

@ -1726,7 +1726,7 @@ endif
fi
tar xvzf snappy-$(SNAPPY_VER).tar.gz
mkdir snappy-$(SNAPPY_VER)/build
cd snappy-$(SNAPPY_VER)/build && CFLAGS='${EXTRA_CFLAGS}' CXXFLAGS='${EXTRA_CXXFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' cmake .. && $(MAKE) ${SNAPPY_MAKE_TARGET}
cd snappy-$(SNAPPY_VER)/build && CFLAGS='${EXTRA_CFLAGS}' CXXFLAGS='${EXTRA_CXXFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON .. && $(MAKE) ${SNAPPY_MAKE_TARGET}
cp snappy-$(SNAPPY_VER)/build/libsnappy.a .
liblz4.a:

@ -709,7 +709,7 @@ struct DBOptions {
// a limit, a flush will be triggered in the next DB to which the next write
// is issued.
//
// If the object is only passed to on DB, the behavior is the same as
// If the object is only passed to one DB, the behavior is the same as
// db_write_buffer_size. When write_buffer_manager is set, the value set will
// override db_write_buffer_size.
//
@ -821,7 +821,7 @@ struct DBOptions {
// Dynamically changeable through SetDBOptions() API.
uint64_t wal_bytes_per_sync = 0;
// A vector of EventListeners which callback functions will be called
// A vector of EventListeners whose callback functions will be called
// when specific RocksDB event happens.
std::vector<std::shared_ptr<EventListener>> listeners;

@ -22,6 +22,7 @@ namespace rocksdb {
* 1. Any ticker should be added before TICKER_ENUM_MAX.
* 2. Add a readable string in TickersNameMap below for the newly added ticker.
* 3. Add a corresponding enum value to TickerType.java in the java API
* 4. Add the enum conversions from Java and C++ to portal.h's toJavaTickerType and toCppTickers
*/
enum Tickers : uint32_t {
// total block cache misses

@ -229,7 +229,7 @@ struct BlockBasedTableOptions {
// Default: 0 (disabled)
uint32_t read_amp_bytes_per_bit = 0;
// We currently have three versions:
// We currently have five versions:
// 0 -- This version is currently written out by all RocksDB's versions by
// default. Can be read by really old RocksDB's. Doesn't support changing
// checksum (default is CRC32).

@ -11,6 +11,9 @@ set(JNI_NATIVE_SOURCES
rocksjni/compaction_filter.cc
rocksjni/compaction_filter_factory.cc
rocksjni/compaction_filter_factory_jnicallback.cc
rocksjni/compaction_job_info.cc
rocksjni/compaction_job_stats.cc
rocksjni/compaction_options.cc
rocksjni/compaction_options_fifo.cc
rocksjni/compaction_options_universal.cc
rocksjni/compact_range_options.cc
@ -33,6 +36,7 @@ set(JNI_NATIVE_SOURCES
rocksjni/optimistic_transaction_options.cc
rocksjni/options.cc
rocksjni/options_util.cc
rocksjni/persistent_cache.cc
rocksjni/ratelimiterjni.cc
rocksjni/remove_emptyvalue_compactionfilterjni.cc
rocksjni/restorejni.cc
@ -46,6 +50,11 @@ set(JNI_NATIVE_SOURCES
rocksjni/statistics.cc
rocksjni/statisticsjni.cc
rocksjni/table.cc
rocksjni/table_filter.cc
rocksjni/table_filter_jnicallback.cc
rocksjni/thread_status.cc
rocksjni/trace_writer.cc
rocksjni/trace_writer_jnicallback.cc
rocksjni/transaction.cc
rocksjni/transaction_db.cc
rocksjni/transaction_db_options.cc
@ -54,6 +63,8 @@ set(JNI_NATIVE_SOURCES
rocksjni/transaction_notifier_jnicallback.cc
rocksjni/transaction_options.cc
rocksjni/ttl.cc
rocksjni/wal_filter.cc
rocksjni/wal_filter_jnicallback.cc
rocksjni/write_batch.cc
rocksjni/writebatchhandlerjnicallback.cc
rocksjni/write_batch_test.cc
@ -69,7 +80,10 @@ set(NATIVE_JAVA_CLASSES
org.rocksdb.AbstractNativeReference
org.rocksdb.AbstractRocksIterator
org.rocksdb.AbstractSlice
org.rocksdb.AbstractTableFilter
org.rocksdb.AbstractTraceWriter
org.rocksdb.AbstractTransactionNotifier
org.rocksdb.AbstractWalFilter
org.rocksdb.BackupableDBOptions
org.rocksdb.BackupEngine
org.rocksdb.BlockBasedTableConfig
@ -80,6 +94,9 @@ set(NATIVE_JAVA_CLASSES
org.rocksdb.ClockCache
org.rocksdb.ColumnFamilyHandle
org.rocksdb.ColumnFamilyOptions
org.rocksdb.CompactionJobInfo
org.rocksdb.CompactionJobStats
org.rocksdb.CompactionOptions
org.rocksdb.CompactionOptionsFIFO
org.rocksdb.CompactionOptionsUniversal
org.rocksdb.CompactRangeOptions
@ -95,6 +112,7 @@ set(NATIVE_JAVA_CLASSES
org.rocksdb.FlushOptions
org.rocksdb.HashLinkedListMemTableConfig
org.rocksdb.HashSkipListMemTableConfig
org.rocksdb.HdfsEnv
org.rocksdb.IngestExternalFileOptions
org.rocksdb.Logger
org.rocksdb.LRUCache
@ -106,6 +124,7 @@ set(NATIVE_JAVA_CLASSES
org.rocksdb.OptimisticTransactionOptions
org.rocksdb.Options
org.rocksdb.OptionsUtil
org.rocksdb.PersistentCache
org.rocksdb.PlainTableConfig
org.rocksdb.RateLimiter
org.rocksdb.ReadOptions
@ -127,6 +146,8 @@ set(NATIVE_JAVA_CLASSES
org.rocksdb.Statistics
org.rocksdb.StringAppendOperator
org.rocksdb.TableFormatConfig
org.rocksdb.ThreadStatus
org.rocksdb.TimedEnv
org.rocksdb.Transaction
org.rocksdb.TransactionDB
org.rocksdb.TransactionDBOptions
@ -172,10 +193,14 @@ add_jar(
src/main/java/org/rocksdb/AbstractCompactionFilter.java
src/main/java/org/rocksdb/AbstractComparator.java
src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
src/main/java/org/rocksdb/AbstractMutableOptions.java
src/main/java/org/rocksdb/AbstractNativeReference.java
src/main/java/org/rocksdb/AbstractRocksIterator.java
src/main/java/org/rocksdb/AbstractSlice.java
src/main/java/org/rocksdb/AbstractTableFilter.java
src/main/java/org/rocksdb/AbstractTraceWriter.java
src/main/java/org/rocksdb/AbstractTransactionNotifier.java
src/main/java/org/rocksdb/AbstractWalFilter.java
src/main/java/org/rocksdb/AbstractWriteBatch.java
src/main/java/org/rocksdb/AccessHint.java
src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
@ -194,11 +219,16 @@ add_jar(
src/main/java/org/rocksdb/ClockCache.java
src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
src/main/java/org/rocksdb/ColumnFamilyHandle.java
src/main/java/org/rocksdb/ColumnFamilyMetaData.java
src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
src/main/java/org/rocksdb/ColumnFamilyOptions.java
src/main/java/org/rocksdb/CompactionJobInfo.java
src/main/java/org/rocksdb/CompactionJobStats.java
src/main/java/org/rocksdb/CompactionOptions.java
src/main/java/org/rocksdb/CompactionOptionsFIFO.java
src/main/java/org/rocksdb/CompactionOptionsUniversal.java
src/main/java/org/rocksdb/CompactionPriority.java
src/main/java/org/rocksdb/CompactionReason.java
src/main/java/org/rocksdb/CompactRangeOptions.java
src/main/java/org/rocksdb/CompactionStopStyle.java
src/main/java/org/rocksdb/CompactionStyle.java
@ -207,6 +237,7 @@ add_jar(
src/main/java/org/rocksdb/ComparatorType.java
src/main/java/org/rocksdb/CompressionOptions.java
src/main/java/org/rocksdb/CompressionType.java
src/main/java/org/rocksdb/DataBlockIndexType.java
src/main/java/org/rocksdb/DBOptionsInterface.java
src/main/java/org/rocksdb/DBOptions.java
src/main/java/org/rocksdb/DbPath.java
@ -220,26 +251,39 @@ add_jar(
src/main/java/org/rocksdb/FlushOptions.java
src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
src/main/java/org/rocksdb/HdfsEnv.java
src/main/java/org/rocksdb/HistogramData.java
src/main/java/org/rocksdb/HistogramType.java
src/main/java/org/rocksdb/IndexType.java
src/main/java/org/rocksdb/InfoLogLevel.java
src/main/java/org/rocksdb/IngestExternalFileOptions.java
src/main/java/org/rocksdb/LevelMetaData.java
src/main/java/org/rocksdb/LiveFileMetaData.java
src/main/java/org/rocksdb/LogFile.java
src/main/java/org/rocksdb/Logger.java
src/main/java/org/rocksdb/LRUCache.java
src/main/java/org/rocksdb/MemoryUsageType.java
src/main/java/org/rocksdb/MemoryUtil.java
src/main/java/org/rocksdb/MemTableConfig.java
src/main/java/org/rocksdb/MergeOperator.java
src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
src/main/java/org/rocksdb/MutableDBOptions.java
src/main/java/org/rocksdb/MutableDBOptionsInterface.java
src/main/java/org/rocksdb/MutableOptionKey.java
src/main/java/org/rocksdb/MutableOptionValue.java
src/main/java/org/rocksdb/NativeComparatorWrapper.java
src/main/java/org/rocksdb/NativeLibraryLoader.java
src/main/java/org/rocksdb/OperationStage.java
src/main/java/org/rocksdb/OperationType.java
src/main/java/org/rocksdb/OptimisticTransactionDB.java
src/main/java/org/rocksdb/OptimisticTransactionOptions.java
src/main/java/org/rocksdb/Options.java
src/main/java/org/rocksdb/OptionsUtil.java
src/main/java/org/rocksdb/PersistentCache.java
src/main/java/org/rocksdb/PlainTableConfig.java
src/main/java/org/rocksdb/Priority.java
src/main/java/org/rocksdb/Range.java
src/main/java/org/rocksdb/RateLimiter.java
src/main/java/org/rocksdb/RateLimiterMode.java
src/main/java/org/rocksdb/ReadOptions.java
@ -255,11 +299,14 @@ add_jar(
src/main/java/org/rocksdb/RocksMemEnv.java
src/main/java/org/rocksdb/RocksMutableObject.java
src/main/java/org/rocksdb/RocksObject.java
src/main/java/org/rocksdb/SizeApproximationFlag.java
src/main/java/org/rocksdb/SkipListMemTableConfig.java
src/main/java/org/rocksdb/Slice.java
src/main/java/org/rocksdb/Snapshot.java
src/main/java/org/rocksdb/SstFileManager.java
src/main/java/org/rocksdb/SstFileMetaData.java
src/main/java/org/rocksdb/SstFileWriter.java
src/main/java/org/rocksdb/StateType.java
src/main/java/org/rocksdb/StatisticsCollectorCallback.java
src/main/java/org/rocksdb/StatisticsCollector.java
src/main/java/org/rocksdb/Statistics.java
@ -267,8 +314,15 @@ add_jar(
src/main/java/org/rocksdb/StatsLevel.java
src/main/java/org/rocksdb/Status.java
src/main/java/org/rocksdb/StringAppendOperator.java
src/main/java/org/rocksdb/TableFilter.java
src/main/java/org/rocksdb/TableProperties.java
src/main/java/org/rocksdb/TableFormatConfig.java
src/main/java/org/rocksdb/ThreadType.java
src/main/java/org/rocksdb/ThreadStatus.java
src/main/java/org/rocksdb/TickerType.java
src/main/java/org/rocksdb/TimedEnv.java
src/main/java/org/rocksdb/TraceOptions.java
src/main/java/org/rocksdb/TraceWriter.java
src/main/java/org/rocksdb/TransactionalDB.java
src/main/java/org/rocksdb/TransactionalOptions.java
src/main/java/org/rocksdb/TransactionDB.java
@ -279,6 +333,9 @@ add_jar(
src/main/java/org/rocksdb/TtlDB.java
src/main/java/org/rocksdb/TxnDBWritePolicy.java
src/main/java/org/rocksdb/VectorMemTableConfig.java
src/main/java/org/rocksdb/WalFileType.java
src/main/java/org/rocksdb/WalFilter.java
src/main/java/org/rocksdb/WalProcessingOption.java
src/main/java/org/rocksdb/WALRecoveryMode.java
src/main/java/org/rocksdb/WBWIRocksIterator.java
src/main/java/org/rocksdb/WriteBatchInterface.java

@ -1,7 +1,10 @@
NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.AbstractCompactionFilterFactory\
org.rocksdb.AbstractSlice\
org.rocksdb.AbstractTableFilter\
org.rocksdb.AbstractTraceWriter\
org.rocksdb.AbstractTransactionNotifier\
org.rocksdb.AbstractWalFilter\
org.rocksdb.BackupEngine\
org.rocksdb.BackupableDBOptions\
org.rocksdb.BlockBasedTableConfig\
@ -12,6 +15,9 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.CassandraValueMergeOperator\
org.rocksdb.ColumnFamilyHandle\
org.rocksdb.ColumnFamilyOptions\
org.rocksdb.CompactionJobInfo\
org.rocksdb.CompactionJobStats\
org.rocksdb.CompactionOptions\
org.rocksdb.CompactionOptionsFIFO\
org.rocksdb.CompactionOptionsUniversal\
org.rocksdb.CompactRangeOptions\
@ -28,6 +34,7 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.IngestExternalFileOptions\
org.rocksdb.HashLinkedListMemTableConfig\
org.rocksdb.HashSkipListMemTableConfig\
org.rocksdb.HdfsEnv\
org.rocksdb.Logger\
org.rocksdb.LRUCache\
org.rocksdb.MemoryUsageType\
@ -38,6 +45,7 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.OptimisticTransactionOptions\
org.rocksdb.Options\
org.rocksdb.OptionsUtil\
org.rocksdb.PersistentCache\
org.rocksdb.PlainTableConfig\
org.rocksdb.RateLimiter\
org.rocksdb.ReadOptions\
@ -53,6 +61,8 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.SstFileManager\
org.rocksdb.SstFileWriter\
org.rocksdb.Statistics\
org.rocksdb.ThreadStatus\
org.rocksdb.TimedEnv\
org.rocksdb.Transaction\
org.rocksdb.TransactionDB\
org.rocksdb.TransactionDBOptions\
@ -94,7 +104,10 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.ClockCacheTest\
org.rocksdb.ColumnFamilyOptionsTest\
org.rocksdb.ColumnFamilyTest\
org.rocksdb.CompactionFilterFactoryTest\
org.rocksdb.CompactionJobInfoTest\
org.rocksdb.CompactionJobStatsTest\
org.rocksdb.CompactionOptionsTest\
org.rocksdb.CompactionOptionsFIFOTest\
org.rocksdb.CompactionOptionsUniversalTest\
org.rocksdb.CompactionPriorityTest\
@ -107,6 +120,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.DirectComparatorTest\
org.rocksdb.DirectSliceTest\
org.rocksdb.EnvOptionsTest\
org.rocksdb.HdfsEnvTest\
org.rocksdb.IngestExternalFileOptionsTest\
org.rocksdb.util.EnvironmentTest\
org.rocksdb.FilterTest\
@ -120,6 +134,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.MergeTest\
org.rocksdb.MixedOptionsTest\
org.rocksdb.MutableColumnFamilyOptionsTest\
org.rocksdb.MutableDBOptionsTest\
org.rocksdb.NativeComparatorWrapperTest\
org.rocksdb.NativeLibraryLoaderTest\
org.rocksdb.OptimisticTransactionTest\
@ -133,7 +148,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.ReadOptionsTest\
org.rocksdb.RocksDBTest\
org.rocksdb.RocksDBExceptionTest\
org.rocksdb.RocksEnvTest\
org.rocksdb.DefaultEnvTest\
org.rocksdb.RocksIteratorTest\
org.rocksdb.RocksMemEnvTest\
org.rocksdb.util.SizeUnitTest\
@ -141,6 +156,8 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.SnapshotTest\
org.rocksdb.SstFileManagerTest\
org.rocksdb.SstFileWriterTest\
org.rocksdb.TableFilterTest\
org.rocksdb.TimedEnvTest\
org.rocksdb.TransactionTest\
org.rocksdb.TransactionDBTest\
org.rocksdb.TransactionOptionsTest\
@ -149,6 +166,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.TtlDBTest\
org.rocksdb.StatisticsTest\
org.rocksdb.StatisticsCollectorTest\
org.rocksdb.WalFilterTest\
org.rocksdb.WALRecoveryModeTest\
org.rocksdb.WriteBatchHandlerTest\
org.rocksdb.WriteBatchTest\

@ -493,7 +493,7 @@ public class DbBenchmark {
options.setCreateIfMissing(false);
}
if (useMemenv_) {
options.setEnv(new RocksMemEnv());
options.setEnv(new RocksMemEnv(Env.getDefault()));
}
switch (memtable_) {
case "skip_list":

@ -31,9 +31,8 @@ jlong Java_org_rocksdb_AbstractCompactionFilterFactory_createNewCompactionFilter
* Signature: (J)V
*/
void Java_org_rocksdb_AbstractCompactionFilterFactory_disposeInternal(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
JNIEnv*, jobject, jlong jhandle) {
auto* ptr_sptr_cff = reinterpret_cast<
std::shared_ptr<rocksdb::CompactionFilterFactoryJniCallback>*>(jhandle);
delete ptr_sptr_cff;
// @lint-ignore TXT4 T25377293 Grandfathered in
}

@ -0,0 +1,222 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionJobInfo.
#include <jni.h>
#include "include/org_rocksdb_CompactionJobInfo.h"
#include "rocksdb/listener.h"
#include "rocksjni/portal.h"
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    newCompactionJobInfo
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompactionJobInfo_newCompactionJobInfo(
    JNIEnv*, jclass) {
  // Allocate a fresh native CompactionJobInfo and hand its address to Java.
  // Ownership transfers to the Java object; disposeInternal deletes it.
  return reinterpret_cast<jlong>(new rocksdb::CompactionJobInfo());
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionJobInfo_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Reclaim the native object created by newCompactionJobInfo.
  delete reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    columnFamilyName
 * Signature: (J)[B
 */
jbyteArray Java_org_rocksdb_CompactionJobInfo_columnFamilyName(
    JNIEnv* env, jclass, jlong jhandle) {
  // Copy the compaction's column family name into a new Java byte[].
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::JniUtil::copyBytes(env, info->cf_name);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    status
 * Signature: (J)Lorg/rocksdb/Status;
 */
jobject Java_org_rocksdb_CompactionJobInfo_status(
    JNIEnv* env, jclass, jlong jhandle) {
  // Mirror the native Status into a new org.rocksdb.Status object.
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::StatusJni::construct(env, info->status);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    threadId
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobInfo_threadId(
    JNIEnv*, jclass, jlong jhandle) {
  // Expose the id of the thread that ran the compaction.
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return static_cast<jlong>(info->thread_id);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    jobId
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionJobInfo_jobId(
    JNIEnv*, jclass, jlong jhandle) {
  // Expose the compaction job's numeric id.
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return static_cast<jint>(info->job_id);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    baseInputLevel
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionJobInfo_baseInputLevel(
    JNIEnv*, jclass, jlong jhandle) {
  // Expose the smallest input level of the compaction.
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return static_cast<jint>(info->base_input_level);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    outputLevel
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionJobInfo_outputLevel(
    JNIEnv*, jclass, jlong jhandle) {
  // Expose the level the compaction wrote its output to.
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return static_cast<jint>(info->output_level);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    inputFiles
 * Signature: (J)[Ljava/lang/String;
 */
jobjectArray Java_org_rocksdb_CompactionJobInfo_inputFiles(
    JNIEnv* env, jclass, jlong jhandle) {
  // Convert the native vector of input file names to a Java String[].
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::JniUtil::toJavaStrings(env, &info->input_files);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    outputFiles
 * Signature: (J)[Ljava/lang/String;
 */
jobjectArray Java_org_rocksdb_CompactionJobInfo_outputFiles(
    JNIEnv* env, jclass, jlong jhandle) {
  // Convert the native vector of output file names to a Java String[].
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::JniUtil::toJavaStrings(env, &info->output_files);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    tableProperties
 * Signature: (J)Ljava/util/Map;
 */
// Builds a java.util.HashMap (file name -> org.rocksdb.TableProperties) from
// the native table_properties map. Returns nullptr if any JNI allocation
// fails; in that case a Java exception is already pending.
jobject Java_org_rocksdb_CompactionJobInfo_tableProperties(
    JNIEnv* env, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  auto* map = &compact_job_info->table_properties;

  // Pre-size the Java HashMap to the number of native entries.
  jobject jhash_map = rocksdb::HashMapJni::construct(
      env, static_cast<uint32_t>(map->size()));
  if (jhash_map == nullptr) {
    // exception occurred
    return nullptr;
  }

  // Converts one native entry into a (jstring key, TableProperties value)
  // pair; returns an empty unique_ptr on any JNI failure so putAll aborts.
  // The key's local ref is released on the value-conversion error path.
  const rocksdb::HashMapJni::FnMapKV<const std::string, std::shared_ptr<const rocksdb::TableProperties>, jobject, jobject> fn_map_kv =
      [env](const std::pair<const std::string, std::shared_ptr<const rocksdb::TableProperties>>& kv) {
        jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
        if (env->ExceptionCheck()) {
          // an error occurred
          return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
        }
        jobject jtable_properties = rocksdb::TablePropertiesJni::fromCppTableProperties(
            env, *(kv.second.get()));
        if (env->ExceptionCheck()) {
          // an error occurred
          env->DeleteLocalRef(jkey);
          return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
        }
        return std::unique_ptr<std::pair<jobject, jobject>>(
            new std::pair<jobject, jobject>(static_cast<jobject>(jkey), jtable_properties));
      };

  if (!rocksdb::HashMapJni::putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
    // exception occurred
    return nullptr;
  }

  return jhash_map;
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    compactionReason
 * Signature: (J)B
 */
jbyte Java_org_rocksdb_CompactionJobInfo_compactionReason(
    JNIEnv*, jclass, jlong jhandle) {
  // Map the native CompactionReason enum to its Java byte encoding.
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::CompactionReasonJni::toJavaCompactionReason(
      info->compaction_reason);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    compression
 * Signature: (J)B
 */
jbyte Java_org_rocksdb_CompactionJobInfo_compression(
    JNIEnv*, jclass, jlong jhandle) {
  // Map the native CompressionType enum to its Java byte encoding.
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::CompressionTypeJni::toJavaCompressionType(
      info->compression);
}
/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    stats
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobInfo_stats(
    JNIEnv*, jclass, jlong jhandle) {
  // Hand Java a heap-allocated copy of the job's stats (built via Add onto
  // a zeroed instance). The Java CompactionJobStats owns and frees it.
  auto* info = reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  auto* stats_copy = new rocksdb::CompactionJobStats();
  stats_copy->Add(info->stats);
  return reinterpret_cast<jlong>(stats_copy);
}

@ -0,0 +1,361 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionJobStats.
#include <jni.h>
#include "include/org_rocksdb_CompactionJobStats.h"
#include "rocksdb/compaction_job_stats.h"
#include "rocksjni/portal.h"
/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    newCompactionJobStats
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompactionJobStats_newCompactionJobStats(
    JNIEnv*, jclass) {
  // Allocate a native CompactionJobStats; Java owns it and must call
  // disposeInternal to free it.
  return reinterpret_cast<jlong>(new rocksdb::CompactionJobStats());
}
/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionJobStats_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Reclaim the native object created by newCompactionJobStats.
  delete reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
}
/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    reset
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionJobStats_reset(
    JNIEnv*, jclass, jlong jhandle) {
  // Zero out all counters in the wrapped stats object.
  reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle)->Reset();
}
/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    add
 * Signature: (JJ)V
 */
void Java_org_rocksdb_CompactionJobStats_add(
    JNIEnv*, jclass, jlong jhandle, jlong jother_handle) {
  // Accumulate the counters of the `other` stats object into this one.
  auto* target = reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  auto* other = reinterpret_cast<rocksdb::CompactionJobStats*>(jother_handle);
  target->Add(*other);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: elapsedMicros
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_elapsedMicros(
    JNIEnv*, jclass, jlong jhandle) {
  // Elapsed time of the compaction job, in microseconds.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->elapsed_micros);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numInputRecords
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numInputRecords(
    JNIEnv*, jclass, jlong jhandle) {
  // Number of input records consumed by the compaction.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_input_records);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numInputFiles
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numInputFiles(
    JNIEnv*, jclass, jlong jhandle) {
  // Number of files read as compaction input.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_input_files);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numInputFilesAtOutputLevel
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numInputFilesAtOutputLevel(
    JNIEnv*, jclass, jlong jhandle) {
  // Input files that already resided at the compaction's output level.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_input_files_at_output_level);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numOutputRecords
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numOutputRecords(
    JNIEnv*, jclass, jlong jhandle) {
  // Number of records written as compaction output.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_output_records);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numOutputFiles
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numOutputFiles(
    JNIEnv*, jclass, jlong jhandle) {
  // Number of files produced by the compaction.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_output_files);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: isManualCompaction
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_CompactionJobStats_isManualCompaction(
    JNIEnv*, jclass, jlong jhandle) {
  // True when the compaction was triggered manually rather than automatically.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return stats->is_manual_compaction ? JNI_TRUE : JNI_FALSE;
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: totalInputBytes
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_totalInputBytes(
    JNIEnv*, jclass, jlong jhandle) {
  // Total bytes read by the compaction.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->total_input_bytes);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: totalOutputBytes
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_totalOutputBytes(
    JNIEnv*, jclass, jlong jhandle) {
  // Total bytes written by the compaction.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->total_output_bytes);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numRecordsReplaced
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numRecordsReplaced(
    JNIEnv*, jclass, jlong jhandle) {
  // Number of records superseded by newer entries during the compaction.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_records_replaced);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: totalInputRawKeyBytes
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_totalInputRawKeyBytes(
    JNIEnv*, jclass, jlong jhandle) {
  // Sum of raw (uncompressed) key bytes across all input records.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->total_input_raw_key_bytes);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: totalInputRawValueBytes
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_totalInputRawValueBytes(
    JNIEnv*, jclass, jlong jhandle) {
  // Sum of raw (uncompressed) value bytes across all input records.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->total_input_raw_value_bytes);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numInputDeletionRecords
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numInputDeletionRecords(
    JNIEnv*, jclass, jlong jhandle) {
  // Number of deletion (tombstone) records among the input.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_input_deletion_records);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numExpiredDeletionRecords
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numExpiredDeletionRecords(
    JNIEnv*, jclass, jlong jhandle) {
  // Deletion records that became obsolete and were dropped by the compaction.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_expired_deletion_records);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numCorruptKeys
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numCorruptKeys(
    JNIEnv*, jclass, jlong jhandle) {
  // Number of corrupt keys encountered while compacting.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_corrupt_keys);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: fileWriteNanos
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_fileWriteNanos(
    JNIEnv*, jclass, jlong jhandle) {
  // Time spent in file write calls, in nanoseconds.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->file_write_nanos);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: fileRangeSyncNanos
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_fileRangeSyncNanos(
    JNIEnv*, jclass, jlong jhandle) {
  // Time spent in range-sync calls, in nanoseconds.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->file_range_sync_nanos);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: fileFsyncNanos
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_fileFsyncNanos(
    JNIEnv*, jclass, jlong jhandle) {
  // Time spent in fsync calls, in nanoseconds.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->file_fsync_nanos);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: filePrepareWriteNanos
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_filePrepareWriteNanos(
    JNIEnv*, jclass, jlong jhandle) {
  // Time spent preparing file writes, in nanoseconds.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->file_prepare_write_nanos);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: smallestOutputKeyPrefix
 * Signature: (J)[B
 */
jbyteArray Java_org_rocksdb_CompactionJobStats_smallestOutputKeyPrefix(
    JNIEnv* env, jclass, jlong jhandle) {
  // Copy the smallest output key prefix into a fresh Java byte[].
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return rocksdb::JniUtil::copyBytes(env, stats->smallest_output_key_prefix);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: largestOutputKeyPrefix
 * Signature: (J)[B
 */
jbyteArray Java_org_rocksdb_CompactionJobStats_largestOutputKeyPrefix(
    JNIEnv* env, jclass, jlong jhandle) {
  // Copy the largest output key prefix into a fresh Java byte[].
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return rocksdb::JniUtil::copyBytes(env, stats->largest_output_key_prefix);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numSingleDelFallthru
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numSingleDelFallthru(
    JNIEnv*, jclass, jlong jhandle) {
  // Counter for single-delete entries that fell through the compaction.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_single_del_fallthru);
}
/*
 * Class: org_rocksdb_CompactionJobStats
 * Method: numSingleDelMismatch
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numSingleDelMismatch(
    JNIEnv*, jclass, jlong jhandle) {
  // Counter for mismatched single-delete entries seen by the compaction.
  const auto* stats =
      reinterpret_cast<const rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(stats->num_single_del_mismatch);
}

@ -0,0 +1,116 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionOptions.
#include <jni.h>
#include "include/org_rocksdb_CompactionOptions.h"
#include "rocksdb/options.h"
#include "rocksjni/portal.h"
/*
 * Class: org_rocksdb_CompactionOptions
 * Method: newCompactionOptions
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompactionOptions_newCompactionOptions(
    JNIEnv*, jclass) {
  // Allocate a native CompactionOptions; the Java wrapper owns it and
  // releases it via disposeInternal.
  return reinterpret_cast<jlong>(new rocksdb::CompactionOptions());
}
/*
 * Class: org_rocksdb_CompactionOptions
 * Method: disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionOptions_disposeInternal(
    JNIEnv *, jobject, jlong jhandle) {
  // Free the native object created by newCompactionOptions.
  delete reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
}
/*
 * Class: org_rocksdb_CompactionOptions
 * Method: compression
 * Signature: (J)B
 */
jbyte Java_org_rocksdb_CompactionOptions_compression(
    JNIEnv*, jclass, jlong jhandle) {
  // Translate the stored C++ CompressionType to its Java byte value.
  const auto* opts =
      reinterpret_cast<const rocksdb::CompactionOptions*>(jhandle);
  return rocksdb::CompressionTypeJni::toJavaCompressionType(opts->compression);
}
/*
 * Class: org_rocksdb_CompactionOptions
 * Method: setCompression
 * Signature: (JB)V
 */
void Java_org_rocksdb_CompactionOptions_setCompression(
    JNIEnv*, jclass, jlong jhandle, jbyte jcompression_type_value) {
  // Map the Java CompressionType byte onto the C++ enum and store it.
  auto* opts = reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType(
      jcompression_type_value);
}
/*
 * Class: org_rocksdb_CompactionOptions
 * Method: outputFileSizeLimit
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionOptions_outputFileSizeLimit(
    JNIEnv*, jclass, jlong jhandle) {
  // Current limit on the size of a single compaction output file.
  const auto* opts =
      reinterpret_cast<const rocksdb::CompactionOptions*>(jhandle);
  return static_cast<jlong>(opts->output_file_size_limit);
}
/*
 * Class: org_rocksdb_CompactionOptions
 * Method: setOutputFileSizeLimit
 * Signature: (JJ)V
 */
void Java_org_rocksdb_CompactionOptions_setOutputFileSizeLimit(
    JNIEnv*, jclass, jlong jhandle, jlong joutput_file_size_limit) {
  // Store the Java long as the native uint64_t size limit.
  auto* opts = reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  opts->output_file_size_limit =
      static_cast<uint64_t>(joutput_file_size_limit);
}
/*
 * Class: org_rocksdb_CompactionOptions
 * Method: maxSubcompactions
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionOptions_maxSubcompactions(
    JNIEnv*, jclass, jlong jhandle) {
  // Current maximum number of subcompactions.
  const auto* opts =
      reinterpret_cast<const rocksdb::CompactionOptions*>(jhandle);
  return static_cast<jint>(opts->max_subcompactions);
}
/*
 * Class: org_rocksdb_CompactionOptions
 * Method: setMaxSubcompactions
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompactionOptions_setMaxSubcompactions(
    JNIEnv*, jclass, jlong jhandle, jint jmax_subcompactions) {
  // Store the Java int as the native uint32_t subcompaction cap.
  auto* opts = reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  opts->max_subcompactions = static_cast<uint32_t>(jmax_subcompactions);
}

@ -17,7 +17,7 @@
* Signature: ()J
*/
jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
JNIEnv* /*env*/, jclass /*jcls*/) {
JNIEnv*, jclass) {
const auto* opt = new rocksdb::CompactionOptionsFIFO();
return reinterpret_cast<jlong>(opt);
}
@ -28,8 +28,7 @@ jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
* Signature: (JJ)V
*/
void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jlong jmax_table_files_size) {
JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
opt->max_table_files_size = static_cast<uint64_t>(jmax_table_files_size);
}
@ -39,9 +38,8 @@ void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
* Method: maxTableFilesSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
return static_cast<jlong>(opt->max_table_files_size);
}
@ -52,8 +50,7 @@ jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(JNIEnv* /*env*/,
* Signature: (JZ)V
*/
void Java_org_rocksdb_CompactionOptionsFIFO_setAllowCompaction(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jboolean allow_compaction) {
JNIEnv*, jobject, jlong jhandle, jboolean allow_compaction) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
opt->allow_compaction = static_cast<bool>(allow_compaction);
}
@ -64,7 +61,7 @@ void Java_org_rocksdb_CompactionOptionsFIFO_setAllowCompaction(
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_CompactionOptionsFIFO_allowCompaction(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
return static_cast<jboolean>(opt->allow_compaction);
}
@ -74,8 +71,7 @@ jboolean Java_org_rocksdb_CompactionOptionsFIFO_allowCompaction(
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(
JNIEnv*, jobject, jlong jhandle) {
delete reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
}

@ -18,7 +18,7 @@
* Signature: ()J
*/
jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
JNIEnv* /*env*/, jclass /*jcls*/) {
JNIEnv*, jclass) {
const auto* opt = new rocksdb::CompactionOptionsUniversal();
return reinterpret_cast<jlong>(opt);
}
@ -29,7 +29,7 @@ jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jsize_ratio) {
JNIEnv*, jobject, jlong jhandle, jint jsize_ratio) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->size_ratio = static_cast<unsigned int>(jsize_ratio);
}
@ -39,9 +39,8 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
* Method: sizeRatio
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->size_ratio);
}
@ -52,7 +51,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(JNIEnv* /*env*/,
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jmin_merge_width) {
JNIEnv*, jobject, jlong jhandle, jint jmin_merge_width) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->min_merge_width = static_cast<unsigned int>(jmin_merge_width);
}
@ -62,9 +61,8 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
* Method: minMergeWidth
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->min_merge_width);
}
@ -75,7 +73,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(JNIEnv* /*env*/,
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jmax_merge_width) {
JNIEnv*, jobject, jlong jhandle, jint jmax_merge_width) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->max_merge_width = static_cast<unsigned int>(jmax_merge_width);
}
@ -85,9 +83,8 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
* Method: maxMergeWidth
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->max_merge_width);
}
@ -98,8 +95,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(JNIEnv* /*env*/,
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jint jmax_size_amplification_percent) {
JNIEnv*, jobject, jlong jhandle, jint jmax_size_amplification_percent) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->max_size_amplification_percent =
static_cast<unsigned int>(jmax_size_amplification_percent);
@ -111,7 +107,7 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->max_size_amplification_percent);
}
@ -122,7 +118,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
JNIEnv*, jobject, jlong jhandle,
jint jcompression_size_percent) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->compression_size_percent =
@ -135,7 +131,7 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->compression_size_percent);
}
@ -146,7 +142,7 @@ jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
* Signature: (JB)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jbyte jstop_style_value) {
JNIEnv*, jobject, jlong jhandle, jbyte jstop_style_value) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->stop_style = rocksdb::CompactionStopStyleJni::toCppCompactionStopStyle(
jstop_style_value);
@ -157,9 +153,8 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
* Method: stopStyle
* Signature: (J)B
*/
jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return rocksdb::CompactionStopStyleJni::toJavaCompactionStopStyle(
opt->stop_style);
@ -171,8 +166,7 @@ jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(JNIEnv* /*env*/,
* Signature: (JZ)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jboolean jallow_trivial_move) {
JNIEnv*, jobject, jlong jhandle, jboolean jallow_trivial_move) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->allow_trivial_move = static_cast<bool>(jallow_trivial_move);
}
@ -183,7 +177,7 @@ void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return opt->allow_trivial_move;
}
@ -194,6 +188,6 @@ jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
* Signature: (J)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
JNIEnv*, jobject, jlong jhandle) {
delete reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
}

@ -17,7 +17,7 @@
* Signature: ()J
*/
jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
JNIEnv* /*env*/, jclass /*jcls*/) {
JNIEnv*, jclass) {
const auto* opt = new rocksdb::CompressionOptions();
return reinterpret_cast<jlong>(opt);
}
@ -27,10 +27,8 @@ jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
* Method: setWindowBits
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setWindowBits(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle,
jint jwindow_bits) {
void Java_org_rocksdb_CompressionOptions_setWindowBits(
JNIEnv*, jobject, jlong jhandle, jint jwindow_bits) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->window_bits = static_cast<int>(jwindow_bits);
}
@ -40,9 +38,8 @@ void Java_org_rocksdb_CompressionOptions_setWindowBits(JNIEnv* /*env*/,
* Method: windowBits
* Signature: (J)I
*/
jint Java_org_rocksdb_CompressionOptions_windowBits(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jint Java_org_rocksdb_CompressionOptions_windowBits(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->window_bits);
}
@ -52,9 +49,8 @@ jint Java_org_rocksdb_CompressionOptions_windowBits(JNIEnv* /*env*/,
* Method: setLevel
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setLevel(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle, jint jlevel) {
void Java_org_rocksdb_CompressionOptions_setLevel(
JNIEnv*, jobject, jlong jhandle, jint jlevel) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->level = static_cast<int>(jlevel);
}
@ -64,9 +60,8 @@ void Java_org_rocksdb_CompressionOptions_setLevel(JNIEnv* /*env*/,
* Method: level
* Signature: (J)I
*/
jint Java_org_rocksdb_CompressionOptions_level(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jint Java_org_rocksdb_CompressionOptions_level(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->level);
}
@ -76,10 +71,8 @@ jint Java_org_rocksdb_CompressionOptions_level(JNIEnv* /*env*/,
* Method: setStrategy
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setStrategy(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle,
jint jstrategy) {
void Java_org_rocksdb_CompressionOptions_setStrategy(
JNIEnv*, jobject, jlong jhandle, jint jstrategy) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->strategy = static_cast<int>(jstrategy);
}
@ -89,9 +82,8 @@ void Java_org_rocksdb_CompressionOptions_setStrategy(JNIEnv* /*env*/,
* Method: strategy
* Signature: (J)I
*/
jint Java_org_rocksdb_CompressionOptions_strategy(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jint Java_org_rocksdb_CompressionOptions_strategy(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->strategy);
}
@ -101,12 +93,10 @@ jint Java_org_rocksdb_CompressionOptions_strategy(JNIEnv* /*env*/,
* Method: setMaxDictBytes
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle,
jint jmax_dict_bytes) {
void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(
JNIEnv*, jobject, jlong jhandle, jint jmax_dict_bytes) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->max_dict_bytes = static_cast<int>(jmax_dict_bytes);
opt->max_dict_bytes = static_cast<uint32_t>(jmax_dict_bytes);
}
/*
@ -114,44 +104,61 @@ void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(JNIEnv* /*env*/,
* Method: maxDictBytes
* Signature: (J)I
*/
jint Java_org_rocksdb_CompressionOptions_maxDictBytes(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
jint Java_org_rocksdb_CompressionOptions_maxDictBytes(
JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->max_dict_bytes);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: setEnabled
* Method: setZstdMaxTrainBytes
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setEnabled(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle,
jboolean jenabled) {
void Java_org_rocksdb_CompressionOptions_setZstdMaxTrainBytes(
JNIEnv*, jobject, jlong jhandle, jint jzstd_max_train_bytes) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->enabled = static_cast<int>(jenabled);
opt->zstd_max_train_bytes = static_cast<uint32_t>(jzstd_max_train_bytes);
}
/*
 * Class: org_rocksdb_CompressionOptions
 * Method: zstdMaxTrainBytes
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompressionOptions_zstdMaxTrainBytes(
    JNIEnv*, jobject, jlong jhandle) {
  // Current training-data budget for ZSTD dictionary training, in bytes.
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  return static_cast<jint>(opt->zstd_max_train_bytes);
}
/*
 * Class: org_rocksdb_CompressionOptions
 * Method: setEnabled
 * Signature: (JZ)V
 */
void Java_org_rocksdb_CompressionOptions_setEnabled(
    JNIEnv*, jobject, jlong jhandle, jboolean jenabled) {
  // Convert the JNI boolean to a C++ bool and store it.
  auto* opts = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  opts->enabled = (jenabled == JNI_TRUE);
}
/*
 * Class: org_rocksdb_CompressionOptions
 * Method: enabled
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_CompressionOptions_enabled(
    JNIEnv*, jobject, jlong jhandle) {
  // Return the flag as an explicit JNI boolean constant rather than
  // relying on bool -> jboolean integral conversion.
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  return opt->enabled ? JNI_TRUE : JNI_FALSE;
}
/*
 * Class: org_rocksdb_CompressionOptions
 * Method: disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompressionOptions_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Free the native object created by newCompressionOptions.
  delete reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
}

@ -6,66 +6,160 @@
// This file implements the "bridge" between Java and C++ and enables
// calling c++ rocksdb::Env methods from Java side.
#include <jni.h>
#include <vector>
#include "portal.h"
#include "rocksdb/env.h"
#include "include/org_rocksdb_Env.h"
#include "include/org_rocksdb_HdfsEnv.h"
#include "include/org_rocksdb_RocksEnv.h"
#include "include/org_rocksdb_RocksMemEnv.h"
#include "include/org_rocksdb_TimedEnv.h"
/*
 * Class: org_rocksdb_Env
 * Method: getDefaultEnvInternal
 * Signature: ()J
 */
jlong Java_org_rocksdb_Env_getDefaultEnvInternal(
    JNIEnv*, jclass) {
  // Env::Default() is a process-wide singleton; the handle must never be
  // deleted by the caller.
  return reinterpret_cast<jlong>(rocksdb::Env::Default());
}
/*
 * Class: org_rocksdb_RocksEnv
 * Method: disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_RocksEnv_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Destroy the Env owned by this RocksEnv wrapper.
  auto* env_ptr = reinterpret_cast<rocksdb::Env*>(jhandle);
  assert(env_ptr != nullptr);
  delete env_ptr;
}
/*
* Class: org_rocksdb_Env
* Method: setBackgroundThreads
* Signature: (JII)V
* Signature: (JIB)V
*/
void Java_org_rocksdb_Env_setBackgroundThreads(JNIEnv* /*env*/,
jobject /*jobj*/, jlong jhandle,
jint num, jint priority) {
void Java_org_rocksdb_Env_setBackgroundThreads(
JNIEnv*, jobject, jlong jhandle, jint jnum, jbyte jpriority_value) {
auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
switch (priority) {
case org_rocksdb_Env_FLUSH_POOL:
rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::LOW);
break;
case org_rocksdb_Env_COMPACTION_POOL:
rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::HIGH);
break;
}
rocks_env->SetBackgroundThreads(static_cast<int>(jnum),
rocksdb::PriorityJni::toCppPriority(jpriority_value));
}
/*
 * Class: org_rocksdb_Env
 * Method: getBackgroundThreads
 * Signature: (JB)I
 */
jint Java_org_rocksdb_Env_getBackgroundThreads(
    JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
  // Number of threads currently configured for the given priority pool.
  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
  const int num = rocks_env->GetBackgroundThreads(
      rocksdb::PriorityJni::toCppPriority(jpriority_value));
  return static_cast<jint>(num);
}
/*
 * Class: org_rocksdb_Env
 * Method: getThreadPoolQueueLen
 * Signature: (JB)I
 */
jint Java_org_rocksdb_Env_getThreadPoolQueueLen(
    JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
  // Length of the pending-task queue for the given priority pool.
  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
  const int queue_len = rocks_env->GetThreadPoolQueueLen(
      rocksdb::PriorityJni::toCppPriority(jpriority_value));
  return static_cast<jint>(queue_len);
}
/*
 * Class: org_rocksdb_Env
 * Method: incBackgroundThreadsIfNeeded
 * Signature: (JIB)V
 */
void Java_org_rocksdb_Env_incBackgroundThreadsIfNeeded(
    JNIEnv*, jobject, jlong jhandle, jint jnum, jbyte jpriority_value) {
  // Grow the given priority pool up to jnum threads if it is smaller.
  auto* native_env = reinterpret_cast<rocksdb::Env*>(jhandle);
  const auto priority = rocksdb::PriorityJni::toCppPriority(jpriority_value);
  native_env->IncBackgroundThreadsIfNeeded(static_cast<int>(jnum), priority);
}
/*
 * Class: org_rocksdb_Env
 * Method: lowerThreadPoolIOPriority
 * Signature: (JB)V
 */
void Java_org_rocksdb_Env_lowerThreadPoolIOPriority(
    JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
  // Lower the I/O priority of the threads in the selected pool.
  auto* native_env = reinterpret_cast<rocksdb::Env*>(jhandle);
  const auto priority = rocksdb::PriorityJni::toCppPriority(jpriority_value);
  native_env->LowerThreadPoolIOPriority(priority);
}
/*
 * Class: org_rocksdb_Env
 * Method: lowerThreadPoolCPUPriority
 * Signature: (JB)V
 */
void Java_org_rocksdb_Env_lowerThreadPoolCPUPriority(
    JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
  // Lower the CPU priority of the threads in the selected pool.
  auto* native_env = reinterpret_cast<rocksdb::Env*>(jhandle);
  const auto priority = rocksdb::PriorityJni::toCppPriority(jpriority_value);
  native_env->LowerThreadPoolCPUPriority(priority);
}
/*
 * Class:     org_rocksdb_Env
 * Method:    getThreadList
 * Signature: (J)[Lorg/rocksdb/ThreadStatus;
 */
jobjectArray Java_org_rocksdb_Env_getThreadList(
    JNIEnv* env, jobject, jlong jhandle) {
  // Snapshot the status of all background threads owned by this Env and
  // marshal them into a Java ThreadStatus[].
  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
  std::vector<rocksdb::ThreadStatus> thread_status;
  rocksdb::Status s = rocks_env->GetThreadList(&thread_status);
  if (!s.ok()) {
    // error, throw exception
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
    return nullptr;
  }

  // object[]
  const jsize len = static_cast<jsize>(thread_status.size());
  jobjectArray jthread_status =
      env->NewObjectArray(len, rocksdb::ThreadStatusJni::getJClass(env), nullptr);
  if (jthread_status == nullptr) {
    // an exception occurred (e.g. OutOfMemoryError)
    return nullptr;
  }
  for (jsize i = 0; i < len; ++i) {
    jobject jts =
        rocksdb::ThreadStatusJni::construct(env, &(thread_status[i]));
    env->SetObjectArrayElement(jthread_status, i, jts);
    if (env->ExceptionCheck()) {
      // exception occurred while storing the element
      env->DeleteLocalRef(jthread_status);
      return nullptr;
    }
  }

  return jthread_status;
}
/*
 * Class:     org_rocksdb_RocksMemEnv
 * Method:    createMemEnv
 * Signature: (J)J
 */
jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(
    JNIEnv*, jclass, jlong jbase_env_handle) {
  // Wraps the given base Env in an in-memory Env. Ownership of the returned
  // Env passes to the Java object and is released via disposeInternal.
  auto* base_env = reinterpret_cast<rocksdb::Env*>(jbase_env_handle);
  return reinterpret_cast<jlong>(rocksdb::NewMemEnv(base_env));
}
/*
 * Class:     org_rocksdb_RocksMemEnv
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_RocksMemEnv_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Deletes the native Env created by createMemEnv.
  auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
  assert(e != nullptr);
  delete e;
}
/*
 * Class:     org_rocksdb_HdfsEnv
 * Method:    createHdfsEnv
 * Signature: (Ljava/lang/String;)J
 */
jlong Java_org_rocksdb_HdfsEnv_createHdfsEnv(
    JNIEnv* env, jclass, jstring jfsname) {
  // Copy the filesystem name out of the JVM; a pending Java exception is
  // reported through has_exception.
  jboolean has_exception = JNI_FALSE;
  const std::string fsname =
      rocksdb::JniUtil::copyStdString(env, jfsname, &has_exception);
  if (has_exception == JNI_TRUE) {
    return 0;  // a Java exception is already pending
  }

  rocksdb::Env* hdfs_env = nullptr;
  const rocksdb::Status status = rocksdb::NewHdfsEnv(&hdfs_env, fsname);
  if (!status.ok()) {
    // error occurred - surface it as a RocksDBException
    rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
    return 0;
  }
  return reinterpret_cast<jlong>(hdfs_env);
}
/*
 * Class:     org_rocksdb_HdfsEnv
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_HdfsEnv_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Deletes the native Env created by createHdfsEnv.
  auto* hdfs_env = reinterpret_cast<rocksdb::Env*>(jhandle);
  assert(hdfs_env != nullptr);
  delete hdfs_env;
}
/*
 * Class:     org_rocksdb_TimedEnv
 * Method:    createTimedEnv
 * Signature: (J)J
 */
jlong Java_org_rocksdb_TimedEnv_createTimedEnv(
    JNIEnv*, jclass, jlong jbase_env_handle) {
  // Wraps the given base Env with timing instrumentation; the returned Env
  // is owned by the Java object and released via disposeInternal.
  return reinterpret_cast<jlong>(
      rocksdb::NewTimedEnv(reinterpret_cast<rocksdb::Env*>(jbase_env_handle)));
}
/*
 * Class:     org_rocksdb_TimedEnv
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_TimedEnv_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Deletes the native Env created by createTimedEnv.
  auto* timed_env = reinterpret_cast<rocksdb::Env*>(jhandle);
  assert(timed_env != nullptr);
  delete timed_env;
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    newEnvOptions
 * Signature: ()J
 */
jlong Java_org_rocksdb_EnvOptions_newEnvOptions__(
    JNIEnv*, jclass) {
  // Default-constructed EnvOptions; owned by the Java object.
  auto* env_opt = new rocksdb::EnvOptions();
  return reinterpret_cast<jlong>(env_opt);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    newEnvOptions
 * Signature: (J)J
 */
jlong Java_org_rocksdb_EnvOptions_newEnvOptions__J(
    JNIEnv*, jclass, jlong jdboptions_handle) {
  // Builds EnvOptions derived from an existing DBOptions instance.
  auto* db_options = reinterpret_cast<rocksdb::DBOptions*>(jdboptions_handle);
  return reinterpret_cast<jlong>(new rocksdb::EnvOptions(*db_options));
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_EnvOptions_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Deletes the native EnvOptions created by one of the newEnvOptions
  // factory methods.
  auto* eo = reinterpret_cast<rocksdb::EnvOptions*>(jhandle);
  assert(eo != nullptr);
  delete eo;
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setUseMmapReads
 * Signature: (JZ)V
 */
void Java_org_rocksdb_EnvOptions_setUseMmapReads(
    JNIEnv*, jobject, jlong jhandle, jboolean use_mmap_reads) {
  ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_reads);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    useMmapReads
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_EnvOptions_useMmapReads(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, use_mmap_reads);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setUseMmapWrites
 * Signature: (JZ)V
 */
void Java_org_rocksdb_EnvOptions_setUseMmapWrites(
    JNIEnv*, jobject, jlong jhandle, jboolean use_mmap_writes) {
  ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_writes);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    useMmapWrites
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, use_mmap_writes);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setUseDirectReads
 * Signature: (JZ)V
 */
void Java_org_rocksdb_EnvOptions_setUseDirectReads(
    JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
  ENV_OPTIONS_SET_BOOL(jhandle, use_direct_reads);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    useDirectReads
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_EnvOptions_useDirectReads(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, use_direct_reads);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setUseDirectWrites
 * Signature: (JZ)V
 */
void Java_org_rocksdb_EnvOptions_setUseDirectWrites(
    JNIEnv*, jobject, jlong jhandle, jboolean use_direct_writes) {
  ENV_OPTIONS_SET_BOOL(jhandle, use_direct_writes);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    useDirectWrites
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_EnvOptions_useDirectWrites(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, use_direct_writes);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setAllowFallocate
 * Signature: (JZ)V
 */
void Java_org_rocksdb_EnvOptions_setAllowFallocate(
    JNIEnv*, jobject, jlong jhandle, jboolean allow_fallocate) {
  ENV_OPTIONS_SET_BOOL(jhandle, allow_fallocate);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    allowFallocate
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_EnvOptions_allowFallocate(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, allow_fallocate);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setSetFdCloexec
 * Signature: (JZ)V
 */
void Java_org_rocksdb_EnvOptions_setSetFdCloexec(
    JNIEnv*, jobject, jlong jhandle, jboolean set_fd_cloexec) {
  ENV_OPTIONS_SET_BOOL(jhandle, set_fd_cloexec);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setFdCloexec
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, set_fd_cloexec);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setBytesPerSync
 * Signature: (JJ)V
 */
void Java_org_rocksdb_EnvOptions_setBytesPerSync(
    JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
  ENV_OPTIONS_SET_UINT64_T(jhandle, bytes_per_sync);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    bytesPerSync
 * Signature: (J)J
 */
jlong Java_org_rocksdb_EnvOptions_bytesPerSync(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, bytes_per_sync);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setFallocateWithKeepSize
 * Signature: (JZ)V
 */
void Java_org_rocksdb_EnvOptions_setFallocateWithKeepSize(
    JNIEnv*, jobject, jlong jhandle, jboolean fallocate_with_keep_size) {
  ENV_OPTIONS_SET_BOOL(jhandle, fallocate_with_keep_size);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    fallocateWithKeepSize
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, fallocate_with_keep_size);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setCompactionReadaheadSize
 * Signature: (JJ)V
 */
void Java_org_rocksdb_EnvOptions_setCompactionReadaheadSize(
    JNIEnv*, jobject, jlong jhandle, jlong compaction_readahead_size) {
  ENV_OPTIONS_SET_SIZE_T(jhandle, compaction_readahead_size);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    compactionReadaheadSize
 * Signature: (J)J
 */
jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, compaction_readahead_size);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setRandomAccessMaxBufferSize
 * Signature: (JJ)V
 */
void Java_org_rocksdb_EnvOptions_setRandomAccessMaxBufferSize(
    JNIEnv*, jobject, jlong jhandle, jlong random_access_max_buffer_size) {
  ENV_OPTIONS_SET_SIZE_T(jhandle, random_access_max_buffer_size);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    randomAccessMaxBufferSize
 * Signature: (J)J
 */
jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, random_access_max_buffer_size);
}
/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    setWritableFileMaxBufferSize
 * Signature: (JJ)V
 */
void Java_org_rocksdb_EnvOptions_setWritableFileMaxBufferSize(
    JNIEnv*, jobject, jlong jhandle, jlong writable_file_max_buffer_size) {
  ENV_OPTIONS_SET_SIZE_T(jhandle, writable_file_max_buffer_size);
}

/*
 * Class:     org_rocksdb_EnvOptions
 * Method:    writableFileMaxBufferSize
 * Signature: (J)J
 */
jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(
    JNIEnv*, jobject, jlong jhandle) {
  return ENV_OPTIONS_GET(jhandle, writable_file_max_buffer_size);
}
@ -304,9 +288,8 @@ jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(JNIEnv * /*env*/,
* Method: setRateLimiter
* Signature: (JJ)V
*/
void Java_org_rocksdb_EnvOptions_setRateLimiter(JNIEnv * /*env*/,
jobject /*jobj*/, jlong jhandle,
jlong rl_handle) {
void Java_org_rocksdb_EnvOptions_setRateLimiter(
JNIEnv*, jobject, jlong jhandle, jlong rl_handle) {
auto *sptr_rate_limiter =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(rl_handle);
auto *env_opt = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);

/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    newIngestExternalFileOptions
 * Signature: ()J
 */
jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__(
    JNIEnv*, jclass) {
  // Default-constructed options; owned by the Java object.
  auto* options = new rocksdb::IngestExternalFileOptions();
  return reinterpret_cast<jlong>(options);
}
@ -28,7 +28,7 @@ jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__(
* Signature: (ZZZZ)J
*/
jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__ZZZZ(
JNIEnv* /*env*/, jclass /*jcls*/, jboolean jmove_files,
JNIEnv*, jclass, jboolean jmove_files,
jboolean jsnapshot_consistency, jboolean jallow_global_seqno,
jboolean jallow_blocking_flush) {
auto* options = new rocksdb::IngestExternalFileOptions();
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    moveFiles
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(
    JNIEnv*, jobject, jlong jhandle) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  return static_cast<jboolean>(options->move_files);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    setMoveFiles
 * Signature: (JZ)V
 */
void Java_org_rocksdb_IngestExternalFileOptions_setMoveFiles(
    JNIEnv*, jobject, jlong jhandle, jboolean jmove_files) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  options->move_files = static_cast<bool>(jmove_files);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    snapshotConsistency
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_IngestExternalFileOptions_snapshotConsistency(
    JNIEnv*, jobject, jlong jhandle) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  return static_cast<jboolean>(options->snapshot_consistency);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    setSnapshotConsistency
 * Signature: (JZ)V
 */
void Java_org_rocksdb_IngestExternalFileOptions_setSnapshotConsistency(
    JNIEnv*, jobject, jlong jhandle, jboolean jsnapshot_consistency) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  options->snapshot_consistency = static_cast<bool>(jsnapshot_consistency);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    allowGlobalSeqNo
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_IngestExternalFileOptions_allowGlobalSeqNo(
    JNIEnv*, jobject, jlong jhandle) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  return static_cast<jboolean>(options->allow_global_seqno);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    setAllowGlobalSeqNo
 * Signature: (JZ)V
 */
void Java_org_rocksdb_IngestExternalFileOptions_setAllowGlobalSeqNo(
    JNIEnv*, jobject, jlong jhandle, jboolean jallow_global_seqno) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  options->allow_global_seqno = static_cast<bool>(jallow_global_seqno);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    allowBlockingFlush
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_IngestExternalFileOptions_allowBlockingFlush(
    JNIEnv*, jobject, jlong jhandle) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  return static_cast<jboolean>(options->allow_blocking_flush);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    setAllowBlockingFlush
 * Signature: (JZ)V
 */
void Java_org_rocksdb_IngestExternalFileOptions_setAllowBlockingFlush(
    JNIEnv*, jobject, jlong jhandle, jboolean jallow_blocking_flush) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  options->allow_blocking_flush = static_cast<bool>(jallow_blocking_flush);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    ingestBehind
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_IngestExternalFileOptions_ingestBehind(
    JNIEnv*, jobject, jlong jhandle) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  // static_cast for consistency with the other boolean getters in this file
  // (equivalent to the previous `== JNI_TRUE` comparison for a bool field).
  return static_cast<jboolean>(options->ingest_behind);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    setIngestBehind
 * Signature: (JZ)V
 */
void Java_org_rocksdb_IngestExternalFileOptions_setIngestBehind(
    JNIEnv*, jobject, jlong jhandle, jboolean jingest_behind) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  // jboolean is JNI_TRUE/JNI_FALSE; compare to obtain a C++ bool.
  options->ingest_behind = jingest_behind == JNI_TRUE;
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    writeGlobalSeqno
 * Signature: (J)Z
 */
// Note: JNIEXPORT/JNICALL dropped for consistency with every other
// definition in this file (they belong in the generated header).
jboolean Java_org_rocksdb_IngestExternalFileOptions_writeGlobalSeqno(
    JNIEnv*, jobject, jlong jhandle) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  return static_cast<jboolean>(options->write_global_seqno);
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    setWriteGlobalSeqno
 * Signature: (JZ)V
 */
// Note: JNIEXPORT/JNICALL dropped for consistency with every other
// definition in this file (they belong in the generated header).
void Java_org_rocksdb_IngestExternalFileOptions_setWriteGlobalSeqno(
    JNIEnv*, jobject, jlong jhandle, jboolean jwrite_global_seqno) {
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  options->write_global_seqno = jwrite_global_seqno == JNI_TRUE;
}
/*
 * Class:     org_rocksdb_IngestExternalFileOptions
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_IngestExternalFileOptions_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Deletes the native options object created by one of the
  // newIngestExternalFileOptions factory methods.
  auto* options =
      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
  delete options;
  // @lint-ignore TXT4 T25377293 Grandfathered in
}

@ -66,7 +66,7 @@ jobject Java_org_rocksdb_MemoryUtil_getApproximateMemoryUsageByType(
// exception occurred
return nullptr;
}
const rocksdb::HashMapJni::FnMapKV<const rocksdb::MemoryUtil::UsageType, const uint64_t>
const rocksdb::HashMapJni::FnMapKV<const rocksdb::MemoryUtil::UsageType, const uint64_t, jobject, jobject>
fn_map_kv =
[env](const std::pair<rocksdb::MemoryUtil::UsageType, uint64_t>& pair) {
// Construct key

@ -20,7 +20,7 @@
jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
JNIEnv* env, jobject /*jobj*/, jlong jbucket_count, jint jheight,
jint jbranching_factor) {
rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jbucket_count);
rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jbucket_count);
if (s.ok()) {
return reinterpret_cast<jlong>(rocksdb::NewHashSkipListRepFactory(
static_cast<size_t>(jbucket_count), static_cast<int32_t>(jheight),
@ -40,9 +40,9 @@ jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
jlong jhuge_page_tlb_size, jint jbucket_entries_logging_threshold,
jboolean jif_log_bucket_dist_when_flash, jint jthreshold_use_skiplist) {
rocksdb::Status statusBucketCount =
rocksdb::check_if_jlong_fits_size_t(jbucket_count);
rocksdb::JniUtil::check_if_jlong_fits_size_t(jbucket_count);
rocksdb::Status statusHugePageTlb =
rocksdb::check_if_jlong_fits_size_t(jhuge_page_tlb_size);
rocksdb::JniUtil::check_if_jlong_fits_size_t(jhuge_page_tlb_size);
if (statusBucketCount.ok() && statusHugePageTlb.ok()) {
return reinterpret_cast<jlong>(rocksdb::NewHashLinkListRepFactory(
static_cast<size_t>(jbucket_count),
@ -63,7 +63,7 @@ jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
*/
jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
JNIEnv* env, jobject /*jobj*/, jlong jreserved_size) {
rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jreserved_size);
rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jreserved_size);
if (s.ok()) {
return reinterpret_cast<jlong>(
new rocksdb::VectorRepFactory(static_cast<size_t>(jreserved_size)));
@ -79,7 +79,7 @@ jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
*/
jlong Java_org_rocksdb_SkipListMemTableConfig_newMemTableFactoryHandle0(
JNIEnv* env, jobject /*jobj*/, jlong jlookahead) {
rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jlookahead);
rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jlookahead);
if (s.ok()) {
return reinterpret_cast<jlong>(
new rocksdb::SkipListFactory(static_cast<size_t>(jlookahead)));

@ -22,7 +22,7 @@
* Signature: (JLjava/lang/String;)J
*/
jlong Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2(
JNIEnv* env, jclass /*jcls*/, jlong joptions_handle, jstring jdb_path) {
JNIEnv* env, jclass, jlong joptions_handle, jstring jdb_path) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
// exception thrown: OutOfMemoryError
@ -50,7 +50,7 @@ jlong Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2(
*/
jlongArray
Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2_3_3B_3J(
JNIEnv* env, jclass /*jcls*/, jlong jdb_options_handle, jstring jdb_path,
JNIEnv* env, jclass, jlong jdb_options_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options_handles) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
@ -150,14 +150,40 @@ Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2_3_3B_3J(
return nullptr;
}
/*
 * Class:     org_rocksdb_OptimisticTransactionDB
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_OptimisticTransactionDB_disposeInternal(
    JNIEnv *, jobject, jlong jhandle) {
  // Deletes the native OptimisticTransactionDB owned by the Java object.
  auto* optimistic_txn_db =
      reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
  assert(optimistic_txn_db != nullptr);
  delete optimistic_txn_db;
}
/*
 * Class:     org_rocksdb_OptimisticTransactionDB
 * Method:    closeDatabase
 * Signature: (J)V
 */
void Java_org_rocksdb_OptimisticTransactionDB_closeDatabase(
    JNIEnv* env, jclass, jlong jhandle) {
  // Closes the database without deleting the native object; the handle
  // remains valid for disposeInternal.
  auto* optimistic_txn_db =
      reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
  assert(optimistic_txn_db != nullptr);
  rocksdb::Status s = optimistic_txn_db->Close();
  // NOTE(review): assumes ThrowNew is a no-op when s.ok() — confirm portal.h.
  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
/*
* Class: org_rocksdb_OptimisticTransactionDB
* Method: beginTransaction
* Signature: (JJ)J
*/
jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJ(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jlong jwrite_options_handle) {
JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) {
auto* optimistic_txn_db =
reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
auto* write_options =
@ -193,8 +219,8 @@ jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJJ(
* Signature: (JJJ)J
*/
jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJ(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jlong jwrite_options_handle, jlong jold_txn_handle) {
JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
jlong jold_txn_handle) {
auto* optimistic_txn_db =
reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
auto* write_options =
@ -218,9 +244,8 @@ jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJ(
* Signature: (JJJJ)J
*/
jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJJ(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jlong jwrite_options_handle, jlong joptimistic_txn_options_handle,
jlong jold_txn_handle) {
JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
jlong joptimistic_txn_options_handle, jlong jold_txn_handle) {
auto* optimistic_txn_db =
reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
auto* write_options =
/*
 * Class:     org_rocksdb_OptimisticTransactionDB
 * Method:    getBaseDB
 * Signature: (J)J
 */
jlong Java_org_rocksdb_OptimisticTransactionDB_getBaseDB(
    JNIEnv*, jobject, jlong jhandle) {
  // Returns a non-owning handle to the underlying DB; the
  // OptimisticTransactionDB retains ownership.
  auto* optimistic_txn_db =
      reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
  return reinterpret_cast<jlong>(optimistic_txn_db->GetBaseDB());
}

File diff suppressed because it is too large Load Diff

@ -7,6 +7,7 @@
// calling C++ rocksdb::OptionsUtil methods from Java side.
#include <jni.h>
#include <string>
#include "include/org_rocksdb_OptionsUtil.h"
@ -56,19 +57,23 @@ void build_column_family_descriptor_list(
/*
 * Class:     org_rocksdb_OptionsUtil
 * Method:    loadLatestOptions
 */
void Java_org_rocksdb_OptionsUtil_loadLatestOptions(
    JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle,
    jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) {
  // Copy the path out of the JVM; a pending Java exception is reported
  // through has_exception.
  jboolean has_exception = JNI_FALSE;
  auto db_path = rocksdb::JniUtil::copyStdString(env, jdbpath, &has_exception);
  if (has_exception == JNI_TRUE) {
    // exception occurred
    return;
  }
  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
  rocksdb::Status s = rocksdb::LoadLatestOptions(
      db_path, reinterpret_cast<rocksdb::Env*>(jenv_handle),
      reinterpret_cast<rocksdb::DBOptions*>(jdb_opts_handle), &cf_descs,
      ignore_unknown_options);
  if (!s.ok()) {
    // error, raise an exception
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
  } else {
    // populate the Java-side column family descriptor list
    build_column_family_descriptor_list(env, jcfds, cf_descs);
  }
}
/*
 * Class:     org_rocksdb_OptionsUtil
 * Method:    loadOptionsFromFile
 */
void Java_org_rocksdb_OptionsUtil_loadOptionsFromFile(
    JNIEnv* env, jclass /*jcls*/, jstring jopts_file_name, jlong jenv_handle,
    jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) {
  // Copy the file name out of the JVM; a pending Java exception is reported
  // through has_exception.
  jboolean has_exception = JNI_FALSE;
  auto opts_file_name =
      rocksdb::JniUtil::copyStdString(env, jopts_file_name, &has_exception);
  if (has_exception == JNI_TRUE) {
    // exception occurred
    return;
  }
  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
  rocksdb::Status s = rocksdb::LoadOptionsFromFile(
      opts_file_name, reinterpret_cast<rocksdb::Env*>(jenv_handle),
      reinterpret_cast<rocksdb::DBOptions*>(jdb_opts_handle), &cf_descs,
      ignore_unknown_options);
  if (!s.ok()) {
    // error, raise an exception
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
  } else {
    // populate the Java-side column family descriptor list
    build_column_family_descriptor_list(env, jcfds, cf_descs);
  }
}
/*
 * Class:     org_rocksdb_OptionsUtil
 * Method:    getLatestOptionsFileName
 */
jstring Java_org_rocksdb_OptionsUtil_getLatestOptionsFileName(
    JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle) {
  // Copy the path out of the JVM; a pending Java exception is reported
  // through has_exception.
  jboolean has_exception = JNI_FALSE;
  auto db_path = rocksdb::JniUtil::copyStdString(env, jdbpath, &has_exception);
  if (has_exception == JNI_TRUE) {
    // exception occurred
    return nullptr;
  }
  std::string options_file_name;
  rocksdb::Status s = rocksdb::GetLatestOptionsFileName(
      db_path, reinterpret_cast<rocksdb::Env*>(jenv_handle),
      &options_file_name);
  if (!s.ok()) {
    // error, raise an exception
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
    return nullptr;
  } else {
    return env->NewStringUTF(options_file_name.c_str());
  }
}

@ -0,0 +1,53 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::PersistentCache.
#include <jni.h>
#include <string>
#include "include/org_rocksdb_PersistentCache.h"
#include "rocksdb/persistent_cache.h"
#include "loggerjnicallback.h"
#include "portal.h"
/*
 * Class:     org_rocksdb_PersistentCache
 * Method:    newPersistentCache
 * Signature: (JLjava/lang/String;JJZ)J
 */
jlong Java_org_rocksdb_PersistentCache_newPersistentCache(
    JNIEnv* env, jclass, jlong jenv_handle, jstring jpath,
    jlong jsz, jlong jlogger_handle, jboolean joptimized_for_nvm) {
  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jenv_handle);
  jboolean has_exception = JNI_FALSE;
  std::string path = rocksdb::JniUtil::copyStdString(env, jpath, &has_exception);
  if (has_exception == JNI_TRUE) {
    // a Java exception is already pending
    return 0;
  }
  auto* logger =
      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(jlogger_handle);
  // heap-allocated shared_ptr holder; ownership passes to the Java object
  // and is released via disposeInternal
  auto* cache = new std::shared_ptr<rocksdb::PersistentCache>(nullptr);
  rocksdb::Status s = rocksdb::NewPersistentCache(
      rocks_env, path, static_cast<uint64_t>(jsz), *logger,
      static_cast<bool>(joptimized_for_nvm), cache);
  if (!s.ok()) {
    // don't leak the holder or hand Java a handle to an empty cache
    delete cache;
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
    return 0;
  }
  return reinterpret_cast<jlong>(cache);
}
/*
 * Class:     org_rocksdb_PersistentCache
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_PersistentCache_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Releases the shared_ptr holder created by newPersistentCache; the cache
  // itself is destroyed when the last shared_ptr reference goes away.
  auto* cache =
      reinterpret_cast<std::shared_ptr<rocksdb::PersistentCache>*>(jhandle);
  delete cache;  // delete std::shared_ptr
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -129,6 +129,8 @@ jobject Java_org_rocksdb_SstFileManager_getTrackedFiles(JNIEnv* env,
reinterpret_cast<std::shared_ptr<rocksdb::SstFileManager>*>(jhandle);
auto tracked_files = sptr_sst_file_manager->get()->GetTrackedFiles();
//TODO(AR) could refactor to share code with rocksdb::HashMapJni::fromCppMap(env, tracked_files);
const jobject jtracked_files = rocksdb::HashMapJni::construct(
env, static_cast<uint32_t>(tracked_files.size()));
if (jtracked_files == nullptr) {
@ -136,7 +138,7 @@ jobject Java_org_rocksdb_SstFileManager_getTrackedFiles(JNIEnv* env,
return nullptr;
}
const rocksdb::HashMapJni::FnMapKV<const std::string, const uint64_t>
const rocksdb::HashMapJni::FnMapKV<const std::string, const uint64_t, jobject, jobject>
fn_map_kv =
[env](const std::pair<const std::string, const uint64_t>& pair) {
const jstring jtracked_file_path =

@ -20,8 +20,10 @@
* Method: newStatistics
* Signature: ()J
*/
jlong Java_org_rocksdb_Statistics_newStatistics__(JNIEnv* env, jclass jcls) {
return Java_org_rocksdb_Statistics_newStatistics___3BJ(env, jcls, nullptr, 0);
jlong Java_org_rocksdb_Statistics_newStatistics__(
JNIEnv* env, jclass jcls) {
return Java_org_rocksdb_Statistics_newStatistics___3BJ(
env, jcls, nullptr, 0);
}
/*
@ -40,10 +42,10 @@ jlong Java_org_rocksdb_Statistics_newStatistics__J(
* Method: newStatistics
* Signature: ([B)J
*/
jlong Java_org_rocksdb_Statistics_newStatistics___3B(JNIEnv* env, jclass jcls,
jbyteArray jhistograms) {
return Java_org_rocksdb_Statistics_newStatistics___3BJ(env, jcls, jhistograms,
0);
jlong Java_org_rocksdb_Statistics_newStatistics___3B(
JNIEnv* env, jclass jcls, jbyteArray jhistograms) {
return Java_org_rocksdb_Statistics_newStatistics___3BJ(
env, jcls, jhistograms, 0);
}
/*
@ -52,8 +54,7 @@ jlong Java_org_rocksdb_Statistics_newStatistics___3B(JNIEnv* env, jclass jcls,
* Signature: ([BJ)J
*/
jlong Java_org_rocksdb_Statistics_newStatistics___3BJ(
JNIEnv* env, jclass /*jcls*/, jbyteArray jhistograms,
jlong jother_statistics_handle) {
JNIEnv* env, jclass, jbyteArray jhistograms, jlong jother_statistics_handle) {
std::shared_ptr<rocksdb::Statistics>* pSptr_other_statistics = nullptr;
if (jother_statistics_handle > 0) {
pSptr_other_statistics =
@ -97,9 +98,8 @@ jlong Java_org_rocksdb_Statistics_newStatistics___3BJ(
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_Statistics_disposeInternal(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
void Java_org_rocksdb_Statistics_disposeInternal(
JNIEnv*, jobject, jlong jhandle) {
if (jhandle > 0) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
@ -112,8 +112,8 @@ void Java_org_rocksdb_Statistics_disposeInternal(JNIEnv* /*env*/,
* Method: statsLevel
* Signature: (J)B
*/
jbyte Java_org_rocksdb_Statistics_statsLevel(JNIEnv* /*env*/, jobject /*jobj*/,
jlong jhandle) {
jbyte Java_org_rocksdb_Statistics_statsLevel(
JNIEnv*, jobject, jlong jhandle) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
@ -126,9 +126,8 @@ jbyte Java_org_rocksdb_Statistics_statsLevel(JNIEnv* /*env*/, jobject /*jobj*/,
* Method: setStatsLevel
* Signature: (JB)V
*/
void Java_org_rocksdb_Statistics_setStatsLevel(JNIEnv* /*env*/,
jobject /*jobj*/, jlong jhandle,
jbyte jstats_level) {
void Java_org_rocksdb_Statistics_setStatsLevel(
JNIEnv*, jobject, jlong jhandle, jbyte jstats_level) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
@ -141,15 +140,14 @@ void Java_org_rocksdb_Statistics_setStatsLevel(JNIEnv* /*env*/,
* Method: getTickerCount
* Signature: (JB)J
*/
jlong Java_org_rocksdb_Statistics_getTickerCount(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle,
jbyte jticker_type) {
jlong Java_org_rocksdb_Statistics_getTickerCount(
JNIEnv*, jobject, jlong jhandle, jbyte jticker_type) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
auto ticker = rocksdb::TickerTypeJni::toCppTickers(jticker_type);
return pSptr_statistics->get()->getTickerCount(ticker);
uint64_t count = pSptr_statistics->get()->getTickerCount(ticker);
return static_cast<jlong>(count);
}
/*
@ -157,10 +155,8 @@ jlong Java_org_rocksdb_Statistics_getTickerCount(JNIEnv* /*env*/,
* Method: getAndResetTickerCount
* Signature: (JB)J
*/
jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle,
jbyte jticker_type) {
jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(
JNIEnv*, jobject, jlong jhandle, jbyte jticker_type) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
@ -173,17 +169,16 @@ jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(JNIEnv* /*env*/,
* Method: getHistogramData
* Signature: (JB)Lorg/rocksdb/HistogramData;
*/
jobject Java_org_rocksdb_Statistics_getHistogramData(JNIEnv* env,
jobject /*jobj*/,
jlong jhandle,
jbyte jhistogram_type) {
jobject Java_org_rocksdb_Statistics_getHistogramData(
JNIEnv* env, jobject, jlong jhandle, jbyte jhistogram_type) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
rocksdb::HistogramData
data; // TODO(AR) perhaps better to construct a Java Object Wrapper that
// uses ptr to C++ `new HistogramData`
// TODO(AR) perhaps better to construct a Java Object Wrapper that
// uses ptr to C++ `new HistogramData`
rocksdb::HistogramData data;
auto histogram = rocksdb::HistogramTypeJni::toCppHistograms(jhistogram_type);
pSptr_statistics->get()->histogramData(
static_cast<rocksdb::Histograms>(histogram), &data);
@ -211,10 +206,8 @@ jobject Java_org_rocksdb_Statistics_getHistogramData(JNIEnv* env,
* Method: getHistogramString
* Signature: (JB)Ljava/lang/String;
*/
jstring Java_org_rocksdb_Statistics_getHistogramString(JNIEnv* env,
jobject /*jobj*/,
jlong jhandle,
jbyte jhistogram_type) {
jstring Java_org_rocksdb_Statistics_getHistogramString(
JNIEnv* env, jobject, jlong jhandle, jbyte jhistogram_type) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
@ -228,8 +221,8 @@ jstring Java_org_rocksdb_Statistics_getHistogramString(JNIEnv* env,
* Method: reset
* Signature: (J)V
*/
void Java_org_rocksdb_Statistics_reset(JNIEnv* env, jobject /*jobj*/,
jlong jhandle) {
void Java_org_rocksdb_Statistics_reset(
JNIEnv* env, jobject, jlong jhandle) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
@ -244,8 +237,8 @@ void Java_org_rocksdb_Statistics_reset(JNIEnv* env, jobject /*jobj*/,
* Method: toString
* Signature: (J)Ljava/lang/String;
*/
jstring Java_org_rocksdb_Statistics_toString(JNIEnv* env, jobject /*jobj*/,
jlong jhandle) {
jstring Java_org_rocksdb_Statistics_toString(
JNIEnv* env, jobject, jlong jhandle) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);

@ -9,6 +9,7 @@
#include <jni.h>
#include "include/org_rocksdb_BlockBasedTableConfig.h"
#include "include/org_rocksdb_PlainTableConfig.h"
#include "portal.h"
#include "rocksdb/cache.h"
#include "rocksdb/filter_policy.h"
@ -35,69 +36,102 @@ jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle(
/*
* Class: org_rocksdb_BlockBasedTableConfig
* Method: newTableFactoryHandle
* Signature: (ZJIJJIIZJZZZZJZZJIBBI)J
* Signature: (ZZZZBBDBZJJJJIIIJZZJZZIIZZJIJI)J
*/
jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
JNIEnv * /*env*/, jobject /*jobj*/, jboolean no_block_cache,
jlong block_cache_size, jint block_cache_num_shardbits, jlong jblock_cache,
jlong block_size, jint block_size_deviation, jint block_restart_interval,
jboolean whole_key_filtering, jlong jfilter_policy,
jboolean cache_index_and_filter_blocks,
jboolean cache_index_and_filter_blocks_with_high_priority,
jboolean pin_l0_filter_and_index_blocks_in_cache,
jboolean partition_filters, jlong metadata_block_size,
jboolean pin_top_level_index_and_filter,
jboolean hash_index_allow_collision, jlong block_cache_compressed_size,
jint block_cache_compressd_num_shard_bits, jbyte jchecksum_type,
jbyte jindex_type, jint jformat_version) {
JNIEnv*, jobject, jboolean jcache_index_and_filter_blocks,
jboolean jcache_index_and_filter_blocks_with_high_priority,
jboolean jpin_l0_filter_and_index_blocks_in_cache,
jboolean jpin_top_level_index_and_filter, jbyte jindex_type_value,
jbyte jdata_block_index_type_value,
jdouble jdata_block_hash_table_util_ratio, jbyte jchecksum_type_value,
jboolean jno_block_cache, jlong jblock_cache_handle,
jlong jpersistent_cache_handle,
jlong jblock_cache_compressed_handle, jlong jblock_size,
jint jblock_size_deviation, jint jblock_restart_interval,
jint jindex_block_restart_interval, jlong jmetadata_block_size,
jboolean jpartition_filters, jboolean juse_delta_encoding,
jlong jfilter_policy_handle, jboolean jwhole_key_filtering,
jboolean jverify_compression, jint jread_amp_bytes_per_bit,
jint jformat_version, jboolean jenable_index_compression,
jboolean jblock_align, jlong jblock_cache_size,
jint jblock_cache_num_shard_bits, jlong jblock_cache_compressed_size,
jint jblock_cache_compressed_num_shard_bits) {
rocksdb::BlockBasedTableOptions options;
options.no_block_cache = no_block_cache;
if (!no_block_cache) {
if (jblock_cache > 0) {
options.cache_index_and_filter_blocks =
static_cast<bool>(jcache_index_and_filter_blocks);
options.cache_index_and_filter_blocks_with_high_priority =
static_cast<bool>(jcache_index_and_filter_blocks_with_high_priority);
options.pin_l0_filter_and_index_blocks_in_cache =
static_cast<bool>(jpin_l0_filter_and_index_blocks_in_cache);
options.pin_top_level_index_and_filter =
static_cast<bool>(jpin_top_level_index_and_filter);
options.index_type =
rocksdb::IndexTypeJni::toCppIndexType(jindex_type_value);
options.data_block_index_type =
rocksdb::DataBlockIndexTypeJni::toCppDataBlockIndexType(
jdata_block_index_type_value);
options.data_block_hash_table_util_ratio =
static_cast<double>(jdata_block_hash_table_util_ratio);
options.checksum =
rocksdb::ChecksumTypeJni::toCppChecksumType(jchecksum_type_value);
options.no_block_cache = static_cast<bool>(jno_block_cache);
if (options.no_block_cache) {
options.block_cache = nullptr;
} else {
if (jblock_cache_handle > 0) {
std::shared_ptr<rocksdb::Cache> *pCache =
reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jblock_cache);
reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jblock_cache_handle);
options.block_cache = *pCache;
} else if (block_cache_size > 0) {
if (block_cache_num_shardbits > 0) {
options.block_cache =
rocksdb::NewLRUCache(block_cache_size, block_cache_num_shardbits);
} else if (jblock_cache_size > 0) {
if (jblock_cache_num_shard_bits > 0) {
options.block_cache = rocksdb::NewLRUCache(
static_cast<size_t>(jblock_cache_size),
static_cast<int>(jblock_cache_num_shard_bits));
} else {
options.block_cache = rocksdb::NewLRUCache(block_cache_size);
options.block_cache = rocksdb::NewLRUCache(
static_cast<size_t>(jblock_cache_size));
}
}
}
options.block_size = block_size;
options.block_size_deviation = block_size_deviation;
options.block_restart_interval = block_restart_interval;
options.whole_key_filtering = whole_key_filtering;
if (jfilter_policy > 0) {
std::shared_ptr<rocksdb::FilterPolicy> *pFilterPolicy =
reinterpret_cast<std::shared_ptr<rocksdb::FilterPolicy> *>(
jfilter_policy);
options.filter_policy = *pFilterPolicy;
if (jpersistent_cache_handle > 0) {
std::shared_ptr<rocksdb::PersistentCache> *pCache =
reinterpret_cast<std::shared_ptr<rocksdb::PersistentCache> *>(jpersistent_cache_handle);
options.persistent_cache = *pCache;
}
options.cache_index_and_filter_blocks = cache_index_and_filter_blocks;
options.cache_index_and_filter_blocks_with_high_priority =
cache_index_and_filter_blocks_with_high_priority;
options.pin_l0_filter_and_index_blocks_in_cache =
pin_l0_filter_and_index_blocks_in_cache;
options.partition_filters = partition_filters;
options.metadata_block_size = metadata_block_size;
options.pin_top_level_index_and_filter = pin_top_level_index_and_filter;
options.hash_index_allow_collision = hash_index_allow_collision;
if (block_cache_compressed_size > 0) {
if (block_cache_compressd_num_shard_bits > 0) {
options.block_cache = rocksdb::NewLRUCache(
block_cache_compressed_size, block_cache_compressd_num_shard_bits);
if (jblock_cache_compressed_handle > 0) {
std::shared_ptr<rocksdb::Cache> *pCache =
reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jblock_cache_compressed_handle);
options.block_cache_compressed = *pCache;
} else if (jblock_cache_compressed_size > 0) {
if (jblock_cache_compressed_num_shard_bits > 0) {
options.block_cache_compressed = rocksdb::NewLRUCache(
static_cast<size_t>(jblock_cache_compressed_size),
static_cast<int>(jblock_cache_compressed_num_shard_bits));
} else {
options.block_cache = rocksdb::NewLRUCache(block_cache_compressed_size);
options.block_cache_compressed = rocksdb::NewLRUCache(
static_cast<size_t>(jblock_cache_compressed_size));
}
}
options.checksum = static_cast<rocksdb::ChecksumType>(jchecksum_type);
options.index_type =
static_cast<rocksdb::BlockBasedTableOptions::IndexType>(jindex_type);
options.format_version = jformat_version;
options.block_size = static_cast<size_t>(jblock_size);
options.block_size_deviation = static_cast<int>(jblock_size_deviation);
options.block_restart_interval = static_cast<int>(jblock_restart_interval);
options.index_block_restart_interval = static_cast<int>(jindex_block_restart_interval);
options.metadata_block_size = static_cast<uint64_t>(jmetadata_block_size);
options.partition_filters = static_cast<bool>(jpartition_filters);
options.use_delta_encoding = static_cast<bool>(juse_delta_encoding);
if (jfilter_policy_handle > 0) {
std::shared_ptr<rocksdb::FilterPolicy> *pFilterPolicy =
reinterpret_cast<std::shared_ptr<rocksdb::FilterPolicy> *>(
jfilter_policy_handle);
options.filter_policy = *pFilterPolicy;
}
options.whole_key_filtering = static_cast<bool>(jwhole_key_filtering);
options.verify_compression = static_cast<bool>(jverify_compression);
options.read_amp_bytes_per_bit = static_cast<uint32_t>(jread_amp_bytes_per_bit);
options.format_version = static_cast<uint32_t>(jformat_version);
options.enable_index_compression = static_cast<bool>(jenable_index_compression);
options.block_align = static_cast<bool>(jblock_align);
return reinterpret_cast<jlong>(rocksdb::NewBlockBasedTableFactory(options));
}

@ -0,0 +1,25 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// org.rocksdb.AbstractTableFilter.
#include <jni.h>
#include <memory>
#include "include/org_rocksdb_AbstractTableFilter.h"
#include "rocksjni/table_filter_jnicallback.h"
/*
* Class: org_rocksdb_AbstractTableFilter
* Method: createNewTableFilter
* Signature: ()J
*/
jlong Java_org_rocksdb_AbstractTableFilter_createNewTableFilter(
    JNIEnv* env, jobject jtable_filter) {
  // Wrap the Java filter object in a native callback and return its address
  // to Java as the native handle; Java owns the allocation from here on.
  rocksdb::TableFilterJniCallback* callback =
      new rocksdb::TableFilterJniCallback(env, jtable_filter);
  return reinterpret_cast<jlong>(callback);
}

@ -0,0 +1,62 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::TableFilter.
#include "rocksjni/table_filter_jnicallback.h"
#include "rocksjni/portal.h"
namespace rocksdb {
// Builds the native->Java bridge for a table filter: caches the jmethodID of
// the Java filter method, then captures a std::function that attaches to the
// JVM, converts the TableProperties, and invokes the Java callback.
TableFilterJniCallback::TableFilterJniCallback(
    JNIEnv* env, jobject jtable_filter)
    : JniCallback(env, jtable_filter) {
  m_jfilter_methodid =
      AbstractTableFilterJni::getFilterMethod(env);
  if(m_jfilter_methodid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    // Leave the callback half-constructed; the pending Java exception is
    // reported to the caller on return from the JNI entry point.
    return;
  }

  // create the function reference
  /*
      Note the JNI ENV must be obtained/release
      on each call to the function itself as
      it may be called from multiple threads
  */
  m_table_filter_function = [this](const rocksdb::TableProperties& table_properties) {
    jboolean attached_thread = JNI_FALSE;
    JNIEnv* thread_env = getJniEnv(&attached_thread);
    assert(thread_env != nullptr);

    // create a Java TableProperties object
    jobject jtable_properties = TablePropertiesJni::fromCppTableProperties(thread_env, table_properties);
    if (jtable_properties == nullptr) {
      // exception thrown from fromCppTableProperties
      thread_env->ExceptionDescribe(); // print out exception to stderr
      releaseJniEnv(attached_thread);
      // On any error the function returns false, i.e. the table is treated
      // as not filtered out by this filter.
      return false;
    }

    jboolean result = thread_env->CallBooleanMethod(m_jcallback_obj, m_jfilter_methodid, jtable_properties);
    if (thread_env->ExceptionCheck()) {
      // exception thrown from CallBooleanMethod
      thread_env->DeleteLocalRef(jtable_properties);
      thread_env->ExceptionDescribe(); // print out exception to stderr
      releaseJniEnv(attached_thread);
      return false;
    }

    // ok... cleanup and then return
    // NOTE(review): jtable_properties is not DeleteLocalRef'd on this success
    // path; if releaseJniEnv does not detach the thread, the local ref lives
    // until the callback's thread returns to Java — confirm this is intended.
    releaseJniEnv(attached_thread);
    return static_cast<bool>(result);
  };
}
// Returns a copy of the callable prepared in the constructor; the copy keeps
// `this` captured, so the callback object must outlive the returned function.
std::function<bool(const rocksdb::TableProperties&)>
TableFilterJniCallback::GetTableFilterFunction() {
  return m_table_filter_function;
}
} // namespace rocksdb

@ -0,0 +1,34 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::TableFilter.
#ifndef JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
#define JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
#include <jni.h>
#include <functional>
#include <memory>
#include "rocksdb/table_properties.h"
#include "rocksjni/jnicallback.h"
namespace rocksdb {
// Native half of org.rocksdb.AbstractTableFilter: holds a global ref to the
// Java filter (via JniCallback) and exposes it as a std::function usable by
// ReadOptions::table_filter.
class TableFilterJniCallback : public JniCallback {
 public:
    TableFilterJniCallback(
        JNIEnv* env, jobject jtable_filter);
    // The returned function captures `this`; it must not outlive the callback.
    std::function<bool(const rocksdb::TableProperties&)> GetTableFilterFunction();

 private:
    // Cached jmethodID of the Java filter method (resolved once in the ctor).
    jmethodID m_jfilter_methodid;
    // Callable built in the ctor; attaches/detaches the JVM on every call.
    std::function<bool(const rocksdb::TableProperties&)> m_table_filter_function;
};
} //namespace rocksdb
#endif // JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_

@ -0,0 +1,121 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ and enables
// calling c++ rocksdb::ThreadStatus methods from Java side.
#include <jni.h>
#include "portal.h"
#include "include/org_rocksdb_ThreadStatus.h"
#include "rocksdb/thread_status.h"
/*
* Class: org_rocksdb_ThreadStatus
* Method: getThreadTypeName
* Signature: (B)Ljava/lang/String;
*/
jstring Java_org_rocksdb_ThreadStatus_getThreadTypeName(
    JNIEnv* env, jclass, jbyte jthread_type_value) {
  // Map the Java byte onto the native enum, fetch its display name, and copy
  // that into a new java.lang.String.
  const auto thread_type =
      rocksdb::ThreadTypeJni::toCppThreadType(jthread_type_value);
  std::string name = rocksdb::ThreadStatus::GetThreadTypeName(thread_type);
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}
/*
* Class: org_rocksdb_ThreadStatus
* Method: getOperationName
* Signature: (B)Ljava/lang/String;
*/
jstring Java_org_rocksdb_ThreadStatus_getOperationName(
    JNIEnv* env, jclass, jbyte joperation_type_value) {
  // Resolve the operation-type enum first, then its human-readable name.
  const auto op_type =
      rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value);
  std::string name = rocksdb::ThreadStatus::GetOperationName(op_type);
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}
/*
* Class: org_rocksdb_ThreadStatus
* Method: microsToStringNative
* Signature: (J)Ljava/lang/String;
*/
jstring Java_org_rocksdb_ThreadStatus_microsToStringNative(
    JNIEnv* env, jclass, jlong jmicros) {
  // Format the microsecond timestamp with the native helper and return it to
  // Java as a String.
  std::string formatted =
      rocksdb::ThreadStatus::MicrosToString(static_cast<uint64_t>(jmicros));
  return rocksdb::JniUtil::toJavaString(env, &formatted, true);
}
/*
* Class: org_rocksdb_ThreadStatus
* Method: getOperationStageName
* Signature: (B)Ljava/lang/String;
*/
jstring Java_org_rocksdb_ThreadStatus_getOperationStageName(
    JNIEnv* env, jclass, jbyte joperation_stage_value) {
  // Convert the stage byte to the native enum, look up its name, and return
  // the name as a java.lang.String.
  const auto stage =
      rocksdb::OperationStageJni::toCppOperationStage(joperation_stage_value);
  std::string name = rocksdb::ThreadStatus::GetOperationStageName(stage);
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}
/*
* Class: org_rocksdb_ThreadStatus
* Method: getOperationPropertyName
* Signature: (BI)Ljava/lang/String;
*/
jstring Java_org_rocksdb_ThreadStatus_getOperationPropertyName(
    JNIEnv* env, jclass, jbyte joperation_type_value, jint jindex) {
  // Look up the name of property `jindex` for the given operation type.
  const auto op_type =
      rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value);
  std::string name = rocksdb::ThreadStatus::GetOperationPropertyName(
      op_type, static_cast<int>(jindex));
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}
/*
* Class: org_rocksdb_ThreadStatus
* Method: interpretOperationProperties
* Signature: (B[J)Ljava/util/Map;
*/
// Copies the jlong[] of raw operation properties into a uint64_t buffer,
// asks the native ThreadStatus to interpret them for the given operation
// type, and returns the result to Java as a Map (name -> value).
// Returns nullptr with a pending Java exception on any JNI failure.
jobject Java_org_rocksdb_ThreadStatus_interpretOperationProperties(
    JNIEnv* env, jclass, jbyte joperation_type_value,
    jlongArray joperation_properties) {

  //convert joperation_properties
  const jsize len = env->GetArrayLength(joperation_properties);
  const std::unique_ptr<uint64_t[]> op_properties(new uint64_t[len]);
  jlong* jop = env->GetLongArrayElements(joperation_properties, nullptr);
  if (jop == nullptr) {
    // exception thrown: OutOfMemoryError
    return nullptr;
  }
  for (jsize i = 0; i < len; i++) {
    op_properties[i] = static_cast<uint64_t>(jop[i]);
  }
  // JNI_ABORT: the array was only read, so no copy-back is needed.
  env->ReleaseLongArrayElements(joperation_properties, jop, JNI_ABORT);

  // call the function
  // NOTE(review): InterpretOperationProperties receives only the buffer
  // pointer, not `len`; presumably it reads a fixed per-type property count —
  // confirm callers always pass an array of at least that length.
  auto result = rocksdb::ThreadStatus::InterpretOperationProperties(
      rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value),
      op_properties.get());
  jobject jresult = rocksdb::HashMapJni::fromCppMap(env, &result);
  if (env->ExceptionCheck()) {
    // exception occurred
    return nullptr;
  }

  return jresult;
}
/*
* Class: org_rocksdb_ThreadStatus
* Method: getStateName
* Signature: (B)Ljava/lang/String;
*/
jstring Java_org_rocksdb_ThreadStatus_getStateName(
    JNIEnv* env, jclass, jbyte jstate_type_value) {
  // Translate the state byte to the native enum and return its name.
  const auto state = rocksdb::StateTypeJni::toCppStateType(jstate_type_value);
  std::string name = rocksdb::ThreadStatus::GetStateName(state);
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}

@ -0,0 +1,23 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::TraceWriter (org.rocksdb.AbstractTraceWriter).
#include <jni.h>
#include "include/org_rocksdb_AbstractTraceWriter.h"
#include "rocksjni/trace_writer_jnicallback.h"
/*
* Class: org_rocksdb_AbstractTraceWriter
* Method: createNewTraceWriter
* Signature: ()J
*/
jlong Java_org_rocksdb_AbstractTraceWriter_createNewTraceWriter(
    JNIEnv* env, jobject jobj) {
  // Allocate the native callback that forwards TraceWriter calls to the Java
  // object, and hand its address back as the native handle.
  return reinterpret_cast<jlong>(
      new rocksdb::TraceWriterJniCallback(env, jobj));
}

@ -0,0 +1,115 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::TraceWriter.
#include "rocksjni/trace_writer_jnicallback.h"
#include "rocksjni/portal.h"
namespace rocksdb {
// Caches the jmethodIDs of the three Java proxy methods (write, close,
// getFileSize). Each lookup may leave a pending Java exception; in that case
// construction stops early and the exception is reported when the JNI entry
// point returns.
TraceWriterJniCallback::TraceWriterJniCallback(
    JNIEnv* env, jobject jtrace_writer)
    : JniCallback(env, jtrace_writer) {
  m_jwrite_proxy_methodid =
      AbstractTraceWriterJni::getWriteProxyMethodId(env);
  if(m_jwrite_proxy_methodid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }

  m_jclose_writer_proxy_methodid =
      AbstractTraceWriterJni::getCloseWriterProxyMethodId(env);
  if(m_jclose_writer_proxy_methodid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }

  m_jget_file_size_methodid =
      AbstractTraceWriterJni::getGetFileSizeMethodId(env);
  if(m_jget_file_size_methodid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }
}
// Forwards one trace record to the Java writeProxy method and converts its
// packed status result back into a rocksdb::Status. Returns IOError when the
// JNI environment cannot be attached or the Java call throws.
Status TraceWriterJniCallback::Write(const Slice& data) {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return Status::IOError("Unable to attach JNI Environment");
  }

  // The Slice's address is passed to Java as a jlong handle; presumably the
  // Java side reads the bytes through this native pointer, so the Slice must
  // stay valid for the duration of the call — TODO confirm against the
  // AbstractTraceWriter#writeProxy(long) implementation.
  jshort jstatus = env->CallShortMethod(m_jcallback_obj,
      m_jwrite_proxy_methodid,
      &data);

  if(env->ExceptionCheck()) {
    // exception thrown from CallShortMethod
    env->ExceptionDescribe(); // print out exception to stderr
    releaseJniEnv(attached_thread);
    return Status::IOError("Unable to call AbstractTraceWriter#writeProxy(long)");
  }

  // unpack status code and status sub-code from jstatus
  // (code in the high byte, sub-code in the low byte)
  jbyte jcode_value = (jstatus >> 8) & 0xFF;
  jbyte jsub_code_value = jstatus & 0xFF;
  std::unique_ptr<Status> s = StatusJni::toCppStatus(jcode_value, jsub_code_value);

  releaseJniEnv(attached_thread);

  return Status(*s);
}
// Invokes the Java closeWriterProxy method and converts its packed status
// result (code in the high byte, sub-code in the low byte) into a
// rocksdb::Status. Returns IOError on attach failure or a thrown exception.
Status TraceWriterJniCallback::Close() {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return Status::IOError("Unable to attach JNI Environment");
  }

  jshort jstatus = env->CallShortMethod(m_jcallback_obj,
      m_jclose_writer_proxy_methodid);

  if(env->ExceptionCheck()) {
    // exception thrown from CallShortMethod
    env->ExceptionDescribe(); // print out exception to stderr
    releaseJniEnv(attached_thread);
    return Status::IOError("Unable to call AbstractTraceWriter#closeWriterProxy()");
  }

  // unpack status code and status sub-code from jstatus
  jbyte code_value = (jstatus >> 8) & 0xFF;
  jbyte sub_code_value = jstatus & 0xFF;
  std::unique_ptr<Status> s = StatusJni::toCppStatus(code_value, sub_code_value);

  releaseJniEnv(attached_thread);

  return Status(*s);
}
// Queries the Java getFileSize method. Returns 0 both when the JNI
// environment cannot be attached and when the Java call throws — callers
// cannot distinguish an error from a genuinely empty file.
uint64_t TraceWriterJniCallback::GetFileSize() {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return 0;
  }

  jlong jfile_size = env->CallLongMethod(m_jcallback_obj,
      m_jget_file_size_methodid);

  if(env->ExceptionCheck()) {
    // exception thrown from CallLongMethod
    env->ExceptionDescribe(); // print out exception to stderr
    releaseJniEnv(attached_thread);
    return 0;
  }

  releaseJniEnv(attached_thread);

  return static_cast<uint64_t>(jfile_size);
}
} // namespace rocksdb

@ -0,0 +1,36 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::TraceWriter.
#ifndef JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
#define JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
#include <jni.h>
#include <memory>
#include "rocksdb/trace_reader_writer.h"
#include "rocksjni/jnicallback.h"
namespace rocksdb {
// Native half of org.rocksdb.AbstractTraceWriter: implements the
// rocksdb::TraceWriter interface by forwarding each call to the Java object
// held (as a global ref) by JniCallback.
class TraceWriterJniCallback : public JniCallback, public TraceWriter {
 public:
    TraceWriterJniCallback(
        JNIEnv* env, jobject jtrace_writer);
    // Each method attaches to the JVM per call (may be invoked from any
    // thread) and converts the Java result back into native types.
    virtual Status Write(const Slice& data);
    virtual Status Close();
    virtual uint64_t GetFileSize();

 private:
    // jmethodIDs resolved once in the constructor; nullptr if lookup failed.
    jmethodID m_jwrite_proxy_methodid;
    jmethodID m_jclose_writer_proxy_methodid;
    jmethodID m_jget_file_size_methodid;
};
} //namespace rocksdb
#endif // JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_

@ -25,7 +25,7 @@
* Signature: (JJLjava/lang/String;)J
*/
jlong Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2(
JNIEnv* env, jclass /*jcls*/, jlong joptions_handle,
JNIEnv* env, jclass, jlong joptions_handle,
jlong jtxn_db_options_handle, jstring jdb_path) {
auto* options = reinterpret_cast<rocksdb::Options*>(joptions_handle);
auto* txn_db_options =
@ -54,7 +54,7 @@ jlong Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2(
* Signature: (JJLjava/lang/String;[[B[J)[J
*/
jlongArray Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2_3_3B_3J(
JNIEnv* env, jclass /*jcls*/, jlong jdb_options_handle,
JNIEnv* env, jclass, jlong jdb_options_handle,
jlong jtxn_db_options_handle, jstring jdb_path, jobjectArray jcolumn_names,
jlongArray jcolumn_options_handles) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
@ -151,14 +151,38 @@ jlongArray Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2_3_3B_3J(
}
}
/*
* Class: org_rocksdb_TransactionDB
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_TransactionDB_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Reclaim the native TransactionDB backing the Java object.
  rocksdb::TransactionDB* db =
      reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
  assert(db != nullptr);
  delete db;
}
/*
* Class: org_rocksdb_TransactionDB
* Method: closeDatabase
* Signature: (J)V
*/
void Java_org_rocksdb_TransactionDB_closeDatabase(
    JNIEnv* env, jclass, jlong jhandle) {
  // Close the underlying native database and surface any failure to Java as
  // a RocksDBException.
  rocksdb::TransactionDB* db =
      reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
  assert(db != nullptr);
  const rocksdb::Status close_status = db->Close();
  // NOTE(review): ThrowNew is invoked unconditionally; presumably it is a
  // no-op for an OK status — confirm against RocksDBExceptionJni in portal.h.
  rocksdb::RocksDBExceptionJni::ThrowNew(env, close_status);
}
/*
* Class: org_rocksdb_TransactionDB
* Method: beginTransaction
* Signature: (JJ)J
*/
jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJ(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jlong jwrite_options_handle) {
JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
@ -172,8 +196,8 @@ jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJ(
* Signature: (JJJ)J
*/
jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJJ(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jlong jwrite_options_handle, jlong jtxn_options_handle) {
JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
jlong jtxn_options_handle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
@ -190,8 +214,8 @@ jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJJ(
* Signature: (JJJ)J
*/
jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJ(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jlong jwrite_options_handle, jlong jold_txn_handle) {
JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
jlong jold_txn_handle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
@ -214,9 +238,8 @@ jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJ(
* Signature: (JJJJ)J
*/
jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJJ(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jlong jwrite_options_handle, jlong jtxn_options_handle,
jlong jold_txn_handle) {
JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
jlong jtxn_options_handle, jlong jold_txn_handle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
@ -239,10 +262,8 @@ jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJJ(
* Method: getTransactionByName
* Signature: (JLjava/lang/String;)J
*/
jlong Java_org_rocksdb_TransactionDB_getTransactionByName(JNIEnv* env,
jobject /*jobj*/,
jlong jhandle,
jstring jname) {
jlong Java_org_rocksdb_TransactionDB_getTransactionByName(
JNIEnv* env, jobject, jlong jhandle, jstring jname) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
const char* name = env->GetStringUTFChars(jname, nullptr);
if (name == nullptr) {
@ -260,7 +281,7 @@ jlong Java_org_rocksdb_TransactionDB_getTransactionByName(JNIEnv* env,
* Signature: (J)[J
*/
jlongArray Java_org_rocksdb_TransactionDB_getAllPreparedTransactions(
JNIEnv* env, jobject /*jobj*/, jlong jhandle) {
JNIEnv* env, jobject, jlong jhandle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
std::vector<rocksdb::Transaction*> txns;
txn_db->GetAllPreparedTransactions(&txns);
@ -294,9 +315,8 @@ jlongArray Java_org_rocksdb_TransactionDB_getAllPreparedTransactions(
* Method: getLockStatusData
* Signature: (J)Ljava/util/Map;
*/
jobject Java_org_rocksdb_TransactionDB_getLockStatusData(JNIEnv* env,
jobject /*jobj*/,
jlong jhandle) {
jobject Java_org_rocksdb_TransactionDB_getLockStatusData(
JNIEnv* env, jobject, jlong jhandle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
const std::unordered_multimap<uint32_t, rocksdb::KeyLockInfo>
lock_status_data = txn_db->GetLockStatusData();
@ -307,7 +327,7 @@ jobject Java_org_rocksdb_TransactionDB_getLockStatusData(JNIEnv* env,
return nullptr;
}
const rocksdb::HashMapJni::FnMapKV<const int32_t, const rocksdb::KeyLockInfo>
const rocksdb::HashMapJni::FnMapKV<const int32_t, const rocksdb::KeyLockInfo, jobject, jobject>
fn_map_kv =
[env](
const std::pair<const int32_t, const rocksdb::KeyLockInfo>&
@ -427,19 +447,7 @@ jobjectArray Java_org_rocksdb_TransactionDB_getDeadlockInfoBuffer(
* Signature: (JI)V
*/
void Java_org_rocksdb_TransactionDB_setDeadlockInfoBufferSize(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
jint jdeadlock_info_buffer_size) {
JNIEnv*, jobject, jlong jhandle, jint jdeadlock_info_buffer_size) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
txn_db->SetDeadlockInfoBufferSize(jdeadlock_info_buffer_size);
}
/*
* Class: org_rocksdb_TransactionDB
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_TransactionDB_disposeInternal(JNIEnv* /*env*/,
jobject /*jobj*/,
jlong jhandle) {
delete reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
}

@ -23,9 +23,9 @@
* Method: open
* Signature: (JLjava/lang/String;IZ)J
*/
jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, jclass /*jcls*/,
jlong joptions_handle, jstring jdb_path,
jint jttl, jboolean jread_only) {
jlong Java_org_rocksdb_TtlDB_open(
JNIEnv* env, jclass, jlong joptions_handle, jstring jdb_path, jint jttl,
jboolean jread_only) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
// exception thrown: OutOfMemoryError
@ -53,11 +53,10 @@ jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, jclass /*jcls*/,
* Method: openCF
* Signature: (JLjava/lang/String;[[B[J[IZ)[J
*/
jlongArray Java_org_rocksdb_TtlDB_openCF(JNIEnv* env, jclass /*jcls*/,
jlong jopt_handle, jstring jdb_path,
jobjectArray jcolumn_names,
jlongArray jcolumn_options,
jintArray jttls, jboolean jread_only) {
jlongArray Java_org_rocksdb_TtlDB_openCF(
JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options,
jintArray jttls, jboolean jread_only) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
// exception thrown: OutOfMemoryError
@ -147,13 +146,40 @@ jlongArray Java_org_rocksdb_TtlDB_openCF(JNIEnv* env, jclass /*jcls*/,
}
}
/*
* Class: org_rocksdb_TtlDB
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_TtlDB_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  // Frees the native DBWithTTL whose address was handed to Java as the
  // native handle at open/openCF time.
  auto* ttl_db = reinterpret_cast<rocksdb::DBWithTTL*>(jhandle);
  assert(ttl_db != nullptr);
  delete ttl_db;
}
/*
* Class: org_rocksdb_TtlDB
* Method: closeDatabase
* Signature: (J)V
*/
void Java_org_rocksdb_TtlDB_closeDatabase(
    JNIEnv* /* env */, jclass, jlong /* jhandle */) {
  // Intentionally a no-op for now.
  //TODO(AR) this is disabled until https://github.com/facebook/rocksdb/issues/4818 is resolved!
  // Intended implementation once that issue is fixed:
  //auto* ttl_db = reinterpret_cast<rocksdb::DBWithTTL*>(jhandle);
  //assert(ttl_db != nullptr);
  //rocksdb::Status s = ttl_db->Close();
  //rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
/*
* Class: org_rocksdb_TtlDB
* Method: createColumnFamilyWithTtl
* Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;[BJI)J;
*/
jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl(
JNIEnv* env, jobject /*jobj*/, jlong jdb_handle, jbyteArray jcolumn_name,
JNIEnv* env, jobject, jlong jdb_handle, jbyteArray jcolumn_name,
jlong jcolumn_options, jint jttl) {
jbyte* cfname = env->GetByteArrayElements(jcolumn_name, nullptr);
if (cfname == nullptr) {

@ -0,0 +1,23 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::WalFilter.
#include <jni.h>
#include "include/org_rocksdb_AbstractWalFilter.h"
#include "rocksjni/wal_filter_jnicallback.h"
/*
* Class: org_rocksdb_AbstractWalFilter
* Method: createNewWalFilter
* Signature: ()J
*/
jlong Java_org_rocksdb_AbstractWalFilter_createNewWalFilter(
    JNIEnv* env, jobject jobj) {
  // Construct the C++ bridge that forwards WalFilter callbacks to the Java
  // object, and hand its address back to Java as the native handle.
  return reinterpret_cast<jlong>(
      new rocksdb::WalFilterJniCallback(env, jobj));
}

@ -0,0 +1,144 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::WalFilter.
#include "rocksjni/wal_filter_jnicallback.h"
#include "rocksjni/portal.h"
namespace rocksdb {
WalFilterJniCallback::WalFilterJniCallback(
    JNIEnv* env, jobject jwal_filter)
    : JniCallback(env, jwal_filter) {
  // Note: The name of a WalFilter will not change during its lifetime,
  // so we cache it here; Name() can then be answered without a JNIEnv.
  jmethodID jname_mid = AbstractWalFilterJni::getNameMethodId(env);
  if(jname_mid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }

  jstring jname = (jstring)env->CallObjectMethod(m_jcallback_obj, jname_mid);
  if(env->ExceptionCheck()) {
    // exception thrown
    return;
  }
  jboolean has_exception = JNI_FALSE;
  m_name = JniUtil::copyString(env, jname,
      &has_exception);  // also releases jname
  if (has_exception == JNI_TRUE) {
    // exception thrown
    return;
  }

  // Cache the method ids of the two callback proxies. If a lookup fails,
  // the pending Java exception is left in place for the caller to observe.
  m_column_family_log_number_map_mid =
      AbstractWalFilterJni::getColumnFamilyLogNumberMapMethodId(env);
  if(m_column_family_log_number_map_mid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }

  m_log_record_found_proxy_mid =
      AbstractWalFilterJni::getLogRecordFoundProxyMethodId(env);
  if(m_log_record_found_proxy_mid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }
}
void WalFilterJniCallback::ColumnFamilyLogNumberMap(
    const std::map<uint32_t, uint64_t>& cf_lognumber_map,
    const std::map<std::string, uint32_t>& cf_name_id_map) {
  // Converts both C++ maps to java.util.Map objects and forwards them to
  // the Java filter. May be invoked from a RocksDB-internal thread, so the
  // thread is attached to the JVM for the duration of the call if needed.
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    // could not obtain/attach a JNIEnv; nothing more we can do
    return;
  }

  jobject jcf_lognumber_map =
      rocksdb::HashMapJni::fromCppMap(env, &cf_lognumber_map);
  if (jcf_lognumber_map == nullptr) {
    // exception occurred while building the Java map
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return;
  }

  jobject jcf_name_id_map =
      rocksdb::HashMapJni::fromCppMap(env, &cf_name_id_map);
  if (jcf_name_id_map == nullptr) {
    // exception occurred; release the first map's local ref before bailing
    env->ExceptionDescribe();  // print out exception to stderr
    env->DeleteLocalRef(jcf_lognumber_map);
    releaseJniEnv(attached_thread);
    return;
  }

  env->CallVoidMethod(m_jcallback_obj,
      m_column_family_log_number_map_mid,
      jcf_lognumber_map,
      jcf_name_id_map);

  // release local refs regardless of whether the Java call threw
  env->DeleteLocalRef(jcf_lognumber_map);
  env->DeleteLocalRef(jcf_name_id_map);

  if(env->ExceptionCheck()) {
    // exception thrown from CallVoidMethod
    env->ExceptionDescribe();  // print out exception to stderr
  }

  releaseJniEnv(attached_thread);
}
WalFilter::WalProcessingOption WalFilterJniCallback::LogRecordFound(
    unsigned long long log_number, const std::string& log_file_name,
    const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) {
  // Invokes the Java logRecordFoundProxy, which packs its two results into
  // one jshort: high byte = WalProcessingOption value, low byte = whether
  // new_batch was changed. On any JNI failure we conservatively report
  // kCorruptedRecord.
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return WalFilter::WalProcessingOption::kCorruptedRecord;
  }

  jstring jlog_file_name = JniUtil::toJavaString(env, &log_file_name);
  if (jlog_file_name == nullptr) {
    // exception occurred
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return WalFilter::WalProcessingOption::kCorruptedRecord;
  }

  // WriteBatch pointers are passed as raw addresses; the Java side wraps
  // them without taking ownership
  jshort jlog_record_found_result = env->CallShortMethod(m_jcallback_obj,
      m_log_record_found_proxy_mid,
      static_cast<jlong>(log_number),
      jlog_file_name,
      reinterpret_cast<jlong>(&batch),
      reinterpret_cast<jlong>(new_batch));

  env->DeleteLocalRef(jlog_file_name);

  if (env->ExceptionCheck()) {
    // exception thrown from CallShortMethod
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return WalFilter::WalProcessingOption::kCorruptedRecord;
  }

  // unpack WalProcessingOption and batch_changed from jlog_record_found_result
  jbyte jwal_processing_option_value = (jlog_record_found_result >> 8) & 0xFF;
  jbyte jbatch_changed_value = jlog_record_found_result & 0xFF;

  releaseJniEnv(attached_thread);

  *batch_changed = jbatch_changed_value == JNI_TRUE;

  return WalProcessingOptionJni::toCppWalProcessingOption(
      jwal_processing_option_value);
}
const char* WalFilterJniCallback::Name() const {
  // Return the name cached in the constructor; this avoids needing an
  // attached JNIEnv when RocksDB queries the filter name.
  return m_name.get();
}
} // namespace rocksdb

@ -0,0 +1,42 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::WalFilter.
#ifndef JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
#define JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
#include <jni.h>
#include <map>
#include <memory>
#include <string>
#include "rocksdb/wal_filter.h"
#include "rocksjni/jnicallback.h"
namespace rocksdb {
class WalFilterJniCallback : public JniCallback, public WalFilter {
 public:
    WalFilterJniCallback(
        JNIEnv* env, jobject jwal_filter);

    // Forwards the column-family id -> WAL number map (and the
    // name -> id map) to the Java filter.
    virtual void ColumnFamilyLogNumberMap(
        const std::map<uint32_t, uint64_t>& cf_lognumber_map,
        const std::map<std::string, uint32_t>& cf_name_id_map);

    // Forwards a WAL record to the Java filter and returns its processing
    // decision; *batch_changed reports whether the Java side changed
    // new_batch.
    virtual WalFilter::WalProcessingOption LogRecordFound(
        unsigned long long log_number, const std::string& log_file_name,
        const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed);

    // Returns the filter name cached from Java at construction time.
    virtual const char* Name() const;

 private:
    std::unique_ptr<const char[]> m_name;  // cached name of the Java filter
    jmethodID m_column_family_log_number_map_mid;
    jmethodID m_log_record_found_proxy_mid;
};
} //namespace rocksdb
#endif // JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_

@ -20,7 +20,7 @@ public abstract class AbstractImmutableNativeReference
* A flag indicating whether the current {@code AbstractNativeReference} is
* responsible to free the underlying C++ object
*/
private final AtomicBoolean owningHandle_;
protected final AtomicBoolean owningHandle_;
protected AbstractImmutableNativeReference(final boolean owningHandle) {
this.owningHandle_ = new AtomicBoolean(owningHandle);

@ -0,0 +1,254 @@
package org.rocksdb;
import java.util.*;
public abstract class AbstractMutableOptions {

  protected static final String KEY_VALUE_PAIR_SEPARATOR = ";";
  protected static final char KEY_VALUE_SEPARATOR = '=';
  static final String INT_ARRAY_INT_SEPARATOR = ",";

  protected final String[] keys;
  private final String[] values;

  /**
   * User must use builder pattern, or parser.
   *
   * @param keys the keys
   * @param values the values
   */
  protected AbstractMutableOptions(final String[] keys, final String[] values) {
    this.keys = keys;
    this.values = values;
  }

  /**
   * Get the option keys.
   *
   * @return the keys
   */
  String[] getKeys() {
    return keys;
  }

  /**
   * Get the option values (parallel to {@link #getKeys()}).
   *
   * @return the values
   */
  String[] getValues() {
    return values;
  }

  /**
   * Returns a string representation of MutableOptions which
   * is suitable for consumption by {@code #parse(String)}.
   *
   * @return String representation of MutableOptions
   */
  @Override
  public String toString() {
    final StringBuilder buffer = new StringBuilder();
    for (int i = 0; i < keys.length; i++) {
      buffer
          .append(keys[i])
          .append(KEY_VALUE_SEPARATOR)
          .append(values[i]);

      if (i + 1 < keys.length) {
        // separator goes between pairs only, never trailing
        buffer.append(KEY_VALUE_PAIR_SEPARATOR);
      }
    }
    return buffer.toString();
  }

  public static abstract class AbstractMutableOptionsBuilder<
      T extends AbstractMutableOptions,
      U extends AbstractMutableOptionsBuilder<T, U, K>,
      K extends MutableOptionKey> {

    // insertion-ordered, so build() emits options in the order they were set
    private final Map<K, MutableOptionValue<?>> options = new LinkedHashMap<>();

    /**
     * Return the concrete builder instance ({@code this}), enabling fluent
     * chaining in sub-classes.
     *
     * @return the concrete builder
     */
    protected abstract U self();

    /**
     * Get all of the possible keys
     *
     * @return A map of all keys, indexed by name.
     */
    protected abstract Map<String, K> allKeys();

    /**
     * Construct a sub-class instance of {@link AbstractMutableOptions}.
     *
     * @param keys the keys
     * @param values the values
     *
     * @return an instance of the options.
     */
    protected abstract T build(final String[] keys, final String[] values);

    /**
     * Build the options from everything set on this builder.
     *
     * @return the built options
     */
    public T build() {
      final String[] keys = new String[options.size()];
      final String[] values = new String[options.size()];

      int i = 0;
      for (final Map.Entry<K, MutableOptionValue<?>> option
          : options.entrySet()) {
        keys[i] = option.getKey().name();
        values[i] = option.getValue().asString();
        i++;
      }

      return build(keys, values);
    }

    protected U setDouble(
        final K key, final double value) {
      if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) {
        throw new IllegalArgumentException(
            key + " does not accept a double value");
      }
      options.put(key, MutableOptionValue.fromDouble(value));
      return self();
    }

    protected double getDouble(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asDouble();
    }

    protected U setLong(
        final K key, final long value) {
      if (key.getValueType() != MutableOptionKey.ValueType.LONG) {
        throw new IllegalArgumentException(
            key + " does not accept a long value");
      }
      options.put(key, MutableOptionValue.fromLong(value));
      return self();
    }

    protected long getLong(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asLong();
    }

    protected U setInt(
        final K key, final int value) {
      if (key.getValueType() != MutableOptionKey.ValueType.INT) {
        throw new IllegalArgumentException(
            key + " does not accept an integer value");
      }
      options.put(key, MutableOptionValue.fromInt(value));
      return self();
    }

    protected int getInt(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asInt();
    }

    protected U setBoolean(
        final K key, final boolean value) {
      if (key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) {
        throw new IllegalArgumentException(
            key + " does not accept a boolean value");
      }
      options.put(key, MutableOptionValue.fromBoolean(value));
      return self();
    }

    protected boolean getBoolean(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asBoolean();
    }

    protected U setIntArray(
        final K key, final int[] value) {
      if (key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) {
        throw new IllegalArgumentException(
            key + " does not accept an int array value");
      }
      options.put(key, MutableOptionValue.fromIntArray(value));
      return self();
    }

    protected int[] getIntArray(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asIntArray();
    }

    protected <N extends Enum<N>> U setEnum(
        final K key, final N value) {
      if (key.getValueType() != MutableOptionKey.ValueType.ENUM) {
        throw new IllegalArgumentException(
            key + " does not accept a Enum value");
      }
      options.put(key, MutableOptionValue.fromEnum(value));
      return self();
    }

    @SuppressWarnings("unchecked")
    protected <N extends Enum<N>> N getEnum(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      if (!(value instanceof MutableOptionValue.MutableOptionEnumValue)) {
        throw new NoSuchElementException(key.name() + " is not of Enum type");
      }
      // cast is guarded by the instanceof check above; setEnum is the only
      // writer of enum values
      return ((MutableOptionValue.MutableOptionEnumValue<N>) value).asObject();
    }

    /**
     * Set an option from its string representation, as produced by
     * {@link AbstractMutableOptions#toString()}.
     *
     * @param keyStr the name of the option
     * @param valueStr the string form of the option value
     *
     * @return the builder
     *
     * @throws IllegalArgumentException if the key is unknown, or the value
     *     cannot be parsed for the key's type
     */
    public U fromString(
        final String keyStr, final String valueStr)
        throws IllegalArgumentException {
      Objects.requireNonNull(keyStr);
      Objects.requireNonNull(valueStr);

      final K key = allKeys().get(keyStr);
      if (key == null) {
        // an unrecognised key previously caused a NullPointerException on
        // the getValueType() call below; report it explicitly instead
        throw new IllegalArgumentException("Unknown key: " + keyStr);
      }
      switch (key.getValueType()) {
        case DOUBLE:
          return setDouble(key, Double.parseDouble(valueStr));

        case LONG:
          return setLong(key, Long.parseLong(valueStr));

        case INT:
          return setInt(key, Integer.parseInt(valueStr));

        case BOOLEAN:
          return setBoolean(key, Boolean.parseBoolean(valueStr));

        case INT_ARRAY:
          // String.split never returns null, so only emptiness is checked
          final String[] strInts = valueStr
              .trim().split(INT_ARRAY_INT_SEPARATOR);
          if (strInts.length == 0) {
            throw new IllegalArgumentException(
                "int array value is not correctly formatted");
          }

          final int[] value = new int[strInts.length];
          int i = 0;
          for (final String strInt : strInts) {
            value[i++] = Integer.parseInt(strInt);
          }
          return setIntArray(key, value);
      }

      // e.g. ValueType.ENUM, which has no string parser here
      throw new IllegalStateException(
          key + " has unknown value type: " + key.getValueType());
    }
  }
}

@ -0,0 +1,19 @@
package org.rocksdb;
/**
* Base class for Table Filters.
*/
public abstract class AbstractTableFilter
    extends RocksCallbackObject implements TableFilter {

  protected AbstractTableFilter() {
    // table filters need no native parameter handles
    super();
  }

  /**
   * Creates the native table-filter callback object.
   *
   * @param nativeParameterHandles unused for table filters
   *
   * @return the handle of the new native table filter
   */
  @Override
  protected long initializeNative(final long... nativeParameterHandles) {
    return createNewTableFilter();
  }

  private native long createNewTableFilter();
}

@ -0,0 +1,70 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
* Base class for TraceWriters.
*/
public abstract class AbstractTraceWriter
    extends RocksCallbackObject implements TraceWriter {

  @Override
  protected long initializeNative(final long... nativeParameterHandles) {
    return createNewTraceWriter();
  }

  /**
   * Called from JNI, proxy for {@link TraceWriter#write(Slice)}.
   *
   * @param sliceHandle the native handle of the slice (which we do not own)
   *
   * @return short (2 bytes) where the first byte is the
   *     {@link Status.Code#getValue()} and the second byte is the
   *     {@link Status.SubCode#getValue()}.
   */
  private short writeProxy(final long sliceHandle) {
    try {
      write(new Slice(sliceHandle));
    } catch (final RocksDBException e) {
      return statusToShort(e.getStatus());
    }
    return statusToShort(Status.Code.Ok, Status.SubCode.None);
  }

  /**
   * Called from JNI, proxy for {@link TraceWriter#closeWriter()}.
   *
   * @return short (2 bytes) where the first byte is the
   *     {@link Status.Code#getValue()} and the second byte is the
   *     {@link Status.SubCode#getValue()}.
   */
  private short closeWriterProxy() {
    try {
      closeWriter();
    } catch (final RocksDBException e) {
      return statusToShort(e.getStatus());
    }
    return statusToShort(Status.Code.Ok, Status.SubCode.None);
  }

  // Encode a possibly-null Status: a missing code maps to IOError and a
  // missing sub-code to None.
  private static short statusToShort(/*@Nullable*/ final Status status) {
    Status.Code code = Status.Code.IOError;
    Status.SubCode subCode = Status.SubCode.None;
    if (status != null) {
      if (status.getCode() != null) {
        code = status.getCode();
      }
      if (status.getSubCode() != null) {
        subCode = status.getSubCode();
      }
    }
    return statusToShort(code, subCode);
  }

  // Pack the code into the high byte and the sub-code into the low byte.
  private static short statusToShort(final Status.Code code,
      final Status.SubCode subCode) {
    return (short) ((code.getValue() << 8) | subCode.getValue());
  }

  private native long createNewTraceWriter();
}

@ -0,0 +1,49 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
* Base class for WAL Filters.
*/
public abstract class AbstractWalFilter
    extends RocksCallbackObject implements WalFilter {

  @Override
  protected long initializeNative(final long... nativeParameterHandles) {
    return createNewWalFilter();
  }

  /**
   * Called from JNI, proxy for
   * {@link WalFilter#logRecordFound(long, String, WriteBatch, WriteBatch)}.
   *
   * @param logNumber the log handle.
   * @param logFileName the log file name
   * @param batchHandle the native handle of a WriteBatch (which we do not own)
   * @param newBatchHandle the native handle of a
   *     new WriteBatch (which we do not own)
   *
   * @return short (2 bytes) where the first byte is the
   *     {@link WalFilter.LogRecordFoundResult#walProcessingOption}
   *     and the second byte is the
   *     {@link WalFilter.LogRecordFoundResult#batchChanged} flag.
   */
  private short logRecordFoundProxy(final long logNumber,
      final String logFileName, final long batchHandle,
      final long newBatchHandle) {
    final WriteBatch batch = new WriteBatch(batchHandle);
    final WriteBatch newBatch = new WriteBatch(newBatchHandle);
    return logRecordFoundResultToShort(
        logRecordFound(logNumber, logFileName, batch, newBatch));
  }

  // Pack the WalProcessingOption into the high byte and the batch-changed
  // flag (0 or 1) into the low byte.
  private static short logRecordFoundResultToShort(
      final LogRecordFoundResult logRecordFoundResult) {
    final int changed = logRecordFoundResult.batchChanged ? 1 : 0;
    return (short) (
        (logRecordFoundResult.walProcessingOption.getValue() << 8) | changed);
  }

  private native long createNewWalFilter();
}

@ -434,4 +434,32 @@ public interface AdvancedMutableColumnFamilyOptionsInterface
* @return true if reporting is enabled
*/
boolean reportBgIoStats();
/**
* Non-bottom-level files older than TTL will go through the compaction
* process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be
* set to -1.
*
* Enabled only for level compaction for now.
*
* Default: 0 (disabled)
*
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* @param ttl the time-to-live.
*
* @return the reference to the current options.
*/
T setTtl(final long ttl);
/**
* Get the TTL for Non-bottom-level files that will go through the compaction
* process.
*
* See {@link #setTtl(long)}.
*
* @return the time-to-live.
*/
long ttl();
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,70 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
import java.util.Arrays;
import java.util.List;
/**
* The metadata that describes a column family.
*/
public class ColumnFamilyMetaData {
  private final long size;
  private final long fileCount;
  private final byte[] name;
  private final LevelMetaData[] levels;

  /**
   * Called from JNI C++
   *
   * @param size the size of the column family in bytes
   * @param fileCount the number of files in the column family
   * @param name the name of the column family
   * @param levels the metadata of the levels
   */
  private ColumnFamilyMetaData(
      final long size,
      final long fileCount,
      final byte[] name,
      final LevelMetaData[] levels) {
    this.size = size;
    this.fileCount = fileCount;
    this.name = name;
    this.levels = levels;
  }

  /**
   * The size of this column family in bytes, which is equal to the sum of
   * the file size of its {@link #levels()}.
   *
   * @return the size of this column family
   */
  public long size() {
    return size;
  }

  /**
   * The number of files in this column family.
   *
   * @return the number of files
   */
  public long fileCount() {
    return fileCount;
  }

  /**
   * The name of the column family.
   *
   * @return a copy of the name; mutating the returned array does not
   *     affect this metadata
   */
  public byte[] name() {
    // defensive copy: never expose the internal mutable array
    return name.clone();
  }

  /**
   * The metadata of all levels in this column family.
   *
   * @return the levels metadata
   */
  public List<LevelMetaData> levels() {
    // wrap a copy of the array, so writes through the returned fixed-size
    // list cannot alter the internal state
    return Arrays.asList(levels.clone());
  }
}

@ -50,9 +50,19 @@ public class ColumnFamilyOptions extends RocksObject
this.compactionFilterFactory_ = other.compactionFilterFactory_;
this.compactionOptionsUniversal_ = other.compactionOptionsUniversal_;
this.compactionOptionsFIFO_ = other.compactionOptionsFIFO_;
this.bottommostCompressionOptions_ = other.bottommostCompressionOptions_;
this.compressionOptions_ = other.compressionOptions_;
}
/**
* Constructor from Options
*
* @param options The options.
*/
public ColumnFamilyOptions(final Options options) {
super(newColumnFamilyOptionsFromOptions(options.nativeHandle_));
}
/**
* <p>Constructor to be used by
* {@link #getColumnFamilyOptionsFromProps(java.util.Properties)},
@ -318,6 +328,20 @@ public class ColumnFamilyOptions extends RocksObject
bottommostCompressionType(nativeHandle_));
}
@Override
public ColumnFamilyOptions setBottommostCompressionOptions(
final CompressionOptions bottommostCompressionOptions) {
setBottommostCompressionOptions(nativeHandle_,
bottommostCompressionOptions.nativeHandle_);
this.bottommostCompressionOptions_ = bottommostCompressionOptions;
return this;
}
@Override
public CompressionOptions bottommostCompressionOptions() {
return this.bottommostCompressionOptions_;
}
@Override
public ColumnFamilyOptions setCompressionOptions(
final CompressionOptions compressionOptions) {
@ -482,7 +506,7 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public CompactionStyle compactionStyle() {
return CompactionStyle.values()[compactionStyle(nativeHandle_)];
return CompactionStyle.fromValue(compactionStyle(nativeHandle_));
}
@Override
@ -751,6 +775,17 @@ public class ColumnFamilyOptions extends RocksObject
return reportBgIoStats(nativeHandle_);
}
@Override
public ColumnFamilyOptions setTtl(final long ttl) {
setTtl(nativeHandle_, ttl);
return this;
}
@Override
public long ttl() {
return ttl(nativeHandle_);
}
@Override
public ColumnFamilyOptions setCompactionOptionsUniversal(
final CompactionOptionsUniversal compactionOptionsUniversal) {
@ -793,7 +828,9 @@ public class ColumnFamilyOptions extends RocksObject
String optString);
private static native long newColumnFamilyOptions();
private static native long copyColumnFamilyOptions(long handle);
private static native long copyColumnFamilyOptions(final long handle);
private static native long newColumnFamilyOptionsFromOptions(
final long optionsHandle);
@Override protected final native void disposeInternal(final long handle);
private native void optimizeForSmallDb(final long handle);
@ -829,6 +866,8 @@ public class ColumnFamilyOptions extends RocksObject
private native void setBottommostCompressionType(long handle,
byte bottommostCompressionType);
private native byte bottommostCompressionType(long handle);
private native void setBottommostCompressionOptions(final long handle,
final long bottommostCompressionOptionsHandle);
private native void setCompressionOptions(long handle,
long compressionOptionsHandle);
private native void useFixedLengthPrefixExtractor(
@ -936,6 +975,8 @@ public class ColumnFamilyOptions extends RocksObject
private native void setReportBgIoStats(final long handle,
final boolean reportBgIoStats);
private native boolean reportBgIoStats(final long handle);
private native void setTtl(final long handle, final long ttl);
private native long ttl(final long handle);
private native void setCompactionOptionsUniversal(final long handle,
final long compactionOptionsUniversalHandle);
private native void setCompactionOptionsFIFO(final long handle,
@ -954,6 +995,7 @@ public class ColumnFamilyOptions extends RocksObject
compactionFilterFactory_;
private CompactionOptionsUniversal compactionOptionsUniversal_;
private CompactionOptionsFIFO compactionOptionsFIFO_;
private CompressionOptions bottommostCompressionOptions_;
private CompressionOptions compressionOptions_;
}

@ -399,6 +399,28 @@ public interface ColumnFamilyOptionsInterface
*/
CompressionType bottommostCompressionType();
/**
* Set the options for compression algorithms used by
* {@link #bottommostCompressionType()} if it is enabled.
*
* To enable it, please see the definition of
* {@link CompressionOptions}.
*
* @param compressionOptions the bottom most compression options.
*
* @return the reference of the current options.
*/
T setBottommostCompressionOptions(
final CompressionOptions compressionOptions);
/**
* Get the bottom most compression options.
*
* See {@link #setBottommostCompressionOptions(CompressionOptions)}.
*
* @return the bottom most compression options.
*/
CompressionOptions bottommostCompressionOptions();
/**
* Set the different options for compression algorithms

@ -88,26 +88,6 @@ public class CompactRangeOptions extends RocksObject {
return this;
}
/**
* Returns the policy for compacting the bottommost level
* @return The BottommostLevelCompaction policy
*/
public BottommostLevelCompaction bottommostLevelCompaction() {
return BottommostLevelCompaction.fromRocksId(bottommostLevelCompaction(nativeHandle_));
}
/**
* Sets the policy for compacting the bottommost level
*
* @param bottommostLevelCompaction The policy for compacting the bottommost level
* @return This CompactRangeOptions
*/
public CompactRangeOptions setBottommostLevelCompaction(final BottommostLevelCompaction bottommostLevelCompaction) {
setBottommostLevelCompaction(nativeHandle_, bottommostLevelCompaction.getValue());
return this;
}
/**
* Returns whether compacted files will be moved to the minimum level capable of holding the data or given level
* (specified non-negative target_level).
@ -170,6 +150,25 @@ public class CompactRangeOptions extends RocksObject {
return this;
}
/**
* Returns the policy for compacting the bottommost level
* @return The BottommostLevelCompaction policy
*/
public BottommostLevelCompaction bottommostLevelCompaction() {
return BottommostLevelCompaction.fromRocksId(bottommostLevelCompaction(nativeHandle_));
}
/**
* Sets the policy for compacting the bottommost level
*
* @param bottommostLevelCompaction The policy for compacting the bottommost level
* @return This CompactRangeOptions
*/
public CompactRangeOptions setBottommostLevelCompaction(final BottommostLevelCompaction bottommostLevelCompaction) {
setBottommostLevelCompaction(nativeHandle_, bottommostLevelCompaction.getValue());
return this;
}
/**
* If true, compaction will execute immediately even if doing so would cause the DB to
* enter write stall mode. Otherwise, it'll sleep until load is low enough.
@ -212,22 +211,27 @@ public class CompactRangeOptions extends RocksObject {
}
private native static long newCompactRangeOptions();
@Override protected final native void disposeInternal(final long handle);
private native boolean exclusiveManualCompaction(final long handle);
private native void setExclusiveManualCompaction(final long handle, final boolean exclusive_manual_compaction);
private native int bottommostLevelCompaction(final long handle);
private native void setBottommostLevelCompaction(final long handle, final int bottommostLevelCompaction);
private native void setExclusiveManualCompaction(final long handle,
final boolean exclusive_manual_compaction);
private native boolean changeLevel(final long handle);
private native void setChangeLevel(final long handle, final boolean changeLevel);
private native void setChangeLevel(final long handle,
final boolean changeLevel);
private native int targetLevel(final long handle);
private native void setTargetLevel(final long handle, final int targetLevel);
private native void setTargetLevel(final long handle,
final int targetLevel);
private native int targetPathId(final long handle);
private native void setTargetPathId(final long handle, final int /* uint32_t */ targetPathId);
private native void setTargetPathId(final long handle,
final int targetPathId);
private native int bottommostLevelCompaction(final long handle);
private native void setBottommostLevelCompaction(final long handle,
final int bottommostLevelCompaction);
private native boolean allowWriteStall(final long handle);
private native void setAllowWriteStall(final long handle, final boolean allowWriteStall);
private native void setMaxSubcompactions(final long handle, final int /* uint32_t */ maxSubcompactions);
private native void setAllowWriteStall(final long handle,
final boolean allowWriteStall);
private native void setMaxSubcompactions(final long handle,
final int maxSubcompactions);
private native int maxSubcompactions(final long handle);
@Override
protected final native void disposeInternal(final long handle);
}

@ -0,0 +1,159 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
 * Information about a compaction job, as reported by the native library.
 * All getters delegate to a native {@code rocksdb::CompactionJobInfo}.
 */
public class CompactionJobInfo extends RocksObject {

  /**
   * Constructs a new CompactionJobInfo backed by a newly created
   * native object.
   */
  public CompactionJobInfo() {
    super(newCompactionJobInfo());
  }

  /**
   * Private as called from JNI C++
   * (wraps an already existing native handle).
   */
  private CompactionJobInfo(final long nativeHandle) {
    super(nativeHandle);
  }

  /**
   * Get the name of the column family where the compaction happened.
   *
   * @return the raw bytes of the name of the column family
   */
  public byte[] columnFamilyName() {
    return columnFamilyName(nativeHandle_);
  }

  /**
   * Get the status indicating whether the compaction was successful or not.
   *
   * @return the status
   */
  public Status status() {
    return status(nativeHandle_);
  }

  /**
   * Get the id of the thread that completed this compaction job.
   *
   * @return the id of the thread
   */
  public long threadId() {
    return threadId(nativeHandle_);
  }

  /**
   * Get the job id, which is unique in the same thread.
   *
   * @return the job id
   */
  public int jobId() {
    return jobId(nativeHandle_);
  }

  /**
   * Get the smallest input level of the compaction.
   *
   * @return the input level
   */
  public int baseInputLevel() {
    return baseInputLevel(nativeHandle_);
  }

  /**
   * Get the output level of the compaction.
   *
   * @return the output level
   */
  public int outputLevel() {
    return outputLevel(nativeHandle_);
  }

  /**
   * Get the names of the compaction input files.
   *
   * @return the names of the input files.
   */
  public List<String> inputFiles() {
    return Arrays.asList(inputFiles(nativeHandle_));
  }

  /**
   * Get the names of the compaction output files.
   *
   * @return the names of the output files.
   */
  public List<String> outputFiles() {
    return Arrays.asList(outputFiles(nativeHandle_));
  }

  /**
   * Get the table properties for the input and output tables.
   *
   * The map is keyed by values from {@link #inputFiles()} and
   * {@link #outputFiles()}.
   *
   * @return the table properties
   */
  public Map<String, TableProperties> tableProperties() {
    return tableProperties(nativeHandle_);
  }

  /**
   * Get the Reason for running the compaction.
   *
   * @return the reason.
   */
  public CompactionReason compactionReason() {
    return CompactionReason.fromValue(compactionReason(nativeHandle_));
  }

  /**
   * Get the compression algorithm used for output files.
   *
   * @return the compression algorithm
   */
  public CompressionType compression() {
    return CompressionType.getCompressionType(compression(nativeHandle_));
  }

  /**
   * Get detailed information about this compaction.
   *
   * @return the detailed information, or null if not available.
   */
  public /* @Nullable */ CompactionJobStats stats() {
    // a zero handle from the native side means no stats were collected
    final long statsHandle = stats(nativeHandle_);
    if (statsHandle == 0) {
      return null;
    }
    return new CompactionJobStats(statsHandle);
  }

  // Native methods
  private static native long newCompactionJobInfo();
  @Override protected native void disposeInternal(final long handle);
  private static native byte[] columnFamilyName(final long handle);
  private static native Status status(final long handle);
  private static native long threadId(final long handle);
  private static native int jobId(final long handle);
  private static native int baseInputLevel(final long handle);
  private static native int outputLevel(final long handle);
  private static native String[] inputFiles(final long handle);
  private static native String[] outputFiles(final long handle);
  private static native Map<String, TableProperties> tableProperties(
      final long handle);
  private static native byte compactionReason(final long handle);
  private static native byte compression(final long handle);
  private static native long stats(final long handle);
}

@ -0,0 +1,295 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * Statistics about a compaction job.
 * All getters delegate to a native {@code rocksdb::CompactionJobStats}.
 */
public class CompactionJobStats extends RocksObject {

  /**
   * Constructs a new CompactionJobStats backed by a newly created
   * native object.
   */
  public CompactionJobStats() {
    super(newCompactionJobStats());
  }

  /**
   * Private as called from JNI C++
   * (wraps an already existing native handle).
   */
  CompactionJobStats(final long nativeHandle) {
    super(nativeHandle);
  }

  /**
   * Reset the stats.
   */
  public void reset() {
    reset(nativeHandle_);
  }

  /**
   * Aggregate the CompactionJobStats from another instance with this one.
   *
   * @param compactionJobStats another instance of stats.
   */
  public void add(final CompactionJobStats compactionJobStats) {
    add(nativeHandle_, compactionJobStats.nativeHandle_);
  }

  /**
   * Get the elapsed time in micro of this compaction.
   *
   * @return the elapsed time in micro of this compaction.
   */
  public long elapsedMicros() {
    return elapsedMicros(nativeHandle_);
  }

  /**
   * Get the number of compaction input records.
   *
   * @return the number of compaction input records.
   */
  public long numInputRecords() {
    return numInputRecords(nativeHandle_);
  }

  /**
   * Get the number of compaction input files.
   *
   * @return the number of compaction input files.
   */
  public long numInputFiles() {
    return numInputFiles(nativeHandle_);
  }

  /**
   * Get the number of compaction input files at the output level.
   *
   * @return the number of compaction input files at the output level.
   */
  public long numInputFilesAtOutputLevel() {
    return numInputFilesAtOutputLevel(nativeHandle_);
  }

  /**
   * Get the number of compaction output records.
   *
   * @return the number of compaction output records.
   */
  public long numOutputRecords() {
    return numOutputRecords(nativeHandle_);
  }

  /**
   * Get the number of compaction output files.
   *
   * @return the number of compaction output files.
   */
  public long numOutputFiles() {
    return numOutputFiles(nativeHandle_);
  }

  /**
   * Determine if the compaction is a manual compaction.
   *
   * @return true if the compaction is a manual compaction, false otherwise.
   */
  public boolean isManualCompaction() {
    return isManualCompaction(nativeHandle_);
  }

  /**
   * Get the size of the compaction input in bytes.
   *
   * @return the size of the compaction input in bytes.
   */
  public long totalInputBytes() {
    return totalInputBytes(nativeHandle_);
  }

  /**
   * Get the size of the compaction output in bytes.
   *
   * @return the size of the compaction output in bytes.
   */
  public long totalOutputBytes() {
    return totalOutputBytes(nativeHandle_);
  }

  /**
   * Get the number of records being replaced by newer record associated
   * with same key.
   *
   * This could be a new value or a deletion entry for that key so this field
   * sums up all updated and deleted keys.
   *
   * @return the number of records being replaced by newer record associated
   *     with same key.
   */
  public long numRecordsReplaced() {
    return numRecordsReplaced(nativeHandle_);
  }

  /**
   * Get the sum of the uncompressed input keys in bytes.
   *
   * @return the sum of the uncompressed input keys in bytes.
   */
  public long totalInputRawKeyBytes() {
    return totalInputRawKeyBytes(nativeHandle_);
  }

  /**
   * Get the sum of the uncompressed input values in bytes.
   *
   * @return the sum of the uncompressed input values in bytes.
   */
  public long totalInputRawValueBytes() {
    return totalInputRawValueBytes(nativeHandle_);
  }

  /**
   * Get the number of deletion entries before compaction.
   *
   * Deletion entries can disappear after compaction because they expired.
   *
   * @return the number of deletion entries before compaction.
   */
  public long numInputDeletionRecords() {
    return numInputDeletionRecords(nativeHandle_);
  }

  /**
   * Get the number of deletion records that were found obsolete and discarded
   * because it is not possible to delete any more keys with this entry.
   * (i.e. all possible deletions resulting from it have been completed)
   *
   * @return the number of deletion records that were found obsolete and
   *     discarded.
   */
  public long numExpiredDeletionRecords() {
    return numExpiredDeletionRecords(nativeHandle_);
  }

  /**
   * Get the number of corrupt keys (ParseInternalKey returned false when
   * applied to the key) encountered and written out.
   *
   * @return the number of corrupt keys.
   */
  public long numCorruptKeys() {
    return numCorruptKeys(nativeHandle_);
  }

  /**
   * Get the Time spent on file's Append() call.
   *
   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
   *
   * @return the Time spent on file's Append() call.
   */
  public long fileWriteNanos() {
    return fileWriteNanos(nativeHandle_);
  }

  /**
   * Get the Time spent on sync file range.
   *
   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
   *
   * @return the Time spent on sync file range.
   */
  public long fileRangeSyncNanos() {
    return fileRangeSyncNanos(nativeHandle_);
  }

  /**
   * Get the Time spent on file fsync.
   *
   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
   *
   * @return the Time spent on file fsync.
   */
  public long fileFsyncNanos() {
    return fileFsyncNanos(nativeHandle_);
  }

  /**
   * Get the Time spent on preparing file write (falocate, etc)
   *
   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
   *
   * @return the Time spent on preparing file write (falocate, etc).
   */
  public long filePrepareWriteNanos() {
    return filePrepareWriteNanos(nativeHandle_);
  }

  /**
   * Get the smallest output key prefix.
   *
   * @return the smallest output key prefix.
   */
  public byte[] smallestOutputKeyPrefix() {
    return smallestOutputKeyPrefix(nativeHandle_);
  }

  /**
   * Get the largest output key prefix.
   *
   * @return the largest output key prefix.
   */
  public byte[] largestOutputKeyPrefix() {
    return largestOutputKeyPrefix(nativeHandle_);
  }

  /**
   * Get the number of single-deletes which do not meet a put.
   *
   * @return number of single-deletes which do not meet a put.
   */
  @Experimental("Performance optimization for a very specific workload")
  public long numSingleDelFallthru() {
    return numSingleDelFallthru(nativeHandle_);
  }

  /**
   * Get the number of single-deletes which meet something other than a put.
   *
   * @return the number of single-deletes which meet something other than a put.
   */
  @Experimental("Performance optimization for a very specific workload")
  public long numSingleDelMismatch() {
    return numSingleDelMismatch(nativeHandle_);
  }

  // Native methods
  private static native long newCompactionJobStats();
  @Override protected native void disposeInternal(final long handle);
  private static native void reset(final long handle);
  private static native void add(final long handle,
      final long compactionJobStatsHandle);
  private static native long elapsedMicros(final long handle);
  private static native long numInputRecords(final long handle);
  private static native long numInputFiles(final long handle);
  private static native long numInputFilesAtOutputLevel(final long handle);
  private static native long numOutputRecords(final long handle);
  private static native long numOutputFiles(final long handle);
  private static native boolean isManualCompaction(final long handle);
  private static native long totalInputBytes(final long handle);
  private static native long totalOutputBytes(final long handle);
  private static native long numRecordsReplaced(final long handle);
  private static native long totalInputRawKeyBytes(final long handle);
  private static native long totalInputRawValueBytes(final long handle);
  private static native long numInputDeletionRecords(final long handle);
  private static native long numExpiredDeletionRecords(final long handle);
  private static native long numCorruptKeys(final long handle);
  private static native long fileWriteNanos(final long handle);
  private static native long fileRangeSyncNanos(final long handle);
  private static native long fileFsyncNanos(final long handle);
  private static native long filePrepareWriteNanos(final long handle);
  private static native byte[] smallestOutputKeyPrefix(final long handle);
  private static native byte[] largestOutputKeyPrefix(final long handle);
  private static native long numSingleDelFallthru(final long handle);
  private static native long numSingleDelMismatch(final long handle);
}

@ -0,0 +1,121 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
import java.util.List;
/**
 * CompactionOptions are used in
 * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}
 * calls.
 */
public class CompactionOptions extends RocksObject {

  /**
   * Creates a new set of compaction options, backed by a newly
   * created native object.
   */
  public CompactionOptions() {
    super(newCompactionOptions());
  }

  /**
   * Get the compaction output compression type.
   *
   * See {@link #setCompression(CompressionType)}.
   *
   * @return the compression type.
   */
  public CompressionType compression() {
    final byte compressionTypeValue = compression(nativeHandle_);
    return CompressionType.getCompressionType(compressionTypeValue);
  }

  /**
   * Set the compaction output compression type.
   *
   * Default: snappy
   *
   * If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION},
   * RocksDB chooses the compression type according to
   * {@link ColumnFamilyOptions#compressionType()}, taking the output
   * level into account when
   * {@link ColumnFamilyOptions#compressionPerLevel()} is specified.
   *
   * @param compression the compression type to use for compaction output.
   *
   * @return the instance of the current Options.
   */
  public CompactionOptions setCompression(final CompressionType compression) {
    final byte compressionTypeValue = compression.getValue();
    setCompression(nativeHandle_, compressionTypeValue);
    return this;
  }

  /**
   * Get the compaction output file size limit.
   *
   * See {@link #setOutputFileSizeLimit(long)}.
   *
   * @return the file size limit.
   */
  public long outputFileSizeLimit() {
    return outputFileSizeLimit(nativeHandle_);
  }

  /**
   * Compaction will create files of size {@link #outputFileSizeLimit()}.
   *
   * Default: 2^64-1, which means that compaction will create a single file
   *
   * @param outputFileSizeLimit the size limit
   *
   * @return the instance of the current Options.
   */
  public CompactionOptions setOutputFileSizeLimit(
      final long outputFileSizeLimit) {
    setOutputFileSizeLimit(nativeHandle_, outputFileSizeLimit);
    return this;
  }

  /**
   * Get the maximum number of threads that will concurrently perform a
   * compaction job.
   *
   * @return the maximum number of threads.
   */
  public int maxSubcompactions() {
    return maxSubcompactions(nativeHandle_);
  }

  /**
   * This value represents the maximum number of threads that will
   * concurrently perform a compaction job by breaking it into multiple,
   * smaller ones that are run simultaneously.
   *
   * Default: 0 (i.e. no subcompactions)
   *
   * If &gt; 0, it will replace the option in
   * {@link DBOptions#maxSubcompactions()} for this compaction.
   *
   * @param maxSubcompactions The maximum number of threads that will
   *     concurrently perform a compaction job
   *
   * @return the instance of the current Options.
   */
  public CompactionOptions setMaxSubcompactions(final int maxSubcompactions) {
    setMaxSubcompactions(nativeHandle_, maxSubcompactions);
    return this;
  }

  // Native methods
  private static native long newCompactionOptions();
  @Override protected final native void disposeInternal(final long handle);
  private static native byte compression(final long handle);
  private static native void setCompression(final long handle,
      final byte compressionTypeValue);
  private static native long outputFileSizeLimit(final long handle);
  private static native void setOutputFileSizeLimit(final long handle,
      final long outputFileSizeLimit);
  private static native int maxSubcompactions(final long handle);
  private static native void setMaxSubcompactions(final long handle,
      final int maxSubcompactions);
}

@ -51,36 +51,39 @@ public class CompactionOptionsFIFO extends RocksObject {
*
* Default: false
*
* @param allowCompaction should allow intra-L0 compaction?
* @param allowCompaction true to allow intra-L0 compaction
*
* @return the reference to the current options.
*/
public CompactionOptionsFIFO setAllowCompaction(final boolean allowCompaction) {
public CompactionOptionsFIFO setAllowCompaction(
final boolean allowCompaction) {
setAllowCompaction(nativeHandle_, allowCompaction);
return this;
}
/**
* Check if intra-L0 compaction is enabled.
* If true, try to do compaction to compact smaller files into larger ones.
* Minimum files to compact follows options.level0_file_num_compaction_trigger
* and compaction won't trigger if average compact bytes per del file is
* larger than options.write_buffer_size. This is to protect large files
* from being compacted again.
* When enabled, we try to compact smaller files into larger ones.
*
* See {@link #setAllowCompaction(boolean)}.
*
* Default: false
*
* @return a boolean value indicating whether intra-L0 compaction is enabled
* @return true if intra-L0 compaction is enabled, false otherwise.
*/
public boolean allowCompaction() {
return allowCompaction(nativeHandle_);
}
private native void setMaxTableFilesSize(long handle, long maxTableFilesSize);
private native long maxTableFilesSize(long handle);
private native void setAllowCompaction(long handle, boolean allowCompaction);
private native boolean allowCompaction(long handle);
private native static long newCompactionOptionsFIFO();
@Override protected final native void disposeInternal(final long handle);
private native void setMaxTableFilesSize(final long handle,
final long maxTableFilesSize);
private native long maxTableFilesSize(final long handle);
private native void setAllowCompaction(final long handle,
final boolean allowCompaction);
private native boolean allowCompaction(final long handle);
}

@ -0,0 +1,115 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
public enum CompactionReason {
  kUnknown((byte) 0x0),

  /**
   * [Level] number of L0 files &gt; level0_file_num_compaction_trigger
   */
  kLevelL0FilesNum((byte) 0x1),

  /**
   * [Level] total size of level &gt; MaxBytesForLevel()
   */
  kLevelMaxLevelSize((byte) 0x2),

  /**
   * [Universal] Compacting for size amplification
   */
  kUniversalSizeAmplification((byte) 0x3),

  /**
   * [Universal] Compacting for size ratio
   */
  kUniversalSizeRatio((byte) 0x4),

  /**
   * [Universal] number of sorted runs &gt; level0_file_num_compaction_trigger
   */
  kUniversalSortedRunNum((byte) 0x5),

  /**
   * [FIFO] total size &gt; max_table_files_size
   */
  kFIFOMaxSize((byte) 0x6),

  /**
   * [FIFO] reduce number of files.
   */
  kFIFOReduceNumFiles((byte) 0x7),

  /**
   * [FIFO] files with creation time &lt; (current_time - interval)
   */
  kFIFOTtl((byte) 0x8),

  /**
   * Manual compaction
   */
  kManualCompaction((byte) 0x9),

  /**
   * DB::SuggestCompactRange() marked files for compaction
   */
  // NOTE(review): 0x10 breaks the otherwise sequential numbering
  // (0x0A is used by kBottommostFiles below); this value must stay in
  // sync with the mapping in the JNI layer — confirm before changing.
  kFilesMarkedForCompaction((byte) 0x10),

  /**
   * [Level] Automatic compaction within bottommost level to cleanup duplicate
   * versions of same user key, usually due to a released snapshot.
   */
  kBottommostFiles((byte) 0x0A),

  /**
   * Compaction based on TTL
   */
  kTtl((byte) 0x0B),

  /**
   * According to the comments in flush_job.cc, RocksDB treats flush as
   * a level 0 compaction in internal stats.
   */
  kFlush((byte) 0x0C),

  /**
   * Compaction caused by external sst file ingestion
   */
  kExternalSstIngestion((byte) 0x0D);

  // internal (JNI) representation value for this reason
  private final byte value;

  CompactionReason(final byte value) {
    this.value = value;
  }

  /**
   * Get the internal representation value.
   *
   * @return the internal representation value
   */
  byte getValue() {
    return value;
  }

  /**
   * Get the CompactionReason from the internal representation value.
   *
   * @param value the internal representation value
   *
   * @return the compaction reason.
   *
   * @throws IllegalArgumentException if the value is unknown.
   */
  static CompactionReason fromValue(final byte value) {
    for (final CompactionReason reason : CompactionReason.values()) {
      if (reason.value == value) {
        return reason;
      }
    }
    throw new IllegalArgumentException(
        "Illegal value provided for CompactionReason: " + value);
  }
}

@ -5,6 +5,8 @@
package org.rocksdb;
import java.util.List;
/**
* Enum CompactionStyle
*
@ -21,6 +23,9 @@ package org.rocksdb;
* compaction strategy. It is suited for keeping event log data with
* very low overhead (query log for example). It periodically deletes
* the old data, so it's basically a TTL compaction style.</li>
* <li><strong>NONE</strong> - Disable background compaction.
 * Compaction jobs are submitted via
 * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}.</li>
* </ol>
*
* @see <a
@ -31,22 +36,45 @@ package org.rocksdb;
* FIFO Compaction</a>
*/
public enum CompactionStyle {
LEVEL((byte) 0),
UNIVERSAL((byte) 1),
FIFO((byte) 2);
LEVEL((byte) 0x0),
UNIVERSAL((byte) 0x1),
FIFO((byte) 0x2),
NONE((byte) 0x3);
private final byte value_;
private final byte value;
private CompactionStyle(byte value) {
value_ = value;
CompactionStyle(final byte value) {
this.value = value;
}
/**
* Returns the byte value of the enumerations value
* Get the internal representation value.
*
* @return byte representation
* @return the internal representation value.
*/
//TODO(AR) should be made package-private
public byte getValue() {
return value_;
return value;
}
/**
* Get the Compaction style from the internal representation value.
*
* @param value the internal representation value.
*
* @return the Compaction style
*
* @throws IllegalArgumentException if the value does not match a
* CompactionStyle
*/
static CompactionStyle fromValue(final byte value)
throws IllegalArgumentException {
for (final CompactionStyle compactionStyle : CompactionStyle.values()) {
if (compactionStyle.value == value) {
return compactionStyle;
}
}
throw new IllegalArgumentException("Unknown value for CompactionStyle: "
+ value);
}
}

@ -71,6 +71,67 @@ public class CompressionOptions extends RocksObject {
return maxDictBytes(nativeHandle_);
}
/**
* Maximum size of training data passed to zstd's dictionary trainer. Using
* zstd's dictionary trainer can achieve even better compression ratio
* improvements than using {@link #setMaxDictBytes(int)} alone.
*
* The training data will be used to generate a dictionary
* of {@link #maxDictBytes()}.
*
* Default: 0.
*
* @param zstdMaxTrainBytes Maximum bytes to use for training ZStd.
*
* @return the reference to the current options
*/
public CompressionOptions setZStdMaxTrainBytes(final int zstdMaxTrainBytes) {
setZstdMaxTrainBytes(nativeHandle_, zstdMaxTrainBytes);
return this;
}
/**
* Maximum size of training data passed to zstd's dictionary trainer.
*
* @return Maximum bytes to use for training ZStd
*/
public int zstdMaxTrainBytes() {
return zstdMaxTrainBytes(nativeHandle_);
}
/**
* When the compression options are set by the user, it will be set to "true".
* For bottommost_compression_opts, to enable it, user must set enabled=true.
* Otherwise, bottommost compression will use compression_opts as default
* compression options.
*
* For compression_opts, if compression_opts.enabled=false, it is still
* used as compression options for compression process.
*
* Default: false.
*
* @param enabled true to use these compression options
* for the bottommost_compression_opts, false otherwise
*
* @return the reference to the current options
*/
public CompressionOptions setEnabled(final boolean enabled) {
setEnabled(nativeHandle_, enabled);
return this;
}
/**
* Determine whether these compression options
* are used for the bottommost_compression_opts.
*
* @return true if these compression options are used
* for the bottommost_compression_opts, false otherwise
*/
public boolean enabled() {
return enabled(nativeHandle_);
}
private native static long newCompressionOptions();
@Override protected final native void disposeInternal(final long handle);
@ -82,4 +143,9 @@ public class CompressionOptions extends RocksObject {
private native int strategy(final long handle);
private native void setMaxDictBytes(final long handle, final int maxDictBytes);
private native int maxDictBytes(final long handle);
private native void setZstdMaxTrainBytes(final long handle,
final int zstdMaxTrainBytes);
private native int zstdMaxTrainBytes(final long handle);
private native void setEnabled(final long handle, final boolean enabled);
private native boolean enabled(final long handle);
}

@ -15,8 +15,9 @@ import java.util.*;
* If {@link #dispose()} function is not called, then it will be GC'd
* automatically and native resources will be released as part of the process.
*/
public class DBOptions
extends RocksObject implements DBOptionsInterface<DBOptions> {
public class DBOptions extends RocksObject
implements DBOptionsInterface<DBOptions>,
MutableDBOptionsInterface<DBOptions> {
static {
RocksDB.loadLibrary();
}
@ -46,9 +47,19 @@ public class DBOptions
this.numShardBits_ = other.numShardBits_;
this.rateLimiter_ = other.rateLimiter_;
this.rowCache_ = other.rowCache_;
this.walFilter_ = other.walFilter_;
this.writeBufferManager_ = other.writeBufferManager_;
}
/**
* Constructor from Options
*
* @param options The options.
*/
public DBOptions(final Options options) {
super(newDBOptionsFromOptions(options.nativeHandle_));
}
/**
* <p>Method to get a options instance by using pre-configured
* property values. If one or many values are undefined in
@ -131,18 +142,6 @@ public class DBOptions
return createMissingColumnFamilies(nativeHandle_);
}
@Override
public DBOptions setEnv(final Env env) {
setEnv(nativeHandle_, env.nativeHandle_);
this.env_ = env;
return this;
}
@Override
public Env getEnv() {
return env_;
}
@Override
public DBOptions setErrorIfExists(
final boolean errorIfExists) {
@ -171,6 +170,18 @@ public class DBOptions
return paranoidChecks(nativeHandle_);
}
@Override
public DBOptions setEnv(final Env env) {
setEnv(nativeHandle_, env.nativeHandle_);
this.env_ = env;
return this;
}
@Override
public Env getEnv() {
return env_;
}
@Override
public DBOptions setRateLimiter(final RateLimiter rateLimiter) {
assert(isOwningHandle());
@ -286,8 +297,8 @@ public class DBOptions
assert(isOwningHandle());
final int len = dbPaths.size();
final String paths[] = new String[len];
final long targetSizes[] = new long[len];
final String[] paths = new String[len];
final long[] targetSizes = new long[len];
int i = 0;
for(final DbPath dbPath : dbPaths) {
@ -305,8 +316,8 @@ public class DBOptions
if(len == 0) {
return Collections.emptyList();
} else {
final String paths[] = new String[len];
final long targetSizes[] = new long[len];
final String[] paths = new String[len];
final long[] targetSizes = new long[len];
dbPaths(nativeHandle_, paths, targetSizes);
@ -360,6 +371,19 @@ public class DBOptions
return deleteObsoleteFilesPeriodMicros(nativeHandle_);
}
@Override
public DBOptions setMaxBackgroundJobs(final int maxBackgroundJobs) {
assert(isOwningHandle());
setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs);
return this;
}
@Override
public int maxBackgroundJobs() {
assert(isOwningHandle());
return maxBackgroundJobs(nativeHandle_);
}
@Override
public void setBaseBackgroundCompactions(
final int baseBackgroundCompactions) {
@ -388,9 +412,10 @@ public class DBOptions
}
@Override
public void setMaxSubcompactions(final int maxSubcompactions) {
public DBOptions setMaxSubcompactions(final int maxSubcompactions) {
assert(isOwningHandle());
setMaxSubcompactions(nativeHandle_, maxSubcompactions);
return this;
}
@Override
@ -413,19 +438,6 @@ public class DBOptions
return maxBackgroundFlushes(nativeHandle_);
}
@Override
public DBOptions setMaxBackgroundJobs(final int maxBackgroundJobs) {
assert(isOwningHandle());
setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs);
return this;
}
@Override
public int maxBackgroundJobs() {
assert(isOwningHandle());
return maxBackgroundJobs(nativeHandle_);
}
@Override
public DBOptions setMaxLogFileSize(final long maxLogFileSize) {
assert(isOwningHandle());
@ -551,73 +563,73 @@ public class DBOptions
}
@Override
public DBOptions setUseDirectReads(
final boolean useDirectReads) {
public DBOptions setAllowMmapReads(
final boolean allowMmapReads) {
assert(isOwningHandle());
setUseDirectReads(nativeHandle_, useDirectReads);
setAllowMmapReads(nativeHandle_, allowMmapReads);
return this;
}
@Override
public boolean useDirectReads() {
public boolean allowMmapReads() {
assert(isOwningHandle());
return useDirectReads(nativeHandle_);
return allowMmapReads(nativeHandle_);
}
@Override
public DBOptions setUseDirectIoForFlushAndCompaction(
final boolean useDirectIoForFlushAndCompaction) {
public DBOptions setAllowMmapWrites(
final boolean allowMmapWrites) {
assert(isOwningHandle());
setUseDirectIoForFlushAndCompaction(nativeHandle_,
useDirectIoForFlushAndCompaction);
setAllowMmapWrites(nativeHandle_, allowMmapWrites);
return this;
}
@Override
public boolean useDirectIoForFlushAndCompaction() {
public boolean allowMmapWrites() {
assert(isOwningHandle());
return useDirectIoForFlushAndCompaction(nativeHandle_);
return allowMmapWrites(nativeHandle_);
}
@Override
public DBOptions setAllowFAllocate(final boolean allowFAllocate) {
public DBOptions setUseDirectReads(
final boolean useDirectReads) {
assert(isOwningHandle());
setAllowFAllocate(nativeHandle_, allowFAllocate);
setUseDirectReads(nativeHandle_, useDirectReads);
return this;
}
@Override
public boolean allowFAllocate() {
public boolean useDirectReads() {
assert(isOwningHandle());
return allowFAllocate(nativeHandle_);
return useDirectReads(nativeHandle_);
}
@Override
public DBOptions setAllowMmapReads(
final boolean allowMmapReads) {
public DBOptions setUseDirectIoForFlushAndCompaction(
final boolean useDirectIoForFlushAndCompaction) {
assert(isOwningHandle());
setAllowMmapReads(nativeHandle_, allowMmapReads);
setUseDirectIoForFlushAndCompaction(nativeHandle_,
useDirectIoForFlushAndCompaction);
return this;
}
@Override
public boolean allowMmapReads() {
public boolean useDirectIoForFlushAndCompaction() {
assert(isOwningHandle());
return allowMmapReads(nativeHandle_);
return useDirectIoForFlushAndCompaction(nativeHandle_);
}
@Override
public DBOptions setAllowMmapWrites(
final boolean allowMmapWrites) {
public DBOptions setAllowFAllocate(final boolean allowFAllocate) {
assert(isOwningHandle());
setAllowMmapWrites(nativeHandle_, allowMmapWrites);
setAllowFAllocate(nativeHandle_, allowFAllocate);
return this;
}
@Override
public boolean allowMmapWrites() {
public boolean allowFAllocate() {
assert(isOwningHandle());
return allowMmapWrites(nativeHandle_);
return allowFAllocate(nativeHandle_);
}
@Override
@ -682,7 +694,7 @@ public class DBOptions
return this.writeBufferManager_;
}
@Override
@Override
public long dbWriteBufferSize() {
assert(isOwningHandle());
return dbWriteBufferSize(nativeHandle_);
@ -795,6 +807,33 @@ public class DBOptions
return walBytesPerSync(nativeHandle_);
}
//TODO(AR) NOW
// @Override
// public DBOptions setListeners(final List<EventListener> listeners) {
// assert(isOwningHandle());
// final long[] eventListenerHandlers = new long[listeners.size()];
// for (int i = 0; i < eventListenerHandlers.length; i++) {
// eventListenerHandlers[i] = listeners.get(i).nativeHandle_;
// }
// setEventListeners(nativeHandle_, eventListenerHandlers);
// return this;
// }
//
// @Override
// public Collection<EventListener> listeners() {
// assert(isOwningHandle());
// final long[] eventListenerHandlers = listeners(nativeHandle_);
// if (eventListenerHandlers == null || eventListenerHandlers.length == 0) {
// return Collections.emptyList();
// }
//
// final List<EventListener> eventListeners = new ArrayList<>();
// for (final long eventListenerHandle : eventListenerHandlers) {
// eventListeners.add(new EventListener(eventListenerHandle)); //TODO(AR) check ownership is set to false!
// }
// return eventListeners;
// }
@Override
public DBOptions setEnableThreadTracking(final boolean enableThreadTracking) {
assert(isOwningHandle());
@ -820,6 +859,19 @@ public class DBOptions
return delayedWriteRate(nativeHandle_);
}
@Override
public DBOptions setEnablePipelinedWrite(final boolean enablePipelinedWrite) {
assert(isOwningHandle());
setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite);
return this;
}
@Override
public boolean enablePipelinedWrite() {
assert(isOwningHandle());
return enablePipelinedWrite(nativeHandle_);
}
@Override
public DBOptions setAllowConcurrentMemtableWrite(
final boolean allowConcurrentMemtableWrite) {
@ -921,6 +973,20 @@ public class DBOptions
return this.rowCache_;
}
@Override
public DBOptions setWalFilter(final AbstractWalFilter walFilter) {
assert(isOwningHandle());
setWalFilter(nativeHandle_, walFilter.nativeHandle_);
this.walFilter_ = walFilter;
return this;
}
/**
 * Returns the WAL recovery filter previously set via
 * {@link #setWalFilter(AbstractWalFilter)}, or null if none was set.
 * Note: this returns the cached Java-side reference, not a value read
 * back from the native options.
 *
 * @return the WAL filter, or null.
 */
@Override
public WalFilter walFilter() {
assert(isOwningHandle());
return this.walFilter_;
}
@Override
public DBOptions setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
assert(isOwningHandle());
@ -973,6 +1039,69 @@ public class DBOptions
return avoidFlushDuringShutdown(nativeHandle_);
}
@Override
public DBOptions setAllowIngestBehind(final boolean allowIngestBehind) {
assert(isOwningHandle());
setAllowIngestBehind(nativeHandle_, allowIngestBehind);
return this;
}
@Override
public boolean allowIngestBehind() {
assert(isOwningHandle());
return allowIngestBehind(nativeHandle_);
}
@Override
public DBOptions setPreserveDeletes(final boolean preserveDeletes) {
assert(isOwningHandle());
setPreserveDeletes(nativeHandle_, preserveDeletes);
return this;
}
@Override
public boolean preserveDeletes() {
assert(isOwningHandle());
return preserveDeletes(nativeHandle_);
}
@Override
public DBOptions setTwoWriteQueues(final boolean twoWriteQueues) {
assert(isOwningHandle());
setTwoWriteQueues(nativeHandle_, twoWriteQueues);
return this;
}
@Override
public boolean twoWriteQueues() {
assert(isOwningHandle());
return twoWriteQueues(nativeHandle_);
}
@Override
public DBOptions setManualWalFlush(final boolean manualWalFlush) {
assert(isOwningHandle());
setManualWalFlush(nativeHandle_, manualWalFlush);
return this;
}
@Override
public boolean manualWalFlush() {
assert(isOwningHandle());
return manualWalFlush(nativeHandle_);
}
/**
 * Enables or disables atomic flush of multiple column families in the
 * underlying native DBOptions.
 *
 * @param atomicFlush true to enable atomic flush.
 * @return this DBOptions instance, for method chaining.
 */
@Override
public DBOptions setAtomicFlush(final boolean atomicFlush) {
  // Guard against use after close, consistent with every sibling setter
  // in this class (e.g. setManualWalFlush, setTwoWriteQueues).
  assert(isOwningHandle());
  setAtomicFlush(nativeHandle_, atomicFlush);
  return this;
}
/**
 * Reads the atomic-flush flag from the underlying native DBOptions.
 *
 * @return true if atomic flush is enabled, false otherwise.
 */
@Override
public boolean atomicFlush() {
  // Guard against use after close, consistent with every sibling getter
  // in this class (e.g. manualWalFlush, twoWriteQueues).
  assert(isOwningHandle());
  return atomicFlush(nativeHandle_);
}
static final int DEFAULT_NUM_SHARD_BITS = -1;
@ -991,8 +1120,9 @@ public class DBOptions
private static native long getDBOptionsFromProps(
String optString);
private native static long newDBOptions();
private native static long copyDBOptions(long handle);
private static native long newDBOptions();
private static native long copyDBOptions(final long handle);
private static native long newDBOptionsFromOptions(final long optionsHandle);
@Override protected final native void disposeInternal(final long handle);
private native void optimizeForSmallDb(final long handle);
@ -1133,6 +1263,9 @@ public class DBOptions
private native boolean enableThreadTracking(long handle);
private native void setDelayedWriteRate(long handle, long delayedWriteRate);
private native long delayedWriteRate(long handle);
private native void setEnablePipelinedWrite(final long handle,
final boolean enablePipelinedWrite);
private native boolean enablePipelinedWrite(final long handle);
private native void setAllowConcurrentMemtableWrite(long handle,
boolean allowConcurrentMemtableWrite);
private native boolean allowConcurrentMemtableWrite(long handle);
@ -1155,7 +1288,9 @@ public class DBOptions
final boolean allow2pc);
private native boolean allow2pc(final long handle);
private native void setRowCache(final long handle,
final long row_cache_handle);
final long rowCacheHandle);
private native void setWalFilter(final long handle,
final long walFilterHandle);
private native void setFailIfOptionsFileError(final long handle,
final boolean failIfOptionsFileError);
private native boolean failIfOptionsFileError(final long handle);
@ -1168,6 +1303,21 @@ public class DBOptions
private native void setAvoidFlushDuringShutdown(final long handle,
final boolean avoidFlushDuringShutdown);
private native boolean avoidFlushDuringShutdown(final long handle);
private native void setAllowIngestBehind(final long handle,
final boolean allowIngestBehind);
private native boolean allowIngestBehind(final long handle);
private native void setPreserveDeletes(final long handle,
final boolean preserveDeletes);
private native boolean preserveDeletes(final long handle);
private native void setTwoWriteQueues(final long handle,
final boolean twoWriteQueues);
private native boolean twoWriteQueues(final long handle);
private native void setManualWalFlush(final long handle,
final boolean manualWalFlush);
private native boolean manualWalFlush(final long handle);
private native void setAtomicFlush(final long handle,
final boolean atomicFlush);
private native boolean atomicFlush(final long handle);
// instance variables
// NOTE: If you add new member variables, please update the copy constructor above!
@ -1175,5 +1325,6 @@ public class DBOptions
private int numShardBits_;
private RateLimiter rateLimiter_;
private Cache rowCache_;
private WalFilter walFilter_;
private WriteBufferManager writeBufferManager_;
}

@ -206,35 +206,9 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
InfoLogLevel infoLogLevel();
/**
* Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based
* on {@code target_file_size_base} and {@code target_file_size_multiplier}
* for level-based compaction. For universal-style compaction, you can usually
* set it to -1.
* Default: 5000
*
* @param maxOpenFiles the maximum number of open files.
* @return the instance of the current object.
*/
T setMaxOpenFiles(int maxOpenFiles);
/**
* Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based
* on {@code target_file_size_base} and {@code target_file_size_multiplier}
* for level-based compaction. For universal-style compaction, you can usually
* set it to -1.
*
* @return the maximum number of open files.
*/
int maxOpenFiles();
/**
* If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You
* can use this option to increase the number of threads used to open the
* files.
* If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open
* all files on DB::Open(). You can use this option to increase the number
* of threads used to open the files.
*
* Default: 16
*
@ -246,9 +220,9 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
T setMaxFileOpeningThreads(int maxFileOpeningThreads);
/**
* If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You
* can use this option to increase the number of threads used to open the
* files.
* If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open all
* files on DB::Open(). You can use this option to increase the number of
* threads used to open the files.
*
* Default: 16
*
@ -256,36 +230,6 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*/
int maxFileOpeningThreads();
/**
* <p>Once write-ahead logs exceed this size, we will start forcing the
* flush of column families whose memtables are backed by the oldest live
* WAL file (i.e. the ones that are causing all the space amplification).
* </p>
* <p>If set to 0 (default), we will dynamically choose the WAL size limit to
* be [sum of all write_buffer_size * max_write_buffer_number] * 2</p>
* <p>This option takes effect only when there are more than one column family as
* otherwise the wal size is dictated by the write_buffer_size.</p>
* <p>Default: 0</p>
*
* @param maxTotalWalSize max total wal size.
* @return the instance of the current object.
*/
T setMaxTotalWalSize(long maxTotalWalSize);
/**
* <p>Returns the max total wal size. Once write-ahead logs exceed this size,
* we will start forcing the flush of column families whose memtables are
* backed by the oldest live WAL file (i.e. the ones that are causing all
* the space amplification).</p>
*
* <p>If set to 0 (default), we will dynamically choose the WAL size limit
* to be [sum of all write_buffer_size * max_write_buffer_number] * 2
* </p>
*
* @return max total wal size
*/
long maxTotalWalSize();
/**
* <p>Sets the statistics object which collects metrics about database operations.
* Statistics objects should not be shared between DB instances as
@ -466,59 +410,6 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*/
long deleteObsoleteFilesPeriodMicros();
/**
* Suggested number of concurrent background compaction jobs, submitted to
* the default LOW priority thread pool.
* Default: 1
*
* @param baseBackgroundCompactions Suggested number of background compaction
* jobs
*
* @deprecated Use {@link #setMaxBackgroundJobs(int)}
*/
void setBaseBackgroundCompactions(int baseBackgroundCompactions);
/**
* Suggested number of concurrent background compaction jobs, submitted to
* the default LOW priority thread pool.
* Default: 1
*
* @return Suggested number of background compaction jobs
*/
int baseBackgroundCompactions();
/**
* Specifies the maximum number of concurrent background compaction jobs,
* submitted to the default LOW priority thread pool.
* If you're increasing this, also consider increasing number of threads in
* LOW priority thread pool. For more information, see
* Default: 1
*
* @param maxBackgroundCompactions the maximum number of background
* compaction jobs.
* @return the instance of the current object.
*
* @see RocksEnv#setBackgroundThreads(int)
* @see RocksEnv#setBackgroundThreads(int, int)
* @see #maxBackgroundFlushes()
*/
T setMaxBackgroundCompactions(int maxBackgroundCompactions);
/**
* Returns the maximum number of concurrent background compaction jobs,
* submitted to the default LOW priority thread pool.
* When increasing this number, we may also want to consider increasing
* number of threads in LOW priority thread pool.
* Default: 1
*
* @return the maximum number of concurrent background compaction jobs.
* @see RocksEnv#setBackgroundThreads(int)
* @see RocksEnv#setBackgroundThreads(int, int)
*
* @deprecated Use {@link #setMaxBackgroundJobs(int)}
*/
int maxBackgroundCompactions();
/**
* This value represents the maximum number of threads that will
* concurrently perform a compaction job by breaking it into multiple,
@ -527,8 +418,10 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*
* @param maxSubcompactions The maximum number of threads that will
* concurrently perform a compaction job
*
* @return the instance of the current object.
*/
void setMaxSubcompactions(int maxSubcompactions);
T setMaxSubcompactions(int maxSubcompactions);
/**
* This value represents the maximum number of threads that will
@ -551,11 +444,12 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
* @return the instance of the current object.
*
* @see RocksEnv#setBackgroundThreads(int)
* @see RocksEnv#setBackgroundThreads(int, int)
* @see #maxBackgroundCompactions()
* @see RocksEnv#setBackgroundThreads(int, Priority)
* @see MutableDBOptionsInterface#maxBackgroundCompactions()
*
* @deprecated Use {@link #setMaxBackgroundJobs(int)}
* @deprecated Use {@link MutableDBOptionsInterface#setMaxBackgroundJobs(int)}
*/
@Deprecated
T setMaxBackgroundFlushes(int maxBackgroundFlushes);
/**
@ -566,29 +460,11 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*
* @return the maximum number of concurrent background flush jobs.
* @see RocksEnv#setBackgroundThreads(int)
* @see RocksEnv#setBackgroundThreads(int, int)
* @see RocksEnv#setBackgroundThreads(int, Priority)
*/
@Deprecated
int maxBackgroundFlushes();
/**
* Specifies the maximum number of concurrent background jobs (both flushes
* and compactions combined).
* Default: 2
*
* @param maxBackgroundJobs number of max concurrent background jobs
* @return the instance of the current object.
*/
T setMaxBackgroundJobs(int maxBackgroundJobs);
/**
* Returns the maximum number of concurrent background jobs (both flushes
* and compactions combined).
* Default: 2
*
* @return the maximum number of concurrent background jobs.
*/
int maxBackgroundJobs();
/**
* Specifies the maximum size of a info log file. If the current log file
* is larger than `max_log_file_size`, a new info log file will
@ -938,23 +814,6 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*/
boolean isFdCloseOnExec();
/**
* if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
* Default: 600 (10 minutes)
*
* @param statsDumpPeriodSec time interval in seconds.
* @return the instance of the current object.
*/
T setStatsDumpPeriodSec(int statsDumpPeriodSec);
/**
* If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
* Default: 600 (10 minutes)
*
* @return time interval in seconds.
*/
int statsDumpPeriodSec();
/**
* If set true, will hint the underlying file system that the file
* access pattern is random, when a sst file is opened.
@ -1089,36 +948,6 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*/
boolean newTableReaderForCompactionInputs();
/**
* If non-zero, we perform bigger reads when doing compaction. If you're
* running RocksDB on spinning disks, you should set this to at least 2MB.
*
* That way RocksDB's compaction is doing sequential instead of random reads.
* When non-zero, we also force {@link #newTableReaderForCompactionInputs()}
* to true.
*
* Default: 0
*
* @param compactionReadaheadSize The compaction read-ahead size
*
* @return the reference to the current options.
*/
T setCompactionReadaheadSize(final long compactionReadaheadSize);
/**
* If non-zero, we perform bigger reads when doing compaction. If you're
* running RocksDB on spinning disks, you should set this to at least 2MB.
*
* That way RocksDB's compaction is doing sequential instead of random reads.
* When non-zero, we also force {@link #newTableReaderForCompactionInputs()}
* to true.
*
* Default: 0
*
* @return The compaction read-ahead size
*/
long compactionReadaheadSize();
/**
* This is a maximum buffer size that is used by WinMmapReadableFile in
* unbuffered disk I/O mode. We need to maintain an aligned buffer for
@ -1126,7 +955,8 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
* for bigger requests allocate one shot buffers. In unbuffered mode we
* always bypass read-ahead buffer at ReadaheadRandomAccessFile
* When read-ahead is required we then make use of
* {@link #compactionReadaheadSize()} value and always try to read ahead.
* {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and
* always try to read ahead.
* With read-ahead we always pre-allocate buffer to the size instead of
* growing it up to a limit.
*
@ -1151,9 +981,9 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
* for bigger requests allocate one shot buffers. In unbuffered mode we
* always bypass read-ahead buffer at ReadaheadRandomAccessFile
* When read-ahead is required we then make use of
* {@link #compactionReadaheadSize()} value and always try to read ahead.
* With read-ahead we always pre-allocate buffer to the size instead of
* growing it up to a limit.
* {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and
* always try to read ahead. With read-ahead we always pre-allocate buffer
* to the size instead of growing it up to a limit.
*
* This option is currently honored only on Windows
*
@ -1166,30 +996,6 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*/
long randomAccessMaxBufferSize();
/**
* This is the maximum buffer size that is used by WritableFileWriter.
* On Windows, we need to maintain an aligned buffer for writes.
* We allow the buffer to grow until it's size hits the limit.
*
* Default: 1024 * 1024 (1 MB)
*
* @param writableFileMaxBufferSize the maximum buffer size
*
* @return the reference to the current options.
*/
T setWritableFileMaxBufferSize(long writableFileMaxBufferSize);
/**
* This is the maximum buffer size that is used by WritableFileWriter.
* On Windows, we need to maintain an aligned buffer for writes.
* We allow the buffer to grow until it's size hits the limit.
*
* Default: 1024 * 1024 (1 MB)
*
* @return the maximum buffer size
*/
long writableFileMaxBufferSize();
/**
* Use adaptive mutex, which spins in the user space before resorting
* to kernel. This could reduce context switch when the mutex is not
@ -1213,45 +1019,24 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*/
boolean useAdaptiveMutex();
/**
* Allows OS to incrementally sync files to disk while they are being
* written, asynchronously, in the background.
* Issue one request for every bytes_per_sync written. 0 turns it off.
* Default: 0
*
* @param bytesPerSync size in bytes
* @return the instance of the current object.
*/
T setBytesPerSync(long bytesPerSync);
/**
* Allows OS to incrementally sync files to disk while they are being
* written, asynchronously, in the background.
* Issue one request for every bytes_per_sync written. 0 turns it off.
* Default: 0
*
* @return size in bytes
*/
long bytesPerSync();
/**
* Same as {@link #setBytesPerSync(long)} , but applies to WAL files
*
* Default: 0, turned off
*
* @param walBytesPerSync size in bytes
* @return the instance of the current object.
*/
T setWalBytesPerSync(long walBytesPerSync);
/**
* Same as {@link #bytesPerSync()} , but applies to WAL files
*
* Default: 0, turned off
*
* @return size in bytes
*/
long walBytesPerSync();
//TODO(AR) NOW
// /**
// * Sets the {@link EventListener}s whose callback functions
// * will be called when specific RocksDB event happens.
// *
// * @param listeners the listeners who should be notified on various events.
// *
// * @return the instance of the current object.
// */
// T setListeners(final List<EventListener> listeners);
//
// /**
// * Gets the {@link EventListener}s whose callback functions
// * will be called when specific RocksDB event happens.
// *
// * @return a collection of Event listeners.
// */
// Collection<EventListener> listeners();
/**
* If true, then the status of the threads involved in this DB will
@ -1276,40 +1061,33 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
boolean enableThreadTracking();
/**
* The limited write rate to DB if
* {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
* {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
* or we are writing to the last mem table allowed and we allow more than 3
* mem tables. It is calculated using size of user write requests before
* compression. RocksDB may decide to slow down more if the compaction still
* gets behind further.
* By default, a single write thread queue is maintained. The thread gets
* to the head of the queue becomes write batch group leader and responsible
* for writing to WAL and memtable for the batch group.
*
* Unit: bytes per second.
* If {@link #enablePipelinedWrite()} is true, separate write thread queue is
* maintained for WAL write and memtable write. A write thread first enters the
* WAL writer queue and then the memtable writer queue. A pending thread on the
* WAL writer queue thus only has to wait for previous writers to finish their
* WAL writing but not the memtable writing. Enabling the feature may improve
* write throughput and reduce latency of the prepare phase of two-phase
* commit.
*
* Default: 16MB/s
* Default: false
*
* @param delayedWriteRate the rate in bytes per second
* @param enablePipelinedWrite true to enable pipelined writes
*
* @return the reference to the current options.
*/
T setDelayedWriteRate(long delayedWriteRate);
T setEnablePipelinedWrite(final boolean enablePipelinedWrite);
/**
* The limited write rate to DB if
* {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
* {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
* or we are writing to the last mem table allowed and we allow more than 3
* mem tables. It is calculated using size of user write requests before
* compression. RocksDB may decide to slow down more if the compaction still
* gets behind further.
*
* Unit: bytes per second.
* Returns true if pipelined writes are enabled.
* See {@link #setEnablePipelinedWrite(boolean)}.
*
* Default: 16MB/s
*
* @return the rate in bytes per second
* @return true if pipelined writes are enabled, false otherwise.
*/
long delayedWriteRate();
boolean enablePipelinedWrite();
/**
* If true, allow multi-writers to update mem tables in parallel.
@ -1511,6 +1289,27 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
*/
Cache rowCache();
/**
* A filter object supplied to be invoked while processing write-ahead-logs
* (WALs) during recovery. The filter provides a way to inspect log
* records, ignoring a particular record or skipping replay.
* The filter is invoked at startup and is invoked from a single-thread
* currently.
*
* @param walFilter the filter for processing WALs during recovery.
*
* @return the reference to the current options.
*/
T setWalFilter(final AbstractWalFilter walFilter);
/**
* Gets the filter for processing WALs during recovery.
* See {@link #setWalFilter(AbstractWalFilter)}.
*
* @return the filter used for processing WALs during recovery.
*/
WalFilter walFilter();
/**
* If true, then DB::Open / CreateColumnFamily / DropColumnFamily
* / SetOptions will fail if options file is not detected or properly
@ -1589,35 +1388,126 @@ public interface DBOptionsInterface<T extends DBOptionsInterface> {
boolean avoidFlushDuringRecovery();
/**
* By default RocksDB will flush all memtables on DB close if there are
* unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
* DB close. Unpersisted data WILL BE LOST.
* Set this option to true during creation of database if you want
* to be able to ingest behind (call IngestExternalFile() skipping keys
* that already exist, rather than overwriting matching keys).
* Setting this option to true will affect 3 things:
* 1) Disable some internal optimizations around SST file compression
* 2) Reserve bottom-most level for ingested files only.
* 3) Note that num_levels should be &gt;= 3 if this option is turned on.
*
* DEFAULT: false
*
* @param allowIngestBehind true to allow ingest behind, false to disallow.
*
* @return the reference to the current options.
*/
T setAllowIngestBehind(final boolean allowIngestBehind);
/**
* Returns true if ingest behind is allowed.
* See {@link #setAllowIngestBehind(boolean)}.
*
* @return true if ingest behind is allowed, false otherwise.
*/
boolean allowIngestBehind();
/**
* Needed to support differential snapshots.
* If set to true then DB will only process deletes with sequence number
* less than what was set by SetPreserveDeletesSequenceNumber(uint64_t ts).
* Clients are responsible to periodically call this method to advance
* the cutoff time. If this method is never called and preserve_deletes
* is set to true NO deletes will ever be processed.
* At the moment this only keeps normal deletes, SingleDeletes will
* not be preserved.
*
* DEFAULT: false
*
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
* API.
* @param preserveDeletes true to preserve deletes.
*
* @return the reference to the current options.
*/
T setPreserveDeletes(final boolean preserveDeletes);
/**
* Returns true if deletes are preserved.
* See {@link #setPreserveDeletes(boolean)}.
*
* @return true if deletes are preserved, false otherwise.
*/
boolean preserveDeletes();
/**
* If enabled it uses two queues for writes, one for the ones with
* disable_memtable and one for the ones that also write to memtable. This
* allows the memtable writes not to lag behind other writes. It can be used
* to optimize MySQL 2PC in which only the commits, which are serial, write to
* memtable.
*
* DEFAULT: false
*
* @param avoidFlushDuringShutdown true if we should avoid flush during
* shutdown
* @param twoWriteQueues true to enable two write queues, false otherwise.
*
* @return the reference to the current options.
*/
T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown);
T setTwoWriteQueues(final boolean twoWriteQueues);
/**
* By default RocksDB will flush all memtables on DB close if there are
* unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
* DB close. Unpersisted data WILL BE LOST.
* Returns true if two write queues are enabled.
*
* @return true if two write queues are enabled, false otherwise.
*/
boolean twoWriteQueues();
/**
* If true WAL is not flushed automatically after each write. Instead it
* relies on manual invocation of FlushWAL to write the WAL buffer to its
* file.
*
* DEFAULT: false
*
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
* API.
* @param manualWalFlush true to disable automatic WAL flushing,
* false otherwise.
*
* @return the reference to the current options.
*/
T setManualWalFlush(final boolean manualWalFlush);
/**
* Returns true if automatic WAL flushing is disabled.
* See {@link #setManualWalFlush(boolean)}.
*
* @return true if automatic WAL flushing is disabled, false otherwise.
*/
boolean manualWalFlush();
/**
* If true, RocksDB supports flushing multiple column families and committing
* their results atomically to MANIFEST. Note that it is not
* necessary to set atomic_flush to true if WAL is always enabled since WAL
* allows the database to be restored to the last persistent state in WAL.
* This option is useful when there are column families with writes NOT
* protected by WAL.
* For manual flush, application has to specify which column families to
* flush atomically in {@link RocksDB#flush(FlushOptions, List)}.
* For auto-triggered flush, RocksDB atomically flushes ALL column families.
*
* Currently, any WAL-enabled writes after atomic flush may be replayed
* independently if the process crashes later and tries to recover.
*
* @param atomicFlush true to enable atomic flush of multiple column families.
*
* @return the reference to the current options.
*/
T setAtomicFlush(final boolean atomicFlush);
/**
* Determine if atomic flush of multiple column families is enabled.
*
* See {@link #setAtomicFlush(boolean)}.
*
* @return true if we should avoid flush during shutdown
* @return true if atomic flush is enabled.
*/
boolean avoidFlushDuringShutdown();
boolean atomicFlush();
}

@ -0,0 +1,32 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
* DataBlockIndexType used in conjunction with BlockBasedTable.
*/
public enum DataBlockIndexType {
  /**
   * Traditional binary-search index over the data block.
   */
  kDataBlockBinarySearch((byte) 0x0),

  /**
   * Binary search augmented with an additional hash index.
   */
  kDataBlockBinaryAndHash((byte) 0x1);

  // Wire value passed over JNI to the native BlockBasedTable options.
  private final byte value;

  DataBlockIndexType(final byte nativeValue) {
    this.value = nativeValue;
  }

  /**
   * Returns the byte value understood by the native side.
   */
  byte getValue() {
    return value;
  }
}

@ -5,12 +5,23 @@
package org.rocksdb;
import java.util.Arrays;
import java.util.List;
/**
* Base class for all Env implementations in RocksDB.
*/
public abstract class Env extends RocksObject {
public static final int FLUSH_POOL = 0;
public static final int COMPACTION_POOL = 1;
private static final Env DEFAULT_ENV = new RocksEnv(getDefaultEnvInternal());
static {
/**
* The Ownership of the Default Env belongs to C++
* and so we disown the native handle here so that
* we cannot accidentally free it from Java.
*/
DEFAULT_ENV.disOwnNativeHandle();
}
/**
* <p>Returns the default environment suitable for the current operating
@ -18,13 +29,13 @@ public abstract class Env extends RocksObject {
*
* <p>The result of {@code getDefault()} is a singleton whose ownership
* belongs to rocksdb c++. As a result, the returned RocksEnv will not
* have the ownership of its c++ resource, and calling its dispose()
* have the ownership of its c++ resource, and calling its dispose()/close()
* will be no-op.</p>
*
* @return the default {@link org.rocksdb.RocksEnv} instance.
*/
public static Env getDefault() {
return default_env_;
return DEFAULT_ENV;
}
/**
@ -32,27 +43,36 @@ public abstract class Env extends RocksObject {
* for this environment.</p>
* <p>Default number: 1</p>
*
* @param num the number of threads
* @param number the number of threads
*
* @return current {@link RocksEnv} instance.
*/
public Env setBackgroundThreads(final int num) {
return setBackgroundThreads(num, FLUSH_POOL);
public Env setBackgroundThreads(final int number) {
// Convenience overload: the LOW-priority pool is the default pool.
return setBackgroundThreads(number, Priority.LOW);
}
/**
* <p>Gets the number of background worker threads of the pool
* for this environment.</p>
*
* @param priority the priority id of the thread pool to query.
*
* @return the number of threads.
*/
public int getBackgroundThreads(final Priority priority) {
return getBackgroundThreads(nativeHandle_, priority.getValue());
}
/**
* <p>Sets the number of background worker threads of the specified thread
* pool for this environment.</p>
*
* @param num the number of threads
* @param poolID the id to specified a thread pool. Should be either
* FLUSH_POOL or COMPACTION_POOL.
* @param number the number of threads
* @param priority the priority id of a specified thread pool.
*
* <p>Default number: 1</p>
* @return current {@link RocksEnv} instance.
*/
public Env setBackgroundThreads(final int num, final int poolID) {
setBackgroundThreads(nativeHandle_, num, poolID);
public Env setBackgroundThreads(final int number, final Priority priority) {
// Delegates to the native Env; priority is passed as its byte wire value.
setBackgroundThreads(nativeHandle_, number, priority.getValue());
return this;
}
@ -60,33 +80,75 @@ public abstract class Env extends RocksObject {
* <p>Returns the length of the queue associated with the specified
* thread pool.</p>
*
* @param poolID the id to specified a thread pool. Should be either
* FLUSH_POOL or COMPACTION_POOL.
* @param priority the priority id of a specified thread pool.
*
* @return the thread pool queue length.
*/
public int getThreadPoolQueueLen(final int poolID) {
return getThreadPoolQueueLen(nativeHandle_, poolID);
public int getThreadPoolQueueLen(final Priority priority) {
// Queries the native thread pool queue length for the given priority.
return getThreadPoolQueueLen(nativeHandle_, priority.getValue());
}
/**
* Enlarge number of background worker threads of a specific thread pool
* for this environment if it is smaller than specified. 'LOW' is the default
* pool.
*
* @param number the number of threads.
* @param priority the priority id of the thread pool to grow.
*
* @return current {@link RocksEnv} instance.
*/
public Env incBackgroundThreadsIfNeeded(final int number,
final Priority priority) {
incBackgroundThreadsIfNeeded(nativeHandle_, number, priority.getValue());
return this;
}
protected Env(final long nativeHandle) {
super(nativeHandle);
/**
* Lower IO priority for threads from the specified pool.
*
* @param priority the priority id of a specified thread pool.
*
* @return the current Env instance, for method chaining.
*/
public Env lowerThreadPoolIOPriority(final Priority priority) {
lowerThreadPoolIOPriority(nativeHandle_, priority.getValue());
return this;
}
static {
default_env_ = new RocksEnv(getDefaultEnvInternal());
/**
* Lower CPU priority for threads from the specified pool.
*
* @param priority the priority id of a specified thread pool.
*
* @return the current Env instance, for method chaining.
*/
public Env lowerThreadPoolCPUPriority(final Priority priority) {
lowerThreadPoolCPUPriority(nativeHandle_, priority.getValue());
return this;
}
/**
* <p>The static default Env. The ownership of its native handle
* belongs to rocksdb c++ and is not able to be released on the Java
* side.</p>
* Returns the status of all threads that belong to the current Env.
*
* @return the status of all threads belong to this env.
*/
static Env default_env_;
public List<ThreadStatus> getThreadList() throws RocksDBException {
// Arrays.asList wraps the native-provided array in a fixed-size list view.
return Arrays.asList(getThreadList(nativeHandle_));
}
/**
* Package-private constructor wrapping an existing native Env handle.
*
* @param nativeHandle the address of the native Env object.
*/
Env(final long nativeHandle) {
super(nativeHandle);
}
private static native long getDefaultEnvInternal();
private native void setBackgroundThreads(
long handle, int num, int priority);
private native int getThreadPoolQueueLen(long handle, int poolID);
final long handle, final int number, final byte priority);
private native int getBackgroundThreads(final long handle,
final byte priority);
private native int getThreadPoolQueueLen(final long handle,
final byte priority);
private native void incBackgroundThreadsIfNeeded(final long handle,
final int number, final byte priority);
private native void lowerThreadPoolIOPriority(final long handle,
final byte priority);
private native void lowerThreadPoolCPUPriority(final long handle,
final byte priority);
private native ThreadStatus[] getThreadList(final long handle)
throws RocksDBException;
}

@ -5,203 +5,362 @@
package org.rocksdb;
/**
* Options while opening a file to read/write
*/
public class EnvOptions extends RocksObject {
static {
RocksDB.loadLibrary();
}
/**
* Construct with default Options
*/
public EnvOptions() {
super(newEnvOptions());
}
public EnvOptions setUseOsBuffer(final boolean useOsBuffer) {
setUseOsBuffer(nativeHandle_, useOsBuffer);
return this;
}
public boolean useOsBuffer() {
assert(isOwningHandle());
return useOsBuffer(nativeHandle_);
/**
* Construct from {@link DBOptions}.
*
* @param dbOptions the database options.
*/
public EnvOptions(final DBOptions dbOptions) {
super(newEnvOptions(dbOptions.nativeHandle_));
}
/**
 * Enable/Disable memory mapped reads.
 *
 * Default: false
 *
 * @param useMmapReads true to enable memory mapped reads, false to disable.
 *
 * @return the reference to these options, for method chaining.
 */
public EnvOptions setUseMmapReads(final boolean useMmapReads) {
setUseMmapReads(nativeHandle_, useMmapReads);
return this;
}
/**
 * Determine if memory mapped reads are in-use.
 *
 * Reads the value from the underlying native options object.
 *
 * @return true if memory mapped reads are in-use, false otherwise.
 */
public boolean useMmapReads() {
assert(isOwningHandle());
return useMmapReads(nativeHandle_);
}
/**
* Enable/Disable memory mapped Writes.
*
* Default: true
*
* @param useMmapWrites true to enable memory mapped writes, false to disable.
*
* @return the reference to these options.
*/
public EnvOptions setUseMmapWrites(final boolean useMmapWrites) {
setUseMmapWrites(nativeHandle_, useMmapWrites);
return this;
}
/**
* Determine if memory mapped writes are in-use.
*
* @return true if memory mapped writes are in-use, false otherwise.
*/
public boolean useMmapWrites() {
assert(isOwningHandle());
return useMmapWrites(nativeHandle_);
}
/**
* Enable/Disable direct reads, i.e. {@code O_DIRECT}.
*
* Default: false
*
* @param useDirectReads true to enable direct reads, false to disable.
*
* @return the reference to these options.
*/
public EnvOptions setUseDirectReads(final boolean useDirectReads) {
setUseDirectReads(nativeHandle_, useDirectReads);
return this;
}
/**
* Determine if direct reads are in-use.
*
* @return true if direct reads are in-use, false otherwise.
*/
public boolean useDirectReads() {
assert(isOwningHandle());
return useDirectReads(nativeHandle_);
}
/**
* Enable/Disable direct writes, i.e. {@code O_DIRECT}.
*
* Default: false
*
* @param useDirectWrites true to enable direct writes, false to disable.
*
* @return the reference to these options.
*/
public EnvOptions setUseDirectWrites(final boolean useDirectWrites) {
setUseDirectWrites(nativeHandle_, useDirectWrites);
return this;
}
/**
* Determine if direct writes are in-use.
*
* @return true if direct writes are in-use, false otherwise.
*/
public boolean useDirectWrites() {
assert(isOwningHandle());
return useDirectWrites(nativeHandle_);
}
/**
* Enable/Disable fallocate calls.
*
* Default: true
*
* If false, {@code fallocate()} calls are bypassed.
*
* @param allowFallocate true to enable fallocate calls, false to disable.
*
* @return the reference to these options.
*/
public EnvOptions setAllowFallocate(final boolean allowFallocate) {
setAllowFallocate(nativeHandle_, allowFallocate);
return this;
}
/**
* Determine if fallocate calls are used.
*
* @return true if fallocate calls are used, false otherwise.
*/
public boolean allowFallocate() {
assert(isOwningHandle());
return allowFallocate(nativeHandle_);
}
/**
 * Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors.
 *
 * Default: true
 *
 * @param setFdCloexec true to enable the {@code FD_CLOEXEC} bit,
 *     false to disable.
 *
 * @return the reference to these options.
 */
public EnvOptions setSetFdCloexec(final boolean setFdCloexec) {
setSetFdCloexec(nativeHandle_, setFdCloexec);
return this;
}
/**
 * Determine if the {@code FD_CLOEXEC} bit is set when opening file
 * descriptors.
 *
 * @return true if the {@code FD_CLOEXEC} bit is enabled, false otherwise.
 */
public boolean setFdCloexec() {
assert(isOwningHandle());
return setFdCloexec(nativeHandle_);
}
/**
* Allows OS to incrementally sync files to disk while they are being
* written, in the background. Issue one request for every
* {@code bytesPerSync} written.
*
* Default: 0
*
* @param bytesPerSync 0 to disable, otherwise the number of bytes.
*
* @return the reference to these options.
*/
public EnvOptions setBytesPerSync(final long bytesPerSync) {
setBytesPerSync(nativeHandle_, bytesPerSync);
return this;
}
/**
* Get the number of incremental bytes per sync written in the background.
*
* @return 0 if disabled, otherwise the number of bytes.
*/
public long bytesPerSync() {
assert(isOwningHandle());
return bytesPerSync(nativeHandle_);
}
public EnvOptions setFallocateWithKeepSize(final boolean fallocateWithKeepSize) {
/**
* If true, we will preallocate the file with {@code FALLOC_FL_KEEP_SIZE}
* flag, which means that file size won't change as part of preallocation.
* If false, preallocation will also change the file size. This option will
* improve the performance in workloads where you sync the data on every
* write. By default, we set it to true for MANIFEST writes and false for
* WAL writes
*
* @param fallocateWithKeepSize true to preallocate, false otherwise.
*
* @return the reference to these options.
*/
public EnvOptions setFallocateWithKeepSize(
final boolean fallocateWithKeepSize) {
setFallocateWithKeepSize(nativeHandle_, fallocateWithKeepSize);
return this;
}
/**
* Determine if file is preallocated.
*
* @return true if the file is preallocated, false otherwise.
*/
public boolean fallocateWithKeepSize() {
assert(isOwningHandle());
return fallocateWithKeepSize(nativeHandle_);
}
public EnvOptions setCompactionReadaheadSize(final long compactionReadaheadSize) {
/**
* See {@link DBOptions#setCompactionReadaheadSize(long)}.
*
* @param compactionReadaheadSize the compaction read-ahead size.
*
* @return the reference to these options.
*/
public EnvOptions setCompactionReadaheadSize(
final long compactionReadaheadSize) {
setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
return this;
}
/**
* See {@link DBOptions#compactionReadaheadSize()}.
*
* @return the compaction read-ahead size.
*/
public long compactionReadaheadSize() {
assert(isOwningHandle());
return compactionReadaheadSize(nativeHandle_);
}
public EnvOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
/**
* See {@link DBOptions#setRandomAccessMaxBufferSize(long)}.
*
* @param randomAccessMaxBufferSize the max buffer size for random access.
*
* @return the reference to these options.
*/
public EnvOptions setRandomAccessMaxBufferSize(
final long randomAccessMaxBufferSize) {
setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
return this;
}
/**
* See {@link DBOptions#randomAccessMaxBufferSize()}.
*
* @return the max buffer size for random access.
*/
public long randomAccessMaxBufferSize() {
assert(isOwningHandle());
return randomAccessMaxBufferSize(nativeHandle_);
}
public EnvOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
/**
* See {@link DBOptions#setWritableFileMaxBufferSize(long)}.
*
* @param writableFileMaxBufferSize the max buffer size.
*
* @return the reference to these options.
*/
public EnvOptions setWritableFileMaxBufferSize(
final long writableFileMaxBufferSize) {
setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
return this;
}
/**
* See {@link DBOptions#writableFileMaxBufferSize()}.
*
* @return the max buffer size.
*/
public long writableFileMaxBufferSize() {
assert(isOwningHandle());
return writableFileMaxBufferSize(nativeHandle_);
}
/**
 * Set the write rate limiter for flush and compaction.
 *
 * @param rateLimiter the rate limiter.
 *
 * @return the reference to these options.
 */
public EnvOptions setRateLimiter(final RateLimiter rateLimiter) {
// Keep a Java-side reference so the RateLimiter object stays reachable
// while these options are in use.
this.rateLimiter = rateLimiter;
setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
return this;
}
/**
 * Get the write rate limiter for flush and compaction.
 *
 * Returns the Java-side instance cached by
 * {@code setRateLimiter(RateLimiter)}; null if none was set.
 *
 * @return the rate limiter.
 */
public RateLimiter rateLimiter() {
assert(isOwningHandle());
return rateLimiter;
}
private native static long newEnvOptions();
private native static long newEnvOptions(final long dboptions_handle);
@Override protected final native void disposeInternal(final long handle);
private native void setUseOsBuffer(final long handle, final boolean useOsBuffer);
private native boolean useOsBuffer(final long handle);
private native void setUseMmapReads(final long handle, final boolean useMmapReads);
private native void setUseMmapReads(final long handle,
final boolean useMmapReads);
private native boolean useMmapReads(final long handle);
private native void setUseMmapWrites(final long handle, final boolean useMmapWrites);
private native void setUseMmapWrites(final long handle,
final boolean useMmapWrites);
private native boolean useMmapWrites(final long handle);
private native void setUseDirectReads(final long handle, final boolean useDirectReads);
private native void setUseDirectReads(final long handle,
final boolean useDirectReads);
private native boolean useDirectReads(final long handle);
private native void setUseDirectWrites(final long handle, final boolean useDirectWrites);
private native void setUseDirectWrites(final long handle,
final boolean useDirectWrites);
private native boolean useDirectWrites(final long handle);
private native void setAllowFallocate(final long handle, final boolean allowFallocate);
private native void setAllowFallocate(final long handle,
final boolean allowFallocate);
private native boolean allowFallocate(final long handle);
private native void setSetFdCloexec(final long handle, final boolean setFdCloexec);
private native void setSetFdCloexec(final long handle,
final boolean setFdCloexec);
private native boolean setFdCloexec(final long handle);
private native void setBytesPerSync(final long handle, final long bytesPerSync);
private native void setBytesPerSync(final long handle,
final long bytesPerSync);
private native long bytesPerSync(final long handle);
private native void setFallocateWithKeepSize(
final long handle, final boolean fallocateWithKeepSize);
private native boolean fallocateWithKeepSize(final long handle);
private native void setCompactionReadaheadSize(
final long handle, final long compactionReadaheadSize);
private native long compactionReadaheadSize(final long handle);
private native void setRandomAccessMaxBufferSize(
final long handle, final long randomAccessMaxBufferSize);
private native long randomAccessMaxBufferSize(final long handle);
private native void setWritableFileMaxBufferSize(
final long handle, final long writableFileMaxBufferSize);
private native long writableFileMaxBufferSize(final long handle);
private native void setRateLimiter(final long handle, final long rateLimiterHandle);
private native void setRateLimiter(final long handle,
final long rateLimiterHandle);
private RateLimiter rateLimiter;
}

@ -12,6 +12,7 @@ package org.rocksdb;
* number of disk seeks form a handful to a single disk seek per
* DB::Get() call.
*/
//TODO(AR) should be renamed FilterPolicy
public abstract class Filter extends RocksObject {
protected Filter(final long nativeHandle) {

@ -1,3 +1,8 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
@ -41,9 +46,45 @@ public class FlushOptions extends RocksObject {
return waitForFlush(nativeHandle_);
}
/**
 * Set to true so that flush will proceed immediately even if it means
 * writes will stall for the duration of the flush.
 *
 * Set to false so that the operation will wait until it's possible to do
 * the flush without causing stall or until required flush is performed by
 * someone else (foreground call or background thread).
 *
 * Default: false
 *
 * @param allowWriteStall true to allow writes to stall for flush, false
 *     otherwise.
 *
 * @return instance of current FlushOptions.
 */
public FlushOptions setAllowWriteStall(final boolean allowWriteStall) {
assert(isOwningHandle());
setAllowWriteStall(nativeHandle_, allowWriteStall);
return this;
}
/**
 * Returns true if writes are allowed to stall for flushes to complete,
 * false otherwise.
 *
 * @return true if writes are allowed to stall for flushes.
 */
public boolean allowWriteStall() {
assert(isOwningHandle());
return allowWriteStall(nativeHandle_);
}
private native static long newFlushOptions();
@Override protected final native void disposeInternal(final long handle);
private native void setWaitForFlush(long handle,
boolean wait);
private native boolean waitForFlush(long handle);
private native void setWaitForFlush(final long handle,
final boolean wait);
private native boolean waitForFlush(final long handle);
private native void setAllowWriteStall(final long handle,
final boolean allowWriteStall);
private native boolean allowWriteStall(final long handle);
}

@ -0,0 +1,27 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * HDFS environment.
 */
public class HdfsEnv extends Env {

/**
 * <p>Creates a new environment that is used for HDFS environment.</p>
 *
 * <p>The caller must delete the result when it is
 * no longer needed.</p>
 *
 * @param fsName the HDFS as a string in the form "hdfs://hostname:port/"
 */
public HdfsEnv(final String fsName) {
super(createHdfsEnv(fsName));
}
private static native long createHdfsEnv(final String fsName);
@Override protected final native void disposeInternal(final long handle);
}

@ -33,7 +33,7 @@ public enum IndexType {
return value_;
}
private IndexType(byte value) {
IndexType(byte value) {
value_ = value;
}

@ -7,7 +7,8 @@ package org.rocksdb;
import java.util.List;
/**
* IngestExternalFileOptions is used by {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
* IngestExternalFileOptions is used by
* {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}.
*/
public class IngestExternalFileOptions extends RocksObject {
@ -41,9 +42,12 @@ public class IngestExternalFileOptions extends RocksObject {
* Can be set to true to move the files instead of copying them.
*
* @param moveFiles true if files should be moved instead of copied
*
* @return the reference to the current IngestExternalFileOptions.
*/
public void setMoveFiles(final boolean moveFiles) {
public IngestExternalFileOptions setMoveFiles(final boolean moveFiles) {
setMoveFiles(nativeHandle_, moveFiles);
return this;
}
/**
@ -61,9 +65,13 @@ public class IngestExternalFileOptions extends RocksObject {
* that where created before the file was ingested.
*
* @param snapshotConsistency true if snapshot consistency is required
*
* @return the reference to the current IngestExternalFileOptions.
*/
public void setSnapshotConsistency(final boolean snapshotConsistency) {
public IngestExternalFileOptions setSnapshotConsistency(
final boolean snapshotConsistency) {
setSnapshotConsistency(nativeHandle_, snapshotConsistency);
return this;
}
/**
@ -81,9 +89,13 @@ public class IngestExternalFileOptions extends RocksObject {
* will fail if the file key range overlaps with existing keys or tombstones in the DB.
*
* @param allowGlobalSeqNo true if global seq numbers are required
*
* @return the reference to the current IngestExternalFileOptions.
*/
public void setAllowGlobalSeqNo(final boolean allowGlobalSeqNo) {
public IngestExternalFileOptions setAllowGlobalSeqNo(
final boolean allowGlobalSeqNo) {
setAllowGlobalSeqNo(nativeHandle_, allowGlobalSeqNo);
return this;
}
/**
@ -101,15 +113,100 @@ public class IngestExternalFileOptions extends RocksObject {
* (memtable flush required), IngestExternalFile will fail.
*
* @param allowBlockingFlush true if blocking flushes are allowed
*
* @return the reference to the current IngestExternalFileOptions.
*/
public void setAllowBlockingFlush(final boolean allowBlockingFlush) {
public IngestExternalFileOptions setAllowBlockingFlush(
final boolean allowBlockingFlush) {
setAllowBlockingFlush(nativeHandle_, allowBlockingFlush);
return this;
}
/**
 * Returns true if duplicate keys in the file being ingested are
 * to be skipped rather than overwriting existing data under that key.
 *
 * See {@link #setIngestBehind(boolean)}.
 *
 * @return true if duplicate keys in the file being ingested are to be
 *     skipped, false otherwise.
 */
public boolean ingestBehind() {
return ingestBehind(nativeHandle_);
}
/**
 * Set to true if you would like duplicate keys in the file being ingested
 * to be skipped rather than overwriting existing data under that key.
 *
 * Use case: back-fill of some historical data in the database without
 * over-writing existing newer version of data.
 *
 * This option could only be used if the DB has been running
 * with DBOptions#allowIngestBehind() == true since the dawn of time.
 *
 * All files will be ingested at the bottommost level with seqno=0.
 *
 * Default: false
 *
 * @param ingestBehind true if you would like duplicate keys in the file being
 *     ingested to be skipped.
 *
 * @return the reference to the current IngestExternalFileOptions.
 */
public IngestExternalFileOptions setIngestBehind(final boolean ingestBehind) {
setIngestBehind(nativeHandle_, ingestBehind);
return this;
}
/**
 * Returns true if the global_seqno is written to a given offset
 * in the external SST file for backward compatibility.
 *
 * See {@link #setWriteGlobalSeqno(boolean)}.
 *
 * @return true if the global_seqno is written to a given offset,
 *     false otherwise.
 */
public boolean writeGlobalSeqno() {
return writeGlobalSeqno(nativeHandle_);
}
/**
 * Set to true if you would like to write the global_seqno to a given offset
 * in the external SST file for backward compatibility.
 *
 * Older versions of RocksDB write the global_seqno to a given offset within
 * the ingested SST files, and new versions of RocksDB do not.
 *
 * If you ingest an external SST using new version of RocksDB and would like
 * to be able to downgrade to an older version of RocksDB, you should set
 * {@link #writeGlobalSeqno()} to true.
 *
 * If your service is just starting to use the new RocksDB, we recommend that
 * you set this option to false, which brings two benefits:
 * 1. No extra random write for global_seqno during ingestion.
 * 2. Without writing the external SST file, it's possible to do checksum.
 *
 * We have a plan to set this option to false by default in the future.
 *
 * Default: true
 *
 * @param writeGlobalSeqno true to write the global_seqno to a given offset,
 *     false otherwise
 *
 * @return the reference to the current IngestExternalFileOptions.
 */
public IngestExternalFileOptions setWriteGlobalSeqno(
final boolean writeGlobalSeqno) {
setWriteGlobalSeqno(nativeHandle_, writeGlobalSeqno);
return this;
}
private native static long newIngestExternalFileOptions();
private native static long newIngestExternalFileOptions(
final boolean moveFiles, final boolean snapshotConsistency,
final boolean allowGlobalSeqNo, final boolean allowBlockingFlush);
@Override protected final native void disposeInternal(final long handle);
private native boolean moveFiles(final long handle);
private native void setMoveFiles(final long handle, final boolean move_files);
private native boolean snapshotConsistency(final long handle);
@ -121,5 +218,10 @@ public class IngestExternalFileOptions extends RocksObject {
private native boolean allowBlockingFlush(final long handle);
private native void setAllowBlockingFlush(final long handle,
final boolean allowBlockingFlush);
@Override protected final native void disposeInternal(final long handle);
private native boolean ingestBehind(final long handle);
private native void setIngestBehind(final long handle,
final boolean ingestBehind);
private native boolean writeGlobalSeqno(final long handle);
private native void setWriteGlobalSeqno(final long handle,
final boolean writeGlobalSeqNo);
}

@ -0,0 +1,56 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
import java.util.Arrays;
import java.util.List;
/**
 * The metadata that describes a level.
 *
 * Instances are immutable and constructed only from the JNI layer; the
 * constructor signature must not change without updating the C++ side.
 */
public class LevelMetaData {
// The level number this metadata describes.
private final int level;
// Total size of the level in bytes.
private final long size;
// The SST files that make up this level.
private final SstFileMetaData[] files;
/**
 * Called from JNI C++
 */
private LevelMetaData(final int level, final long size,
final SstFileMetaData[] files) {
this.level = level;
this.size = size;
this.files = files;
}
/**
 * The level which this meta data describes.
 *
 * @return the level
 */
public int level() {
return level;
}
/**
 * The size of this level in bytes, which is equal to the sum of
 * the file size of its {@link #files()}.
 *
 * @return the size
 */
public long size() {
return size;
}
/**
 * The metadata of all sst files in this level.
 *
 * Note: returns a fixed-size list view backed by the internal array.
 *
 * @return the metadata of the files
 */
public List<SstFileMetaData> files() {
return Arrays.asList(files);
}
}

@ -0,0 +1,55 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * The full set of metadata associated with each SST file.
 *
 * Instances are immutable and constructed only from the JNI layer; the
 * constructor signature must not change without updating the C++ side.
 */
public class LiveFileMetaData extends SstFileMetaData {
// Name of the column family the file belongs to.
private final byte[] columnFamilyName;
// The LSM level at which the file resides.
private final int level;
/**
 * Called from JNI C++
 */
private LiveFileMetaData(
final byte[] columnFamilyName,
final int level,
final String fileName,
final String path,
final long size,
final long smallestSeqno,
final long largestSeqno,
final byte[] smallestKey,
final byte[] largestKey,
final long numReadsSampled,
final boolean beingCompacted,
final long numEntries,
final long numDeletions) {
super(fileName, path, size, smallestSeqno, largestSeqno, smallestKey,
largestKey, numReadsSampled, beingCompacted, numEntries, numDeletions);
this.columnFamilyName = columnFamilyName;
this.level = level;
}
/**
 * Get the name of the column family.
 *
 * NOTE(review): returns the internal array without a defensive copy;
 * callers must not mutate it.
 *
 * @return the name of the column family
 */
public byte[] columnFamilyName() {
return columnFamilyName;
}
/**
 * Get the level at which this file resides.
 *
 * @return the level at which the file resides.
 */
public int level() {
return level;
}
}

@ -0,0 +1,75 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * Describes a Write-Ahead-Log file, either live or archived.
 *
 * Instances are immutable and constructed only from the JNI layer; the
 * constructor signature must not change without updating the C++ side.
 */
public class LogFile {
private final String pathName;
private final long logNumber;
private final WalFileType type;
private final long startSequence;
private final long sizeFileBytes;
/**
 * Called from JNI C++
 */
private LogFile(final String pathName, final long logNumber,
final byte walFileTypeValue, final long startSequence,
final long sizeFileBytes) {
this.pathName = pathName;
this.logNumber = logNumber;
this.type = WalFileType.fromValue(walFileTypeValue);
this.startSequence = startSequence;
this.sizeFileBytes = sizeFileBytes;
}
/**
 * Returns log file's pathname relative to the main db dir
 * Eg. For a live-log-file = /000003.log
 * For an archived-log-file = /archive/000003.log
 *
 * @return log file's pathname
 */
public String pathName() {
return pathName;
}
/**
 * Primary identifier for log file.
 * This is directly proportional to creation time of the log file
 *
 * @return the log number
 */
public long logNumber() {
return logNumber;
}
/**
 * Log file can be either alive or archived.
 *
 * @return the type of the log file.
 */
public WalFileType type() {
return type;
}
/**
 * Starting sequence number of writebatch written in this log file.
 *
 * @return the starting sequence number
 */
public long startSequence() {
return startSequence;
}
/**
 * Size of log file on disk in Bytes.
 *
 * @return size of log file
 */
public long sizeFileBytes() {
return sizeFileBytes;
}
}

@ -7,27 +7,20 @@ package org.rocksdb;
import java.util.*;
public class MutableColumnFamilyOptions {
private final static String KEY_VALUE_PAIR_SEPARATOR = ";";
private final static char KEY_VALUE_SEPARATOR = '=';
private final static String INT_ARRAY_INT_SEPARATOR = ",";
private final String[] keys;
private final String[] values;
// user must use builder pattern, or parser
private MutableColumnFamilyOptions(final String keys[],
final String values[]) {
this.keys = keys;
this.values = values;
}
String[] getKeys() {
return keys;
}
public class MutableColumnFamilyOptions
extends AbstractMutableOptions {
String[] getValues() {
return values;
/**
* User must use builder pattern, or parser.
*
* @param keys the keys
* @param values the values
*
* See {@link #builder()} and {@link #parse(String)}.
*/
private MutableColumnFamilyOptions(final String[] keys,
final String[] values) {
super(keys, values);
}
/**
@ -60,7 +53,7 @@ public class MutableColumnFamilyOptions {
final MutableColumnFamilyOptionsBuilder builder =
new MutableColumnFamilyOptionsBuilder();
final String options[] = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
final String[] options = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
for(final String option : options) {
final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR);
if(equalsOffset <= 0) {
@ -69,12 +62,12 @@ public class MutableColumnFamilyOptions {
}
final String key = option.substring(0, equalsOffset);
if(key == null || key.isEmpty()) {
if(key.isEmpty()) {
throw new IllegalArgumentException("options string is invalid");
}
final String value = option.substring(equalsOffset + 1);
if(value == null || value.isEmpty()) {
if(value.isEmpty()) {
throw new IllegalArgumentException("options string is invalid");
}
@ -84,37 +77,7 @@ public class MutableColumnFamilyOptions {
return builder;
}
/**
* Returns a string representation
* of MutableColumnFamilyOptions which is
* suitable for consumption by {@link #parse(String)}
*
* @return String representation of MutableColumnFamilyOptions
*/
@Override
public String toString() {
final StringBuilder buffer = new StringBuilder();
for(int i = 0; i < keys.length; i++) {
buffer
.append(keys[i])
.append(KEY_VALUE_SEPARATOR)
.append(values[i]);
if(i + 1 < keys.length) {
buffer.append(KEY_VALUE_PAIR_SEPARATOR);
}
}
return buffer.toString();
}
public enum ValueType {
DOUBLE,
LONG,
INT,
BOOLEAN,
INT_ARRAY,
ENUM
}
private interface MutableColumnFamilyOptionKey extends MutableOptionKey {}
public enum MemtableOption implements MutableColumnFamilyOptionKey {
write_buffer_size(ValueType.LONG),
@ -153,7 +116,8 @@ public class MutableColumnFamilyOptions {
target_file_size_multiplier(ValueType.INT),
max_bytes_for_level_base(ValueType.LONG),
max_bytes_for_level_multiplier(ValueType.INT),
max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY);
max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY),
ttl(ValueType.LONG);
private final ValueType valueType;
CompactionOption(final ValueType valueType) {
@ -183,356 +147,9 @@ public class MutableColumnFamilyOptions {
}
}
private interface MutableColumnFamilyOptionKey {
String name();
ValueType getValueType();
}
/**
 * Base class for a typed mutable option value. Each subclass wraps one
 * concrete value type and provides (possibly throwing) conversions to the
 * other supported representations.
 */
private static abstract class MutableColumnFamilyOptionValue<T> {
// The wrapped, immutable value.
protected final T value;
MutableColumnFamilyOptionValue(final T value) {
this.value = value;
}
// Conversions; throw NumberFormatException / IllegalStateException when
// the wrapped type cannot represent the requested form.
abstract double asDouble() throws NumberFormatException;
abstract long asLong() throws NumberFormatException;
abstract int asInt() throws NumberFormatException;
abstract boolean asBoolean() throws IllegalStateException;
abstract int[] asIntArray() throws IllegalStateException;
abstract String asString();
abstract T asObject();
}
/**
 * A mutable option value holding a {@code String}; numeric/boolean
 * conversions parse the string and may throw NumberFormatException.
 */
private static class MutableColumnFamilyOptionStringValue
extends MutableColumnFamilyOptionValue<String> {
MutableColumnFamilyOptionStringValue(final String value) {
super(value);
}
@Override
double asDouble() throws NumberFormatException {
return Double.parseDouble(value);
}
@Override
long asLong() throws NumberFormatException {
return Long.parseLong(value);
}
@Override
int asInt() throws NumberFormatException {
return Integer.parseInt(value);
}
@Override
boolean asBoolean() throws IllegalStateException {
// Boolean.parseBoolean never throws; any non-"true" string yields false.
return Boolean.parseBoolean(value);
}
@Override
int[] asIntArray() throws IllegalStateException {
throw new IllegalStateException("String is not applicable as int[]");
}
@Override
String asString() {
return value;
}
@Override
String asObject() {
return value;
}
}
/**
 * A mutable option value holding a {@code double}.
 */
private static class MutableColumnFamilyOptionDoubleValue
extends MutableColumnFamilyOptionValue<Double> {
MutableColumnFamilyOptionDoubleValue(final double value) {
super(value);
}
@Override
double asDouble() {
return value;
}
@Override
long asLong() throws NumberFormatException {
// NOTE(review): truncates toward zero with no range check — values
// outside long's range saturate per Java narrowing rules.
return value.longValue();
}
@Override
int asInt() throws NumberFormatException {
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
throw new NumberFormatException(
"double value lies outside the bounds of int");
}
return value.intValue();
}
@Override
boolean asBoolean() throws IllegalStateException {
throw new IllegalStateException(
"double is not applicable as boolean");
}
@Override
int[] asIntArray() throws IllegalStateException {
// Represented as a single-element array after an int range check.
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
throw new NumberFormatException(
"double value lies outside the bounds of int");
}
return new int[] { value.intValue() };
}
@Override
String asString() {
return Double.toString(value);
}
@Override
Double asObject() {
return value;
}
}
/**
 * A mutable option value holding a {@code long}.
 *
 * Narrowing conversions throw {@link NumberFormatException} when the value
 * does not fit the requested type.
 */
private static class MutableColumnFamilyOptionLongValue
    extends MutableColumnFamilyOptionValue<Long> {
  MutableColumnFamilyOptionLongValue(final long value) {
    super(value);
  }

  @Override
  double asDouble() {
    // Every long lies within double's representable range, so no bounds
    // check is needed. (The previous check compared against
    // Double.MIN_VALUE — the smallest positive double — which wrongly
    // rejected every value <= 0.) Magnitudes above 2^53 may lose precision.
    return value.doubleValue();
  }

  @Override
  long asLong() throws NumberFormatException {
    return value;
  }

  @Override
  int asInt() throws NumberFormatException {
    if (value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
      throw new NumberFormatException(
          "long value lies outside the bounds of int");
    }
    return value.intValue();
  }

  @Override
  boolean asBoolean() throws IllegalStateException {
    throw new IllegalStateException(
        "long is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    // Single-element array; the value must fit in an int.
    if (value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
      throw new NumberFormatException(
          "long value lies outside the bounds of int");
    }
    return new int[] { value.intValue() };
  }

  @Override
  String asString() {
    return Long.toString(value);
  }

  @Override
  Long asObject() {
    return value;
  }
}
/**
 * A mutable option value holding an {@code int}.
 */
private static class MutableColumnFamilyOptionIntValue
    extends MutableColumnFamilyOptionValue<Integer> {
  MutableColumnFamilyOptionIntValue(final int value) {
    super(value);
  }

  @Override
  double asDouble() {
    // Every int is exactly representable as a double, so no bounds check is
    // needed. (The previous check compared against Double.MIN_VALUE — the
    // smallest positive double — which wrongly rejected every value <= 0.)
    return value.doubleValue();
  }

  @Override
  long asLong() throws NumberFormatException {
    return value;
  }

  @Override
  int asInt() throws NumberFormatException {
    return value;
  }

  @Override
  boolean asBoolean() throws IllegalStateException {
    throw new IllegalStateException("int is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    return new int[] { value };
  }

  @Override
  String asString() {
    return Integer.toString(value);
  }

  @Override
  Integer asObject() {
    return value;
  }
}
/**
 * A mutable option value holding a {@code boolean}.
 */
private static class MutableColumnFamilyOptionBooleanValue
extends MutableColumnFamilyOptionValue<Boolean> {
MutableColumnFamilyOptionBooleanValue(final boolean value) {
super(value);
}
@Override
double asDouble() {
throw new NumberFormatException("boolean is not applicable as double");
}
@Override
long asLong() throws NumberFormatException {
throw new NumberFormatException("boolean is not applicable as Long");
}
@Override
int asInt() throws NumberFormatException {
throw new NumberFormatException("boolean is not applicable as int");
}
@Override
boolean asBoolean() {
return value;
}
@Override
int[] asIntArray() throws IllegalStateException {
throw new IllegalStateException("boolean is not applicable as int[]");
}
@Override
String asString() {
return Boolean.toString(value);
}
@Override
Boolean asObject() {
return value;
}
}
/**
 * Holds an int[]-typed mutable column-family option value; scalar views
 * are not supported and always throw.
 */
private static class MutableColumnFamilyOptionIntArrayValue
    extends MutableColumnFamilyOptionValue<int[]> {
  MutableColumnFamilyOptionIntArrayValue(final int[] value) {
    super(value);
  }

  @Override
  double asDouble() {
    throw new NumberFormatException("int[] is not applicable as double");
  }

  @Override
  long asLong() throws NumberFormatException {
    throw new NumberFormatException("int[] is not applicable as Long");
  }

  @Override
  int asInt() throws NumberFormatException {
    throw new NumberFormatException("int[] is not applicable as int");
  }

  @Override
  boolean asBoolean() {
    // NOTE(review): sibling classes throw IllegalStateException here;
    // kept as NumberFormatException to avoid changing the thrown type.
    throw new NumberFormatException("int[] is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    return value;
  }

  @Override
  String asString() {
    // Join the array *elements* with the separator. The previous
    // implementation appended the loop index i instead of value[i],
    // producing "0:1:2:..." regardless of the actual contents.
    final StringBuilder builder = new StringBuilder();
    for(int i = 0; i < value.length; i++) {
      builder.append(value[i]);
      if(i + 1 < value.length) {
        builder.append(INT_ARRAY_INT_SEPARATOR);
      }
    }
    return builder.toString();
  }

  @Override
  int[] asObject() {
    return value;
  }
}
/**
 * Holds an enum-typed mutable column-family option value; only the
 * String/object views are supported.
 */
private static class MutableColumnFamilyOptionEnumValue<T extends Enum<T>>
    extends MutableColumnFamilyOptionValue<T> {
  MutableColumnFamilyOptionEnumValue(final T value) {
    super(value);
  }

  @Override
  double asDouble() throws NumberFormatException {
    throw new NumberFormatException("Enum is not applicable as double");
  }

  @Override
  long asLong() throws NumberFormatException {
    throw new NumberFormatException("Enum is not applicable as long");
  }

  @Override
  int asInt() throws NumberFormatException {
    throw new NumberFormatException("Enum is not applicable as int");
  }

  @Override
  boolean asBoolean() throws IllegalStateException {
    // Was NumberFormatException, contradicting the declared throws
    // clause and every sibling value class; aligned for consistency.
    throw new IllegalStateException("Enum is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    // Was NumberFormatException; aligned with the declared throws clause.
    throw new IllegalStateException("Enum is not applicable as int[]");
  }

  @Override
  String asString() {
    // The enum constant's declared name.
    return value.name();
  }

  @Override
  T asObject() {
    return value;
  }
}
public static class MutableColumnFamilyOptionsBuilder
implements MutableColumnFamilyOptionsInterface {
extends AbstractMutableOptionsBuilder<MutableColumnFamilyOptions, MutableColumnFamilyOptionsBuilder, MutableColumnFamilyOptionKey>
implements MutableColumnFamilyOptionsInterface<MutableColumnFamilyOptionsBuilder> {
private final static Map<String, MutableColumnFamilyOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
static {
@ -549,179 +166,24 @@ public class MutableColumnFamilyOptions {
}
}
private final Map<MutableColumnFamilyOptionKey, MutableColumnFamilyOptionValue<?>> options = new LinkedHashMap<>();
/**
 * Materialises the accumulated options as a MutableColumnFamilyOptions,
 * flattening the option map into parallel key/value String arrays.
 */
public MutableColumnFamilyOptions build() {
  final int count = options.size();
  final String[] keys = new String[count];
  final String[] values = new String[count];

  int idx = 0;
  for(final Map.Entry<MutableColumnFamilyOptionKey, MutableColumnFamilyOptionValue<?>> entry
      : options.entrySet()) {
    keys[idx] = entry.getKey().name();
    values[idx] = entry.getValue().asString();
    idx++;
  }

  return new MutableColumnFamilyOptions(keys, values);
}
// Stores a double value for the key; rejects keys of a different
// declared value type.
private MutableColumnFamilyOptionsBuilder setDouble(
    final MutableColumnFamilyOptionKey key, final double value) {
  if(key.getValueType() != ValueType.DOUBLE) {
    throw new IllegalArgumentException(
        key + " does not accept a double value");
  }
  options.put(key, new MutableColumnFamilyOptionDoubleValue(value));
  return this;
}
// Reads the key back as a double; throws NoSuchElementException if the
// option was never set on this builder.
private double getDouble(final MutableColumnFamilyOptionKey key)
    throws NoSuchElementException, NumberFormatException {
  final MutableColumnFamilyOptionValue<?> value = options.get(key);
  if(value == null) {
    throw new NoSuchElementException(key.name() + " has not been set");
  }
  return value.asDouble();
}
// Stores a long value for the key; rejects keys of a different
// declared value type.
private MutableColumnFamilyOptionsBuilder setLong(
    final MutableColumnFamilyOptionKey key, final long value) {
  if(key.getValueType() != ValueType.LONG) {
    throw new IllegalArgumentException(
        key + " does not accept a long value");
  }
  options.put(key, new MutableColumnFamilyOptionLongValue(value));
  return this;
}
// Reads the key back as a long; throws NoSuchElementException if the
// option was never set on this builder.
private long getLong(final MutableColumnFamilyOptionKey key)
    throws NoSuchElementException, NumberFormatException {
  final MutableColumnFamilyOptionValue<?> value = options.get(key);
  if(value == null) {
    throw new NoSuchElementException(key.name() + " has not been set");
  }
  return value.asLong();
}
// Stores an int value for the key; rejects keys of a different
// declared value type.
private MutableColumnFamilyOptionsBuilder setInt(
    final MutableColumnFamilyOptionKey key, final int value) {
  if(key.getValueType() != ValueType.INT) {
    throw new IllegalArgumentException(
        key + " does not accept an integer value");
  }
  options.put(key, new MutableColumnFamilyOptionIntValue(value));
  return this;
}
// Reads the key back as an int; throws NoSuchElementException if the
// option was never set on this builder.
private int getInt(final MutableColumnFamilyOptionKey key)
    throws NoSuchElementException, NumberFormatException {
  final MutableColumnFamilyOptionValue<?> value = options.get(key);
  if(value == null) {
    throw new NoSuchElementException(key.name() + " has not been set");
  }
  return value.asInt();
}
// Stores a boolean value for the key; rejects keys of a different
// declared value type.
private MutableColumnFamilyOptionsBuilder setBoolean(
    final MutableColumnFamilyOptionKey key, final boolean value) {
  if(key.getValueType() != ValueType.BOOLEAN) {
    throw new IllegalArgumentException(
        key + " does not accept a boolean value");
  }
  options.put(key, new MutableColumnFamilyOptionBooleanValue(value));
  return this;
}
private boolean getBoolean(final MutableColumnFamilyOptionKey key)
throws NoSuchElementException, NumberFormatException {
final MutableColumnFamilyOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asBoolean();
private MutableColumnFamilyOptionsBuilder() {
super();
}
// Stores an int[] value for the key; rejects keys of a different
// declared value type.
private MutableColumnFamilyOptionsBuilder setIntArray(
    final MutableColumnFamilyOptionKey key, final int[] value) {
  if(key.getValueType() != ValueType.INT_ARRAY) {
    throw new IllegalArgumentException(
        key + " does not accept an int array value");
  }
  options.put(key, new MutableColumnFamilyOptionIntArrayValue(value));
  return this;
}
// Reads the key back as an int[]; throws NoSuchElementException if the
// option was never set on this builder.
private int[] getIntArray(final MutableColumnFamilyOptionKey key)
    throws NoSuchElementException, NumberFormatException {
  final MutableColumnFamilyOptionValue<?> value = options.get(key);
  if(value == null) {
    throw new NoSuchElementException(key.name() + " has not been set");
  }
  return value.asIntArray();
}
private <T extends Enum<T>> MutableColumnFamilyOptionsBuilder setEnum(
final MutableColumnFamilyOptionKey key, final T value) {
if(key.getValueType() != ValueType.ENUM) {
throw new IllegalArgumentException(
key + " does not accept a Enum value");
}
options.put(key, new MutableColumnFamilyOptionEnumValue<T>(value));
@Override
protected MutableColumnFamilyOptionsBuilder self() {
return this;
}
private <T extends Enum<T>> T getEnum(final MutableColumnFamilyOptionKey key)
throws NoSuchElementException, NumberFormatException {
final MutableColumnFamilyOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
if(!(value instanceof MutableColumnFamilyOptionEnumValue)) {
throw new NoSuchElementException(key.name() + " is not of Enum type");
}
return ((MutableColumnFamilyOptionEnumValue<T>)value).asObject();
@Override
protected Map<String, MutableColumnFamilyOptionKey> allKeys() {
return ALL_KEYS_LOOKUP;
}
public MutableColumnFamilyOptionsBuilder fromString(final String keyStr,
final String valueStr) throws IllegalArgumentException {
Objects.requireNonNull(keyStr);
Objects.requireNonNull(valueStr);
final MutableColumnFamilyOptionKey key = ALL_KEYS_LOOKUP.get(keyStr);
switch(key.getValueType()) {
case DOUBLE:
return setDouble(key, Double.parseDouble(valueStr));
case LONG:
return setLong(key, Long.parseLong(valueStr));
case INT:
return setInt(key, Integer.parseInt(valueStr));
case BOOLEAN:
return setBoolean(key, Boolean.parseBoolean(valueStr));
case INT_ARRAY:
final String[] strInts = valueStr
.trim().split(INT_ARRAY_INT_SEPARATOR);
if(strInts == null || strInts.length == 0) {
throw new IllegalArgumentException(
"int array value is not correctly formatted");
}
final int value[] = new int[strInts.length];
int i = 0;
for(final String strInt : strInts) {
value[i++] = Integer.parseInt(strInt);
}
return setIntArray(key, value);
}
throw new IllegalStateException(
key + " has unknown value type: " + key.getValueType());
@Override
protected MutableColumnFamilyOptions build(final String[] keys,
final String[] values) {
return new MutableColumnFamilyOptions(keys, values);
}
@Override
// ---- (diff hunk marker removed from extracted source) ----
public boolean reportBgIoStats() {
  // Throws NoSuchElementException if the option was never set.
  return getBoolean(MiscOption.report_bg_io_stats);
}
@Override
public MutableColumnFamilyOptionsBuilder setTtl(final long ttl) {
  // TTL is stored as a long-typed compaction option.
  return setLong(CompactionOption.ttl, ttl);
}
@Override
public long ttl() {
  // Throws NoSuchElementException if the option was never set.
  return getLong(CompactionOption.ttl);
}
}
}

// ---- (diff hunk marker removed; start of MutableDBOptions.java in this concatenated dump) ----
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
public class MutableDBOptions extends AbstractMutableOptions {
/**
 * User must use the builder pattern, or the parser.
 *
 * @param keys the option keys
 * @param values the option values, positionally matching {@code keys}
 *
 * See {@link #builder()} and {@link #parse(String)}.
 */
private MutableDBOptions(final String[] keys, final String[] values) {
  super(keys, values);
}
/**
 * Creates a builder which allows you
 * to set MutableDBOptions in a fluent
 * manner.
 *
 * @return A builder for MutableDBOptions
 */
public static MutableDBOptionsBuilder builder() {
  return new MutableDBOptionsBuilder();
}
/**
 * Parses a String representation of MutableDBOptions.
 *
 * The format is: key1=value1;key2=value2;key3=value3 etc
 *
 * For int[] values, each int should be separated by a comma, e.g.
 *
 * key1=value1;intArrayKey1=1,2,3
 *
 * @param str The string representation of the mutable db options
 *
 * @return A builder for the mutable db options
 */
public static MutableDBOptionsBuilder parse(final String str) {
  Objects.requireNonNull(str);

  final MutableDBOptionsBuilder builder = new MutableDBOptionsBuilder();

  // Each pair is "key=value"; reject pairs with no separator or with an
  // empty key or value.
  for(final String option : str.trim().split(KEY_VALUE_PAIR_SEPARATOR)) {
    final int separatorIndex = option.indexOf(KEY_VALUE_SEPARATOR);
    if(separatorIndex <= 0) {
      throw new IllegalArgumentException(
          "options string has an invalid key=value pair");
    }

    final String key = option.substring(0, separatorIndex);
    final String value = option.substring(separatorIndex + 1);
    if(key.isEmpty() || value.isEmpty()) {
      throw new IllegalArgumentException("options string is invalid");
    }

    builder.fromString(key, value);
  }

  return builder;
}
/** Marker interface for keys identifying mutable DB options. */
private interface MutableDBOptionKey extends MutableOptionKey {}
/**
 * The mutable DB options, each paired with the type of value it accepts
 * when set/parsed.
 */
public enum DBOption implements MutableDBOptionKey {
  max_background_jobs(ValueType.INT),
  base_background_compactions(ValueType.INT),
  max_background_compactions(ValueType.INT),
  avoid_flush_during_shutdown(ValueType.BOOLEAN),
  writable_file_max_buffer_size(ValueType.LONG),
  delayed_write_rate(ValueType.LONG),
  max_total_wal_size(ValueType.LONG),
  delete_obsolete_files_period_micros(ValueType.LONG),
  stats_dump_period_sec(ValueType.INT),
  max_open_files(ValueType.INT),
  bytes_per_sync(ValueType.LONG),
  wal_bytes_per_sync(ValueType.LONG),
  compaction_readahead_size(ValueType.LONG);

  private final ValueType valueType;

  DBOption(final ValueType valueType) {
    this.valueType = valueType;
  }

  @Override
  public ValueType getValueType() {
    return valueType;
  }
}
/**
 * Builder for {@link MutableDBOptions}. Each setter records a typed
 * key/value pair; {@code build()} (inherited protocol) materialises them
 * as parallel String arrays.
 *
 * See {@link MutableDBOptionsInterface} for the documentation of each
 * individual option.
 */
public static class MutableDBOptionsBuilder
    extends AbstractMutableOptionsBuilder<MutableDBOptions, MutableDBOptionsBuilder, MutableDBOptionKey>
    implements MutableDBOptionsInterface<MutableDBOptionsBuilder> {

  // Lookup from option name to key, used when parsing option strings.
  private final static Map<String, MutableDBOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
  static {
    for(final MutableDBOptionKey key : DBOption.values()) {
      ALL_KEYS_LOOKUP.put(key.name(), key);
    }
  }

  private MutableDBOptionsBuilder() {
    super();
  }

  @Override
  protected MutableDBOptionsBuilder self() {
    return this;
  }

  @Override
  protected Map<String, MutableDBOptionKey> allKeys() {
    return ALL_KEYS_LOOKUP;
  }

  @Override
  protected MutableDBOptions build(final String[] keys,
      final String[] values) {
    return new MutableDBOptions(keys, values);
  }

  @Override
  public MutableDBOptionsBuilder setMaxBackgroundJobs(
      final int maxBackgroundJobs) {
    return setInt(DBOption.max_background_jobs, maxBackgroundJobs);
  }

  @Override
  public int maxBackgroundJobs() {
    return getInt(DBOption.max_background_jobs);
  }

  // NOTE: returns void (not the builder) to match the deprecated
  // interface method's signature.
  @Override
  public void setBaseBackgroundCompactions(
      final int baseBackgroundCompactions) {
    setInt(DBOption.base_background_compactions,
        baseBackgroundCompactions);
  }

  @Override
  public int baseBackgroundCompactions() {
    return getInt(DBOption.base_background_compactions);
  }

  @Override
  public MutableDBOptionsBuilder setMaxBackgroundCompactions(
      final int maxBackgroundCompactions) {
    return setInt(DBOption.max_background_compactions,
        maxBackgroundCompactions);
  }

  @Override
  public int maxBackgroundCompactions() {
    return getInt(DBOption.max_background_compactions);
  }

  @Override
  public MutableDBOptionsBuilder setAvoidFlushDuringShutdown(
      final boolean avoidFlushDuringShutdown) {
    return setBoolean(DBOption.avoid_flush_during_shutdown,
        avoidFlushDuringShutdown);
  }

  @Override
  public boolean avoidFlushDuringShutdown() {
    return getBoolean(DBOption.avoid_flush_during_shutdown);
  }

  @Override
  public MutableDBOptionsBuilder setWritableFileMaxBufferSize(
      final long writableFileMaxBufferSize) {
    return setLong(DBOption.writable_file_max_buffer_size,
        writableFileMaxBufferSize);
  }

  @Override
  public long writableFileMaxBufferSize() {
    return getLong(DBOption.writable_file_max_buffer_size);
  }

  @Override
  public MutableDBOptionsBuilder setDelayedWriteRate(
      final long delayedWriteRate) {
    return setLong(DBOption.delayed_write_rate,
        delayedWriteRate);
  }

  @Override
  public long delayedWriteRate() {
    return getLong(DBOption.delayed_write_rate);
  }

  @Override
  public MutableDBOptionsBuilder setMaxTotalWalSize(
      final long maxTotalWalSize) {
    return setLong(DBOption.max_total_wal_size, maxTotalWalSize);
  }

  @Override
  public long maxTotalWalSize() {
    return getLong(DBOption.max_total_wal_size);
  }

  @Override
  public MutableDBOptionsBuilder setDeleteObsoleteFilesPeriodMicros(
      final long micros) {
    return setLong(DBOption.delete_obsolete_files_period_micros, micros);
  }

  @Override
  public long deleteObsoleteFilesPeriodMicros() {
    return getLong(DBOption.delete_obsolete_files_period_micros);
  }

  @Override
  public MutableDBOptionsBuilder setStatsDumpPeriodSec(
      final int statsDumpPeriodSec) {
    return setInt(DBOption.stats_dump_period_sec, statsDumpPeriodSec);
  }

  @Override
  public int statsDumpPeriodSec() {
    return getInt(DBOption.stats_dump_period_sec);
  }

  @Override
  public MutableDBOptionsBuilder setMaxOpenFiles(final int maxOpenFiles) {
    return setInt(DBOption.max_open_files, maxOpenFiles);
  }

  @Override
  public int maxOpenFiles() {
    return getInt(DBOption.max_open_files);
  }

  @Override
  public MutableDBOptionsBuilder setBytesPerSync(final long bytesPerSync) {
    return setLong(DBOption.bytes_per_sync, bytesPerSync);
  }

  @Override
  public long bytesPerSync() {
    return getLong(DBOption.bytes_per_sync);
  }

  @Override
  public MutableDBOptionsBuilder setWalBytesPerSync(
      final long walBytesPerSync) {
    return setLong(DBOption.wal_bytes_per_sync, walBytesPerSync);
  }

  @Override
  public long walBytesPerSync() {
    return getLong(DBOption.wal_bytes_per_sync);
  }

  @Override
  public MutableDBOptionsBuilder setCompactionReadaheadSize(
      final long compactionReadaheadSize) {
    return setLong(DBOption.compaction_readahead_size,
        compactionReadaheadSize);
  }

  @Override
  public long compactionReadaheadSize() {
    return getLong(DBOption.compaction_readahead_size);
  }
}
}

// ---- (diff hunk marker removed; start of MutableDBOptionsInterface.java in this concatenated dump) ----
package org.rocksdb;
/**
 * Interface for the DB options that may be changed on a live database.
 *
 * @param <T> the concrete type returned by the fluent setters. Changed
 *     from a raw self-bound to the recursive form
 *     {@code T extends MutableDBOptionsInterface<T>} so that chained
 *     setter calls keep the concrete builder type (raw-typed
 *     implementors still compile).
 */
public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T>> {
  /**
   * Specifies the maximum number of concurrent background jobs (both flushes
   * and compactions combined).
   * Default: 2
   *
   * @param maxBackgroundJobs number of max concurrent background jobs
   * @return the instance of the current object.
   */
  T setMaxBackgroundJobs(int maxBackgroundJobs);

  /**
   * Returns the maximum number of concurrent background jobs (both flushes
   * and compactions combined).
   * Default: 2
   *
   * @return the maximum number of concurrent background jobs.
   */
  int maxBackgroundJobs();

  /**
   * Suggested number of concurrent background compaction jobs, submitted to
   * the default LOW priority thread pool.
   * Default: 1
   *
   * @param baseBackgroundCompactions Suggested number of background compaction
   *     jobs
   *
   * @deprecated Use {@link #setMaxBackgroundJobs(int)}
   */
  @Deprecated
  void setBaseBackgroundCompactions(int baseBackgroundCompactions);

  /**
   * Suggested number of concurrent background compaction jobs, submitted to
   * the default LOW priority thread pool.
   * Default: 1
   *
   * @return Suggested number of background compaction jobs
   */
  int baseBackgroundCompactions();

  /**
   * Specifies the maximum number of concurrent background compaction jobs,
   * submitted to the default LOW priority thread pool.
   * If you're increasing this, also consider increasing number of threads in
   * LOW priority thread pool. For more information, see
   * {@link RocksEnv#setBackgroundThreads(int, Priority)}.
   * Default: 1
   *
   * @param maxBackgroundCompactions the maximum number of background
   *     compaction jobs.
   * @return the instance of the current object.
   *
   * @see RocksEnv#setBackgroundThreads(int)
   * @see RocksEnv#setBackgroundThreads(int, Priority)
   * @see DBOptionsInterface#maxBackgroundFlushes()
   */
  T setMaxBackgroundCompactions(int maxBackgroundCompactions);

  /**
   * Returns the maximum number of concurrent background compaction jobs,
   * submitted to the default LOW priority thread pool.
   * When increasing this number, we may also want to consider increasing
   * number of threads in LOW priority thread pool.
   * Default: 1
   *
   * @return the maximum number of concurrent background compaction jobs.
   * @see RocksEnv#setBackgroundThreads(int)
   * @see RocksEnv#setBackgroundThreads(int, Priority)
   *
   * @deprecated Use {@link #setMaxBackgroundJobs(int)}
   */
  @Deprecated
  int maxBackgroundCompactions();

  /**
   * By default RocksDB will flush all memtables on DB close if there are
   * unpersisted data (i.e. with WAL disabled). The flush can be skipped to
   * speed up DB close. Unpersisted data WILL BE LOST.
   *
   * DEFAULT: false
   *
   * Dynamically changeable through
   * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
   * API.
   *
   * @param avoidFlushDuringShutdown true if we should avoid flush during
   *     shutdown
   *
   * @return the reference to the current options.
   */
  T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown);

  /**
   * By default RocksDB will flush all memtables on DB close if there are
   * unpersisted data (i.e. with WAL disabled). The flush can be skipped to
   * speed up DB close. Unpersisted data WILL BE LOST.
   *
   * DEFAULT: false
   *
   * Dynamically changeable through
   * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
   * API.
   *
   * @return true if we should avoid flush during shutdown
   */
  boolean avoidFlushDuringShutdown();

  /**
   * This is the maximum buffer size that is used by WritableFileWriter.
   * On Windows, we need to maintain an aligned buffer for writes.
   * We allow the buffer to grow until its size hits the limit.
   *
   * Default: 1024 * 1024 (1 MB)
   *
   * @param writableFileMaxBufferSize the maximum buffer size
   *
   * @return the reference to the current options.
   */
  T setWritableFileMaxBufferSize(long writableFileMaxBufferSize);

  /**
   * This is the maximum buffer size that is used by WritableFileWriter.
   * On Windows, we need to maintain an aligned buffer for writes.
   * We allow the buffer to grow until its size hits the limit.
   *
   * Default: 1024 * 1024 (1 MB)
   *
   * @return the maximum buffer size
   */
  long writableFileMaxBufferSize();

  /**
   * The limited write rate to DB if
   * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
   * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
   * or we are writing to the last mem table allowed and we allow more than 3
   * mem tables. It is calculated using size of user write requests before
   * compression. RocksDB may decide to slow down more if the compaction still
   * gets behind further.
   *
   * Unit: bytes per second.
   *
   * Default: 16MB/s
   *
   * @param delayedWriteRate the rate in bytes per second
   *
   * @return the reference to the current options.
   */
  T setDelayedWriteRate(long delayedWriteRate);

  /**
   * The limited write rate to DB if
   * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
   * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
   * or we are writing to the last mem table allowed and we allow more than 3
   * mem tables. It is calculated using size of user write requests before
   * compression. RocksDB may decide to slow down more if the compaction still
   * gets behind further.
   *
   * Unit: bytes per second.
   *
   * Default: 16MB/s
   *
   * @return the rate in bytes per second
   */
  long delayedWriteRate();

  /**
   * <p>Once write-ahead logs exceed this size, we will start forcing the
   * flush of column families whose memtables are backed by the oldest live
   * WAL file (i.e. the ones that are causing all the space amplification).
   * </p>
   * <p>If set to 0 (default), we will dynamically choose the WAL size limit to
   * be [sum of all write_buffer_size * max_write_buffer_number] * 2</p>
   * <p>This option takes effect only when there are more than one column family as
   * otherwise the wal size is dictated by the write_buffer_size.</p>
   * <p>Default: 0</p>
   *
   * @param maxTotalWalSize max total wal size.
   * @return the instance of the current object.
   */
  T setMaxTotalWalSize(long maxTotalWalSize);

  /**
   * <p>Returns the max total wal size. Once write-ahead logs exceed this size,
   * we will start forcing the flush of column families whose memtables are
   * backed by the oldest live WAL file (i.e. the ones that are causing all
   * the space amplification).</p>
   *
   * <p>If set to 0 (default), we will dynamically choose the WAL size limit
   * to be [sum of all write_buffer_size * max_write_buffer_number] * 2
   * </p>
   *
   * @return max total wal size
   */
  long maxTotalWalSize();

  /**
   * The periodicity when obsolete files get deleted. The default
   * value is 6 hours. The files that get out of scope by compaction
   * process will still get automatically deleted on every compaction,
   * regardless of this setting.
   *
   * @param micros the time interval in micros
   * @return the instance of the current object.
   */
  T setDeleteObsoleteFilesPeriodMicros(long micros);

  /**
   * The periodicity when obsolete files get deleted. The default
   * value is 6 hours. The files that get out of scope by compaction
   * process will still get automatically deleted on every compaction,
   * regardless of this setting.
   *
   * @return the time interval in micros when obsolete files will be deleted.
   */
  long deleteObsoleteFilesPeriodMicros();

  /**
   * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec.
   * Default: 600 (10 minutes)
   *
   * @param statsDumpPeriodSec time interval in seconds.
   * @return the instance of the current object.
   */
  T setStatsDumpPeriodSec(int statsDumpPeriodSec);

  /**
   * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec.
   * Default: 600 (10 minutes)
   *
   * @return time interval in seconds.
   */
  int statsDumpPeriodSec();

  /**
   * Number of open files that can be used by the DB. You may need to
   * increase this if your database has a large working set. Value -1 means
   * files opened are always kept open. You can estimate number of files based
   * on {@code target_file_size_base} and {@code target_file_size_multiplier}
   * for level-based compaction. For universal-style compaction, you can usually
   * set it to -1.
   * Default: 5000
   *
   * @param maxOpenFiles the maximum number of open files.
   * @return the instance of the current object.
   */
  T setMaxOpenFiles(int maxOpenFiles);

  /**
   * Number of open files that can be used by the DB. You may need to
   * increase this if your database has a large working set. Value -1 means
   * files opened are always kept open. You can estimate number of files based
   * on {@code target_file_size_base} and {@code target_file_size_multiplier}
   * for level-based compaction. For universal-style compaction, you can usually
   * set it to -1.
   *
   * @return the maximum number of open files.
   */
  int maxOpenFiles();

  /**
   * Allows OS to incrementally sync files to disk while they are being
   * written, asynchronously, in the background.
   * Issue one request for every bytes_per_sync written. 0 turns it off.
   * Default: 0
   *
   * @param bytesPerSync size in bytes
   * @return the instance of the current object.
   */
  T setBytesPerSync(long bytesPerSync);

  /**
   * Allows OS to incrementally sync files to disk while they are being
   * written, asynchronously, in the background.
   * Issue one request for every bytes_per_sync written. 0 turns it off.
   * Default: 0
   *
   * @return size in bytes
   */
  long bytesPerSync();

  /**
   * Same as {@link #setBytesPerSync(long)}, but applies to WAL files.
   *
   * Default: 0, turned off
   *
   * @param walBytesPerSync size in bytes
   * @return the instance of the current object.
   */
  T setWalBytesPerSync(long walBytesPerSync);

  /**
   * Same as {@link #bytesPerSync()}, but applies to WAL files.
   *
   * Default: 0, turned off
   *
   * @return size in bytes
   */
  long walBytesPerSync();

  /**
   * If non-zero, we perform bigger reads when doing compaction. If you're
   * running RocksDB on spinning disks, you should set this to at least 2MB.
   *
   * That way RocksDB's compaction is doing sequential instead of random reads.
   * When non-zero, we also force
   * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
   *
   * Default: 0
   *
   * @param compactionReadaheadSize The compaction read-ahead size
   *
   * @return the reference to the current options.
   */
  T setCompactionReadaheadSize(final long compactionReadaheadSize);

  /**
   * If non-zero, we perform bigger reads when doing compaction. If you're
   * running RocksDB on spinning disks, you should set this to at least 2MB.
   *
   * That way RocksDB's compaction is doing sequential instead of random reads.
   * When non-zero, we also force
   * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
   *
   * Default: 0
   *
   * @return The compaction read-ahead size
   */
  long compactionReadaheadSize();
}

// ---- (diff hunk marker removed; start of MutableOptionKey.java in this concatenated dump) ----
package org.rocksdb;
/** A key identifying a mutable option, together with its value type. */
public interface MutableOptionKey {
  /** The supported kinds of values a mutable option may hold. */
  enum ValueType {
    DOUBLE,
    LONG,
    INT,
    BOOLEAN,
    INT_ARRAY,
    ENUM
  }

  /** @return the option's name, as used in option strings. */
  String name();

  /** @return the type of value this key accepts. */
  ValueType getValueType();
}

// ---- (diff hunk marker removed; start of MutableOptionValue.java in this concatenated dump) ----
package org.rocksdb;
import static org.rocksdb.AbstractMutableOptions.INT_ARRAY_INT_SEPARATOR;
public abstract class MutableOptionValue<T> {
// Conversion views over the stored value. Each concrete subclass throws
// when the stored type cannot represent the requested view.
abstract double asDouble() throws NumberFormatException;
abstract long asLong() throws NumberFormatException;
abstract int asInt() throws NumberFormatException;
abstract boolean asBoolean() throws IllegalStateException;
abstract int[] asIntArray() throws IllegalStateException;
// String form used when serialising to an options string.
abstract String asString();
// The value in its natural (boxed/object) representation.
abstract T asObject();
// Base for reference-typed values: stores the value reference and
// provides the asObject() view.
private static abstract class MutableOptionValueObject<T>
    extends MutableOptionValue<T> {
  protected final T value;

  private MutableOptionValueObject(final T value) {
    this.value = value;
  }

  @Override T asObject() {
    return value;
  }
}
// Static factories wrapping each supported value type.
static MutableOptionValue<String> fromString(final String s) {
  return new MutableOptionStringValue(s);
}

static MutableOptionValue<Double> fromDouble(final double d) {
  return new MutableOptionDoubleValue(d);
}

static MutableOptionValue<Long> fromLong(final long d) {
  return new MutableOptionLongValue(d);
}

static MutableOptionValue<Integer> fromInt(final int i) {
  return new MutableOptionIntValue(i);
}

static MutableOptionValue<Boolean> fromBoolean(final boolean b) {
  return new MutableOptionBooleanValue(b);
}

static MutableOptionValue<int[]> fromIntArray(final int[] ix) {
  return new MutableOptionIntArrayValue(ix);
}

static <N extends Enum<N>> MutableOptionValue<N> fromEnum(final N value) {
  return new MutableOptionEnumValue<>(value);
}
// Holds a String-typed mutable option value; numeric/boolean views are
// obtained by parsing the string on demand.
static class MutableOptionStringValue
    extends MutableOptionValueObject<String> {
  MutableOptionStringValue(final String value) {
    super(value);
  }

  @Override
  double asDouble() throws NumberFormatException {
    return Double.parseDouble(value);
  }

  @Override
  long asLong() throws NumberFormatException {
    return Long.parseLong(value);
  }

  @Override
  int asInt() throws NumberFormatException {
    return Integer.parseInt(value);
  }

  @Override
  boolean asBoolean() throws IllegalStateException {
    // Boolean.parseBoolean never throws: any string other than "true"
    // (case-insensitive) yields false.
    return Boolean.parseBoolean(value);
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    throw new IllegalStateException("String is not applicable as int[]");
  }

  @Override
  String asString() {
    return value;
  }
}
// Holds a double-typed mutable option value as an unboxed primitive.
static class MutableOptionDoubleValue
    extends MutableOptionValue<Double> {
  private final double value;

  MutableOptionDoubleValue(final double value) {
    this.value = value;
  }

  @Override
  double asDouble() {
    return value;
  }

  @Override
  long asLong() throws NumberFormatException {
    // Narrowing conversion: truncates any fractional part.
    return (long) value;
  }

  @Override
  int asInt() throws NumberFormatException {
    if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
      throw new NumberFormatException(
          "double value lies outside the bounds of int");
    }
    return (int) value;
  }

  @Override
  boolean asBoolean() throws IllegalStateException {
    throw new IllegalStateException(
        "double is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
      throw new NumberFormatException(
          "double value lies outside the bounds of int");
    }
    return new int[] { (int) value };
  }

  @Override
  String asString() {
    return String.valueOf(value);
  }

  @Override
  Double asObject() {
    return value;
  }
}
// Holds a long-typed mutable option value as an unboxed primitive.
static class MutableOptionLongValue
    extends MutableOptionValue<Long> {
  private final long value;

  MutableOptionLongValue(final long value) {
    this.value = value;
  }

  @Override
  double asDouble() {
    // Every long is within double's range (JLS 5.1.2 widening conversion,
    // possibly with rounding), so no range check is required. The
    // previous check compared against Double.MIN_VALUE — the smallest
    // *positive* double — and so wrongly threw NumberFormatException for
    // every value <= 0 (with a message referring to "int" bounds).
    return (double) value;
  }

  @Override
  long asLong() throws NumberFormatException {
    return value;
  }

  @Override
  int asInt() throws NumberFormatException {
    // Reject longs that cannot be represented as an int.
    if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
      throw new NumberFormatException(
          "long value lies outside the bounds of int");
    }
    return (int) value;
  }

  @Override
  boolean asBoolean() throws IllegalStateException {
    throw new IllegalStateException(
        "long is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    // Single-element array view; only valid if the long fits in an int.
    if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
      throw new NumberFormatException(
          "long value lies outside the bounds of int");
    }
    return new int[] { (int) value };
  }

  @Override
  String asString() {
    return String.valueOf(value);
  }

  @Override
  Long asObject() {
    return value;
  }
}
// Holds an int-typed mutable option value as an unboxed primitive.
static class MutableOptionIntValue
    extends MutableOptionValue<Integer> {
  private final int value;

  MutableOptionIntValue(final int value) {
    this.value = value;
  }

  @Override
  double asDouble() {
    // Every int is exactly representable as a double (JLS 5.1.2), so no
    // range check is required. The previous check compared against
    // Double.MIN_VALUE (the smallest positive double) and therefore
    // wrongly threw NumberFormatException for all values <= 0.
    return value;
  }

  @Override
  long asLong() throws NumberFormatException {
    // Widening int -> long is always safe.
    return value;
  }

  @Override
  int asInt() throws NumberFormatException {
    return value;
  }

  @Override
  boolean asBoolean() throws IllegalStateException {
    throw new IllegalStateException("int is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    return new int[] { value };
  }

  @Override
  String asString() {
    return String.valueOf(value);
  }

  @Override
  Integer asObject() {
    return value;
  }
}
/**
 * A mutable option value backed by a primitive {@code boolean}.
 */
static class MutableOptionBooleanValue
    extends MutableOptionValue<Boolean> {
  private final boolean value;

  MutableOptionBooleanValue(final boolean value) {
    this.value = value;
  }

  @Override
  double asDouble() {
    // A boolean has no numeric representation.
    throw new NumberFormatException("boolean is not applicable as double");
  }

  @Override
  long asLong() throws NumberFormatException {
    throw new NumberFormatException("boolean is not applicable as Long");
  }

  @Override
  int asInt() throws NumberFormatException {
    throw new NumberFormatException("boolean is not applicable as int");
  }

  @Override
  boolean asBoolean() {
    return value;
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    throw new IllegalStateException("boolean is not applicable as int[]");
  }

  @Override
  String asString() {
    // Boolean.toString is exactly what String.valueOf(boolean) delegates to.
    return Boolean.toString(value);
  }

  @Override
  Boolean asObject() {
    return value;
  }
}
/**
 * A mutable option value that holds an {@code int[]}.
 */
static class MutableOptionIntArrayValue
    extends MutableOptionValueObject<int[]> {
  MutableOptionIntArrayValue(final int[] value) {
    super(value);
  }

  @Override
  double asDouble() {
    throw new NumberFormatException("int[] is not applicable as double");
  }

  @Override
  long asLong() throws NumberFormatException {
    throw new NumberFormatException("int[] is not applicable as Long");
  }

  @Override
  int asInt() throws NumberFormatException {
    throw new NumberFormatException("int[] is not applicable as int");
  }

  @Override
  boolean asBoolean() {
    throw new NumberFormatException("int[] is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    return value;
  }

  @Override
  String asString() {
    // Join the array elements with the separator. The previous
    // implementation appended the loop index `i` instead of the element
    // `value[i]`, which produced "0,1,2,..." regardless of the contents.
    final StringBuilder builder = new StringBuilder();
    for(int i = 0; i < value.length; i++) {
      builder.append(value[i]);
      if(i + 1 < value.length) {
        builder.append(INT_ARRAY_INT_SEPARATOR);
      }
    }
    return builder.toString();
  }
}
/**
 * A mutable option value that holds an enum constant.
 *
 * @param <T> the enum type
 */
static class MutableOptionEnumValue<T extends Enum<T>>
    extends MutableOptionValueObject<T> {
  MutableOptionEnumValue(final T value) {
    super(value);
  }

  @Override
  double asDouble() throws NumberFormatException {
    throw new NumberFormatException("Enum is not applicable as double");
  }

  @Override
  long asLong() throws NumberFormatException {
    throw new NumberFormatException("Enum is not applicable as long");
  }

  @Override
  int asInt() throws NumberFormatException {
    throw new NumberFormatException("Enum is not applicable as int");
  }

  @Override
  boolean asBoolean() throws IllegalStateException {
    // Throw the declared IllegalStateException rather than a
    // NumberFormatException; the original threw an exception type not
    // covered by the method's `throws` clause.
    throw new IllegalStateException("Enum is not applicable as boolean");
  }

  @Override
  int[] asIntArray() throws IllegalStateException {
    // Same contract fix as asBoolean: honour the declared exception type.
    throw new IllegalStateException("Enum is not applicable as int[]");
  }

  @Override
  String asString() {
    return value.name();
  }
}
}

@ -0,0 +1,59 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * The stage of an operation.
 */
public enum OperationStage {
  STAGE_UNKNOWN((byte) 0x0),
  STAGE_FLUSH_RUN((byte) 0x1),
  STAGE_FLUSH_WRITE_L0((byte) 0x2),
  STAGE_COMPACTION_PREPARE((byte) 0x3),
  STAGE_COMPACTION_RUN((byte) 0x4),
  STAGE_COMPACTION_PROCESS_KV((byte) 0x5),
  STAGE_COMPACTION_INSTALL((byte) 0x6),
  STAGE_COMPACTION_SYNC_FILE((byte) 0x7),
  STAGE_PICK_MEMTABLES_TO_FLUSH((byte) 0x8),
  STAGE_MEMTABLE_ROLLBACK((byte) 0x9),
  STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte) 0xA);

  private final byte value;

  OperationStage(final byte value) {
    this.value = value;
  }

  /**
   * Returns the internal (native) representation of this stage.
   *
   * @return the internal representation value.
   */
  byte getValue() {
    return value;
  }

  /**
   * Looks up the {@link OperationStage} matching an internal
   * representation value.
   *
   * @param value the internal representation value.
   *
   * @return the matching operation stage.
   *
   * @throws IllegalArgumentException if no OperationStage matches
   *     the given value.
   */
  static OperationStage fromValue(final byte value)
      throws IllegalArgumentException {
    for (final OperationStage stage : values()) {
      if (stage.value == value) {
        return stage;
      }
    }
    throw new IllegalArgumentException(
        "Unknown value for OperationStage: " + value);
  }
}

@ -0,0 +1,54 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * The type used to refer to a thread operation.
 *
 * A thread operation describes the high-level action of a thread,
 * for example compaction or flush.
 */
public enum OperationType {
  OP_UNKNOWN((byte) 0x0),
  OP_COMPACTION((byte) 0x1),
  OP_FLUSH((byte) 0x2);

  private final byte value;

  OperationType(final byte value) {
    this.value = value;
  }

  /**
   * Returns the internal (native) representation of this operation type.
   *
   * @return the internal representation value.
   */
  byte getValue() {
    return value;
  }

  /**
   * Looks up the {@link OperationType} matching an internal
   * representation value.
   *
   * @param value the internal representation value.
   *
   * @return the matching operation type.
   *
   * @throws IllegalArgumentException if no OperationType matches
   *     the given value.
   */
  static OperationType fromValue(final byte value)
      throws IllegalArgumentException {
    for (final OperationType operationType : values()) {
      if (operationType.value == value) {
        return operationType;
      }
    }
    throw new IllegalArgumentException(
        "Unknown value for OperationType: " + value);
  }
}

@ -94,6 +94,54 @@ public class OptimisticTransactionDB extends RocksDB
return otdb;
}
/**
 * Closes this database, propagating any error that occurs as an exception.
 *
 * Unlike {@link #close()}, a failure whilst closing is reported to the
 * caller. Note that the WAL files are not fsync'd; if syncing is required,
 * first call {@link #syncWal()} or {@link #write(WriteOptions, WriteBatch)}
 * with an empty write batch and {@link WriteOptions#setSync(boolean)} set
 * to true.
 *
 * See also {@link #close()}.
 *
 * @throws RocksDBException if an error occurs whilst closing.
 */
public void closeE() throws RocksDBException {
  if (!owningHandle_.compareAndSet(true, false)) {
    // Already closed, or this instance does not own the handle.
    return;
  }
  try {
    closeDatabase(nativeHandle_);
  } finally {
    // Release the native resources even if closing the database failed.
    disposeInternal();
  }
}
/**
 * Closes this database, silently ignoring any error that occurs.
 *
 * Note that the WAL files are not fsync'd; if syncing is required,
 * first call {@link #syncWal()} or {@link #write(WriteOptions, WriteBatch)}
 * with an empty write batch and {@link WriteOptions#setSync(boolean)} set
 * to true.
 *
 * See also {@link #closeE()}, which reports closing errors to the caller.
 */
@Override
public void close() {
  if (!owningHandle_.compareAndSet(true, false)) {
    // Already closed, or this instance does not own the handle.
    return;
  }
  try {
    closeDatabase(nativeHandle_);
  } catch (final RocksDBException e) {
    // silently ignore the error report
  } finally {
    disposeInternal();
  }
}
@Override
public Transaction beginTransaction(final WriteOptions writeOptions) {
return new Transaction(this, beginTransaction(nativeHandle_,
@ -155,10 +203,14 @@ public class OptimisticTransactionDB extends RocksDB
return db;
}
@Override protected final native void disposeInternal(final long handle);
protected static native long open(final long optionsHandle,
final String path) throws RocksDBException;
protected static native long[] open(final long handle, final String path,
final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
private native static void closeDatabase(final long handle)
throws RocksDBException;
private native long beginTransaction(final long handle,
final long writeOptionsHandle);
private native long beginTransaction(final long handle,
@ -171,5 +223,4 @@ public class OptimisticTransactionDB extends RocksDB
final long optimisticTransactionOptionsHandle,
final long oldTransactionHandle);
private native long getBaseDB(final long handle);
@Override protected final native void disposeInternal(final long handle);
}

@ -19,7 +19,9 @@ import java.util.List;
* automatically and native resources will be released as part of the process.
*/
public class Options extends RocksObject
implements DBOptionsInterface<Options>, ColumnFamilyOptionsInterface<Options>,
implements DBOptionsInterface<Options>,
MutableDBOptionsInterface<Options>,
ColumnFamilyOptionsInterface<Options>,
MutableColumnFamilyOptionsInterface<Options> {
static {
RocksDB.loadLibrary();
@ -472,9 +474,10 @@ public class Options extends RocksObject
}
@Override
public void setMaxSubcompactions(final int maxSubcompactions) {
public Options setMaxSubcompactions(final int maxSubcompactions) {
assert(isOwningHandle());
setMaxSubcompactions(nativeHandle_, maxSubcompactions);
return this;
}
@Override
@ -905,6 +908,17 @@ public class Options extends RocksObject
return delayedWriteRate(nativeHandle_);
}
// Writes enable_pipelined_write into the native DBOptions; the contract is
// documented on DBOptionsInterface#setEnablePipelinedWrite.
@Override
public Options setEnablePipelinedWrite(final boolean enablePipelinedWrite) {
setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite);
return this;
}
// Reads the current enable_pipelined_write value from the native handle.
@Override
public boolean enablePipelinedWrite() {
return enablePipelinedWrite(nativeHandle_);
}
@Override
public Options setAllowConcurrentMemtableWrite(
final boolean allowConcurrentMemtableWrite) {
@ -1006,6 +1020,20 @@ public class Options extends RocksObject
return this.rowCache_;
}
// Installs a WAL filter on the native DBOptions. The Java object is also
// stored in walFilter_ so it is not garbage collected whilst the native
// side still references its callback.
@Override
public Options setWalFilter(final AbstractWalFilter walFilter) {
assert(isOwningHandle());
setWalFilter(nativeHandle_, walFilter.nativeHandle_);
this.walFilter_ = walFilter;
return this;
}
// Returns the Java-side reference cached by setWalFilter (not read back
// from the native handle).
@Override
public WalFilter walFilter() {
assert(isOwningHandle());
return this.walFilter_;
}
@Override
public Options setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
assert(isOwningHandle());
@ -1058,6 +1086,58 @@ public class Options extends RocksObject
return avoidFlushDuringShutdown(nativeHandle_);
}
// Each of the following setter/getter pairs delegates directly to the
// native DBOptions field of the same name; setters return `this` for
// chaining. Semantics are documented on DBOptionsInterface.
@Override
public Options setAllowIngestBehind(final boolean allowIngestBehind) {
assert(isOwningHandle());
setAllowIngestBehind(nativeHandle_, allowIngestBehind);
return this;
}
@Override
public boolean allowIngestBehind() {
assert(isOwningHandle());
return allowIngestBehind(nativeHandle_);
}
@Override
public Options setPreserveDeletes(final boolean preserveDeletes) {
assert(isOwningHandle());
setPreserveDeletes(nativeHandle_, preserveDeletes);
return this;
}
@Override
public boolean preserveDeletes() {
assert(isOwningHandle());
return preserveDeletes(nativeHandle_);
}
@Override
public Options setTwoWriteQueues(final boolean twoWriteQueues) {
assert(isOwningHandle());
setTwoWriteQueues(nativeHandle_, twoWriteQueues);
return this;
}
@Override
public boolean twoWriteQueues() {
assert(isOwningHandle());
return twoWriteQueues(nativeHandle_);
}
@Override
public Options setManualWalFlush(final boolean manualWalFlush) {
assert(isOwningHandle());
setManualWalFlush(nativeHandle_, manualWalFlush);
return this;
}
@Override
public boolean manualWalFlush() {
assert(isOwningHandle());
return manualWalFlush(nativeHandle_);
}
@Override
public MemTableConfig memTableConfig() {
return this.memTableConfig_;
@ -1194,6 +1274,20 @@ public class Options extends RocksObject
bottommostCompressionType(nativeHandle_));
}
// Sets the compression options used for the bottommost level. The Java
// object is also cached in bottommostCompressionOptions_ so it is not
// garbage collected whilst the native side holds its handle.
@Override
public Options setBottommostCompressionOptions(
final CompressionOptions bottommostCompressionOptions) {
setBottommostCompressionOptions(nativeHandle_,
bottommostCompressionOptions.nativeHandle_);
this.bottommostCompressionOptions_ = bottommostCompressionOptions;
return this;
}
// Returns the Java-side reference cached by setBottommostCompressionOptions
// (not read back from the native handle).
@Override
public CompressionOptions bottommostCompressionOptions() {
return this.bottommostCompressionOptions_;
}
@Override
public Options setCompressionOptions(
final CompressionOptions compressionOptions) {
@ -1209,7 +1303,7 @@ public class Options extends RocksObject
@Override
public CompactionStyle compactionStyle() {
return CompactionStyle.values()[compactionStyle(nativeHandle_)];
return CompactionStyle.fromValue(compactionStyle(nativeHandle_));
}
@Override
@ -1581,6 +1675,17 @@ public class Options extends RocksObject
return reportBgIoStats(nativeHandle_);
}
// Writes the ttl value into the native column-family options; semantics
// are documented on ColumnFamilyOptionsInterface#setTtl.
@Override
public Options setTtl(final long ttl) {
setTtl(nativeHandle_, ttl);
return this;
}
// Reads the current ttl value from the native handle.
@Override
public long ttl() {
return ttl(nativeHandle_);
}
@Override
public Options setCompactionOptionsUniversal(
final CompactionOptionsUniversal compactionOptionsUniversal) {
@ -1619,6 +1724,17 @@ public class Options extends RocksObject
return forceConsistencyChecks(nativeHandle_);
}
// Writes atomic_flush into the native DBOptions; semantics are documented
// on DBOptionsInterface#setAtomicFlush.
@Override
public Options setAtomicFlush(final boolean atomicFlush) {
setAtomicFlush(nativeHandle_, atomicFlush);
return this;
}
// Reads the current atomic_flush value from the native handle.
@Override
public boolean atomicFlush() {
return atomicFlush(nativeHandle_);
}
private native static long newOptions();
private native static long newOptions(long dbOptHandle,
long cfOptHandle);
@ -1767,6 +1883,9 @@ public class Options extends RocksObject
private native boolean enableThreadTracking(long handle);
private native void setDelayedWriteRate(long handle, long delayedWriteRate);
private native long delayedWriteRate(long handle);
private native void setEnablePipelinedWrite(final long handle,
final boolean pipelinedWrite);
private native boolean enablePipelinedWrite(final long handle);
private native void setAllowConcurrentMemtableWrite(long handle,
boolean allowConcurrentMemtableWrite);
private native boolean allowConcurrentMemtableWrite(long handle);
@ -1789,7 +1908,9 @@ public class Options extends RocksObject
final boolean allow2pc);
private native boolean allow2pc(final long handle);
private native void setRowCache(final long handle,
final long row_cache_handle);
final long rowCacheHandle);
private native void setWalFilter(final long handle,
final long walFilterHandle);
private native void setFailIfOptionsFileError(final long handle,
final boolean failIfOptionsFileError);
private native boolean failIfOptionsFileError(final long handle);
@ -1802,6 +1923,19 @@ public class Options extends RocksObject
private native void setAvoidFlushDuringShutdown(final long handle,
final boolean avoidFlushDuringShutdown);
private native boolean avoidFlushDuringShutdown(final long handle);
private native void setAllowIngestBehind(final long handle,
final boolean allowIngestBehind);
private native boolean allowIngestBehind(final long handle);
private native void setPreserveDeletes(final long handle,
final boolean preserveDeletes);
private native boolean preserveDeletes(final long handle);
private native void setTwoWriteQueues(final long handle,
final boolean twoWriteQueues);
private native boolean twoWriteQueues(final long handle);
private native void setManualWalFlush(final long handle,
final boolean manualWalFlush);
private native boolean manualWalFlush(final long handle);
// CF native handles
private native void optimizeForSmallDb(final long handle);
@ -1839,6 +1973,8 @@ public class Options extends RocksObject
private native void setBottommostCompressionType(long handle,
byte bottommostCompressionType);
private native byte bottommostCompressionType(long handle);
private native void setBottommostCompressionOptions(final long handle,
final long bottommostCompressionOptionsHandle);
private native void setCompressionOptions(long handle,
long compressionOptionsHandle);
private native void useFixedLengthPrefixExtractor(
@ -1942,6 +2078,8 @@ public class Options extends RocksObject
private native void setReportBgIoStats(final long handle,
final boolean reportBgIoStats);
private native boolean reportBgIoStats(final long handle);
private native void setTtl(final long handle, final long ttl);
private native long ttl(final long handle);
private native void setCompactionOptionsUniversal(final long handle,
final long compactionOptionsUniversalHandle);
private native void setCompactionOptionsFIFO(final long handle,
@ -1949,6 +2087,9 @@ public class Options extends RocksObject
private native void setForceConsistencyChecks(final long handle,
final boolean forceConsistencyChecks);
private native boolean forceConsistencyChecks(final long handle);
private native void setAtomicFlush(final long handle,
final boolean atomicFlush);
private native boolean atomicFlush(final long handle);
// instance variables
// NOTE: If you add new member variables, please update the copy constructor above!
@ -1962,7 +2103,9 @@ public class Options extends RocksObject
compactionFilterFactory_;
private CompactionOptionsUniversal compactionOptionsUniversal_;
private CompactionOptionsFIFO compactionOptionsFIFO_;
private CompressionOptions bottommostCompressionOptions_;
private CompressionOptions compressionOptions_;
private Cache rowCache_;
private WalFilter walFilter_;
private WriteBufferManager writeBufferManager_;
}

@ -0,0 +1,26 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * Persistent cache for caching IO pages on a persistent medium. The
 * cache is specifically designed for persistent read cache.
 */
public class PersistentCache extends RocksObject {
/**
 * Constructs a new persistent cache backed by storage at {@code path}.
 *
 * @param env the environment used to access the persistent medium.
 * @param path the location of the cache.
 * @param size the maximum size of the cache
 *     (presumably in bytes - confirm against the native
 *     NewPersistentCache API).
 * @param logger the logger used by the cache; its native handle is passed
 *     to the native side.
 * @param optimizedForNvm true if the cache storage is an NVM device.
 *
 * @throws RocksDBException if the native persistent cache cannot be
 *     created.
 */
public PersistentCache(final Env env, final String path, final long size,
final Logger logger, final boolean optimizedForNvm)
throws RocksDBException {
super(newPersistentCache(env.nativeHandle_, path, size,
logger.nativeHandle_, optimizedForNvm));
}
private native static long newPersistentCache(final long envHandle,
final String path, final long size, final long loggerHandle,
final boolean optimizedForNvm) throws RocksDBException;
@Override protected final native void disposeInternal(final long handle);
}

@ -0,0 +1,49 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * The Thread Pool priority.
 */
public enum Priority {
  BOTTOM((byte) 0x0),
  LOW((byte) 0x1),
  HIGH((byte) 0x2),
  TOTAL((byte) 0x3);

  private final byte value;

  Priority(final byte value) {
    this.value = value;
  }

  /**
   * <p>Returns the byte representation of this priority.</p>
   *
   * @return byte representation
   */
  byte getValue() {
    return value;
  }

  /**
   * Looks up a Priority by its byte value.
   *
   * @param value byte representation of Priority.
   *
   * @return the matching {@link org.rocksdb.Priority} instance.
   * @throws java.lang.IllegalArgumentException if an invalid
   *     value is provided.
   */
  static Priority getPriority(final byte value) {
    for (final Priority candidate : values()) {
      if (candidate.getValue() == value) {
        return candidate;
      }
    }
    throw new IllegalArgumentException("Illegal value provided for Priority.");
  }
}

@ -0,0 +1,19 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * Range from start to limit.
 */
public class Range {
// The key at which the range starts.
final Slice start;
// The key at which the range ends.
// NOTE(review): whether start/limit are inclusive or exclusive is decided
// by the native APIs that consume this Range - confirm at the call sites.
final Slice limit;
public Range(final Slice start, final Slice limit) {
this.start = start;
this.limit = limit;
}
}

@ -16,6 +16,15 @@ public class ReadOptions extends RocksObject {
super(newReadOptions());
}
/**
 * Constructs a new {@code ReadOptions} with explicit checksum-verification
 * and cache-fill behaviour.
 *
 * @param verifyChecksums verification will be performed on every read
 * when set to true
 * @param fillCache if true, then fill-cache behavior will be performed.
 */
public ReadOptions(final boolean verifyChecksums, final boolean fillCache) {
super(newReadOptions(verifyChecksums, fillCache));
}
/**
* Copy constructor.
*
@ -26,8 +35,8 @@ public class ReadOptions extends RocksObject {
*/
public ReadOptions(ReadOptions other) {
super(copyReadOptions(other.nativeHandle_));
iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_;
iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_;
this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_;
this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_;
}
/**
@ -182,8 +191,12 @@ public class ReadOptions extends RocksObject {
/**
* Returns whether managed iterators will be used.
*
* @return the setting of whether managed iterators will be used, by default false
* @return the setting of whether managed iterators will be used,
* by default false
*
* @deprecated This options is not used anymore.
*/
@Deprecated
public boolean managed() {
assert(isOwningHandle());
return managed(nativeHandle_);
@ -196,7 +209,10 @@ public class ReadOptions extends RocksObject {
*
* @param managed if true, then managed iterators will be enabled.
* @return the reference to the current ReadOptions.
*
* @deprecated This options is not used anymore.
*/
@Deprecated
public ReadOptions setManaged(final boolean managed) {
assert(isOwningHandle());
setManaged(nativeHandle_, managed);
@ -238,7 +254,6 @@ public class ReadOptions extends RocksObject {
return prefixSameAsStart(nativeHandle_);
}
/**
* Enforce that the iterator only iterates over the same prefix as the seek.
* This option is effective only for prefix seeks, i.e. prefix_extractor is
@ -346,6 +361,37 @@ public class ReadOptions extends RocksObject {
return this;
}
/**
 * A threshold for the number of keys that can be skipped before failing an
 * iterator seek as incomplete.
 *
 * @return the number of keys that can be skipped
 * before failing an iterator seek as incomplete.
 */
public long maxSkippableInternalKeys() {
assert(isOwningHandle());
// Delegates to the native getter for max_skippable_internal_keys.
return maxSkippableInternalKeys(nativeHandle_);
}
/**
 * A threshold for the number of keys that can be skipped before failing an
 * iterator seek as incomplete. The default value of 0 should be used to
 * never fail a request as incomplete, even on skipping too many keys.
 *
 * Default: 0
 *
 * @param maxSkippableInternalKeys the number of keys that can be skipped
 * before failing an iterator seek as incomplete.
 *
 * @return the reference to the current ReadOptions.
 */
public ReadOptions setMaxSkippableInternalKeys(
final long maxSkippableInternalKeys) {
assert(isOwningHandle());
// Delegates to the native setter for max_skippable_internal_keys.
setMaxSkippableInternalKeys(nativeHandle_, maxSkippableInternalKeys);
return this;
}
/**
* If true, keys deleted using the DeleteRange() API will be visible to
* readers until they are naturally deleted during compaction. This improves
@ -378,14 +424,63 @@ public class ReadOptions extends RocksObject {
}
/**
* Defines the extent upto which the forward iterator can returns entries.
* Once the bound is reached, Valid() will be false. iterate_upper_bound
* is exclusive ie the bound value is not a valid entry. If
* iterator_extractor is not null, the Seek target and iterator_upper_bound
* Defines the smallest key at which the backward
* iterator can return an entry. Once the bound is passed,
* {@link RocksIterator#isValid()} will be false.
*
* The lower bound is inclusive i.e. the bound value is a valid
* entry.
*
* If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
* need to have the same prefix. This is because ordering is not guaranteed
* outside of prefix domain. There is no lower bound on the iterator.
* outside of prefix domain.
*
* Default: nullptr
* Default: null
*
* @param iterateLowerBound Slice representing the upper bound
* @return the reference to the current ReadOptions.
*/
public ReadOptions setIterateLowerBound(final Slice iterateLowerBound) {
assert(isOwningHandle());
if (iterateLowerBound != null) {
// Hold onto a reference so it doesn't get garbage collected out from under us.
iterateLowerBoundSlice_ = iterateLowerBound;
setIterateLowerBound(nativeHandle_, iterateLowerBoundSlice_.getNativeHandle());
}
// NOTE(review): passing null is a no-op here - it does not clear a
// previously-set lower bound; confirm whether that is intended.
return this;
}
/**
 * Returns the smallest key at which the backward
 * iterator can return an entry.
 *
 * The lower bound is inclusive i.e. the bound value is a valid entry.
 *
 * @return the smallest key, or null if there is no lower bound defined.
 */
public Slice iterateLowerBound() {
assert(isOwningHandle());
final long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_);
// A handle of 0 indicates that no lower bound is set on the native side.
if (lowerBoundSliceHandle != 0) {
// Disown the new slice - it's owned by the C++ side of the JNI boundary
// from the perspective of this method.
return new Slice(lowerBoundSliceHandle, false);
}
return null;
}
/**
* Defines the extent up to which the forward iterator
* can returns entries. Once the bound is reached,
* {@link RocksIterator#isValid()} will be false.
*
* The upper bound is exclusive i.e. the bound value is not a valid entry.
*
* If iterator_extractor is not null, the Seek target and iterate_upper_bound
* need to have the same prefix. This is because ordering is not guaranteed
* outside of prefix domain.
*
* Default: null
*
* @param iterateUpperBound Slice representing the upper bound
* @return the reference to the current ReadOptions.
@ -393,7 +488,7 @@ public class ReadOptions extends RocksObject {
public ReadOptions setIterateUpperBound(final Slice iterateUpperBound) {
assert(isOwningHandle());
if (iterateUpperBound != null) {
// Hold onto a reference so it doesn't get garbaged collected out from under us.
// Hold onto a reference so it doesn't get garbage collected out from under us.
iterateUpperBoundSlice_ = iterateUpperBound;
setIterateUpperBound(nativeHandle_, iterateUpperBoundSlice_.getNativeHandle());
}
@ -401,21 +496,16 @@ public class ReadOptions extends RocksObject {
}
/**
* Defines the extent upto which the forward iterator can returns entries.
* Once the bound is reached, Valid() will be false. iterate_upper_bound
* is exclusive ie the bound value is not a valid entry. If
* iterator_extractor is not null, the Seek target and iterator_upper_bound
* need to have the same prefix. This is because ordering is not guaranteed
* outside of prefix domain. There is no lower bound on the iterator.
* Returns the largest key at which the forward
* iterator can return an entry.
*
* Default: nullptr
* The upper bound is exclusive i.e. the bound value is not a valid entry.
*
* @return Slice representing current iterate_upper_bound setting, or null if
* one does not exist.
* @return the largest key, or null if there is no upper bound defined.
*/
public Slice iterateUpperBound() {
assert(isOwningHandle());
long upperBoundSliceHandle = iterateUpperBound(nativeHandle_);
final long upperBoundSliceHandle = iterateUpperBound(nativeHandle_);
if (upperBoundSliceHandle != 0) {
// Disown the new slice - it's owned by the C++ side of the JNI boundary
// from the perspective of this method.
@ -425,67 +515,70 @@ public class ReadOptions extends RocksObject {
}
/**
* Defines the smallest key at which the backward iterator can return an
* entry. Once the bound is passed, Valid() will be false.
* `iterate_lower_bound` is inclusive ie the bound value is a valid entry.
* A callback to determine whether relevant keys for this scan exist in a
* given table based on the table's properties. The callback is passed the
* properties of each table during iteration. If the callback returns false,
* the table will not be scanned. This option only affects Iterators and has
* no impact on point lookups.
*
* If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
* need to have the same prefix. This is because ordering is not guaranteed
* outside of prefix domain.
* Default: null (every table will be scanned)
*
* Default: nullptr
* @param tableFilter the table filter for the callback.
*
* @param iterateLowerBound Slice representing the lower bound
* @return the reference to the current ReadOptions.
*/
public ReadOptions setIterateLowerBound(final Slice iterateLowerBound) {
public ReadOptions setTableFilter(final AbstractTableFilter tableFilter) {
assert(isOwningHandle());
if (iterateLowerBound != null) {
// Hold onto a reference so it doesn't get garbaged collected out from under us.
iterateLowerBoundSlice_ = iterateLowerBound;
setIterateLowerBound(nativeHandle_, iterateLowerBoundSlice_.getNativeHandle());
}
setTableFilter(nativeHandle_, tableFilter.nativeHandle_);
return this;
}
/**
* Defines the smallest key at which the backward iterator can return an
* entry. Once the bound is passed, Valid() will be false.
* `iterate_lower_bound` is inclusive ie the bound value is a valid entry.
* Needed to support differential snapshots. Has 2 effects:
* 1) Iterator will skip all internal keys with seqnum &lt; iter_start_seqnum
* 2) if this param &gt; 0 iterator will return INTERNAL keys instead of user
* keys; e.g. return tombstones as well.
*
* If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
* need to have the same prefix. This is because ordering is not guaranteed
* outside of prefix domain.
* Default: 0 (don't filter by seqnum, return user keys)
*
* Default: nullptr
* @param startSeqnum the starting sequence number.
*
* @return Slice representing current iterate_lower_bound setting, or null if
* one does not exist.
* @return the reference to the current ReadOptions.
*/
public Slice iterateLowerBound() {
public ReadOptions setIterStartSeqnum(final long startSeqnum) {
assert(isOwningHandle());
long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_);
if (lowerBoundSliceHandle != 0) {
// Disown the new slice - it's owned by the C++ side of the JNI boundary
// from the perspective of this method.
return new Slice(lowerBoundSliceHandle, false);
}
return null;
setIterStartSeqnum(nativeHandle_, startSeqnum);
return this;
}
/**
* Returns the starting Sequence Number of any iterator.
* See {@link #setIterStartSeqnum(long)}.
*
* @return the starting sequence number of any iterator.
*/
public long iterStartSeqnum() {
assert(isOwningHandle());
return iterStartSeqnum(nativeHandle_);
}
// instance variables
// NOTE: If you add new member variables, please update the copy constructor above!
//
// Hold a reference to any iterate upper/lower bound that was set on this object
// until we're destroyed or it's overwritten. That way the caller can freely
// leave scope without us losing the Java Slice object, which during close()
// would also reap its associated rocksdb::Slice native object since it's
// possibly (likely) to be an owning handle.
protected Slice iterateUpperBoundSlice_;
protected Slice iterateLowerBoundSlice_;
// Hold a reference to any iterate lower or upper bound that was set on this
// object until we're destroyed or it's overwritten. That way the caller can
// freely leave scope without us losing the Java Slice object, which during
// close() would also reap its associated rocksdb::Slice native object since
// it's possibly (likely) to be an owning handle.
private Slice iterateLowerBoundSlice_;
private Slice iterateUpperBoundSlice_;
private native static long newReadOptions();
private native static long newReadOptions(final boolean verifyChecksums,
final boolean fillCache);
private native static long copyReadOptions(long handle);
@Override protected final native void disposeInternal(final long handle);
private native boolean verifyChecksums(long handle);
private native void setVerifyChecksums(long handle, boolean verifyChecksums);
private native boolean fillCache(long handle);
@ -510,6 +603,9 @@ public class ReadOptions extends RocksObject {
private native long readaheadSize(final long handle);
private native void setReadaheadSize(final long handle,
final long readaheadSize);
private native long maxSkippableInternalKeys(final long handle);
private native void setMaxSkippableInternalKeys(final long handle,
final long maxSkippableInternalKeys);
private native boolean ignoreRangeDeletions(final long handle);
private native void setIgnoreRangeDeletions(final long handle,
final boolean ignoreRangeDeletions);
@ -517,9 +613,10 @@ public class ReadOptions extends RocksObject {
final long upperBoundSliceHandle);
private native long iterateUpperBound(final long handle);
private native void setIterateLowerBound(final long handle,
final long upperBoundSliceHandle);
final long lowerBoundSliceHandle);
private native long iterateLowerBound(final long handle);
@Override protected final native void disposeInternal(final long handle);
private native void setTableFilter(final long handle,
final long tableFilterHandle);
private native void setIterStartSeqnum(final long handle, final long seqNum);
private native long iterStartSeqnum(final long handle);
}

File diff suppressed because it is too large Load Diff

@ -25,19 +25,8 @@ public class RocksEnv extends Env {
*/
RocksEnv(final long handle) {
super(handle);
disOwnNativeHandle();
}
/**
* <p>The helper function of {@link #dispose()} which all subclasses of
* {@link RocksObject} must implement to release their associated C++
* resource.</p>
*
* <p><strong>Note:</strong> this class is used to use the default
* RocksEnv with RocksJava. The default env allocation is managed
* by C++.</p>
*/
@Override
protected final void disposeInternal(final long handle) {
}
protected native final void disposeInternal(final long handle);
}

@ -6,22 +6,34 @@
package org.rocksdb;
/**
* RocksDB memory environment.
* Memory environment.
*/
//TODO(AR) rename to MemEnv
public class RocksMemEnv extends Env {
/**
* <p>Creates a new RocksDB environment that stores its data
* <p>Creates a new environment that stores its data
* in memory and delegates all non-file-storage tasks to
* base_env. The caller must delete the result when it is
* {@code baseEnv}.</p>
*
* <p>The caller must delete the result when it is
* no longer needed.</p>
*
* <p>{@code *base_env} must remain live while the result is in use.</p>
* @param baseEnv the base environment,
* must remain live while the result is in use.
*/
public RocksMemEnv(final Env baseEnv) {
super(createMemEnv(baseEnv.nativeHandle_));
}
/**
* @deprecated Use {@link #RocksMemEnv(Env)}.
*/
@Deprecated
public RocksMemEnv() {
super(createMemEnv());
this(Env.getDefault());
}
private static native long createMemEnv();
private static native long createMemEnv(final long baseEnvHandle);
@Override protected final native void disposeInternal(final long handle);
}

@ -0,0 +1,30 @@
package org.rocksdb;
import java.util.List;
/**
 * Flags for
 * {@link RocksDB#getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)}
 * that specify whether memtable stats should be included,
 * or file stats approximation or both.
 */
public enum SizeApproximationFlag {
  NONE((byte)0x0),
  INCLUDE_MEMTABLES((byte)0x1),
  INCLUDE_FILES((byte)0x2);

  // Byte value passed over JNI to the native side.
  private final byte value;

  SizeApproximationFlag(final byte byteValue) {
    this.value = byteValue;
  }

  /**
   * Returns the byte used to represent this flag internally.
   *
   * @return the internal representation.
   */
  byte getValue() {
    return value;
  }
}

@ -55,7 +55,8 @@ public class Slice extends AbstractSlice<byte[]> {
* Slice instances using a handle. </p>
*
* @param nativeHandle address of native instance.
* @param owningNativeHandle whether to own this reference from the C++ side or not
* @param owningNativeHandle true if the Java side owns the memory pointed to
* by this reference, false if ownership belongs to the C++ side
*/
Slice(final long nativeHandle, final boolean owningNativeHandle) {
super();

@ -0,0 +1,150 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * The metadata that describes a SST file.
 */
public class SstFileMetaData {
  // All fields are populated once by the JNI layer and never mutated.
  private final String fileName;
  private final String path;
  private final long size;
  private final long smallestSeqno;
  private final long largestSeqno;
  private final byte[] smallestKey;
  private final byte[] largestKey;
  private final long numReadsSampled;
  private final boolean beingCompacted;
  private final long numEntries;
  private final long numDeletions;

  /**
   * Called from JNI C++
   */
  protected SstFileMetaData(
      final String fileName,
      final String path,
      final long size,
      final long smallestSeqno,
      final long largestSeqno,
      final byte[] smallestKey,
      final byte[] largestKey,
      final long numReadsSampled,
      final boolean beingCompacted,
      final long numEntries,
      final long numDeletions) {
    this.fileName = fileName;
    this.path = path;
    this.size = size;
    this.smallestSeqno = smallestSeqno;
    this.largestSeqno = largestSeqno;
    this.smallestKey = smallestKey;
    this.largestKey = largestKey;
    this.numReadsSampled = numReadsSampled;
    this.beingCompacted = beingCompacted;
    this.numEntries = numEntries;
    this.numDeletions = numDeletions;
  }

  /**
   * Returns the name of this SST file.
   *
   * @return the name of the file.
   */
  public String fileName() {
    return this.fileName;
  }

  /**
   * Returns the full path where this file is located.
   *
   * @return the full path
   */
  public String path() {
    return this.path;
  }

  /**
   * Returns the size of this file, in bytes.
   *
   * @return file size
   */
  public long size() {
    return this.size;
  }

  /**
   * Returns the smallest sequence number contained in this file.
   *
   * @return the smallest sequence number
   */
  public long smallestSeqno() {
    return this.smallestSeqno;
  }

  /**
   * Returns the largest sequence number contained in this file.
   *
   * @return the largest sequence number
   */
  public long largestSeqno() {
    return this.largestSeqno;
  }

  /**
   * Returns the smallest user defined key contained in this file.
   *
   * @return the smallest user defined key
   */
  public byte[] smallestKey() {
    return this.smallestKey;
  }

  /**
   * Returns the largest user defined key contained in this file.
   *
   * @return the largest user defined key
   */
  public byte[] largestKey() {
    return this.largestKey;
  }

  /**
   * Returns how many times this file has been read.
   *
   * @return the number of times the file has been read
   */
  public long numReadsSampled() {
    return this.numReadsSampled;
  }

  /**
   * Indicates whether this file is currently being compacted.
   *
   * @return true if the file is currently being compacted, false otherwise.
   */
  public boolean beingCompacted() {
    return this.beingCompacted;
  }

  /**
   * Returns the number of entries in this file.
   *
   * @return the number of entries.
   */
  public long numEntries() {
    return this.numEntries;
  }

  /**
   * Returns the number of deletions in this file.
   *
   * @return the number of deletions.
   */
  public long numDeletions() {
    return this.numDeletions;
  }
}

@ -0,0 +1,53 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * The type used to refer to a thread state.
 *
 * A state describes lower-level action of a thread
 * such as reading / writing a file or waiting for a mutex.
 */
public enum StateType {
  STATE_UNKNOWN((byte)0x0),
  STATE_MUTEX_WAIT((byte)0x1);

  // Byte value shared with the native side over JNI.
  private final byte value;

  StateType(final byte byteValue) {
    this.value = byteValue;
  }

  /**
   * Returns the internal representation value.
   *
   * @return the internal representation value.
   */
  byte getValue() {
    return value;
  }

  /**
   * Looks up the State type for an internal representation value.
   *
   * @param value the internal representation value.
   *
   * @return the state type
   *
   * @throws IllegalArgumentException if the value does not match
   *     a StateType
   */
  static StateType fromValue(final byte value)
      throws IllegalArgumentException {
    for (final StateType stateType : StateType.values()) {
      if (stateType.value == value) {
        return stateType;
      }
    }
    throw new IllegalArgumentException(
        "Unknown value for StateType: " + value);
  }
}

@ -60,6 +60,6 @@ public enum StatsLevel {
}
}
throw new IllegalArgumentException(
"Illegal value provided for InfoLogLevel.");
"Illegal value provided for StatsLevel.");
}
}

@ -0,0 +1,20 @@
package org.rocksdb;
/**
 * Filter for iterating a table.
 *
 * <p>A single-abstract-method callback interface, so it may be
 * implemented with a lambda.</p>
 */
@FunctionalInterface
public interface TableFilter {

  /**
   * A callback to determine whether relevant keys for this scan exist in a
   * given table based on the table's properties. The callback is passed the
   * properties of each table during iteration. If the callback returns false,
   * the table will not be scanned. This option only affects Iterators and has
   * no impact on point lookups.
   *
   * @param tableProperties the table properties.
   *
   * @return true if the table should be scanned, false otherwise.
   */
  boolean filter(final TableProperties tableProperties);
}

@ -0,0 +1,365 @@
package org.rocksdb;
import java.util.Map;
/**
 * TableProperties contains read-only properties of its associated
 * table.
 */
public class TableProperties {
  // Every field is filled in exactly once by the JNI constructor below.
  private final long dataSize;
  private final long indexSize;
  private final long indexPartitions;
  private final long topLevelIndexSize;
  private final long indexKeyIsUserKey;
  private final long indexValueIsDeltaEncoded;
  private final long filterSize;
  private final long rawKeySize;
  private final long rawValueSize;
  private final long numDataBlocks;
  private final long numEntries;
  private final long numDeletions;
  private final long numMergeOperands;
  private final long numRangeDeletions;
  private final long formatVersion;
  private final long fixedKeyLen;
  private final long columnFamilyId;
  private final long creationTime;
  private final long oldestKeyTime;
  private final byte[] columnFamilyName;
  private final String filterPolicyName;
  private final String comparatorName;
  private final String mergeOperatorName;
  private final String prefixExtractorName;
  private final String propertyCollectorsNames;
  private final String compressionName;
  private final Map<String, String> userCollectedProperties;
  private final Map<String, String> readableProperties;
  private final Map<String, Long> propertiesOffsets;

  /**
   * Access is private as this will only be constructed from
   * C++ via JNI.
   */
  private TableProperties(final long dataSize, final long indexSize,
      final long indexPartitions, final long topLevelIndexSize,
      final long indexKeyIsUserKey, final long indexValueIsDeltaEncoded,
      final long filterSize, final long rawKeySize, final long rawValueSize,
      final long numDataBlocks, final long numEntries, final long numDeletions,
      final long numMergeOperands, final long numRangeDeletions,
      final long formatVersion, final long fixedKeyLen,
      final long columnFamilyId, final long creationTime,
      final long oldestKeyTime, final byte[] columnFamilyName,
      final String filterPolicyName, final String comparatorName,
      final String mergeOperatorName, final String prefixExtractorName,
      final String propertyCollectorsNames, final String compressionName,
      final Map<String, String> userCollectedProperties,
      final Map<String, String> readableProperties,
      final Map<String, Long> propertiesOffsets) {
    this.dataSize = dataSize;
    this.indexSize = indexSize;
    this.indexPartitions = indexPartitions;
    this.topLevelIndexSize = topLevelIndexSize;
    this.indexKeyIsUserKey = indexKeyIsUserKey;
    this.indexValueIsDeltaEncoded = indexValueIsDeltaEncoded;
    this.filterSize = filterSize;
    this.rawKeySize = rawKeySize;
    this.rawValueSize = rawValueSize;
    this.numDataBlocks = numDataBlocks;
    this.numEntries = numEntries;
    this.numDeletions = numDeletions;
    this.numMergeOperands = numMergeOperands;
    this.numRangeDeletions = numRangeDeletions;
    this.formatVersion = formatVersion;
    this.fixedKeyLen = fixedKeyLen;
    this.columnFamilyId = columnFamilyId;
    this.creationTime = creationTime;
    this.oldestKeyTime = oldestKeyTime;
    this.columnFamilyName = columnFamilyName;
    this.filterPolicyName = filterPolicyName;
    this.comparatorName = comparatorName;
    this.mergeOperatorName = mergeOperatorName;
    this.prefixExtractorName = prefixExtractorName;
    this.propertyCollectorsNames = propertyCollectorsNames;
    this.compressionName = compressionName;
    this.userCollectedProperties = userCollectedProperties;
    this.readableProperties = readableProperties;
    this.propertiesOffsets = propertiesOffsets;
  }

  /**
   * Returns the combined size of all data blocks.
   *
   * @return the total size of all data blocks.
   */
  public long getDataSize() {
    return this.dataSize;
  }

  /**
   * Returns the size of the index block.
   *
   * @return the size of index block.
   */
  public long getIndexSize() {
    return this.indexSize;
  }

  /**
   * Returns the total number of index partitions
   * if {@link IndexType#kTwoLevelIndexSearch} is used.
   *
   * @return the total number of index partitions.
   */
  public long getIndexPartitions() {
    return this.indexPartitions;
  }

  /**
   * Returns the size of the top-level index
   * if {@link IndexType#kTwoLevelIndexSearch} is used.
   *
   * @return the size of the top-level index.
   */
  public long getTopLevelIndexSize() {
    return this.topLevelIndexSize;
  }

  /**
   * Whether the index key is user key.
   * Otherwise it includes 8 byte of sequence
   * number added by internal key format.
   *
   * @return the index key
   */
  public long getIndexKeyIsUserKey() {
    return this.indexKeyIsUserKey;
  }

  /**
   * Whether delta encoding is used to encode the index values.
   *
   * @return whether delta encoding is used to encode the index values.
   */
  public long getIndexValueIsDeltaEncoded() {
    return this.indexValueIsDeltaEncoded;
  }

  /**
   * Returns the size of the filter block.
   *
   * @return the size of filter block.
   */
  public long getFilterSize() {
    return this.filterSize;
  }

  /**
   * Returns the total raw key size.
   *
   * @return the total raw key size.
   */
  public long getRawKeySize() {
    return this.rawKeySize;
  }

  /**
   * Returns the total raw value size.
   *
   * @return the total raw value size.
   */
  public long getRawValueSize() {
    return this.rawValueSize;
  }

  /**
   * Returns how many blocks this table contains.
   *
   * @return the number of blocks in this table.
   */
  public long getNumDataBlocks() {
    return this.numDataBlocks;
  }

  /**
   * Returns how many entries this table contains.
   *
   * @return the number of entries in this table.
   */
  public long getNumEntries() {
    return this.numEntries;
  }

  /**
   * Returns how many deletions this table contains.
   *
   * @return the number of deletions in the table.
   */
  public long getNumDeletions() {
    return this.numDeletions;
  }

  /**
   * Returns how many merge operands this table contains.
   *
   * @return the number of merge operands in the table.
   */
  public long getNumMergeOperands() {
    return this.numMergeOperands;
  }

  /**
   * Returns how many range deletions this table contains.
   *
   * @return the number of range deletions in this table.
   */
  public long getNumRangeDeletions() {
    return this.numRangeDeletions;
  }

  /**
   * Returns the format version, reserved for backward compatibility.
   *
   * @return the format version.
   */
  public long getFormatVersion() {
    return this.formatVersion;
  }

  /**
   * Returns the length of the keys.
   *
   * @return 0 when the key is variable length, otherwise number of
   *     bytes for each key.
   */
  public long getFixedKeyLen() {
    return this.fixedKeyLen;
  }

  /**
   * Returns the ID of column family for this SST file,
   * corresponding to the column family identified by
   * {@link #getColumnFamilyName()}.
   *
   * @return the id of the column family.
   */
  public long getColumnFamilyId() {
    return this.columnFamilyId;
  }

  /**
   * The time when the SST file was created.
   * Since SST files are immutable, this is equivalent
   * to last modified time.
   *
   * @return the created time.
   */
  public long getCreationTime() {
    return this.creationTime;
  }

  /**
   * Returns the timestamp of the earliest key.
   *
   * @return 0 means unknown, otherwise the timestamp.
   */
  public long getOldestKeyTime() {
    return this.oldestKeyTime;
  }

  /**
   * Returns the name of the column family with which this
   * SST file is associated.
   *
   * @return the name of the column family, or null if the
   *     column family is unknown.
   */
  /*@Nullable*/ public byte[] getColumnFamilyName() {
    return this.columnFamilyName;
  }

  /**
   * Returns the name of the filter policy used in this table.
   *
   * @return the name of the filter policy, or null if
   *     no filter policy is used.
   */
  /*@Nullable*/ public String getFilterPolicyName() {
    return this.filterPolicyName;
  }

  /**
   * Returns the name of the comparator used in this table.
   *
   * @return the name of the comparator.
   */
  public String getComparatorName() {
    return this.comparatorName;
  }

  /**
   * Returns the name of the merge operator used in this table.
   *
   * @return the name of the merge operator, or null if no merge operator
   *     is used.
   */
  /*@Nullable*/ public String getMergeOperatorName() {
    return this.mergeOperatorName;
  }

  /**
   * Returns the name of the prefix extractor used in this table.
   *
   * @return the name of the prefix extractor, or null if no prefix
   *     extractor is used.
   */
  /*@Nullable*/ public String getPrefixExtractorName() {
    return this.prefixExtractorName;
  }

  /**
   * Returns the names of the property collector factories used in this table.
   *
   * @return the names of the property collector factories separated
   *     by commas, e.g. {collector_name[1]},{collector_name[2]},...
   */
  public String getPropertyCollectorsNames() {
    return this.propertyCollectorsNames;
  }

  /**
   * Returns the name of the compression algorithm used to compress the
   * SST files.
   *
   * @return the name of the compression algorithm.
   */
  public String getCompressionName() {
    return this.compressionName;
  }

  /**
   * Returns the user collected properties.
   *
   * @return the user collected properties.
   */
  public Map<String, String> getUserCollectedProperties() {
    return this.userCollectedProperties;
  }

  /**
   * Returns the readable properties.
   *
   * @return the readable properties.
   */
  public Map<String, String> getReadableProperties() {
    return this.readableProperties;
  }

  /**
   * The offset of the value of each property in the file.
   *
   * @return the offset of each property.
   */
  public Map<String, Long> getPropertiesOffsets() {
    return this.propertiesOffsets;
  }
}

@ -0,0 +1,224 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
import java.util.Map;
public class ThreadStatus {
  private final long threadId;
  private final ThreadType threadType;
  private final String dbName;
  private final String cfName;
  private final OperationType operationType;
  private final long operationElapsedTime; // microseconds
  private final OperationStage operationStage;
  private final long[] operationProperties;
  private final StateType stateType;

  /**
   * Invoked from C++ via JNI
   */
  private ThreadStatus(final long threadId,
      final byte threadTypeValue,
      final String dbName,
      final String cfName,
      final byte operationTypeValue,
      final long operationElapsedTime,
      final byte operationStageValue,
      final long[] operationProperties,
      final byte stateTypeValue) {
    this.threadId = threadId;
    this.threadType = ThreadType.fromValue(threadTypeValue);
    this.dbName = dbName;
    this.cfName = cfName;
    this.operationType = OperationType.fromValue(operationTypeValue);
    this.operationElapsedTime = operationElapsedTime;
    this.operationStage = OperationStage.fromValue(operationStageValue);
    this.operationProperties = operationProperties;
    this.stateType = StateType.fromValue(stateTypeValue);
  }

  /**
   * Returns the unique ID of the thread.
   *
   * @return the thread id
   */
  public long getThreadId() {
    return this.threadId;
  }

  /**
   * Returns the type of the thread.
   *
   * @return the type of the thread.
   */
  public ThreadType getThreadType() {
    return this.threadType;
  }

  /**
   * The name of the DB instance that the thread is currently
   * involved with.
   *
   * @return the name of the db, or null if the thread is not involved
   *     in any DB operation.
   */
  /* @Nullable */ public String getDbName() {
    return this.dbName;
  }

  /**
   * The name of the Column Family that the thread is currently
   * involved with.
   *
   * @return the name of the column family, or null if the thread is not
   *     involved in any column family operation.
   */
  /* @Nullable */ public String getCfName() {
    return this.cfName;
  }

  /**
   * Returns the operation (high-level action) that the current thread is
   * involved with.
   *
   * @return the operation
   */
  public OperationType getOperationType() {
    return this.operationType;
  }

  /**
   * Returns the elapsed time of the current thread operation in microseconds.
   *
   * @return the elapsed time
   */
  public long getOperationElapsedTime() {
    return this.operationElapsedTime;
  }

  /**
   * Returns the current stage where the thread is involved in the current
   * operation.
   *
   * @return the current stage of the current operation
   */
  public OperationStage getOperationStage() {
    return this.operationStage;
  }

  /**
   * Returns the list of properties that describe some details about the
   * current operation.
   *
   * Each field might have different meanings for different operations.
   *
   * @return the properties
   */
  public long[] getOperationProperties() {
    return this.operationProperties;
  }

  /**
   * Returns the state (lower-level action) that the current thread is
   * involved with.
   *
   * @return the state
   */
  public StateType getStateType() {
    return this.stateType;
  }

  /**
   * Obtain the name of a thread type.
   *
   * @param threadType the thread type
   *
   * @return the name of the thread type.
   */
  public static String getThreadTypeName(final ThreadType threadType) {
    return getThreadTypeName(threadType.getValue());
  }

  /**
   * Obtain the name of an operation given its type.
   *
   * @param operationType the type of operation.
   *
   * @return the name of the operation.
   */
  public static String getOperationName(final OperationType operationType) {
    return getOperationName(operationType.getValue());
  }

  /**
   * Obtain a human-readable representation of an elapsed time.
   *
   * @param operationElapsedTime the elapsed time, in microseconds.
   *
   * @return a human-readable string for the elapsed time.
   */
  public static String microsToString(final long operationElapsedTime) {
    return microsToStringNative(operationElapsedTime);
  }

  /**
   * Obtain a human-readable string describing the specified operation stage.
   *
   * @param operationStage the stage of the operation.
   *
   * @return the description of the operation stage.
   */
  public static String getOperationStageName(
      final OperationStage operationStage) {
    return getOperationStageName(operationStage.getValue());
  }

  /**
   * Obtain the name of the "i"th operation property of the
   * specified operation.
   *
   * @param operationType the operation type.
   * @param i the index of the operation property.
   *
   * @return the name of the operation property
   */
  public static String getOperationPropertyName(
      final OperationType operationType, final int i) {
    return getOperationPropertyName(operationType.getValue(), i);
  }

  /**
   * Translate the "i"th property of the specified operation given
   * a property value.
   *
   * @param operationType the operation type.
   * @param operationProperties the operation properties.
   *
   * @return the property values.
   */
  public static Map<String, Long> interpretOperationProperties(
      final OperationType operationType, final long[] operationProperties) {
    return interpretOperationProperties(operationType.getValue(),
        operationProperties);
  }

  /**
   * Obtain the name of a state given its type.
   *
   * @param stateType the state type.
   *
   * @return the name of the state.
   */
  public static String getStateName(final StateType stateType) {
    return getStateName(stateType.getValue());
  }

  // Native counterparts; names and signatures must match the JNI bindings.
  private static native String getThreadTypeName(final byte threadTypeValue);
  private static native String getOperationName(final byte operationTypeValue);
  private static native String microsToStringNative(
      final long operationElapsedTime);
  private static native String getOperationStageName(
      final byte operationStageTypeValue);
  private static native String getOperationPropertyName(
      final byte operationTypeValue, final int i);
  private static native Map<String, Long> interpretOperationProperties(
      final byte operationTypeValue, final long[] operationProperties);
  private static native String getStateName(final byte stateTypeValue);
}

@ -0,0 +1,65 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * The type of a thread.
 */
public enum ThreadType {
  /**
   * RocksDB BG thread in high-pri thread pool.
   */
  HIGH_PRIORITY((byte)0x0),

  /**
   * RocksDB BG thread in low-pri thread pool.
   */
  LOW_PRIORITY((byte)0x1),

  /**
   * User thread (Non-RocksDB BG thread).
   */
  USER((byte)0x2),

  /**
   * RocksDB BG thread in bottom-pri thread pool
   */
  BOTTOM_PRIORITY((byte)0x3);

  // Byte value shared with the native side over JNI.
  private final byte value;

  ThreadType(final byte byteValue) {
    this.value = byteValue;
  }

  /**
   * Returns the internal representation value.
   *
   * @return the internal representation value.
   */
  byte getValue() {
    return value;
  }

  /**
   * Looks up the Thread type for an internal representation value.
   *
   * @param byteValue the internal representation value.
   *
   * @return the thread type
   *
   * @throws IllegalArgumentException if the value does not match a ThreadType
   */
  static ThreadType fromValue(final byte byteValue)
      throws IllegalArgumentException {
    for (final ThreadType candidate : ThreadType.values()) {
      if (candidate.value == byteValue) {
        return candidate;
      }
    }
    throw new IllegalArgumentException("Unknown value for ThreadType: " + byteValue);
  }
}

@ -726,9 +726,10 @@ public enum TickerType {
}
/**
* @deprecated
* Exposes internal value of native enum mappings. This method will be marked private in the
* next major release.
* @deprecated Exposes internal value of native enum mappings.
* This method will be marked package private in the next major release.
*
* @return the internal representation
*/
@Deprecated
public byte getValue() {

@ -0,0 +1,30 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * Timed environment.
 *
 * <p>Wraps a base {@link Env} so that the elapsed time of filesystem
 * operations can be measured and reported via PerfContext.</p>
 */
public class TimedEnv extends Env {

  /**
   * <p>Creates a new environment that measures function call times for
   * filesystem operations, reporting results to variables in PerfContext.</p>
   *
   * <p>The caller must delete the result when it is
   * no longer needed.</p>
   *
   * @param baseEnv the base environment,
   *     must remain live while the result is in use.
   */
  public TimedEnv(final Env baseEnv) {
    super(createTimedEnv(baseEnv.nativeHandle_));
  }

  // Allocates the native TimedEnv wrapping the given base Env handle.
  private static native long createTimedEnv(final long baseEnvHandle);

  // Releases the native TimedEnv allocated by createTimedEnv.
  @Override protected final native void disposeInternal(final long handle);
}

@ -0,0 +1,32 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * TraceOptions is used for
 * {@link RocksDB#startTrace(TraceOptions, AbstractTraceWriter)}.
 */
public class TraceOptions {
  private final long maxTraceFileSize;

  /**
   * Creates TraceOptions with a default maximum trace file size of 64 GB.
   */
  public TraceOptions() {
    // Must be computed in long arithmetic: the previous int expression
    // 64 * 1024 * 1024 * 1024 overflowed to 0, silently disabling the cap.
    this.maxTraceFileSize = 64L * 1024 * 1024 * 1024;  // 64 GB
  }

  /**
   * Creates TraceOptions with the given maximum trace file size.
   *
   * @param maxTraceFileSize the maximum size of the trace file, in bytes.
   */
  public TraceOptions(final long maxTraceFileSize) {
    this.maxTraceFileSize = maxTraceFileSize;
  }

  /**
   * To avoid the trace file growing larger than the available storage
   * space, the user can set a maximum trace file size in bytes.
   * The default is 64 GB.
   *
   * @return the max trace file size, in bytes
   */
  public long getMaxTraceFileSize() {
    return maxTraceFileSize;
  }
}

@ -0,0 +1,36 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
 * TraceWriter allows exporting RocksDB traces to any system,
 * one operation at a time.
 *
 * <p>NOTE(review): presumably implementations are driven from the native
 * tracing machinery once a trace is started with
 * {@code RocksDB#startTrace(TraceOptions, AbstractTraceWriter)} —
 * confirm against the JNI callback wiring.</p>
 */
public interface TraceWriter {

  /**
   * Write the data.
   *
   * @param data the data to write
   *
   * @throws RocksDBException if an error occurs whilst writing.
   */
  void write(final Slice data) throws RocksDBException;

  /**
   * Close the writer.
   *
   * @throws RocksDBException if an error occurs whilst closing the writer.
   */
  void closeWriter() throws RocksDBException;

  /**
   * Get the size of the file that this writer is writing to.
   *
   * @return the file size
   */
  long getFileSize();
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save