From 66f62e5c78a189c910424442b5634467dd609530 Mon Sep 17 00:00:00 2001 From: Lei Jin Date: Mon, 25 Aug 2014 14:22:55 -0700 Subject: [PATCH] JNI changes corresponding to BlockBasedTableOptions migration Summary: as title Test Plan: tested on my mac make rocksdbjava make jtest Reviewers: sdong, igor, yhchiang Reviewed By: yhchiang Subscribers: leveldb Differential Revision: https://reviews.facebook.net/D21963 --- java/Makefile | 2 +- java/RocksDBSample.java | 18 +- java/org/rocksdb/BlockBasedTableConfig.java | 210 ++++++++++++++++ java/org/rocksdb/Options.java | 250 +------------------- java/org/rocksdb/RocksDB.java | 14 +- java/org/rocksdb/benchmark/DbBenchmark.java | 37 +-- java/org/rocksdb/test/OptionsTest.java | 36 --- java/rocksjni/options.cc | 140 ----------- java/rocksjni/rocksjni.cc | 15 +- java/rocksjni/table.cc | 34 +++ 10 files changed, 284 insertions(+), 472 deletions(-) create mode 100644 java/org/rocksdb/BlockBasedTableConfig.java diff --git a/java/Makefile b/java/Makefile index 238ddd93e..47b2afb9e 100644 --- a/java/Makefile +++ b/java/Makefile @@ -1,4 +1,4 @@ -NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics org.rocksdb.RocksIterator org.rocksdb.VectorMemTableConfig org.rocksdb.SkipListMemTableConfig org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig org.rocksdb.PlainTableConfig org.rocksdb.ReadOptions org.rocksdb.Filter org.rocksdb.BloomFilter org.rocksdb.RestoreOptions org.rocksdb.RestoreBackupableDB org.rocksdb.RocksEnv +NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics org.rocksdb.RocksIterator org.rocksdb.VectorMemTableConfig org.rocksdb.SkipListMemTableConfig org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig org.rocksdb.PlainTableConfig org.rocksdb.BlockBasedTableConfig org.rocksdb.ReadOptions org.rocksdb.Filter org.rocksdb.BloomFilter org.rocksdb.RestoreOptions org.rocksdb.RestoreBackupableDB org.rocksdb.RocksEnv NATIVE_INCLUDE = ./include ROCKSDB_JAR = rocksdbjni.jar diff --git a/java/RocksDBSample.java b/java/RocksDBSample.java index dfecde342..72da4b5e8 100644 --- a/java/RocksDBSample.java +++ b/java/RocksDBSample.java @@ -35,16 +35,11 @@ public class RocksDBSample { assert(db == null); } - Filter filter = new BloomFilter(10); options.setCreateIfMissing(true) .createStatistics() .setWriteBufferSize(8 * SizeUnit.KB) .setMaxWriteBufferNumber(3) - .setDisableSeekCompaction(true) - .setBlockSize(64 * SizeUnit.KB) .setMaxBackgroundCompactions(10) - .setFilter(filter) - .setCacheNumShardBits(6) .setCompressionType(CompressionType.SNAPPY_COMPRESSION) .setCompactionStyle(CompactionStyle.UNIVERSAL); Statistics stats = options.statisticsPtr(); @@ -52,10 +47,7 @@ public class RocksDBSample { assert(options.createIfMissing() == true); assert(options.writeBufferSize() == 8 * SizeUnit.KB); assert(options.maxWriteBufferNumber() == 3); - assert(options.disableSeekCompaction() == true); - assert(options.blockSize() == 64 * SizeUnit.KB); assert(options.maxBackgroundCompactions() == 10); - assert(options.cacheNumShardBits() == 6); assert(options.compressionType() == CompressionType.SNAPPY_COMPRESSION); assert(options.compactionStyle() == 
CompactionStyle.UNIVERSAL); @@ -82,6 +74,15 @@ public class RocksDBSample { options.setTableFormatConfig(new PlainTableConfig()); assert(options.tableFactoryName().equals("PlainTable")); + BlockBasedTableConfig table_options = new BlockBasedTableConfig(); + table_options.setBlockCacheSize(64 * SizeUnit.KB) + .setFilterBitsPerKey(10) + .setCacheNumShardBits(6); + assert(table_options.blockCacheSize() == 64 * SizeUnit.KB); + assert(table_options.cacheNumShardBits() == 6); + options.setTableFormatConfig(table_options); + assert(options.tableFactoryName().equals("BlockBasedTable")); + try { db = RocksDB.open(options, db_path_not_found); db.put("hello".getBytes(), "world".getBytes()); @@ -254,6 +255,5 @@ public class RocksDBSample { // be sure to dispose c++ pointers options.dispose(); readOptions.dispose(); - filter.dispose(); } } diff --git a/java/org/rocksdb/BlockBasedTableConfig.java b/java/org/rocksdb/BlockBasedTableConfig.java new file mode 100644 index 000000000..523a57691 --- /dev/null +++ b/java/org/rocksdb/BlockBasedTableConfig.java @@ -0,0 +1,210 @@ +// Copyright (c) 2014, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * The config for block based table sst format. + * + * BlockBasedTable is RocksDB's default SST file format. + */ +public class BlockBasedTableConfig extends TableFormatConfig { + + public BlockBasedTableConfig() { + noBlockCache_ = false; + blockCacheSize_ = 8 * 1024 * 1024; + blockSize_ = 4 * 1024; + blockSizeDeviation_ = 10; + blockRestartInterval_ = 16; + wholeKeyFiltering_ = true; + bitsPerKey_ = 0; + } + + /** + * Disable block cache. If this is set to true, + * then no block cache should be used, and the block_cache should + * point to a nullptr object. + * Default: false + * + * @param noBlockCache if true, block cache is disabled + * @return the reference to the current config. + */ + public BlockBasedTableConfig setNoBlockCache(boolean noBlockCache) { + noBlockCache_ = noBlockCache; + return this; + } + + /** + * @return if block cache is disabled + */ + public boolean noBlockCache() { + return noBlockCache_; + } + + /** + * Set the amount of cache in bytes that will be used by RocksDB. + * If blockCacheSize is non-positive, then cache will not be used. + * DEFAULT: 8M + * + * @param blockCacheSize block cache size in bytes + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockCacheSize(long blockCacheSize) { + blockCacheSize_ = blockCacheSize; + return this; + } + + /** + * @return block cache size in bytes + */ + public long blockCacheSize() { + return blockCacheSize_; + } + + /** + * Controls the number of shards for the block cache. + * This is applied only if blockCacheSize is set to non-negative. + * + * @param numShardBits the number of shard bits. The resulting + * number of shards would be 2 ^ numShardBits. Any negative + * number means use default settings. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setCacheNumShardBits(int numShardBits) { + numShardBits_ = numShardBits; + return this; + } + + /** + * Returns the number of shard bits used in the block cache. + * The resulting number of shards would be 2 ^ (returned value). + * Any negative number means use default settings. + * + * @return the number of shard bits used in the block cache.
+ */ + public int cacheNumShardBits() { + return numShardBits_; + } + + /** + * Approximate size of user data packed per block. Note that the + * block size specified here corresponds to uncompressed data. The + * actual size of the unit read from disk may be smaller if + * compression is enabled. This parameter can be changed dynamically. + * Default: 4K + * + * @param blockSize block size in bytes + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockSize(long blockSize) { + blockSize_ = blockSize; + return this; + } + + /** + * @return block size in bytes + */ + public long blockSize() { + return blockSize_; + } + + /** + * This is used to close a block before it reaches the configured + * 'block_size'. If the percentage of free space in the current block is less + * than this specified number and adding a new record to the block will + * exceed the configured block size, then this block will be closed and the + * new record will be written to the next block. + * Default is 10. + * + * @param blockSizeDeviation the allowed percentage deviation from the block size + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockSizeDeviation(int blockSizeDeviation) { + blockSizeDeviation_ = blockSizeDeviation; + return this; + } + + /** + * @return the block size deviation. + */ + public int blockSizeDeviation() { + return blockSizeDeviation_; + } + + /** + * Set the block restart interval. + * + * @param restartInterval block restart interval. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockRestartInterval(int restartInterval) { + blockRestartInterval_ = restartInterval; + return this; + } + + /** + * @return block restart interval + */ + public int blockRestartInterval() { + return blockRestartInterval_; + } + + /** + * If true, place whole keys in the filter (not just prefixes). + * This must generally be true for gets to be efficient. + * Default: true + * + * @param wholeKeyFiltering if true, place whole keys in the filter + * @return the reference to the current config. + */ + public BlockBasedTableConfig setWholeKeyFiltering(boolean wholeKeyFiltering) { + wholeKeyFiltering_ = wholeKeyFiltering; + return this; + } + + /** + * @return if whole key filtering is enabled + */ + public boolean wholeKeyFiltering() { + return wholeKeyFiltering_; + } + + /** + * Use a bloom filter policy to reduce disk reads. + * + * A bloom filter with the specified number of bits per key is created + * and used by the table factory built from this config. A useful value + * is 10, which yields a filter with roughly a 1% false positive rate. + * + * A non-positive value disables the bloom filter. + * + * @param bitsPerKey number of bloom filter bits per key. + * @return the reference to the current config.
+ */ + public BlockBasedTableConfig setFilterBitsPerKey(int bitsPerKey) { + bitsPerKey_ = bitsPerKey; + return this; + } + + @Override protected long newTableFactoryHandle() { + return newTableFactoryHandle(noBlockCache_, blockCacheSize_, numShardBits_, + blockSize_, blockSizeDeviation_, blockRestartInterval_, + wholeKeyFiltering_, bitsPerKey_); + } + + private native long newTableFactoryHandle( + boolean noBlockCache, long blockCacheSize, int numShardbits, + long blockSize, int blockSizeDeviation, int blockRestartInterval, + boolean wholeKeyFiltering, int bitsPerKey); + + private boolean noBlockCache_; + private long blockCacheSize_; + private int numShardBits_; + private long shard; + private long blockSize_; + private int blockSizeDeviation_; + private int blockRestartInterval_; + private boolean wholeKeyFiltering_; + private int bitsPerKey_; +} diff --git a/java/org/rocksdb/Options.java b/java/org/rocksdb/Options.java index 95f994606..125f06afd 100644 --- a/java/org/rocksdb/Options.java +++ b/java/org/rocksdb/Options.java @@ -136,135 +136,6 @@ public class Options extends RocksObject { return maxWriteBufferNumber(nativeHandle_); } - /* - * Approximate size of user data packed per block. Note that the - * block size specified here corresponds to uncompressed data. The - * actual size of the unit read from disk may be smaller if - * compression is enabled. This parameter can be changed dynamically. - * - * Default: 4K - * - * @param blockSize the size of each block in bytes. - * @return the instance of the current Options. - * @see RocksDB.open() - */ - public Options setBlockSize(long blockSize) { - assert(isInitialized()); - setBlockSize(nativeHandle_, blockSize); - return this; - } - - /* - * Returns the size of a block in bytes. - * - * @return block size. - * @see setBlockSize() - */ - public long blockSize() { - assert(isInitialized()); - return blockSize(nativeHandle_); - } - - /** - * Use the specified filter policy to reduce disk reads. - * - * Filter should not be disposed before options instances using this filter is - * disposed. If dispose() function is not called, then filter object will be - * GC'd automatically. - * - * Filter instance can be re-used in multiple options instances. - * - * @param Filter policy java instance. - * @return the instance of the current Options. - * @see RocksDB.open() - */ - public Options setFilter(Filter filter) { - assert(isInitialized()); - setFilterHandle(nativeHandle_, filter.nativeHandle_); - filter_ = filter; - return this; - } - private native void setFilterHandle(long optHandle, long filterHandle); - - /* - * Disable compaction triggered by seek. - * With bloomfilter and fast storage, a miss on one level - * is very cheap if the file handle is cached in table cache - * (which is true if max_open_files is large). - * Default: true - * - * @param disableSeekCompaction a boolean value to specify whether - * to disable seek compaction. - * @return the instance of the current Options. - * @see RocksDB.open() - */ - public Options setDisableSeekCompaction(boolean disableSeekCompaction) { - assert(isInitialized()); - setDisableSeekCompaction(nativeHandle_, disableSeekCompaction); - return this; - } - - /* - * Returns true if disable seek compaction is set to true. - * - * @return true if disable seek compaction is set to true. 
- * @see setDisableSeekCompaction() - */ - public boolean disableSeekCompaction() { - assert(isInitialized()); - return disableSeekCompaction(nativeHandle_); - } - - /** - * Set the amount of cache in bytes that will be used by RocksDB. - * If cacheSize is non-positive, then cache will not be used. - * - * DEFAULT: 8M - * @see setCacheNumShardBits() - */ - public Options setCacheSize(long cacheSize) { - cacheSize_ = cacheSize; - return this; - } - - /** - * @return the amount of cache in bytes that will be used by RocksDB. - * - * @see cacheNumShardBits() - */ - public long cacheSize() { - return cacheSize_; - } - - /** - * Controls the number of shards for the block cache. - * This is applied only if cacheSize is set to non-negative. - * - * @param numShardBits the number of shard bits. The resulting - * number of shards would be 2 ^ numShardBits. Any negative - * number means use default settings." - * @return the reference to the current option. - * - * @see setCacheSize() - */ - public Options setCacheNumShardBits(int numShardBits) { - numShardBits_ = numShardBits; - return this; - } - - /** - * Returns the number of shard bits used in the block cache. - * The resulting number of shards would be 2 ^ (returned value). - * Any negative number means use default settings. - * - * @return the number of shard bits used in the block cache. - * - * @see cacheSize() - */ - public int cacheNumShardBits() { - return numShardBits_; - } - /** * If true, an error will be thrown during RocksDB.open() if the * database already exists. @@ -1344,26 +1215,26 @@ public class Options extends RocksObject { } private native void setBlockRestartInterval( long handle, int blockRestartInterval); - + /** * Compress blocks using the specified compression algorithm. This parameter can be changed dynamically. - * + * * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. - * + * * @return Compression type. - */ + */ public CompressionType compressionType() { return CompressionType.values()[compressionType(nativeHandle_)]; } private native byte compressionType(long handle); - + /** * Compress blocks using the specified compression algorithm. This parameter can be changed dynamically. - * + * * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. - * + * * @param compressionType Compression Type. * @return the reference to the current option. */ @@ -1372,22 +1243,22 @@ public class Options extends RocksObject { return this; } private native void setCompressionType(long handle, byte compressionType); - + /** * Compaction style for DB. - * + * * @return Compaction style. - */ + */ public CompactionStyle compactionStyle() { return CompactionStyle.values()[compactionStyle(nativeHandle_)]; } private native byte compactionStyle(long handle); - + /** * Set compaction style for DB. - * + * * Default: LEVEL. - * + * * @param compactionStyle Compaction style. * @return the reference to the current option. */ @@ -1397,33 +1268,6 @@ public class Options extends RocksObject { } private native void setCompactionStyle(long handle, byte compactionStyle); - /** - * If true, place whole keys in the filter (not just prefixes). - * This must generally be true for gets to be efficient. - * Default: true - * - * @return if true, then whole-key-filtering is on. - */ - public boolean wholeKeyFiltering() { - return wholeKeyFiltering(nativeHandle_); - } - private native boolean wholeKeyFiltering(long handle); - - /** - * If true, place whole keys in the filter (not just prefixes). 
- * This must generally be true for gets to be efficient. - * Default: true - * - * @param wholeKeyFiltering if true, then whole-key-filtering is on. - * @return the reference to the current option. - */ - public Options setWholeKeyFiltering(boolean wholeKeyFiltering) { - setWholeKeyFiltering(nativeHandle_, wholeKeyFiltering); - return this; - } - private native void setWholeKeyFiltering( - long handle, boolean wholeKeyFiltering); - /** * If level-styled compaction is used, then this number determines * the total number of levels. @@ -1897,35 +1741,6 @@ public class Options extends RocksObject { private native void setRateLimitDelayMaxMilliseconds( long handle, int rateLimitDelayMaxMilliseconds); - /** - * Disable block cache. If this is set to true, - * then no block cache should be used, and the block_cache should - * point to a nullptr object. - * Default: false - * - * @return true if block cache is disabled. - */ - public boolean noBlockCache() { - return noBlockCache(nativeHandle_); - } - private native boolean noBlockCache(long handle); - - /** - * Disable block cache. If this is set to true, - * then no block cache should be used, and the block_cache should - * point to a nullptr object. - * Default: false - * - * @param noBlockCache true if block-cache is disabled. - * @return the reference to the current option. - */ - public Options setNoBlockCache(boolean noBlockCache) { - setNoBlockCache(nativeHandle_, noBlockCache); - return this; - } - private native void setNoBlockCache( - long handle, boolean noBlockCache); - /** * The size of one block in arena memory allocation. * If <= 0, a proper value is automatically calculated (usually 1/10 of @@ -2023,39 +1838,6 @@ public class Options extends RocksObject { private native void setPurgeRedundantKvsWhileFlush( long handle, boolean purgeRedundantKvsWhileFlush); - /** - * This is used to close a block before it reaches the configured - * 'block_size'. If the percentage of free space in the current block is less - * than this specified number and adding a new record to the block will - * exceed the configured block size, then this block will be closed and the - * new record will be written to the next block. - * Default is 10. - * - * @return the target block size - */ - public int blockSizeDeviation() { - return blockSizeDeviation(nativeHandle_); - } - private native int blockSizeDeviation(long handle); - - /** - * This is used to close a block before it reaches the configured - * 'block_size'. If the percentage of free space in the current block is less - * than this specified number and adding a new record to the block will - * exceed the configured block size, then this block will be closed and the - * new record will be written to the next block. - * Default is 10. - * - * @param blockSizeDeviation the target block size - * @return the reference to the current option. 
- */ - public Options setBlockSizeDeviation(int blockSizeDeviation) { - setBlockSizeDeviation(nativeHandle_, blockSizeDeviation); - return this; - } - private native void setBlockSizeDeviation( - long handle, int blockSizeDeviation); - /** * If true, compaction will verify checksum on every read that happens * as part of compaction @@ -2437,11 +2219,6 @@ public class Options extends RocksObject { private native void setMaxWriteBufferNumber( long handle, int maxWriteBufferNumber); private native int maxWriteBufferNumber(long handle); - private native void setBlockSize(long handle, long blockSize); - private native long blockSize(long handle); - private native void setDisableSeekCompaction( - long handle, boolean disableSeekCompaction); - private native boolean disableSeekCompaction(long handle); private native void setMaxBackgroundCompactions( long handle, int maxBackgroundCompactions); private native int maxBackgroundCompactions(long handle); @@ -2459,6 +2236,5 @@ public class Options extends RocksObject { long cacheSize_; int numShardBits_; - Filter filter_; RocksEnv env_; } diff --git a/java/org/rocksdb/RocksDB.java b/java/org/rocksdb/RocksDB.java index c7b06cc6d..f8968d14d 100644 --- a/java/org/rocksdb/RocksDB.java +++ b/java/org/rocksdb/RocksDB.java @@ -99,11 +99,11 @@ public class RocksDB extends RocksObject { /** * The factory constructor of RocksDB that opens a RocksDB instance given * the path to the database using the specified options and db path. - * + * * Options instance *should* not be disposed before all DBs using this options * instance have been closed. If user doesn't call options dispose explicitly, * then this options instance will be GC'd automatically. - * + * * Options instance can be re-used to open multiple DBs if DB statistics is * not used. If DB statistics are required, then its recommended to open DB * with new Options instance as underlying native statistics instance does not @@ -115,13 +115,12 @@ public class RocksDB extends RocksObject { // in RocksDB can prevent Java to GC during the life-time of // the currently-created RocksDB. 
RocksDB db = new RocksDB(); - db.open(options.nativeHandle_, options.cacheSize_, - options.numShardBits_, path); - + db.open(options.nativeHandle_, path); + db.storeOptionsInstance(options); return db; } - + private void storeOptionsInstance(Options options) { options_ = options; } @@ -334,8 +333,7 @@ public class RocksDB extends RocksObject { // native methods protected native void open( - long optionsHandle, long cacheSize, int numShardBits, - String path) throws RocksDBException; + long optionsHandle, String path) throws RocksDBException; protected native void put( long handle, byte[] key, int keyLen, byte[] value, int valueLen) throws RocksDBException; diff --git a/java/org/rocksdb/benchmark/DbBenchmark.java b/java/org/rocksdb/benchmark/DbBenchmark.java index 36eea0c17..b715f9af1 100644 --- a/java/org/rocksdb/benchmark/DbBenchmark.java +++ b/java/org/rocksdb/benchmark/DbBenchmark.java @@ -446,7 +446,6 @@ public class DbBenchmark { randSeed_ = (Long) flags.get(Flag.seed); databaseDir_ = (String) flags.get(Flag.db); writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second); - cacheSize_ = (Long) flags.get(Flag.cache_size); memtable_ = (String) flags.get(Flag.memtablerep); maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number); prefixSize_ = (Integer) flags.get(Flag.prefix_size); @@ -491,7 +490,6 @@ public class DbBenchmark { } private void prepareOptions(Options options) { - options.setCacheSize(cacheSize_); if (!useExisting_) { options.setCreateIfMissing(true); } else { @@ -521,6 +519,13 @@ public class DbBenchmark { if (usePlainTable_) { options.setTableFormatConfig( new PlainTableConfig().setKeySize(keySize_)); + } else { + BlockBasedTableConfig table_options = new BlockBasedTableConfig(); + table_options.setBlockSize((Long)flags_.get(Flag.block_size)) + .setBlockCacheSize((Long)flags_.get(Flag.cache_size)) + .setFilterBitsPerKey((Integer)flags_.get(Flag.bloom_bits)) + .setCacheNumShardBits((Integer)flags_.get(Flag.cache_numshardbits)); + options.setTableFormatConfig(table_options); } options.setWriteBufferSize( (Long)flags_.get(Flag.write_buffer_size)); @@ -532,12 +537,6 @@ public class DbBenchmark { (Integer)flags_.get(Flag.max_background_compactions)); options.setMaxBackgroundFlushes( (Integer)flags_.get(Flag.max_background_flushes)); - options.setCacheSize( - (Long)flags_.get(Flag.cache_size)); - options.setCacheNumShardBits( - (Integer)flags_.get(Flag.cache_numshardbits)); - options.setBlockSize( - (Long)flags_.get(Flag.block_size)); options.setMaxOpenFiles( (Integer)flags_.get(Flag.open_files)); options.setTableCacheRemoveScanCountLimit( @@ -548,8 +547,6 @@ public class DbBenchmark { (Boolean)flags_.get(Flag.use_fsync)); options.setWalDir( (String)flags_.get(Flag.wal_dir)); - options.setDisableSeekCompaction( - (Boolean)flags_.get(Flag.disable_seek_compaction)); options.setDeleteObsoleteFilesPeriodMicros( (Integer)flags_.get(Flag.delete_obsolete_files_period_micros)); options.setTableCacheNumshardbits( @@ -604,15 +601,6 @@ public class DbBenchmark { (Integer)flags_.get(Flag.max_successive_merges)); options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds)); options.setWalSizeLimitMB((Long)flags_.get(Flag.wal_size_limit_MB)); - int bloomBits = (Integer)flags_.get(Flag.bloom_bits); - if (bloomBits > 0) { - // Internally, options will keep a reference to this BloomFilter. - // This will disallow Java to GC this BloomFilter. In addition, - // options.dispose() will release the c++ object of this BloomFilter. 
- // As a result, the caller should not directly call - // BloomFilter.dispose(). - options.setFilter(new BloomFilter(bloomBits)); - } /* TODO(yhchiang): enable the following parameters options.setCompressionType((String)flags_.get(Flag.compression_type)); options.setCompressionLevel((Integer)flags_.get(Flag.compression_level)); @@ -1160,7 +1148,7 @@ public class DbBenchmark { return Integer.parseInt(value); } }, - block_size(defaultOptions_.blockSize(), + block_size(defaultBlockBasedTableOptions_.blockSize(), "Number of bytes in a block.") { @Override public Object parseValue(String value) { return Long.parseLong(value); @@ -1312,12 +1300,6 @@ public class DbBenchmark { return Integer.parseInt(value); } }, - disable_seek_compaction(false,"Option to disable compaction\n" + - "\ttriggered by read.") { - @Override public Object parseValue(String value) { - return parseBoolean(value); - } - }, delete_obsolete_files_period_micros(0,"Option to delete\n" + "\tobsolete files periodically. 0 means that obsolete files are\n" + "\tdeleted after every compaction run.") { @@ -1597,7 +1579,6 @@ public class DbBenchmark { final int threadNum_; final int writesPerSeconds_; final long randSeed_; - final long cacheSize_; final boolean useExisting_; final String databaseDir_; double compressionRatio_; @@ -1620,6 +1601,8 @@ public class DbBenchmark { // as the scope of a static member equals to the scope of the problem, // we let its c++ pointer to be disposed in its finalizer. static Options defaultOptions_ = new Options(); + static BlockBasedTableConfig defaultBlockBasedTableOptions_ = + new BlockBasedTableConfig(); String compressionType_; CompressionType compression_; } diff --git a/java/org/rocksdb/test/OptionsTest.java b/java/org/rocksdb/test/OptionsTest.java index e1e0e059e..b065c9023 100644 --- a/java/org/rocksdb/test/OptionsTest.java +++ b/java/org/rocksdb/test/OptionsTest.java @@ -214,24 +214,6 @@ public class OptionsTest { assert(opt.minWriteBufferNumberToMerge() == intValue); } - { // BlockSize test - long longValue = rand.nextLong(); - opt.setBlockSize(longValue); - assert(opt.blockSize() == longValue); - } - - { // BlockRestartInterval test - int intValue = rand.nextInt(); - opt.setBlockRestartInterval(intValue); - assert(opt.blockRestartInterval() == intValue); - } - - { // WholeKeyFiltering test - boolean boolValue = rand.nextBoolean(); - opt.setWholeKeyFiltering(boolValue); - assert(opt.wholeKeyFiltering() == boolValue); - } - { // NumLevels test int intValue = rand.nextInt(); opt.setNumLevels(intValue); @@ -304,12 +286,6 @@ public class OptionsTest { assert(opt.maxGrandparentOverlapFactor() == intValue); } - { // DisableSeekCompaction test - boolean boolValue = rand.nextBoolean(); - opt.setDisableSeekCompaction(boolValue); - assert(opt.disableSeekCompaction() == boolValue); - } - { // SoftRateLimit test double doubleValue = rand.nextDouble(); opt.setSoftRateLimit(doubleValue); @@ -328,12 +304,6 @@ public class OptionsTest { assert(opt.rateLimitDelayMaxMilliseconds() == intValue); } - { // NoBlockCache test - boolean boolValue = rand.nextBoolean(); - opt.setNoBlockCache(boolValue); - assert(opt.noBlockCache() == boolValue); - } - { // ArenaBlockSize test long longValue = rand.nextLong(); opt.setArenaBlockSize(longValue); @@ -352,12 +322,6 @@ public class OptionsTest { assert(opt.purgeRedundantKvsWhileFlush() == boolValue); } - { // BlockSizeDeviation test - int intValue = rand.nextInt(); - opt.setBlockSizeDeviation(intValue); - assert(opt.blockSizeDeviation() == intValue); - } - { // 
VerifyChecksumsInCompaction test boolean boolValue = rand.nextBoolean(); opt.setVerifyChecksumsInCompaction(boolValue); diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc index 88b7a3821..da420c78f 100644 --- a/java/rocksjni/options.cc +++ b/java/rocksjni/options.cc @@ -21,7 +21,6 @@ #include "rocksdb/memtablerep.h" #include "rocksdb/table.h" #include "rocksdb/slice_transform.h" -#include "rocksdb/filter_policy.h" /* * Class: org_rocksdb_Options @@ -118,17 +117,6 @@ jlong Java_org_rocksdb_Options_statisticsPtr( return reinterpret_cast(st); } -/* - * Class: org_rocksdb_Options - * Method: setFilterHandle - * Signature: (JJ)V - */ -void Java_org_rocksdb_Options_setFilterHandle( - JNIEnv* env, jobject jobj, jlong jopt_handle, jlong jfilter_handle) { - reinterpret_cast(jopt_handle)->filter_policy = - reinterpret_cast(jfilter_handle); -} - /* * Class: org_rocksdb_Options * Method: maxWriteBufferNumber @@ -139,49 +127,6 @@ jint Java_org_rocksdb_Options_maxWriteBufferNumber( return reinterpret_cast(jhandle)->max_write_buffer_number; } -/* - * Class: org_rocksdb_Options - * Method: setBlockSize - * Signature: (JJ)V - */ -void Java_org_rocksdb_Options_setBlockSize( - JNIEnv* env, jobject jobj, jlong jhandle, jlong jblock_size) { - reinterpret_cast(jhandle)->block_size = - static_cast(jblock_size); -} - -/* - * Class: org_rocksdb_Options - * Method: blockSize - * Signature: (J)J - */ -jlong Java_org_rocksdb_Options_blockSize( - JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast(jhandle)->block_size; -} - -/* - * Class: org_rocksdb_Options - * Method: setDisableSeekCompaction - * Signature: (JZ)V - */ -void Java_org_rocksdb_Options_setDisableSeekCompaction( - JNIEnv* env, jobject jobj, jlong jhandle, - jboolean jdisable_seek_compaction) { - reinterpret_cast(jhandle)->disable_seek_compaction = - jdisable_seek_compaction; -} - -/* - * Class: org_rocksdb_Options - * Method: disableSeekCompaction - * Signature: (J)Z - */ -jboolean Java_org_rocksdb_Options_disableSeekCompaction( - JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast(jhandle)->disable_seek_compaction; -} - /* * Class: org_rocksdb_Options * Method: errorIfExists @@ -893,27 +838,6 @@ void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge( static_cast(jmin_write_buffer_number_to_merge); } -/* - * Class: org_rocksdb_Options - * Method: blockRestartInterval - * Signature: (J)I - */ -jint Java_org_rocksdb_Options_blockRestartInterval( - JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast(jhandle)->block_restart_interval; -} - -/* - * Class: org_rocksdb_Options - * Method: setBlockRestartInterval - * Signature: (JI)V - */ -void Java_org_rocksdb_Options_setBlockRestartInterval( - JNIEnv* env, jobject jobj, jlong jhandle, jint jblock_restart_interval) { - reinterpret_cast(jhandle)->block_restart_interval = - static_cast(jblock_restart_interval); -} - /* * Class: org_rocksdb_Options * Method: setCompressionType @@ -956,27 +880,6 @@ jbyte Java_org_rocksdb_Options_compactionStyle( return reinterpret_cast(jhandle)->compaction_style; } -/* - * Class: org_rocksdb_Options - * Method: wholeKeyFiltering - * Signature: (J)Z - */ -jboolean Java_org_rocksdb_Options_wholeKeyFiltering( - JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast(jhandle)->whole_key_filtering; -} - -/* - * Class: org_rocksdb_Options - * Method: setWholeKeyFiltering - * Signature: (JZ)V - */ -void Java_org_rocksdb_Options_setWholeKeyFiltering( - JNIEnv* env, jobject jobj, jlong jhandle, jboolean 
jwhole_key_filtering) { - reinterpret_cast(jhandle)->whole_key_filtering = - static_cast(jwhole_key_filtering); -} - /* * Class: org_rocksdb_Options * Method: numLevels @@ -1324,27 +1227,6 @@ void Java_org_rocksdb_Options_setRateLimitDelayMaxMilliseconds( static_cast(jrate_limit_delay_max_milliseconds); } -/* - * Class: org_rocksdb_Options - * Method: noBlockCache - * Signature: (J)Z - */ -jboolean Java_org_rocksdb_Options_noBlockCache( - JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast(jhandle)->no_block_cache; -} - -/* - * Class: org_rocksdb_Options - * Method: setNoBlockCache - * Signature: (JZ)V - */ -void Java_org_rocksdb_Options_setNoBlockCache( - JNIEnv* env, jobject jobj, jlong jhandle, jboolean jno_block_cache) { - reinterpret_cast(jhandle)->no_block_cache = - static_cast(jno_block_cache); -} - /* * Class: org_rocksdb_Options * Method: arenaBlockSize @@ -1414,28 +1296,6 @@ void Java_org_rocksdb_Options_setPurgeRedundantKvsWhileFlush( static_cast(jpurge_redundant_kvs_while_flush); } -/* - * Class: org_rocksdb_Options - * Method: blockSizeDeviation - * Signature: (J)I - */ -jint Java_org_rocksdb_Options_blockSizeDeviation( - JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast(jhandle)->block_size_deviation; -} - -/* - * Class: org_rocksdb_Options - * Method: setBlockSizeDeviation - * Signature: (JI)V - */ -void Java_org_rocksdb_Options_setBlockSizeDeviation( - JNIEnv* env, jobject jobj, jlong jhandle, - jint jblock_size_deviation) { - reinterpret_cast(jhandle)->block_size_deviation = - static_cast(jblock_size_deviation); -} - /* * Class: org_rocksdb_Options * Method: verifyChecksumsInCompaction diff --git a/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc index 2e0da85e5..f55290f64 100644 --- a/java/rocksjni/rocksjni.cc +++ b/java/rocksjni/rocksjni.cc @@ -26,21 +26,8 @@ * Signature: (JLjava/lang/String;)V */ void Java_org_rocksdb_RocksDB_open( - JNIEnv* env, jobject jdb, jlong jopt_handle, - jlong jcache_size, jint jnum_shardbits, jstring jdb_path) { + JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path) { auto opt = reinterpret_cast(jopt_handle); - if (jcache_size > 0) { - opt->no_block_cache = false; - if (jnum_shardbits >= 1) { - opt->block_cache = rocksdb::NewLRUCache(jcache_size, jnum_shardbits); - } else { - opt->block_cache = rocksdb::NewLRUCache(jcache_size); - } - } else { - opt->no_block_cache = true; - opt->block_cache = nullptr; - } - rocksdb::DB* db = nullptr; const char* db_path = env->GetStringUTFChars(jdb_path, 0); rocksdb::Status s = rocksdb::DB::Open(*opt, db_path, &db); diff --git a/java/rocksjni/table.cc b/java/rocksjni/table.cc index 4d6114f18..ffda1a2ba 100644 --- a/java/rocksjni/table.cc +++ b/java/rocksjni/table.cc @@ -7,7 +7,10 @@ #include #include "include/org_rocksdb_PlainTableConfig.h" +#include "include/org_rocksdb_BlockBasedTableConfig.h" #include "rocksdb/table.h" +#include "rocksdb/cache.h" +#include "rocksdb/filter_policy.h" /* * Class: org_rocksdb_PlainTableConfig @@ -24,3 +27,34 @@ jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle( options.index_sparseness = jindex_sparseness; return reinterpret_cast(rocksdb::NewPlainTableFactory(options)); } + +/* + * Class: org_rocksdb_BlockBasedTableConfig + * Method: newTableFactoryHandle + * Signature: (ZJIJIIZI)J + */ +jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle( + JNIEnv* env, jobject jobj, jboolean no_block_cache, jlong block_cache_size, + jint num_shardbits, jlong block_size, jint block_size_deviation, + jint 
block_restart_interval, jboolean whole_key_filtering, + jint bits_per_key) { + rocksdb::BlockBasedTableOptions options; + options.no_block_cache = no_block_cache; + + if (!no_block_cache && block_cache_size > 0) { + if (num_shardbits > 0) { + options.block_cache = + rocksdb::NewLRUCache(block_cache_size, num_shardbits); + } else { + options.block_cache = rocksdb::NewLRUCache(block_cache_size); + } + } + options.block_size = block_size; + options.block_size_deviation = block_size_deviation; + options.block_restart_interval = block_restart_interval; + options.whole_key_filtering = whole_key_filtering; + if (bits_per_key > 0) { + options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bits_per_key)); + } + return reinterpret_cast(rocksdb::NewBlockBasedTableFactory(options)); +}
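
Usage sketch (illustrative, not part of the patch): after this change, block-cache, block-size, and bloom-filter settings are configured through BlockBasedTableConfig and passed to Options via setTableFormatConfig, mirroring the updated RocksDBSample and DbBenchmark above. The class name and database path below are placeholders; the API calls are the ones introduced or kept by this diff.

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.util.SizeUnit;

public class BlockBasedTableConfigExample {
  static {
    // Load the native rocksdbjni library before any RocksDB call.
    RocksDB.loadLibrary();
  }

  public static void main(String[] args) {
    // Table-format settings that used to live on Options
    // (setBlockSize, setCacheSize, setCacheNumShardBits, setFilter, ...)
    // are now grouped on BlockBasedTableConfig.
    BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(64 * SizeUnit.KB)
               .setBlockSize(4 * SizeUnit.KB)
               .setCacheNumShardBits(6)
               .setFilterBitsPerKey(10);  // bloom filter; <= 0 disables it

    Options options = new Options();
    options.setCreateIfMissing(true);
    options.setTableFormatConfig(tableConfig);

    RocksDB db = null;
    try {
      // Path is a placeholder.
      db = RocksDB.open(options, "/tmp/rocksdbjni_bbt_example");
      db.put("hello".getBytes(), "world".getBytes());
    } catch (RocksDBException e) {
      e.printStackTrace();
    } finally {
      if (db != null) {
        db.close();
      }
      // Dispose the c++ pointer held by Options once no DB uses it.
      options.dispose();
    }
  }
}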