JNI changes corresponding to BlockBasedTableOptions migration

Summary: as title

Test Plan:
tested on my mac
make rocksdbjava
make jtest

Reviewers: sdong, igor, yhchiang

Reviewed By: yhchiang

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D21963
main
Lei Jin 10 years ago
parent 384400128f
commit 66f62e5c78
  1. 2
      java/Makefile
  2. 18
      java/RocksDBSample.java
  3. 210
      java/org/rocksdb/BlockBasedTableConfig.java
  4. 250
      java/org/rocksdb/Options.java
  5. 14
      java/org/rocksdb/RocksDB.java
  6. 37
      java/org/rocksdb/benchmark/DbBenchmark.java
  7. 36
      java/org/rocksdb/test/OptionsTest.java
  8. 140
      java/rocksjni/options.cc
  9. 15
      java/rocksjni/rocksjni.cc
  10. 34
      java/rocksjni/table.cc

@ -1,4 +1,4 @@
NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics org.rocksdb.RocksIterator org.rocksdb.VectorMemTableConfig org.rocksdb.SkipListMemTableConfig org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig org.rocksdb.PlainTableConfig org.rocksdb.ReadOptions org.rocksdb.Filter org.rocksdb.BloomFilter org.rocksdb.RestoreOptions org.rocksdb.RestoreBackupableDB org.rocksdb.RocksEnv NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics org.rocksdb.RocksIterator org.rocksdb.VectorMemTableConfig org.rocksdb.SkipListMemTableConfig org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig org.rocksdb.PlainTableConfig org.rocksdb.BlockBasedTableConfig org.rocksdb.ReadOptions org.rocksdb.Filter org.rocksdb.BloomFilter org.rocksdb.RestoreOptions org.rocksdb.RestoreBackupableDB org.rocksdb.RocksEnv
NATIVE_INCLUDE = ./include NATIVE_INCLUDE = ./include
ROCKSDB_JAR = rocksdbjni.jar ROCKSDB_JAR = rocksdbjni.jar

@ -35,16 +35,11 @@ public class RocksDBSample {
assert(db == null); assert(db == null);
} }
Filter filter = new BloomFilter(10);
options.setCreateIfMissing(true) options.setCreateIfMissing(true)
.createStatistics() .createStatistics()
.setWriteBufferSize(8 * SizeUnit.KB) .setWriteBufferSize(8 * SizeUnit.KB)
.setMaxWriteBufferNumber(3) .setMaxWriteBufferNumber(3)
.setDisableSeekCompaction(true)
.setBlockSize(64 * SizeUnit.KB)
.setMaxBackgroundCompactions(10) .setMaxBackgroundCompactions(10)
.setFilter(filter)
.setCacheNumShardBits(6)
.setCompressionType(CompressionType.SNAPPY_COMPRESSION) .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
.setCompactionStyle(CompactionStyle.UNIVERSAL); .setCompactionStyle(CompactionStyle.UNIVERSAL);
Statistics stats = options.statisticsPtr(); Statistics stats = options.statisticsPtr();
@ -52,10 +47,7 @@ public class RocksDBSample {
assert(options.createIfMissing() == true); assert(options.createIfMissing() == true);
assert(options.writeBufferSize() == 8 * SizeUnit.KB); assert(options.writeBufferSize() == 8 * SizeUnit.KB);
assert(options.maxWriteBufferNumber() == 3); assert(options.maxWriteBufferNumber() == 3);
assert(options.disableSeekCompaction() == true);
assert(options.blockSize() == 64 * SizeUnit.KB);
assert(options.maxBackgroundCompactions() == 10); assert(options.maxBackgroundCompactions() == 10);
assert(options.cacheNumShardBits() == 6);
assert(options.compressionType() == CompressionType.SNAPPY_COMPRESSION); assert(options.compressionType() == CompressionType.SNAPPY_COMPRESSION);
assert(options.compactionStyle() == CompactionStyle.UNIVERSAL); assert(options.compactionStyle() == CompactionStyle.UNIVERSAL);
@ -82,6 +74,15 @@ public class RocksDBSample {
options.setTableFormatConfig(new PlainTableConfig()); options.setTableFormatConfig(new PlainTableConfig());
assert(options.tableFactoryName().equals("PlainTable")); assert(options.tableFactoryName().equals("PlainTable"));
BlockBasedTableConfig table_options = new BlockBasedTableConfig();
table_options.setBlockCacheSize(64 * SizeUnit.KB)
.setFilterBitsPerKey(10)
.setCacheNumShardBits(6);
assert(table_options.blockCacheSize() == 64 * SizeUnit.KB);
assert(table_options.cacheNumShardBits() == 6);
options.setTableFormatConfig(table_options);
assert(options.tableFactoryName().equals("BlockBasedTable"));
try { try {
db = RocksDB.open(options, db_path_not_found); db = RocksDB.open(options, db_path_not_found);
db.put("hello".getBytes(), "world".getBytes()); db.put("hello".getBytes(), "world".getBytes());
@ -254,6 +255,5 @@ public class RocksDBSample {
// be sure to dispose c++ pointers // be sure to dispose c++ pointers
options.dispose(); options.dispose();
readOptions.dispose(); readOptions.dispose();
filter.dispose();
} }
} }

@ -0,0 +1,210 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
 * The config for the block based table sst format.
 *
 * BlockBasedTable is RocksDB's default SST file format.
 */
public class BlockBasedTableConfig extends TableFormatConfig {
  /**
   * Creates a config with RocksDB's defaults: block cache enabled (8MB),
   * 4K blocks, 10% size deviation, restart interval 16, whole-key
   * filtering on, and no bloom filter (0 bits per key).
   */
  public BlockBasedTableConfig() {
    noBlockCache_ = false;
    blockCacheSize_ = 8 * 1024 * 1024;
    blockSize_ = 4 * 1024;
    blockSizeDeviation_ = 10;
    blockRestartInterval_ = 16;
    wholeKeyFiltering_ = true;
    bitsPerKey_ = 0;
  }

  /**
   * Disable block cache. If this is set to true,
   * then no block cache should be used, and the block_cache should
   * point to a nullptr object.
   * Default: false
   *
   * @param noBlockCache if use block cache
   * @return the reference to the current config.
   */
  public BlockBasedTableConfig setNoBlockCache(boolean noBlockCache) {
    noBlockCache_ = noBlockCache;
    return this;
  }

  /**
   * @return if block cache is disabled
   */
  public boolean noBlockCache() {
    return noBlockCache_;
  }

  /**
   * Set the amount of cache in bytes that will be used by RocksDB.
   * If cacheSize is non-positive, then cache will not be used.
   * DEFAULT: 8M
   *
   * @param blockCacheSize block cache size in bytes
   * @return the reference to the current config.
   */
  public BlockBasedTableConfig setBlockCacheSize(long blockCacheSize) {
    blockCacheSize_ = blockCacheSize;
    return this;
  }

  /**
   * @return block cache size in bytes
   */
  public long blockCacheSize() {
    return blockCacheSize_;
  }

  /**
   * Controls the number of shards for the block cache.
   * This is applied only if cacheSize is set to non-negative.
   *
   * @param numShardBits the number of shard bits. The resulting
   *     number of shards would be 2 ^ numShardBits. Any negative
   *     number means use default settings.
   * @return the reference to the current config.
   */
  public BlockBasedTableConfig setCacheNumShardBits(int numShardBits) {
    numShardBits_ = numShardBits;
    return this;
  }

  /**
   * Returns the number of shard bits used in the block cache.
   * The resulting number of shards would be 2 ^ (returned value).
   * Any negative number means use default settings.
   *
   * @return the number of shard bits used in the block cache.
   */
  public int cacheNumShardBits() {
    return numShardBits_;
  }

  /**
   * Approximate size of user data packed per block. Note that the
   * block size specified here corresponds to uncompressed data. The
   * actual size of the unit read from disk may be smaller if
   * compression is enabled. This parameter can be changed dynamically.
   * Default: 4K
   *
   * @param blockSize block size in bytes
   * @return the reference to the current config.
   */
  public BlockBasedTableConfig setBlockSize(long blockSize) {
    blockSize_ = blockSize;
    return this;
  }

  /**
   * @return block size in bytes
   */
  public long blockSize() {
    return blockSize_;
  }

  /**
   * This is used to close a block before it reaches the configured
   * 'block_size'. If the percentage of free space in the current block is less
   * than this specified number and adding a new record to the block will
   * exceed the configured block size, then this block will be closed and the
   * new record will be written to the next block.
   * Default is 10.
   *
   * @param blockSizeDeviation the deviation to block size allowed
   * @return the reference to the current config.
   */
  public BlockBasedTableConfig setBlockSizeDeviation(int blockSizeDeviation) {
    blockSizeDeviation_ = blockSizeDeviation;
    return this;
  }

  /**
   * @return the allowed deviation (in percent) from the configured block size.
   */
  public int blockSizeDeviation() {
    return blockSizeDeviation_;
  }

  /**
   * Set block restart interval
   *
   * @param restartInterval block restart interval.
   * @return the reference to the current config.
   */
  public BlockBasedTableConfig setBlockRestartInterval(int restartInterval) {
    blockRestartInterval_ = restartInterval;
    return this;
  }

  /**
   * @return block restart interval
   */
  public int blockRestartInterval() {
    return blockRestartInterval_;
  }

  /**
   * If true, place whole keys in the filter (not just prefixes).
   * This must generally be true for gets to be efficient.
   * Default: true
   *
   * @param wholeKeyFiltering if enable whole key filtering
   * @return the reference to the current config.
   */
  public BlockBasedTableConfig setWholeKeyFiltering(boolean wholeKeyFiltering) {
    wholeKeyFiltering_ = wholeKeyFiltering;
    return this;
  }

  /**
   * @return if whole key filtering is enabled
   */
  public boolean wholeKeyFiltering() {
    return wholeKeyFiltering_;
  }

  /**
   * Use a bloom filter policy to reduce disk reads.
   *
   * @param bitsPerKey the number of bloom filter bits per key.
   *     A value of 0 (the default) disables the bloom filter.
   * @return the reference to the current config.
   */
  public BlockBasedTableConfig setFilterBitsPerKey(int bitsPerKey) {
    bitsPerKey_ = bitsPerKey;
    return this;
  }

  /**
   * @return the number of bloom filter bits per key; 0 means no
   *     bloom filter is used.
   */
  public int filterBitsPerKey() {
    return bitsPerKey_;
  }

  @Override protected long newTableFactoryHandle() {
    // Hand the full Java-side configuration to the native layer, which
    // constructs the rocksdb::BlockBasedTableFactory.
    return newTableFactoryHandle(noBlockCache_, blockCacheSize_, numShardBits_,
        blockSize_, blockSizeDeviation_, blockRestartInterval_,
        wholeKeyFiltering_, bitsPerKey_);
  }

  private native long newTableFactoryHandle(
      boolean noBlockCache, long blockCacheSize, int numShardBits,
      long blockSize, int blockSizeDeviation, int blockRestartInterval,
      boolean wholeKeyFiltering, int bitsPerKey);

  private boolean noBlockCache_;
  private long blockCacheSize_;
  private int numShardBits_;
  private long blockSize_;
  private int blockSizeDeviation_;
  private int blockRestartInterval_;
  private boolean wholeKeyFiltering_;
  private int bitsPerKey_;
}

@ -136,135 +136,6 @@ public class Options extends RocksObject {
return maxWriteBufferNumber(nativeHandle_); return maxWriteBufferNumber(nativeHandle_);
} }
/*
* Approximate size of user data packed per block. Note that the
* block size specified here corresponds to uncompressed data. The
* actual size of the unit read from disk may be smaller if
* compression is enabled. This parameter can be changed dynamically.
*
* Default: 4K
*
* @param blockSize the size of each block in bytes.
* @return the instance of the current Options.
* @see RocksDB.open()
*/
public Options setBlockSize(long blockSize) {
assert(isInitialized());
setBlockSize(nativeHandle_, blockSize);
return this;
}
/*
* Returns the size of a block in bytes.
*
* @return block size.
* @see setBlockSize()
*/
public long blockSize() {
assert(isInitialized());
return blockSize(nativeHandle_);
}
/**
* Use the specified filter policy to reduce disk reads.
*
* Filter should not be disposed before options instances using this filter is
* disposed. If dispose() function is not called, then filter object will be
* GC'd automatically.
*
* Filter instance can be re-used in multiple options instances.
*
* @param Filter policy java instance.
* @return the instance of the current Options.
* @see RocksDB.open()
*/
public Options setFilter(Filter filter) {
assert(isInitialized());
setFilterHandle(nativeHandle_, filter.nativeHandle_);
filter_ = filter;
return this;
}
private native void setFilterHandle(long optHandle, long filterHandle);
/*
* Disable compaction triggered by seek.
* With bloomfilter and fast storage, a miss on one level
* is very cheap if the file handle is cached in table cache
* (which is true if max_open_files is large).
* Default: true
*
* @param disableSeekCompaction a boolean value to specify whether
* to disable seek compaction.
* @return the instance of the current Options.
* @see RocksDB.open()
*/
public Options setDisableSeekCompaction(boolean disableSeekCompaction) {
assert(isInitialized());
setDisableSeekCompaction(nativeHandle_, disableSeekCompaction);
return this;
}
/*
* Returns true if disable seek compaction is set to true.
*
* @return true if disable seek compaction is set to true.
* @see setDisableSeekCompaction()
*/
public boolean disableSeekCompaction() {
assert(isInitialized());
return disableSeekCompaction(nativeHandle_);
}
/**
* Set the amount of cache in bytes that will be used by RocksDB.
* If cacheSize is non-positive, then cache will not be used.
*
* DEFAULT: 8M
* @see setCacheNumShardBits()
*/
public Options setCacheSize(long cacheSize) {
cacheSize_ = cacheSize;
return this;
}
/**
* @return the amount of cache in bytes that will be used by RocksDB.
*
* @see cacheNumShardBits()
*/
public long cacheSize() {
return cacheSize_;
}
/**
* Controls the number of shards for the block cache.
* This is applied only if cacheSize is set to non-negative.
*
* @param numShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings."
* @return the reference to the current option.
*
* @see setCacheSize()
*/
public Options setCacheNumShardBits(int numShardBits) {
numShardBits_ = numShardBits;
return this;
}
/**
* Returns the number of shard bits used in the block cache.
* The resulting number of shards would be 2 ^ (returned value).
* Any negative number means use default settings.
*
* @return the number of shard bits used in the block cache.
*
* @see cacheSize()
*/
public int cacheNumShardBits() {
return numShardBits_;
}
/** /**
* If true, an error will be thrown during RocksDB.open() if the * If true, an error will be thrown during RocksDB.open() if the
* database already exists. * database already exists.
@ -1344,26 +1215,26 @@ public class Options extends RocksObject {
} }
private native void setBlockRestartInterval( private native void setBlockRestartInterval(
long handle, int blockRestartInterval); long handle, int blockRestartInterval);
/** /**
* Compress blocks using the specified compression algorithm. This * Compress blocks using the specified compression algorithm. This
parameter can be changed dynamically. parameter can be changed dynamically.
* *
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
* *
* @return Compression type. * @return Compression type.
*/ */
public CompressionType compressionType() { public CompressionType compressionType() {
return CompressionType.values()[compressionType(nativeHandle_)]; return CompressionType.values()[compressionType(nativeHandle_)];
} }
private native byte compressionType(long handle); private native byte compressionType(long handle);
/** /**
* Compress blocks using the specified compression algorithm. This * Compress blocks using the specified compression algorithm. This
parameter can be changed dynamically. parameter can be changed dynamically.
* *
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
* *
* @param compressionType Compression Type. * @param compressionType Compression Type.
* @return the reference to the current option. * @return the reference to the current option.
*/ */
@ -1372,22 +1243,22 @@ public class Options extends RocksObject {
return this; return this;
} }
private native void setCompressionType(long handle, byte compressionType); private native void setCompressionType(long handle, byte compressionType);
/** /**
* Compaction style for DB. * Compaction style for DB.
* *
* @return Compaction style. * @return Compaction style.
*/ */
public CompactionStyle compactionStyle() { public CompactionStyle compactionStyle() {
return CompactionStyle.values()[compactionStyle(nativeHandle_)]; return CompactionStyle.values()[compactionStyle(nativeHandle_)];
} }
private native byte compactionStyle(long handle); private native byte compactionStyle(long handle);
/** /**
* Set compaction style for DB. * Set compaction style for DB.
* *
* Default: LEVEL. * Default: LEVEL.
* *
* @param compactionStyle Compaction style. * @param compactionStyle Compaction style.
* @return the reference to the current option. * @return the reference to the current option.
*/ */
@ -1397,33 +1268,6 @@ public class Options extends RocksObject {
} }
private native void setCompactionStyle(long handle, byte compactionStyle); private native void setCompactionStyle(long handle, byte compactionStyle);
/**
* If true, place whole keys in the filter (not just prefixes).
* This must generally be true for gets to be efficient.
* Default: true
*
* @return if true, then whole-key-filtering is on.
*/
public boolean wholeKeyFiltering() {
return wholeKeyFiltering(nativeHandle_);
}
private native boolean wholeKeyFiltering(long handle);
/**
* If true, place whole keys in the filter (not just prefixes).
* This must generally be true for gets to be efficient.
* Default: true
*
* @param wholeKeyFiltering if true, then whole-key-filtering is on.
* @return the reference to the current option.
*/
public Options setWholeKeyFiltering(boolean wholeKeyFiltering) {
setWholeKeyFiltering(nativeHandle_, wholeKeyFiltering);
return this;
}
private native void setWholeKeyFiltering(
long handle, boolean wholeKeyFiltering);
/** /**
* If level-styled compaction is used, then this number determines * If level-styled compaction is used, then this number determines
* the total number of levels. * the total number of levels.
@ -1897,35 +1741,6 @@ public class Options extends RocksObject {
private native void setRateLimitDelayMaxMilliseconds( private native void setRateLimitDelayMaxMilliseconds(
long handle, int rateLimitDelayMaxMilliseconds); long handle, int rateLimitDelayMaxMilliseconds);
/**
* Disable block cache. If this is set to true,
* then no block cache should be used, and the block_cache should
* point to a nullptr object.
* Default: false
*
* @return true if block cache is disabled.
*/
public boolean noBlockCache() {
return noBlockCache(nativeHandle_);
}
private native boolean noBlockCache(long handle);
/**
* Disable block cache. If this is set to true,
* then no block cache should be used, and the block_cache should
* point to a nullptr object.
* Default: false
*
* @param noBlockCache true if block-cache is disabled.
* @return the reference to the current option.
*/
public Options setNoBlockCache(boolean noBlockCache) {
setNoBlockCache(nativeHandle_, noBlockCache);
return this;
}
private native void setNoBlockCache(
long handle, boolean noBlockCache);
/** /**
* The size of one block in arena memory allocation. * The size of one block in arena memory allocation.
* If <= 0, a proper value is automatically calculated (usually 1/10 of * If <= 0, a proper value is automatically calculated (usually 1/10 of
@ -2023,39 +1838,6 @@ public class Options extends RocksObject {
private native void setPurgeRedundantKvsWhileFlush( private native void setPurgeRedundantKvsWhileFlush(
long handle, boolean purgeRedundantKvsWhileFlush); long handle, boolean purgeRedundantKvsWhileFlush);
/**
* This is used to close a block before it reaches the configured
* 'block_size'. If the percentage of free space in the current block is less
* than this specified number and adding a new record to the block will
* exceed the configured block size, then this block will be closed and the
* new record will be written to the next block.
* Default is 10.
*
* @return the target block size
*/
public int blockSizeDeviation() {
return blockSizeDeviation(nativeHandle_);
}
private native int blockSizeDeviation(long handle);
/**
* This is used to close a block before it reaches the configured
* 'block_size'. If the percentage of free space in the current block is less
* than this specified number and adding a new record to the block will
* exceed the configured block size, then this block will be closed and the
* new record will be written to the next block.
* Default is 10.
*
* @param blockSizeDeviation the target block size
* @return the reference to the current option.
*/
public Options setBlockSizeDeviation(int blockSizeDeviation) {
setBlockSizeDeviation(nativeHandle_, blockSizeDeviation);
return this;
}
private native void setBlockSizeDeviation(
long handle, int blockSizeDeviation);
/** /**
* If true, compaction will verify checksum on every read that happens * If true, compaction will verify checksum on every read that happens
* as part of compaction * as part of compaction
@ -2437,11 +2219,6 @@ public class Options extends RocksObject {
private native void setMaxWriteBufferNumber( private native void setMaxWriteBufferNumber(
long handle, int maxWriteBufferNumber); long handle, int maxWriteBufferNumber);
private native int maxWriteBufferNumber(long handle); private native int maxWriteBufferNumber(long handle);
private native void setBlockSize(long handle, long blockSize);
private native long blockSize(long handle);
private native void setDisableSeekCompaction(
long handle, boolean disableSeekCompaction);
private native boolean disableSeekCompaction(long handle);
private native void setMaxBackgroundCompactions( private native void setMaxBackgroundCompactions(
long handle, int maxBackgroundCompactions); long handle, int maxBackgroundCompactions);
private native int maxBackgroundCompactions(long handle); private native int maxBackgroundCompactions(long handle);
@ -2459,6 +2236,5 @@ public class Options extends RocksObject {
long cacheSize_; long cacheSize_;
int numShardBits_; int numShardBits_;
Filter filter_;
RocksEnv env_; RocksEnv env_;
} }

@ -99,11 +99,11 @@ public class RocksDB extends RocksObject {
/** /**
* The factory constructor of RocksDB that opens a RocksDB instance given * The factory constructor of RocksDB that opens a RocksDB instance given
* the path to the database using the specified options and db path. * the path to the database using the specified options and db path.
* *
* Options instance *should* not be disposed before all DBs using this options * Options instance *should* not be disposed before all DBs using this options
* instance have been closed. If user doesn't call options dispose explicitly, * instance have been closed. If user doesn't call options dispose explicitly,
* then this options instance will be GC'd automatically. * then this options instance will be GC'd automatically.
* *
* Options instance can be re-used to open multiple DBs if DB statistics is * Options instance can be re-used to open multiple DBs if DB statistics is
* not used. If DB statistics are required, then its recommended to open DB * not used. If DB statistics are required, then its recommended to open DB
* with new Options instance as underlying native statistics instance does not * with new Options instance as underlying native statistics instance does not
@ -115,13 +115,12 @@ public class RocksDB extends RocksObject {
// in RocksDB can prevent Java to GC during the life-time of // in RocksDB can prevent Java to GC during the life-time of
// the currently-created RocksDB. // the currently-created RocksDB.
RocksDB db = new RocksDB(); RocksDB db = new RocksDB();
db.open(options.nativeHandle_, options.cacheSize_, db.open(options.nativeHandle_, path);
options.numShardBits_, path);
db.storeOptionsInstance(options); db.storeOptionsInstance(options);
return db; return db;
} }
private void storeOptionsInstance(Options options) { private void storeOptionsInstance(Options options) {
options_ = options; options_ = options;
} }
@ -334,8 +333,7 @@ public class RocksDB extends RocksObject {
// native methods // native methods
protected native void open( protected native void open(
long optionsHandle, long cacheSize, int numShardBits, long optionsHandle, String path) throws RocksDBException;
String path) throws RocksDBException;
protected native void put( protected native void put(
long handle, byte[] key, int keyLen, long handle, byte[] key, int keyLen,
byte[] value, int valueLen) throws RocksDBException; byte[] value, int valueLen) throws RocksDBException;

@ -446,7 +446,6 @@ public class DbBenchmark {
randSeed_ = (Long) flags.get(Flag.seed); randSeed_ = (Long) flags.get(Flag.seed);
databaseDir_ = (String) flags.get(Flag.db); databaseDir_ = (String) flags.get(Flag.db);
writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second); writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second);
cacheSize_ = (Long) flags.get(Flag.cache_size);
memtable_ = (String) flags.get(Flag.memtablerep); memtable_ = (String) flags.get(Flag.memtablerep);
maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number); maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number);
prefixSize_ = (Integer) flags.get(Flag.prefix_size); prefixSize_ = (Integer) flags.get(Flag.prefix_size);
@ -491,7 +490,6 @@ public class DbBenchmark {
} }
private void prepareOptions(Options options) { private void prepareOptions(Options options) {
options.setCacheSize(cacheSize_);
if (!useExisting_) { if (!useExisting_) {
options.setCreateIfMissing(true); options.setCreateIfMissing(true);
} else { } else {
@ -521,6 +519,13 @@ public class DbBenchmark {
if (usePlainTable_) { if (usePlainTable_) {
options.setTableFormatConfig( options.setTableFormatConfig(
new PlainTableConfig().setKeySize(keySize_)); new PlainTableConfig().setKeySize(keySize_));
} else {
BlockBasedTableConfig table_options = new BlockBasedTableConfig();
table_options.setBlockSize((Long)flags_.get(Flag.block_size))
.setBlockCacheSize((Long)flags_.get(Flag.cache_size))
.setFilterBitsPerKey((Integer)flags_.get(Flag.bloom_bits))
.setCacheNumShardBits((Integer)flags_.get(Flag.cache_numshardbits));
options.setTableFormatConfig(table_options);
} }
options.setWriteBufferSize( options.setWriteBufferSize(
(Long)flags_.get(Flag.write_buffer_size)); (Long)flags_.get(Flag.write_buffer_size));
@ -532,12 +537,6 @@ public class DbBenchmark {
(Integer)flags_.get(Flag.max_background_compactions)); (Integer)flags_.get(Flag.max_background_compactions));
options.setMaxBackgroundFlushes( options.setMaxBackgroundFlushes(
(Integer)flags_.get(Flag.max_background_flushes)); (Integer)flags_.get(Flag.max_background_flushes));
options.setCacheSize(
(Long)flags_.get(Flag.cache_size));
options.setCacheNumShardBits(
(Integer)flags_.get(Flag.cache_numshardbits));
options.setBlockSize(
(Long)flags_.get(Flag.block_size));
options.setMaxOpenFiles( options.setMaxOpenFiles(
(Integer)flags_.get(Flag.open_files)); (Integer)flags_.get(Flag.open_files));
options.setTableCacheRemoveScanCountLimit( options.setTableCacheRemoveScanCountLimit(
@ -548,8 +547,6 @@ public class DbBenchmark {
(Boolean)flags_.get(Flag.use_fsync)); (Boolean)flags_.get(Flag.use_fsync));
options.setWalDir( options.setWalDir(
(String)flags_.get(Flag.wal_dir)); (String)flags_.get(Flag.wal_dir));
options.setDisableSeekCompaction(
(Boolean)flags_.get(Flag.disable_seek_compaction));
options.setDeleteObsoleteFilesPeriodMicros( options.setDeleteObsoleteFilesPeriodMicros(
(Integer)flags_.get(Flag.delete_obsolete_files_period_micros)); (Integer)flags_.get(Flag.delete_obsolete_files_period_micros));
options.setTableCacheNumshardbits( options.setTableCacheNumshardbits(
@ -604,15 +601,6 @@ public class DbBenchmark {
(Integer)flags_.get(Flag.max_successive_merges)); (Integer)flags_.get(Flag.max_successive_merges));
options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds)); options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds));
options.setWalSizeLimitMB((Long)flags_.get(Flag.wal_size_limit_MB)); options.setWalSizeLimitMB((Long)flags_.get(Flag.wal_size_limit_MB));
int bloomBits = (Integer)flags_.get(Flag.bloom_bits);
if (bloomBits > 0) {
// Internally, options will keep a reference to this BloomFilter.
// This will disallow Java to GC this BloomFilter. In addition,
// options.dispose() will release the c++ object of this BloomFilter.
// As a result, the caller should not directly call
// BloomFilter.dispose().
options.setFilter(new BloomFilter(bloomBits));
}
/* TODO(yhchiang): enable the following parameters /* TODO(yhchiang): enable the following parameters
options.setCompressionType((String)flags_.get(Flag.compression_type)); options.setCompressionType((String)flags_.get(Flag.compression_type));
options.setCompressionLevel((Integer)flags_.get(Flag.compression_level)); options.setCompressionLevel((Integer)flags_.get(Flag.compression_level));
@ -1160,7 +1148,7 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
block_size(defaultOptions_.blockSize(), block_size(defaultBlockBasedTableOptions_.blockSize(),
"Number of bytes in a block.") { "Number of bytes in a block.") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Long.parseLong(value); return Long.parseLong(value);
@ -1312,12 +1300,6 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
disable_seek_compaction(false,"Option to disable compaction\n" +
"\ttriggered by read.") {
@Override public Object parseValue(String value) {
return parseBoolean(value);
}
},
delete_obsolete_files_period_micros(0,"Option to delete\n" + delete_obsolete_files_period_micros(0,"Option to delete\n" +
"\tobsolete files periodically. 0 means that obsolete files are\n" + "\tobsolete files periodically. 0 means that obsolete files are\n" +
"\tdeleted after every compaction run.") { "\tdeleted after every compaction run.") {
@ -1597,7 +1579,6 @@ public class DbBenchmark {
final int threadNum_; final int threadNum_;
final int writesPerSeconds_; final int writesPerSeconds_;
final long randSeed_; final long randSeed_;
final long cacheSize_;
final boolean useExisting_; final boolean useExisting_;
final String databaseDir_; final String databaseDir_;
double compressionRatio_; double compressionRatio_;
@ -1620,6 +1601,8 @@ public class DbBenchmark {
// as the scope of a static member equals to the scope of the problem, // as the scope of a static member equals to the scope of the problem,
// we let its c++ pointer to be disposed in its finalizer. // we let its c++ pointer to be disposed in its finalizer.
static Options defaultOptions_ = new Options(); static Options defaultOptions_ = new Options();
static BlockBasedTableConfig defaultBlockBasedTableOptions_ =
new BlockBasedTableConfig();
String compressionType_; String compressionType_;
CompressionType compression_; CompressionType compression_;
} }

@ -214,24 +214,6 @@ public class OptionsTest {
assert(opt.minWriteBufferNumberToMerge() == intValue); assert(opt.minWriteBufferNumberToMerge() == intValue);
} }
{ // BlockSize test
long longValue = rand.nextLong();
opt.setBlockSize(longValue);
assert(opt.blockSize() == longValue);
}
{ // BlockRestartInterval test
int intValue = rand.nextInt();
opt.setBlockRestartInterval(intValue);
assert(opt.blockRestartInterval() == intValue);
}
{ // WholeKeyFiltering test
boolean boolValue = rand.nextBoolean();
opt.setWholeKeyFiltering(boolValue);
assert(opt.wholeKeyFiltering() == boolValue);
}
{ // NumLevels test { // NumLevels test
int intValue = rand.nextInt(); int intValue = rand.nextInt();
opt.setNumLevels(intValue); opt.setNumLevels(intValue);
@ -304,12 +286,6 @@ public class OptionsTest {
assert(opt.maxGrandparentOverlapFactor() == intValue); assert(opt.maxGrandparentOverlapFactor() == intValue);
} }
{ // DisableSeekCompaction test
boolean boolValue = rand.nextBoolean();
opt.setDisableSeekCompaction(boolValue);
assert(opt.disableSeekCompaction() == boolValue);
}
{ // SoftRateLimit test { // SoftRateLimit test
double doubleValue = rand.nextDouble(); double doubleValue = rand.nextDouble();
opt.setSoftRateLimit(doubleValue); opt.setSoftRateLimit(doubleValue);
@ -328,12 +304,6 @@ public class OptionsTest {
assert(opt.rateLimitDelayMaxMilliseconds() == intValue); assert(opt.rateLimitDelayMaxMilliseconds() == intValue);
} }
{ // NoBlockCache test
boolean boolValue = rand.nextBoolean();
opt.setNoBlockCache(boolValue);
assert(opt.noBlockCache() == boolValue);
}
{ // ArenaBlockSize test { // ArenaBlockSize test
long longValue = rand.nextLong(); long longValue = rand.nextLong();
opt.setArenaBlockSize(longValue); opt.setArenaBlockSize(longValue);
@ -352,12 +322,6 @@ public class OptionsTest {
assert(opt.purgeRedundantKvsWhileFlush() == boolValue); assert(opt.purgeRedundantKvsWhileFlush() == boolValue);
} }
{ // BlockSizeDeviation test
int intValue = rand.nextInt();
opt.setBlockSizeDeviation(intValue);
assert(opt.blockSizeDeviation() == intValue);
}
{ // VerifyChecksumsInCompaction test { // VerifyChecksumsInCompaction test
boolean boolValue = rand.nextBoolean(); boolean boolValue = rand.nextBoolean();
opt.setVerifyChecksumsInCompaction(boolValue); opt.setVerifyChecksumsInCompaction(boolValue);

@ -21,7 +21,6 @@
#include "rocksdb/memtablerep.h" #include "rocksdb/memtablerep.h"
#include "rocksdb/table.h" #include "rocksdb/table.h"
#include "rocksdb/slice_transform.h" #include "rocksdb/slice_transform.h"
#include "rocksdb/filter_policy.h"
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
@ -118,17 +117,6 @@ jlong Java_org_rocksdb_Options_statisticsPtr(
return reinterpret_cast<jlong>(st); return reinterpret_cast<jlong>(st);
} }
/*
* Class: org_rocksdb_Options
* Method: setFilterHandle
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setFilterHandle(
JNIEnv* env, jobject jobj, jlong jopt_handle, jlong jfilter_handle) {
reinterpret_cast<rocksdb::Options*>(jopt_handle)->filter_policy =
reinterpret_cast<rocksdb::FilterPolicy*>(jfilter_handle);
}
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
* Method: maxWriteBufferNumber * Method: maxWriteBufferNumber
@ -139,49 +127,6 @@ jint Java_org_rocksdb_Options_maxWriteBufferNumber(
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_write_buffer_number; return reinterpret_cast<rocksdb::Options*>(jhandle)->max_write_buffer_number;
} }
/*
* Class: org_rocksdb_Options
* Method: setBlockSize
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setBlockSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jblock_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->block_size =
static_cast<size_t>(jblock_size);
}
/*
* Class: org_rocksdb_Options
* Method: blockSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_blockSize(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->block_size;
}
/*
* Class: org_rocksdb_Options
* Method: setDisableSeekCompaction
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setDisableSeekCompaction(
JNIEnv* env, jobject jobj, jlong jhandle,
jboolean jdisable_seek_compaction) {
reinterpret_cast<rocksdb::Options*>(jhandle)->disable_seek_compaction =
jdisable_seek_compaction;
}
/*
* Class: org_rocksdb_Options
* Method: disableSeekCompaction
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_disableSeekCompaction(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->disable_seek_compaction;
}
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
* Method: errorIfExists * Method: errorIfExists
@ -893,27 +838,6 @@ void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
static_cast<int>(jmin_write_buffer_number_to_merge); static_cast<int>(jmin_write_buffer_number_to_merge);
} }
/*
* Class: org_rocksdb_Options
* Method: blockRestartInterval
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_blockRestartInterval(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->block_restart_interval;
}
/*
* Class: org_rocksdb_Options
* Method: setBlockRestartInterval
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setBlockRestartInterval(
JNIEnv* env, jobject jobj, jlong jhandle, jint jblock_restart_interval) {
reinterpret_cast<rocksdb::Options*>(jhandle)->block_restart_interval =
static_cast<int>(jblock_restart_interval);
}
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
* Method: setCompressionType * Method: setCompressionType
@ -956,27 +880,6 @@ jbyte Java_org_rocksdb_Options_compactionStyle(
return reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style; return reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style;
} }
/*
* Class: org_rocksdb_Options
* Method: wholeKeyFiltering
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_wholeKeyFiltering(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->whole_key_filtering;
}
/*
* Class: org_rocksdb_Options
* Method: setWholeKeyFiltering
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setWholeKeyFiltering(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jwhole_key_filtering) {
reinterpret_cast<rocksdb::Options*>(jhandle)->whole_key_filtering =
static_cast<bool>(jwhole_key_filtering);
}
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
* Method: numLevels * Method: numLevels
@ -1324,27 +1227,6 @@ void Java_org_rocksdb_Options_setRateLimitDelayMaxMilliseconds(
static_cast<int>(jrate_limit_delay_max_milliseconds); static_cast<int>(jrate_limit_delay_max_milliseconds);
} }
/*
* Class: org_rocksdb_Options
* Method: noBlockCache
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_noBlockCache(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->no_block_cache;
}
/*
* Class: org_rocksdb_Options
* Method: setNoBlockCache
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setNoBlockCache(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jno_block_cache) {
reinterpret_cast<rocksdb::Options*>(jhandle)->no_block_cache =
static_cast<bool>(jno_block_cache);
}
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
* Method: arenaBlockSize * Method: arenaBlockSize
@ -1414,28 +1296,6 @@ void Java_org_rocksdb_Options_setPurgeRedundantKvsWhileFlush(
static_cast<bool>(jpurge_redundant_kvs_while_flush); static_cast<bool>(jpurge_redundant_kvs_while_flush);
} }
/*
* Class: org_rocksdb_Options
* Method: blockSizeDeviation
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_blockSizeDeviation(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->block_size_deviation;
}
/*
* Class: org_rocksdb_Options
* Method: setBlockSizeDeviation
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setBlockSizeDeviation(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jblock_size_deviation) {
reinterpret_cast<rocksdb::Options*>(jhandle)->block_size_deviation =
static_cast<int>(jblock_size_deviation);
}
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
* Method: verifyChecksumsInCompaction * Method: verifyChecksumsInCompaction

@ -26,21 +26,8 @@
* Signature: (JLjava/lang/String;)V * Signature: (JLjava/lang/String;)V
*/ */
void Java_org_rocksdb_RocksDB_open( void Java_org_rocksdb_RocksDB_open(
JNIEnv* env, jobject jdb, jlong jopt_handle, JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path) {
jlong jcache_size, jint jnum_shardbits, jstring jdb_path) {
auto opt = reinterpret_cast<rocksdb::Options*>(jopt_handle); auto opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
if (jcache_size > 0) {
opt->no_block_cache = false;
if (jnum_shardbits >= 1) {
opt->block_cache = rocksdb::NewLRUCache(jcache_size, jnum_shardbits);
} else {
opt->block_cache = rocksdb::NewLRUCache(jcache_size);
}
} else {
opt->no_block_cache = true;
opt->block_cache = nullptr;
}
rocksdb::DB* db = nullptr; rocksdb::DB* db = nullptr;
const char* db_path = env->GetStringUTFChars(jdb_path, 0); const char* db_path = env->GetStringUTFChars(jdb_path, 0);
rocksdb::Status s = rocksdb::DB::Open(*opt, db_path, &db); rocksdb::Status s = rocksdb::DB::Open(*opt, db_path, &db);

@ -7,7 +7,10 @@
#include <jni.h> #include <jni.h>
#include "include/org_rocksdb_PlainTableConfig.h" #include "include/org_rocksdb_PlainTableConfig.h"
#include "include/org_rocksdb_BlockBasedTableConfig.h"
#include "rocksdb/table.h" #include "rocksdb/table.h"
#include "rocksdb/cache.h"
#include "rocksdb/filter_policy.h"
/* /*
* Class: org_rocksdb_PlainTableConfig * Class: org_rocksdb_PlainTableConfig
@ -24,3 +27,34 @@ jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle(
options.index_sparseness = jindex_sparseness; options.index_sparseness = jindex_sparseness;
return reinterpret_cast<jlong>(rocksdb::NewPlainTableFactory(options)); return reinterpret_cast<jlong>(rocksdb::NewPlainTableFactory(options));
} }
/*
* Class: org_rocksdb_BlockBasedTableConfig
* Method: newTableFactoryHandle
* Signature: (ZJIJIIZI)J
*/
jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
JNIEnv* env, jobject jobj, jboolean no_block_cache, jlong block_cache_size,
jint num_shardbits, jlong block_size, jint block_size_deviation,
jint block_restart_interval, jboolean whole_key_filtering,
jint bits_per_key) {
rocksdb::BlockBasedTableOptions options;
options.no_block_cache = no_block_cache;
if (!no_block_cache && block_cache_size > 0) {
if (num_shardbits > 0) {
options.block_cache =
rocksdb::NewLRUCache(block_cache_size, num_shardbits);
} else {
options.block_cache = rocksdb::NewLRUCache(block_cache_size);
}
}
options.block_size = block_size;
options.block_size_deviation = block_size_deviation;
options.block_restart_interval = block_restart_interval;
options.whole_key_filtering = whole_key_filtering;
if (bits_per_key > 0) {
options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bits_per_key));
}
return reinterpret_cast<jlong>(rocksdb::NewBlockBasedTableFactory(options));
}

Loading…
Cancel
Save