diff --git a/java/RocksDBSample.java b/java/RocksDBSample.java
index d78a070df..bd5a85076 100644
--- a/java/RocksDBSample.java
+++ b/java/RocksDBSample.java
@@ -83,9 +83,23 @@ public class RocksDBSample {
     BlockBasedTableConfig table_options = new BlockBasedTableConfig();
     table_options.setBlockCacheSize(64 * SizeUnit.KB)
                  .setFilterBitsPerKey(10)
-                 .setCacheNumShardBits(6);
+                 .setCacheNumShardBits(6)
+                 .setBlockSizeDeviation(5)
+                 .setBlockRestartInterval(10)
+                 .setCacheIndexAndFilterBlocks(true)
+                 .setHashIndexAllowCollision(false)
+                 .setBlockCacheCompressedSize(64 * SizeUnit.KB)
+                 .setBlockCacheCompressedNumShardBits(10);
+
     assert(table_options.blockCacheSize() == 64 * SizeUnit.KB);
     assert(table_options.cacheNumShardBits() == 6);
+    assert(table_options.blockSizeDeviation() == 5);
+    assert(table_options.blockRestartInterval() == 10);
+    assert(table_options.cacheIndexAndFilterBlocks() == true);
+    assert(table_options.hashIndexAllowCollision() == false);
+    assert(table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB);
+    assert(table_options.blockCacheCompressedNumShardBits() == 10);
+
     options.setTableFormatConfig(table_options);
     assert(options.tableFactoryName().equals("BlockBasedTable"));

@@ -94,6 +108,8 @@ public class RocksDBSample {
       db.put("hello".getBytes(), "world".getBytes());
       byte[] value = db.get("hello".getBytes());
       assert("world".equals(new String(value)));
+      String str = db.getProperty("rocksdb.stats");
+      assert(str != null && !str.isEmpty());
     } catch (RocksDBException e) {
       System.out.format("[ERROR] caught the unexpceted exception -- %s\n", e);
       assert(db == null);
diff --git a/java/org/rocksdb/BlockBasedTableConfig.java b/java/org/rocksdb/BlockBasedTableConfig.java
index 523a57691..bdb27d6c2 100644
--- a/java/org/rocksdb/BlockBasedTableConfig.java
+++ b/java/org/rocksdb/BlockBasedTableConfig.java
@@ -14,11 +14,14 @@ public class BlockBasedTableConfig extends TableFormatConfig {
   public BlockBasedTableConfig() {
     noBlockCache_ = false;
     blockCacheSize_ = 8 * 1024 * 1024;
-    blockSize_ = 4 * 1024;
-    blockSizeDeviation_ =10;
-    blockRestartInterval_ =16;
+    blockSize_ = 4 * 1024;
+    blockSizeDeviation_ = 10;
+    blockRestartInterval_ = 16;
     wholeKeyFiltering_ = true;
-    bitsPerKey_ = 0;
+    bitsPerKey_ = 10;
+    cacheIndexAndFilterBlocks_ = false;
+    hashIndexAllowCollision_ = true;
+    blockCacheCompressedSize_ = 0;
   }

   /**
@@ -71,8 +74,8 @@ public class BlockBasedTableConfig extends TableFormatConfig {
    *     number means use default settings."
    * @return the reference to the current option.
    */
-  public BlockBasedTableConfig setCacheNumShardBits(int numShardBits) {
-    numShardBits_ = numShardBits;
+  public BlockBasedTableConfig setCacheNumShardBits(int blockCacheNumShardBits) {
+    blockCacheNumShardBits_ = blockCacheNumShardBits;
     return this;
   }

@@ -84,7 +87,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
    * @return the number of shard bits used in the block cache.
    */
   public int cacheNumShardBits() {
-    return numShardBits_;
+    return blockCacheNumShardBits_;
   }

   /**
@@ -186,25 +189,135 @@ public class BlockBasedTableConfig extends TableFormatConfig {
     bitsPerKey_ = bitsPerKey;
     return this;
   }
+
+  /**
+   * Indicating if we'd put index/filter blocks to the block cache.
+   * If not specified, each "table reader" object will pre-load the
+   * index/filter block during table initialization.
+   *
+   * @return if index and filter blocks should be put in block cache.
+   */
+  public boolean cacheIndexAndFilterBlocks() {
+    return cacheIndexAndFilterBlocks_;
+  }
+
+  /**
+   * Indicating if we'd put index/filter blocks to the block cache.
+   * If not specified, each "table reader" object will pre-load the
+   * index/filter block during table initialization.
+   *
+   * @param cacheIndexAndFilterBlocks whether index and filter blocks
+   *     should be put in the block cache.
+   * @return the reference to the current config.
+   */
+  public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
+      boolean cacheIndexAndFilterBlocks) {
+    cacheIndexAndFilterBlocks_ = cacheIndexAndFilterBlocks;
+    return this;
+  }
+
+  /**
+   * Influence the behavior when kHashSearch is used.
+   * If false, stores a precise prefix-to-block-range mapping.
+   * If true, does not store the prefix and allows prefix hash collisions
+   * (less memory consumption).
+   *
+   * @return if hash collisions should be allowed.
+   */
+  public boolean hashIndexAllowCollision() {
+    return hashIndexAllowCollision_;
+  }
+
+  /**
+   * Influence the behavior when kHashSearch is used.
+   * If false, stores a precise prefix-to-block-range mapping.
+   * If true, does not store the prefix and allows prefix hash collisions
+   * (less memory consumption).
+   *
+   * @param hashIndexAllowCollision if hash collisions should be allowed.
+   * @return the reference to the current config.
+   */
+  public BlockBasedTableConfig setHashIndexAllowCollision(
+      boolean hashIndexAllowCollision) {
+    hashIndexAllowCollision_ = hashIndexAllowCollision;
+    return this;
+  }
+
+  /**
+   * Size of the compressed block cache. If 0, then block_cache_compressed
+   * is set to null.
+   *
+   * @return size of the compressed block cache.
+   */
+  public long blockCacheCompressedSize() {
+    return blockCacheCompressedSize_;
+  }
+
+  /**
+   * Size of the compressed block cache. If 0, then block_cache_compressed
+   * is set to null.
+   *
+   * @param blockCacheCompressedSize size of the compressed block cache.
+   * @return the reference to the current config.
+   */
+  public BlockBasedTableConfig setBlockCacheCompressedSize(
+      long blockCacheCompressedSize) {
+    blockCacheCompressedSize_ = blockCacheCompressedSize;
+    return this;
+  }
+
+  /**
+   * Controls the number of shards for the compressed block cache.
+   * This is applied only if blockCacheCompressedSize is set to a
+   * positive value.
+   *
+   * @return the number of shard bits used in the compressed block cache.
+   *     The resulting number of shards would be 2 ^ numShardBits.
+   *     Any negative number means use default settings.
+   */
+  public int blockCacheCompressedNumShardBits() {
+    return blockCacheCompressedNumShardBits_;
+  }
+
+  /**
+   * Controls the number of shards for the compressed block cache.
+   * This is applied only if blockCacheCompressedSize is set to a
+   * positive value.
+   *
+   * @param blockCacheCompressedNumShardBits the number of shard bits.
+   *     The resulting number of shards would be 2 ^ numShardBits.
+   *     Any negative number means use default settings.
+   * @return the reference to the current option.
+   */
+  public BlockBasedTableConfig setBlockCacheCompressedNumShardBits(
+      int blockCacheCompressedNumShardBits) {
+    blockCacheCompressedNumShardBits_ = blockCacheCompressedNumShardBits;
+    return this;
+  }

   @Override protected long newTableFactoryHandle() {
-    return newTableFactoryHandle(noBlockCache_, blockCacheSize_, numShardBits_,
-        blockSize_, blockSizeDeviation_, blockRestartInterval_,
-        wholeKeyFiltering_, bitsPerKey_);
+    return newTableFactoryHandle(noBlockCache_, blockCacheSize_,
+        blockCacheNumShardBits_, blockSize_, blockSizeDeviation_,
+        blockRestartInterval_, wholeKeyFiltering_, bitsPerKey_,
+        cacheIndexAndFilterBlocks_, hashIndexAllowCollision_,
+        blockCacheCompressedSize_, blockCacheCompressedNumShardBits_);
   }

   private native long newTableFactoryHandle(
-      boolean noBlockCache, long blockCacheSize, int numShardbits,
+      boolean noBlockCache, long blockCacheSize, int blockCacheNumShardBits,
       long blockSize, int blockSizeDeviation, int blockRestartInterval,
-      boolean wholeKeyFiltering, int bitsPerKey);
+      boolean wholeKeyFiltering, int bitsPerKey,
+      boolean cacheIndexAndFilterBlocks, boolean hashIndexAllowCollision,
+      long blockCacheCompressedSize, int blockCacheCompressedNumShardBits);

   private boolean noBlockCache_;
   private long blockCacheSize_;
-  private int numShardBits_;
+  private int blockCacheNumShardBits_;
   private long shard;
   private long blockSize_;
   private int blockSizeDeviation_;
   private int blockRestartInterval_;
   private boolean wholeKeyFiltering_;
   private int bitsPerKey_;
+  private boolean cacheIndexAndFilterBlocks_;
+  private boolean hashIndexAllowCollision_;
+  private long blockCacheCompressedSize_;
+  private int blockCacheCompressedNumShardBits_;
 }
diff --git a/java/org/rocksdb/RocksDB.java b/java/org/rocksdb/RocksDB.java
index 91726253b..a16586551 100644
--- a/java/org/rocksdb/RocksDB.java
+++ b/java/org/rocksdb/RocksDB.java
@@ -324,6 +324,26 @@ public class RocksDB extends RocksObject {
       throws RocksDBException {
     remove(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
   }
+
+  /**
+   * DB implementations can export properties about their state
+   * via this method. If "property" is a valid property understood by this
+   * DB implementation, its current value is returned; otherwise a
+   * RocksDBException is thrown.
+   *
+   * Valid property names include:
+   *
+   *  "rocksdb.num-files-at-level<N>" - returns the number of files at
+   *     level <N>, where <N> is an ASCII representation of a level
+   *     number (e.g. "0").
+   *  "rocksdb.stats" - returns a multi-line string that describes statistics
+   *     about the internal operation of the DB.
+   *  "rocksdb.sstables" - returns a multi-line string that describes all
+   *     of the sstables that make up the db contents.
+   */
+  public String getProperty(String property) throws RocksDBException {
+    return getProperty0(nativeHandle_, property, property.length());
+  }

   /**
    * Return a heap-allocated iterator over the contents of the database.
@@ -378,6 +398,8 @@ public class RocksDB extends RocksObject {
   protected native void remove(
       long handle, long writeOptHandle, byte[] key, int keyLen)
       throws RocksDBException;
+  protected native String getProperty0(long nativeHandle,
+      String property, int propertyLength) throws RocksDBException;
   protected native long iterator0(long optHandle);

   private native void disposeInternal(long handle);
diff --git a/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc
index f55290f64..f1b9cc758 100644
--- a/java/rocksjni/rocksjni.cc
+++ b/java/rocksjni/rocksjni.cc
@@ -425,3 +425,27 @@ jlong Java_org_rocksdb_RocksDB_iterator0(
   rocksdb::Iterator* iterator = db->NewIterator(rocksdb::ReadOptions());
   return reinterpret_cast<jlong>(iterator);
 }
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    getProperty0
+ * Signature: (JLjava/lang/String;I)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_RocksDB_getProperty0(
+    JNIEnv* env, jobject jdb, jlong db_handle, jstring jproperty,
+    jint jproperty_len) {
+  auto db = reinterpret_cast<rocksdb::DB*>(db_handle);
+
+  const char* property = env->GetStringUTFChars(jproperty, 0);
+  rocksdb::Slice property_slice(property, jproperty_len);
+
+  std::string property_value;
+  bool retCode = db->GetProperty(property_slice, &property_value);
+  env->ReleaseStringUTFChars(jproperty, property);
+
+  if (!retCode) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
+  }
+
+  return env->NewStringUTF(property_value.data());
+}
diff --git a/java/rocksjni/table.cc b/java/rocksjni/table.cc
index ffda1a2ba..500cb255e 100644
--- a/java/rocksjni/table.cc
+++ b/java/rocksjni/table.cc
@@ -31,20 +31,22 @@ jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle(
 /*
  * Class:     org_rocksdb_BlockBasedTableConfig
  * Method:    newTableFactoryHandle
- * Signature: (ZJIJIIZI)J
+ * Signature: (ZJIJIIZIZZJI)J
  */
 jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
     JNIEnv* env, jobject jobj, jboolean no_block_cache, jlong block_cache_size,
-    jint num_shardbits, jlong block_size, jint block_size_deviation,
+    jint block_cache_num_shardbits, jlong block_size, jint block_size_deviation,
     jint block_restart_interval, jboolean whole_key_filtering,
-    jint bits_per_key) {
+    jint bits_per_key, jboolean cache_index_and_filter_blocks,
+    jboolean hash_index_allow_collision, jlong block_cache_compressed_size,
+    jint block_cache_compressed_num_shard_bits) {
   rocksdb::BlockBasedTableOptions options;
   options.no_block_cache = no_block_cache;

   if (!no_block_cache && block_cache_size > 0) {
-    if (num_shardbits > 0) {
+    if (block_cache_num_shardbits > 0) {
       options.block_cache =
-          rocksdb::NewLRUCache(block_cache_size, num_shardbits);
+          rocksdb::NewLRUCache(block_cache_size, block_cache_num_shardbits);
     } else {
       options.block_cache = rocksdb::NewLRUCache(block_cache_size);
     }
@@ -56,5 +58,17 @@ jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
   if (bits_per_key > 0) {
     options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bits_per_key));
   }
+  options.cache_index_and_filter_blocks = cache_index_and_filter_blocks;
+  options.hash_index_allow_collision = hash_index_allow_collision;
+  if (block_cache_compressed_size > 0) {
+    if (block_cache_compressed_num_shard_bits > 0) {
+      options.block_cache_compressed =
+          rocksdb::NewLRUCache(block_cache_compressed_size,
+              block_cache_compressed_num_shard_bits);
+    } else {
+      options.block_cache_compressed =
+          rocksdb::NewLRUCache(block_cache_compressed_size);
+    }
+  }
+
   return reinterpret_cast<jlong>(rocksdb::NewBlockBasedTableFactory(options));
 }
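
Usage sketch (illustrative only, not part of the patch): the new table-format options and the getProperty() binding can be exercised together with a short standalone program. It assumes the existing RocksDB.loadLibrary(), RocksDB.open(Options, String) and close() entry points already used by RocksDBSample.java; the class name and the "/tmp/rocksdb_compressed_cache_example" path are placeholders.

import org.rocksdb.*;
import org.rocksdb.util.SizeUnit;

public class CompressedCacheExample {
  static {
    // Load the native rocksdbjni library before any RocksDB call.
    RocksDB.loadLibrary();
  }

  public static void main(String[] args) {
    Options options = new Options().setCreateIfMissing(true);

    // Route index/filter blocks through the block cache and configure a
    // separate compressed block cache next to the uncompressed one.
    BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        .setBlockCacheSize(64 * SizeUnit.KB)
        .setCacheIndexAndFilterBlocks(true)
        .setBlockCacheCompressedSize(64 * SizeUnit.KB)
        .setBlockCacheCompressedNumShardBits(10);
    options.setTableFormatConfig(tableConfig);

    RocksDB db = null;
    try {
      db = RocksDB.open(options, "/tmp/rocksdb_compressed_cache_example");
      db.put("hello".getBytes(), "world".getBytes());

      // The new getProperty() binding surfaces internal DB statistics.
      System.out.println(db.getProperty("rocksdb.stats"));
    } catch (RocksDBException e) {
      System.err.format("[ERROR] %s%n", e);
    } finally {
      if (db != null) {
        db.close();
      }
    }
  }
}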