Summary: This adds almost all missing options to RocksJava
Closes https://github.com/facebook/rocksdb/pull/2039
Differential Revision: D4779991
Pulled By: siying
fbshipit-source-id: 4a1bf28
parent c6d04f2ecf
commit 0ee7f04039
@@ -0,0 +1,40 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::ClockCache.

#include <jni.h>

#include "include/org_rocksdb_ClockCache.h"
#include "util/clock_cache.h"

/*
 * Class:     org_rocksdb_ClockCache
 * Method:    newClockCache
 * Signature: (JIZ)J
 */
jlong Java_org_rocksdb_ClockCache_newClockCache(
    JNIEnv* env, jclass jcls, jlong jcapacity, jint jnum_shard_bits,
    jboolean jstrict_capacity_limit) {
  auto* sptr_clock_cache =
      new std::shared_ptr<rocksdb::Cache>(rocksdb::NewClockCache(
          static_cast<size_t>(jcapacity),
          static_cast<int>(jnum_shard_bits),
          static_cast<bool>(jstrict_capacity_limit)));
  return reinterpret_cast<jlong>(sptr_clock_cache);
}

/*
 * Class:     org_rocksdb_ClockCache
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_ClockCache_disposeInternal(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* sptr_clock_cache =
      reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(jhandle);
  delete sptr_clock_cache;  // delete std::shared_ptr
}
@@ -0,0 +1,55 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionOptionsFIFO.

#include <jni.h>

#include "include/org_rocksdb_CompactionOptionsFIFO.h"
#include "rocksdb/advanced_options.h"

/*
 * Class:     org_rocksdb_CompactionOptionsFIFO
 * Method:    newCompactionOptionsFIFO
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
    JNIEnv* env, jclass jcls) {
  const auto* opt = new rocksdb::CompactionOptionsFIFO();
  return reinterpret_cast<jlong>(opt);
}

/*
 * Class:     org_rocksdb_CompactionOptionsFIFO
 * Method:    setMaxTableFilesSize
 * Signature: (JJ)V
 */
void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
    JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_table_files_size) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
  opt->max_table_files_size = static_cast<uint64_t>(jmax_table_files_size);
}

/*
 * Class:     org_rocksdb_CompactionOptionsFIFO
 * Method:    maxTableFilesSize
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
  return static_cast<jlong>(opt->max_table_files_size);
}

/*
 * Class:     org_rocksdb_CompactionOptionsFIFO
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  delete reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
}
@@ -0,0 +1,194 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionOptionsUniversal.

#include <jni.h>

#include "include/org_rocksdb_CompactionOptionsUniversal.h"
#include "rocksdb/advanced_options.h"
#include "rocksjni/portal.h"

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    newCompactionOptionsUniversal
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
    JNIEnv* env, jclass jcls) {
  const auto* opt = new rocksdb::CompactionOptionsUniversal();
  return reinterpret_cast<jlong>(opt);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    setSizeRatio
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
    JNIEnv* env, jobject jobj, jlong jhandle, jint jsize_ratio) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  opt->size_ratio = static_cast<unsigned int>(jsize_ratio);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    sizeRatio
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  return static_cast<jint>(opt->size_ratio);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    setMinMergeWidth
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
    JNIEnv* env, jobject jobj, jlong jhandle, jint jmin_merge_width) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  opt->min_merge_width = static_cast<unsigned int>(jmin_merge_width);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    minMergeWidth
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  return static_cast<jint>(opt->min_merge_width);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    setMaxMergeWidth
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_merge_width) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  opt->max_merge_width = static_cast<unsigned int>(jmax_merge_width);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    maxMergeWidth
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  return static_cast<jint>(opt->max_merge_width);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    setMaxSizeAmplificationPercent
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
    JNIEnv* env, jobject jobj, jlong jhandle,
    jint jmax_size_amplification_percent) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  opt->max_size_amplification_percent =
      static_cast<unsigned int>(jmax_size_amplification_percent);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    maxSizeAmplificationPercent
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  return static_cast<jint>(opt->max_size_amplification_percent);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    setCompressionSizePercent
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
    JNIEnv* env, jobject jobj, jlong jhandle, jint jcompression_size_percent) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  opt->compression_size_percent =
      static_cast<unsigned int>(jcompression_size_percent);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    compressionSizePercent
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  return static_cast<jint>(opt->compression_size_percent);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    setStopStyle
 * Signature: (JB)V
 */
void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jstop_style_value) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  opt->stop_style =
      rocksdb::CompactionStopStyleJni::toCppCompactionStopStyle(
          jstop_style_value);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    stopStyle
 * Signature: (J)B
 */
jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  return rocksdb::CompactionStopStyleJni::toJavaCompactionStopStyle(
      opt->stop_style);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    setAllowTrivialMove
 * Signature: (JZ)V
 */
void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_trivial_move) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  opt->allow_trivial_move = static_cast<bool>(jallow_trivial_move);
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    allowTrivialMove
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
  return opt->allow_trivial_move;
}

/*
 * Class:     org_rocksdb_CompactionOptionsUniversal
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  delete reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
}
@@ -0,0 +1,121 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompressionOptions.

#include <jni.h>

#include "include/org_rocksdb_CompressionOptions.h"
#include "rocksdb/advanced_options.h"

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    newCompressionOptions
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
    JNIEnv* env, jclass jcls) {
  const auto* opt = new rocksdb::CompressionOptions();
  return reinterpret_cast<jlong>(opt);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    setWindowBits
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompressionOptions_setWindowBits(
    JNIEnv* env, jobject jobj, jlong jhandle, jint jwindow_bits) {
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  opt->window_bits = static_cast<int>(jwindow_bits);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    windowBits
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompressionOptions_windowBits(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  return static_cast<jint>(opt->window_bits);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    setLevel
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompressionOptions_setLevel(
    JNIEnv* env, jobject jobj, jlong jhandle, jint jlevel) {
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  opt->level = static_cast<int>(jlevel);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    level
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompressionOptions_level(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  return static_cast<jint>(opt->level);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    setStrategy
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompressionOptions_setStrategy(
    JNIEnv* env, jobject jobj, jlong jhandle, jint jstrategy) {
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  opt->strategy = static_cast<int>(jstrategy);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    strategy
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompressionOptions_strategy(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  return static_cast<jint>(opt->strategy);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    setMaxDictBytes
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(
    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_dict_bytes) {
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  opt->max_dict_bytes = static_cast<int>(jmax_dict_bytes);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    maxDictBytes
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompressionOptions_maxDictBytes(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
  return static_cast<jint>(opt->max_dict_bytes);
}

/*
 * Class:     org_rocksdb_CompressionOptions
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompressionOptions_disposeInternal(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  delete reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
}
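The native functions above are thin per-field bridges to rocksdb::CompressionOptions. A minimal Java-side sketch of how the wrapped options would be configured, assuming the public setters of the CompressionOptions Java class (not shown in this excerpt) mirror the native method names bridged here:

    // Illustrative sketch only; setter names assumed from the JNI bridge above.
    final CompressionOptions compressionOptions = new CompressionOptions();
    compressionOptions.setWindowBits(-14);          // zlib window bits
    compressionOptions.setLevel(4);                 // compression level
    compressionOptions.setStrategy(0);              // compression strategy
    compressionOptions.setMaxDictBytes(16 * 1024);  // dictionary size for dictionary compression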
@@ -0,0 +1,41 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::LRUCache.

#include <jni.h>

#include "include/org_rocksdb_LRUCache.h"
#include "util/lru_cache.h"

/*
 * Class:     org_rocksdb_LRUCache
 * Method:    newLRUCache
 * Signature: (JIZD)J
 */
jlong Java_org_rocksdb_LRUCache_newLRUCache(
    JNIEnv* env, jclass jcls, jlong jcapacity, jint jnum_shard_bits,
    jboolean jstrict_capacity_limit, jdouble jhigh_pri_pool_ratio) {
  auto* sptr_lru_cache =
      new std::shared_ptr<rocksdb::Cache>(rocksdb::NewLRUCache(
          static_cast<size_t>(jcapacity),
          static_cast<int>(jnum_shard_bits),
          static_cast<bool>(jstrict_capacity_limit),
          static_cast<double>(jhigh_pri_pool_ratio)));
  return reinterpret_cast<jlong>(sptr_lru_cache);
}

/*
 * Class:     org_rocksdb_LRUCache
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_LRUCache_disposeInternal(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  auto* sptr_lru_cache =
      reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(jhandle);
  delete sptr_lru_cache;  // delete std::shared_ptr
}
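As with the ClockCache bridge above, the jlong returned by newLRUCache is the address of a heap-allocated std::shared_ptr, which keeps the cache alive until disposeInternal deletes that shared_ptr. A hypothetical Java-side construction matching the (JIZD)J signature (the LRUCache.java wrapper itself is not shown in this excerpt, so the constructor arguments are assumed to mirror the native parameters):

    // Sketch only: a 64 MB LRU cache, 6 shard bits, no strict capacity limit,
    // 10% of the capacity reserved for high-priority entries.
    final Cache lruCache = new LRUCache(64 * 1024 * 1024L, 6, false, 0.1);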
File diff suppressed because it is too large
@@ -0,0 +1,53 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

package org.rocksdb;

/**
 * File access pattern once a compaction has started
 */
public enum AccessHint {
  NONE((byte)0x0),
  NORMAL((byte)0x1),
  SEQUENTIAL((byte)0x2),
  WILLNEED((byte)0x3);

  private final byte value;

  AccessHint(final byte value) {
    this.value = value;
  }

  /**
   * <p>Returns the byte value of this enumeration value.</p>
   *
   * @return byte representation
   */
  public byte getValue() {
    return value;
  }

  /**
   * <p>Get the AccessHint enumeration value by
   * passing the byte identifier to this method.</p>
   *
   * @param byteIdentifier of AccessHint.
   *
   * @return AccessHint instance.
   *
   * @throws IllegalArgumentException if the access hint for the byteIdentifier
   *     cannot be found
   */
  public static AccessHint getAccessHint(final byte byteIdentifier) {
    for (final AccessHint accessHint : AccessHint.values()) {
      if (accessHint.getValue() == byteIdentifier) {
        return accessHint;
      }
    }

    throw new IllegalArgumentException(
        "Illegal value provided for AccessHint.");
  }
}
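A short round-trip example of the byte mapping defined above (illustrative sketch, not part of the diff):

    final AccessHint hint = AccessHint.SEQUENTIAL;
    final byte id = hint.getValue();                     // 0x2
    final AccessHint decoded = AccessHint.getAccessHint(id);
    assert decoded == AccessHint.SEQUENTIAL;
    // An unknown identifier such as (byte) 0x7 would throw IllegalArgumentException.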
@@ -0,0 +1,465 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

package org.rocksdb;

import java.util.List;

/**
 * Advanced Column Family Options which are not
 * mutable (i.e. not present in
 * {@link AdvancedMutableColumnFamilyOptionsInterface})
 *
 * Taken from include/rocksdb/advanced_options.h
 */
public interface AdvancedColumnFamilyOptionsInterface
    <T extends AdvancedColumnFamilyOptionsInterface> {

  /**
   * The minimum number of write buffers that will be merged together
   * before writing to storage. If set to 1, then
   * all write buffers are flushed to L0 as individual files and this increases
   * read amplification because a get request has to check in all of these
   * files. Also, an in-memory merge may result in writing less
   * data to storage if there are duplicate records in each of these
   * individual write buffers. Default: 1
   *
   * @param minWriteBufferNumberToMerge the minimum number of write buffers
   *     that will be merged together.
   * @return the reference to the current options.
   */
  T setMinWriteBufferNumberToMerge(
      int minWriteBufferNumberToMerge);

  /**
   * The minimum number of write buffers that will be merged together
   * before writing to storage. If set to 1, then
   * all write buffers are flushed to L0 as individual files and this increases
   * read amplification because a get request has to check in all of these
   * files. Also, an in-memory merge may result in writing less
   * data to storage if there are duplicate records in each of these
   * individual write buffers. Default: 1
   *
   * @return the minimum number of write buffers that will be merged together.
   */
  int minWriteBufferNumberToMerge();

  /**
   * The total maximum number of write buffers to maintain in memory including
   * copies of buffers that have already been flushed. Unlike
   * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()},
   * this parameter does not affect flushing.
   * This controls the minimum amount of write history that will be available
   * in memory for conflict checking when Transactions are used.
   *
   * When using an OptimisticTransactionDB:
   * If this value is too low, some transactions may fail at commit time due
   * to not being able to determine whether there were any write conflicts.
   *
   * When using a TransactionDB:
   * If Transaction::SetSnapshot is used, TransactionDB will read either
   * in-memory write buffers or SST files to do write-conflict checking.
   * Increasing this value can reduce the number of reads to SST files
   * done for conflict detection.
   *
   * Setting this value to 0 will cause write buffers to be freed immediately
   * after they are flushed.
   * If this value is set to -1,
   * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
   * will be used.
   *
   * Default:
   * If using a TransactionDB/OptimisticTransactionDB, the default value will
   * be set to the value of
   * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
   * if it is not explicitly set by the user. Otherwise, the default is 0.
   *
   * @param maxWriteBufferNumberToMaintain The maximum number of write
   *     buffers to maintain
   *
   * @return the reference to the current options.
   */
  T setMaxWriteBufferNumberToMaintain(
      int maxWriteBufferNumberToMaintain);

  /**
   * The total maximum number of write buffers to maintain in memory including
   * copies of buffers that have already been flushed.
   *
   * @return maxWriteBufferNumberToMaintain The maximum number of write buffers
   *     to maintain
   */
  int maxWriteBufferNumberToMaintain();

  /**
   * Allows thread-safe inplace updates.
   * If inplace_callback function is not set,
   * Put(key, new_value) will update inplace the existing_value iff
   * * key exists in current memtable
   * * new sizeof(new_value) ≤ sizeof(existing_value)
   * * existing_value for that key is a put i.e. kTypeValue
   * If inplace_callback function is set, check doc for inplace_callback.
   * Default: false.
   *
   * @param inplaceUpdateSupport true if thread-safe inplace updates
   *     are allowed.
   * @return the reference to the current options.
   */
  T setInplaceUpdateSupport(
      boolean inplaceUpdateSupport);

  /**
   * Allows thread-safe inplace updates.
   * If inplace_callback function is not set,
   * Put(key, new_value) will update inplace the existing_value iff
   * * key exists in current memtable
   * * new sizeof(new_value) ≤ sizeof(existing_value)
   * * existing_value for that key is a put i.e. kTypeValue
   * If inplace_callback function is set, check doc for inplace_callback.
   * Default: false.
   *
   * @return true if thread-safe inplace updates are allowed.
   */
  boolean inplaceUpdateSupport();

  /**
   * Control locality of bloom filter probes to improve cache miss rate.
   * This option only applies to memtable prefix bloom and plaintable
   * prefix bloom. It essentially limits the max number of cache lines each
   * bloom filter check can touch.
   * This optimization is turned off when set to 0. The number should never
   * be greater than the number of probes. This option can boost performance
   * for in-memory workloads but should be used with care since it can cause
   * a higher false positive rate.
   * Default: 0
   *
   * @param bloomLocality the level of locality of bloom-filter probes.
   * @return the reference to the current options.
   */
  T setBloomLocality(int bloomLocality);

  /**
   * Control locality of bloom filter probes to improve cache miss rate.
   * This option only applies to memtable prefix bloom and plaintable
   * prefix bloom. It essentially limits the max number of cache lines each
   * bloom filter check can touch.
   * This optimization is turned off when set to 0. The number should never
   * be greater than the number of probes. This option can boost performance
   * for in-memory workloads but should be used with care since it can cause
   * a higher false positive rate.
   * Default: 0
   *
   * @return the level of locality of bloom-filter probes.
   * @see #setBloomLocality(int)
   */
  int bloomLocality();

  /**
   * <p>Different levels can have different compression
   * policies. There are cases where most lower levels
   * would like to use quick compression algorithms while
   * the higher levels (which have more data) use
   * compression algorithms that have better compression
   * but could be slower. This array, if non-empty, should
   * have an entry for each level of the database;
   * these override the value specified in the previous
   * field 'compression'.</p>
   *
   * <strong>NOTICE</strong>
   * <p>If {@code level_compaction_dynamic_level_bytes=true},
   * {@code compression_per_level[0]} still determines {@code L0},
   * but other elements of the array are based on the base level
   * (the level {@code L0} files are merged to), and may not
   * match the level users see from the info log for metadata.
   * </p>
   * <p>If {@code L0} files are merged to {@code level-n},
   * then, for {@code i>0}, {@code compression_per_level[i]}
   * determines the compression type for level {@code n+i-1}.</p>
   *
   * <strong>Example</strong>
   * <p>For example, if we have 5 levels, and we determine to
   * merge {@code L0} data to {@code L4} (which means {@code L1..L3}
   * will be empty), then new files written to {@code L4} use
   * compression type {@code compression_per_level[1]}.</p>
   *
   * <p>If later {@code L0} is merged to {@code L2}, data going to
   * {@code L2} will be compressed according to
   * {@code compression_per_level[1]}, {@code L3} using
   * {@code compression_per_level[2]} and {@code L4} using
   * {@code compression_per_level[3]}. Compression for each
   * level can change as the data grows.</p>
   *
   * <p><strong>Default:</strong> empty</p>
   *
   * @param compressionLevels list of
   *     {@link org.rocksdb.CompressionType} instances.
   *
   * @return the reference to the current options.
   */
  T setCompressionPerLevel(
      List<CompressionType> compressionLevels);

  /**
   * <p>Return the currently set {@link org.rocksdb.CompressionType}
   * instances per level.</p>
   *
   * <p>See: {@link #setCompressionPerLevel(java.util.List)}</p>
   *
   * @return list of {@link org.rocksdb.CompressionType}
   *     instances.
   */
  List<CompressionType> compressionPerLevel();

  /**
   * Set the number of levels for this database.
   * If level-styled compaction is used, then this number determines
   * the total number of levels.
   *
   * @param numLevels the number of levels.
   * @return the reference to the current options.
   */
  T setNumLevels(int numLevels);

  /**
   * If level-styled compaction is used, then this number determines
   * the total number of levels.
   *
   * @return the number of levels.
   */
  int numLevels();

  /**
   * <p>If {@code true}, RocksDB will pick the target size of each level
   * dynamically. We will pick a base level b >= 1. L0 will be
   * directly merged into level b, instead of always into level 1.
   * Levels 1 to b-1 need to be empty. We try to pick b and its target
   * size so that</p>
   *
   * <ol>
   * <li>target size is in the range of
   *   (max_bytes_for_level_base / max_bytes_for_level_multiplier,
   *    max_bytes_for_level_base]</li>
   * <li>target size of the last level (level num_levels-1) equals the extra
   *   size of the level.</li>
   * </ol>
   *
   * <p>At the same time max_bytes_for_level_multiplier and
   * max_bytes_for_level_multiplier_additional are still satisfied.</p>
   *
   * <p>With this option on, from an empty DB, we make the last level the base
   * level, which means merging L0 data into the last level, until it exceeds
   * max_bytes_for_level_base. And then we make the second last level the
   * base level, to start merging L0 data into the second last level, with its
   * target size being {@code 1/max_bytes_for_level_multiplier} of the last
   * level's extra size. As the data accumulates further, we move the base
   * level to the third last level, and so on.</p>
   *
   * <h2>Example</h2>
   * <p>For example, assume {@code max_bytes_for_level_multiplier=10},
   * {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.</p>
   *
   * <p>Target sizes of level 1 to 5 start with:</p>
   * {@code [- - - - 10MB]}
   * <p>with the base level being level 5. Target sizes of level 1 to 4 are
   * not applicable because they will not be used.
   * Once the size of Level 5 grows to more than 10MB, say 11MB, we make
   * level 4 the base level and now the targets look like:</p>
   * {@code [- - - 1.1MB 11MB]}
   * <p>As data accumulates, size targets are tuned based on the actual data
   * of level 5. When level 5 has 50MB of data, the targets look like:</p>
   * {@code [- - - 5MB 50MB]}
   * <p>Until level 5's actual size is more than 100MB, say 101MB. Now if we
   * keep level 4 as the base level, its target size needs to be 10.1MB,
   * which doesn't satisfy the target size range. So now we make level 3
   * the base level and the target sizes of the levels look like:</p>
   * {@code [- - 1.01MB 10.1MB 101MB]}
   * <p>In the same way, while level 5 further grows, all levels' targets grow,
   * like</p>
   * {@code [- - 5MB 50MB 500MB]}
   * <p>Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the
   * base level and make the levels' target sizes like this:</p>
   * {@code [- 1.001MB 10.01MB 100.1MB 1001MB]}
   * <p>and so on...</p>
   *
   * <p>By doing this, we give {@code max_bytes_for_level_multiplier} priority
   * over {@code max_bytes_for_level_base}, for a more predictable LSM tree
   * shape. It is useful to limit worst-case space amplification.</p>
   *
   * <p>{@code max_bytes_for_level_multiplier_additional} is ignored with
   * this flag on.</p>
   *
   * <p>Turning this feature on or off for an existing DB can cause an
   * unexpected LSM tree structure so it's not recommended.</p>
   *
   * <p><strong>Caution</strong>: this option is experimental</p>
   *
   * <p>Default: false</p>
   *
   * @param enableLevelCompactionDynamicLevelBytes boolean value indicating
   *     if {@code LevelCompactionDynamicLevelBytes} shall be enabled.
   * @return the reference to the current options.
   */
  @Experimental("Turning this feature on or off for an existing DB can cause " +
      "unexpected LSM tree structure so it's not recommended")
  T setLevelCompactionDynamicLevelBytes(
      boolean enableLevelCompactionDynamicLevelBytes);

  /**
   * <p>Return if {@code LevelCompactionDynamicLevelBytes} is enabled.
   * </p>
   *
   * <p>For further information see
   * {@link #setLevelCompactionDynamicLevelBytes(boolean)}</p>
   *
   * @return boolean value indicating if
   *     {@code levelCompactionDynamicLevelBytes} is enabled.
   */
  @Experimental("Caution: this option is experimental")
  boolean levelCompactionDynamicLevelBytes();

  /**
   * Maximum size of each compaction (not guaranteed)
   *
   * @param maxCompactionBytes the compaction size limit
   * @return the reference to the current options.
   */
  T setMaxCompactionBytes(
      long maxCompactionBytes);

  /**
   * Control the maximum size of each compaction (not guaranteed)
   *
   * @return compaction size threshold
   */
  long maxCompactionBytes();

  /**
   * Set compaction style for DB.
   *
   * Default: LEVEL.
   *
   * @param compactionStyle Compaction style.
   * @return the reference to the current options.
   */
  ColumnFamilyOptionsInterface setCompactionStyle(
      CompactionStyle compactionStyle);

  /**
   * Compaction style for DB.
   *
   * @return Compaction style.
   */
  CompactionStyle compactionStyle();

  /**
   * If {@link #compactionStyle()} == {@link CompactionStyle#LEVEL},
   * for each level, which files are prioritized to be picked to compact.
   *
   * Default: {@link CompactionPriority#ByCompensatedSize}
   *
   * @param compactionPriority The compaction priority
   *
   * @return the reference to the current options.
   */
  T setCompactionPriority(
      CompactionPriority compactionPriority);

  /**
   * Get the compaction priority if level compaction
   * is used for all levels
   *
   * @return The compaction priority
   */
  CompactionPriority compactionPriority();

  /**
   * Set the options needed to support Universal Style compactions
   *
   * @param compactionOptionsUniversal The Universal Style compaction options
   *
   * @return the reference to the current options.
   */
  T setCompactionOptionsUniversal(
      CompactionOptionsUniversal compactionOptionsUniversal);

  /**
   * The options needed to support Universal Style compactions
   *
   * @return The Universal Style compaction options
   */
  CompactionOptionsUniversal compactionOptionsUniversal();

  /**
   * The options for FIFO compaction style
   *
   * @param compactionOptionsFIFO The FIFO compaction options
   *
   * @return the reference to the current options.
   */
  T setCompactionOptionsFIFO(
      CompactionOptionsFIFO compactionOptionsFIFO);

  /**
   * The options for FIFO compaction style
   *
   * @return The FIFO compaction options
   */
  CompactionOptionsFIFO compactionOptionsFIFO();

  /**
   * <p>This flag specifies that the implementation should optimize the filters
   * mainly for cases where keys are found rather than also optimize for keys
   * missed. This would be used in cases where the application knows that
   * there are very few misses or the performance in the case of misses is not
   * important.</p>
   *
   * <p>For now, this flag allows us to not store filters for the last level i.e.
   * the largest level which contains data of the LSM store. For keys which
   * are hits, the filters in this level are not useful because we will search
   * for the data anyway.</p>
   *
   * <p><strong>NOTE</strong>: the filters in other levels are still useful
   * even for key hits because they tell us whether to look in that level or go
   * to the higher level.</p>
   *
   * <p>Default: false</p>
   *
   * @param optimizeFiltersForHits boolean value indicating if this flag is set.
   * @return the reference to the current options.
   */
  T setOptimizeFiltersForHits(
      boolean optimizeFiltersForHits);

  /**
   * <p>Returns the current state of the {@code optimize_filters_for_hits}
   * setting.</p>
   *
   * @return boolean value indicating if the flag
   *     {@code optimize_filters_for_hits} was set.
   */
  boolean optimizeFiltersForHits();

  /**
   * In debug mode, RocksDB runs consistency checks on the LSM every time the
   * LSM changes (Flush, Compaction, AddFile). These checks are disabled in
   * release mode; use this option to enable them in release mode as well.
   *
   * Default: false
   *
   * @param forceConsistencyChecks true to force consistency checks
   *
   * @return the reference to the current options.
   */
  T setForceConsistencyChecks(
      boolean forceConsistencyChecks);

  /**
   * In debug mode, RocksDB runs consistency checks on the LSM every time the
   * LSM changes (Flush, Compaction, AddFile). These checks are disabled in
   * release mode.
   *
   * @return true if consistency checks are enforced
   */
  boolean forceConsistencyChecks();
}
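A sketch of how these non-mutable advanced options would be configured through a fluent options object. It assumes ColumnFamilyOptions implements this interface (as the "reference to the current options" return values suggest) and that java.util.Arrays is imported; it is illustrative only, not part of the diff:

    final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
    cfOptions
        .setNumLevels(4)
        .setCompressionPerLevel(Arrays.asList(
            CompressionType.NO_COMPRESSION,      // L0
            CompressionType.SNAPPY_COMPRESSION,  // base level
            CompressionType.SNAPPY_COMPRESSION,
            CompressionType.ZLIB_COMPRESSION))   // last level
        .setOptimizeFiltersForHits(true)
        .setForceConsistencyChecks(true);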
@@ -0,0 +1,437 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

package org.rocksdb;

/**
 * Advanced Column Family Options which are mutable
 *
 * Taken from include/rocksdb/advanced_options.h
 * and MutableCFOptions in util/cf_options.h
 */
public interface AdvancedMutableColumnFamilyOptionsInterface
    <T extends AdvancedMutableColumnFamilyOptionsInterface> {

  /**
   * The maximum number of write buffers that are built up in memory.
   * The default is 2, so that when 1 write buffer is being flushed to
   * storage, new writes can continue to the other write buffer.
   * Default: 2
   *
   * @param maxWriteBufferNumber maximum number of write buffers.
   * @return the instance of the current options.
   */
  T setMaxWriteBufferNumber(
      int maxWriteBufferNumber);

  /**
   * Returns the maximum number of write buffers.
   *
   * @return maximum number of write buffers.
   * @see #setMaxWriteBufferNumber(int)
   */
  int maxWriteBufferNumber();

  /**
   * Number of locks used for inplace update.
   * Default: 10000, if inplace_update_support = true, else 0.
   *
   * @param inplaceUpdateNumLocks the number of locks used for
   *     inplace updates.
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
   *     while overflowing the underlying platform specific value.
   */
  T setInplaceUpdateNumLocks(
      long inplaceUpdateNumLocks);

  /**
   * Number of locks used for inplace update.
   * Default: 10000, if inplace_update_support = true, else 0.
   *
   * @return the number of locks used for inplace update.
   */
  long inplaceUpdateNumLocks();

  /**
   * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
   * create a prefix bloom for the memtable with the size of
   * write_buffer_size * memtable_prefix_bloom_size_ratio.
   * If it is larger than 0.25, it is sanitized to 0.25.
   *
   * Default: 0 (disable)
   *
   * @param memtablePrefixBloomSizeRatio The ratio
   * @return the reference to the current options.
   */
  T setMemtablePrefixBloomSizeRatio(
      double memtablePrefixBloomSizeRatio);

  /**
   * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
   * create a prefix bloom for the memtable with the size of
   * write_buffer_size * memtable_prefix_bloom_size_ratio.
   * If it is larger than 0.25, it is sanitized to 0.25.
   *
   * Default: 0 (disable)
   *
   * @return the ratio
   */
  double memtablePrefixBloomSizeRatio();

  /**
   * Page size for the huge page TLB for bloom in memtable. If ≤ 0, it is not
   * allocated from the huge page TLB but from malloc.
   * Huge pages need to be reserved for it to be allocated. For example:
   * sysctl -w vm.nr_hugepages=20
   * See the linux doc Documentation/vm/hugetlbpage.txt
   *
   * @param memtableHugePageSize The page size of the huge
   *     page TLB
   * @return the reference to the current options.
   */
  T setMemtableHugePageSize(
      long memtableHugePageSize);

  /**
   * Page size for the huge page TLB for bloom in memtable. If ≤ 0, it is not
   * allocated from the huge page TLB but from malloc.
   * Huge pages need to be reserved for it to be allocated. For example:
   * sysctl -w vm.nr_hugepages=20
   * See the linux doc Documentation/vm/hugetlbpage.txt
   *
   * @return The page size of the huge page TLB
   */
  long memtableHugePageSize();

  /**
   * The size of one block in arena memory allocation.
   * If ≤ 0, a proper value is automatically calculated (usually 1/10 of
   * write_buffer_size).
   *
   * There are two additional restrictions on the specified size:
   * (1) the size should be in the range of [4096, 2 << 30] and
   * (2) it should be a multiple of the CPU word size (which helps with
   *     memory alignment).
   *
   * We'll automatically check and adjust the size number to make sure it
   * conforms to the restrictions.
   * Default: 0
   *
   * @param arenaBlockSize the size of an arena block
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
   *     while overflowing the underlying platform specific value.
   */
  T setArenaBlockSize(long arenaBlockSize);

  /**
   * The size of one block in arena memory allocation.
   * If ≤ 0, a proper value is automatically calculated (usually 1/10 of
   * write_buffer_size).
   *
   * There are two additional restrictions on the specified size:
   * (1) the size should be in the range of [4096, 2 << 30] and
   * (2) it should be a multiple of the CPU word size (which helps with
   *     memory alignment).
   *
   * We'll automatically check and adjust the size number to make sure it
   * conforms to the restrictions.
   * Default: 0
   *
   * @return the size of an arena block
   */
  long arenaBlockSize();

  /**
   * Soft limit on the number of level-0 files. We start slowing down writes at
   * this point. A value < 0 means that no write slowdown will be triggered by
   * the number of files in level-0.
   *
   * @param level0SlowdownWritesTrigger The soft limit on the number of
   *     level-0 files
   * @return the reference to the current options.
   */
  T setLevel0SlowdownWritesTrigger(
      int level0SlowdownWritesTrigger);

  /**
   * Soft limit on the number of level-0 files. We start slowing down writes at
   * this point. A value < 0 means that no write slowdown will be triggered by
   * the number of files in level-0.
   *
   * @return The soft limit on the number of
   *     level-0 files
   */
  int level0SlowdownWritesTrigger();

  /**
   * Maximum number of level-0 files. We stop writes at this point.
   *
   * @param level0StopWritesTrigger The maximum number of level-0 files
   * @return the reference to the current options.
   */
  T setLevel0StopWritesTrigger(
      int level0StopWritesTrigger);

  /**
   * Maximum number of level-0 files. We stop writes at this point.
   *
   * @return The maximum number of level-0 files
   */
  int level0StopWritesTrigger();

  /**
   * The target file size for compaction.
   * This targetFileSizeBase determines a level-1 file size.
   * Target file size for level L can be calculated by
   * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
   * For example, if targetFileSizeBase is 2MB and
   * targetFileSizeMultiplier is 10, then each file on level-1 will
   * be 2MB, each file on level-2 will be 20MB,
   * and each file on level-3 will be 200MB.
   * By default targetFileSizeBase is 2MB.
   *
   * @param targetFileSizeBase the target size of a level-1 file.
   * @return the reference to the current options.
   *
   * @see #setTargetFileSizeMultiplier(int)
   */
  T setTargetFileSizeBase(
      long targetFileSizeBase);

  /**
   * The target file size for compaction.
   * This targetFileSizeBase determines a level-1 file size.
   * Target file size for level L can be calculated by
   * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
   * For example, if targetFileSizeBase is 2MB and
   * targetFileSizeMultiplier is 10, then each file on level-1 will
   * be 2MB, each file on level-2 will be 20MB,
   * and each file on level-3 will be 200MB.
   * By default targetFileSizeBase is 2MB.
   *
   * @return the target size of a level-1 file.
   *
   * @see #targetFileSizeMultiplier()
   */
  long targetFileSizeBase();

  /**
   * targetFileSizeMultiplier defines the size ratio between a
   * level-L file and a level-(L+1) file.
   * By default targetFileSizeMultiplier is 1, meaning
   * files in different levels have the same target.
   *
   * @param multiplier the size ratio between a level-(L+1) file
   *     and a level-L file.
   * @return the reference to the current options.
   */
  T setTargetFileSizeMultiplier(
      int multiplier);

  /**
   * targetFileSizeMultiplier defines the size ratio between a
   * level-(L+1) file and a level-L file.
   * By default targetFileSizeMultiplier is 1, meaning
   * files in different levels have the same target.
   *
   * @return the size ratio between a level-(L+1) file and a level-L file.
   */
  int targetFileSizeMultiplier();

  /**
   * The ratio between the total size of level-(L+1) files and the total
   * size of level-L files for all L.
   * DEFAULT: 10
   *
   * @param multiplier the ratio between the total size of level-(L+1)
   *     files and the total size of level-L files for all L.
   * @return the reference to the current options.
   *
   * See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)}
   */
  T setMaxBytesForLevelMultiplier(double multiplier);

  /**
   * The ratio between the total size of level-(L+1) files and the total
   * size of level-L files for all L.
   * DEFAULT: 10
   *
   * @return the ratio between the total size of level-(L+1) files and
   *     the total size of level-L files for all L.
   *
   * See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()}
   */
  double maxBytesForLevelMultiplier();

  /**
   * Different max-size multipliers for different levels.
   * These are multiplied by max_bytes_for_level_multiplier to arrive
   * at the max-size of each level.
   *
   * Default: 1
   *
   * @param maxBytesForLevelMultiplierAdditional The max-size multipliers
   *     for each level
   * @return the reference to the current options.
   */
  T setMaxBytesForLevelMultiplierAdditional(
      int[] maxBytesForLevelMultiplierAdditional);

  /**
   * Different max-size multipliers for different levels.
   * These are multiplied by max_bytes_for_level_multiplier to arrive
   * at the max-size of each level.
   *
   * Default: 1
   *
   * @return The max-size multipliers for each level
   */
  int[] maxBytesForLevelMultiplierAdditional();

  /**
   * All writes will be slowed down to at least delayed_write_rate if the
   * estimated bytes needed for compaction exceed this threshold.
   *
   * Default: 64GB
   *
   * @param softPendingCompactionBytesLimit The soft limit to impose on
   *     compaction
   * @return the reference to the current options.
   */
  T setSoftPendingCompactionBytesLimit(
      long softPendingCompactionBytesLimit);

  /**
   * All writes will be slowed down to at least delayed_write_rate if the
   * estimated bytes needed for compaction exceed this threshold.
   *
   * Default: 64GB
   *
   * @return The soft limit to impose on compaction
   */
  long softPendingCompactionBytesLimit();

  /**
   * All writes are stopped if the estimated bytes needed for compaction
   * exceed this threshold.
   *
   * Default: 256GB
   *
   * @param hardPendingCompactionBytesLimit The hard limit to impose on
   *     compaction
   * @return the reference to the current options.
   */
  T setHardPendingCompactionBytesLimit(
      long hardPendingCompactionBytesLimit);

  /**
   * All writes are stopped if the estimated bytes needed for compaction
   * exceed this threshold.
   *
   * Default: 256GB
   *
   * @return The hard limit to impose on compaction
   */
  long hardPendingCompactionBytesLimit();

  /**
   * An iteration->Next() sequentially skips over keys with the same
   * user-key unless this option is set. This number specifies the number
   * of keys (with the same userkey) that will be sequentially
   * skipped before a reseek is issued.
   * Default: 8
   *
   * @param maxSequentialSkipInIterations the number of keys that can
   *     be skipped in an iteration.
   * @return the reference to the current options.
   */
  T setMaxSequentialSkipInIterations(
      long maxSequentialSkipInIterations);

  /**
   * An iteration->Next() sequentially skips over keys with the same
   * user-key unless this option is set. This number specifies the number
   * of keys (with the same userkey) that will be sequentially
   * skipped before a reseek is issued.
   * Default: 8
   *
   * @return the number of keys that can be skipped in an iteration.
   */
  long maxSequentialSkipInIterations();

  /**
   * Maximum number of successive merge operations on a key in the memtable.
   *
   * When a merge operation is added to the memtable and the maximum number of
   * successive merges is reached, the value of the key will be calculated and
   * inserted into the memtable instead of the merge operation. This will
   * ensure that there are never more than max_successive_merges merge
   * operations in the memtable.
   *
   * Default: 0 (disabled)
   *
   * @param maxSuccessiveMerges the maximum number of successive merges.
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
   *     while overflowing the underlying platform specific value.
   */
  T setMaxSuccessiveMerges(
      long maxSuccessiveMerges);

  /**
   * Maximum number of successive merge operations on a key in the memtable.
   *
   * When a merge operation is added to the memtable and the maximum number of
   * successive merges is reached, the value of the key will be calculated and
   * inserted into the memtable instead of the merge operation. This will
   * ensure that there are never more than max_successive_merges merge
   * operations in the memtable.
   *
   * Default: 0 (disabled)
   *
   * @return the maximum number of successive merges.
   */
  long maxSuccessiveMerges();

  /**
   * After writing every SST file, reopen it and read all the keys.
   *
   * Default: false
   *
   * @param paranoidFileChecks true to enable paranoid file checks
   * @return the reference to the current options.
   */
  T setParanoidFileChecks(
      boolean paranoidFileChecks);

  /**
   * After writing every SST file, reopen it and read all the keys.
   *
   * Default: false
   *
   * @return true if paranoid file checks are enabled
   */
  boolean paranoidFileChecks();

  /**
   * Measure IO stats in compactions and flushes, if true.
   *
   * Default: false
   *
   * @param reportBgIoStats true to enable reporting
   * @return the reference to the current options.
   */
  T setReportBgIoStats(
      boolean reportBgIoStats);

  /**
   * Determines whether IO stats in compactions and flushes are being measured.
   *
   * @return true if reporting is enabled
   */
  boolean reportBgIoStats();
}
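A short usage sketch of the mutable advanced options, again assuming ColumnFamilyOptions implements this interface; the target file size values reproduce the 2MB/20MB/200MB progression from the Javadoc above (illustrative only):

    final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
    cfOptions
        .setMaxWriteBufferNumber(4)
        .setTargetFileSizeBase(2 * 1024 * 1024L)  // level-1 files ~2MB
        .setTargetFileSizeMultiplier(10)          // level-2 ~20MB, level-3 ~200MB
        .setLevel0SlowdownWritesTrigger(20)
        .setLevel0StopWritesTrigger(36)
        .setReportBgIoStats(true);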
@@ -0,0 +1,13 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

package org.rocksdb;


public abstract class Cache extends RocksObject {
  protected Cache(final long nativeHandle) {
    super(nativeHandle);
  }
}
@ -0,0 +1,59 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Similar to {@link LRUCache}, but based on the CLOCK algorithm with |
||||
* better concurrent performance in some cases |
||||
*/ |
||||
public class ClockCache extends Cache { |
||||
|
||||
/** |
||||
* Create a new cache with a fixed size capacity. |
||||
* |
||||
* @param capacity The fixed size capacity of the cache |
||||
*/ |
||||
public ClockCache(final long capacity) { |
||||
super(newClockCache(capacity, -1, false)); |
||||
} |
||||
|
||||
/** |
||||
* Create a new cache with a fixed size capacity. The cache is sharded |
||||
* to 2^numShardBits shards, by hash of the key. The total capacity |
||||
* is divided and evenly assigned to each shard. |
||||
* numShardBits = -1 means it is automatically determined: every shard |
||||
* will be at least 512KB and number of shard bits will not exceed 6. |
||||
* |
||||
* @param capacity The fixed size capacity of the cache |
||||
* @param numShardBits The cache is sharded to 2^numShardBits shards, |
||||
* by hash of the key |
||||
*/ |
||||
public ClockCache(final long capacity, final int numShardBits) { |
||||
super(newClockCache(capacity, numShardBits, false)); |
||||
} |
||||
|
||||
/** |
||||
* Create a new cache with a fixed size capacity. The cache is sharded |
||||
* to 2^numShardBits shards, by hash of the key. The total capacity |
||||
* is divided and evenly assigned to each shard. If strictCapacityLimit |
||||
* is set, inserts into the cache will fail when the cache is full. |
||||
* numShardBits = -1 means it is automatically determined: every shard |
||||
* will be at least 512KB and number of shard bits will not exceed 6. |
||||
* |
||||
* @param capacity The fixed size capacity of the cache |
||||
* @param numShardBits The cache is sharded to 2^numShardBits shards, |
||||
* by hash of the key |
||||
* @param strictCapacityLimit when true, inserts into the cache fail when the cache is full |
||||
*/ |
||||
public ClockCache(final long capacity, final int numShardBits, |
||||
final boolean strictCapacityLimit) { |
||||
super(newClockCache(capacity, numShardBits, strictCapacityLimit)); |
||||
} |
||||
|
||||
private native static long newClockCache(final long capacity, |
||||
final int numShardBits, final boolean strictCapacityLimit); |
||||
@Override protected final native void disposeInternal(final long handle); |
||||
} |
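Being a RocksObject, the cache is AutoCloseable; close() triggers disposeInternal, which deletes the native shared_ptr created by newClockCache. A minimal sketch using only the constructors shown above:

try (final Cache cache = new ClockCache(64 * 1024 * 1024, 6, false)) {
  // hand the cache to whatever table/options object should share it;
  // the Java wrapper only owns the shared_ptr, the native cache itself
  // stays reference counted on the C++ side
}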
@ -0,0 +1,50 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Options for FIFO Compaction |
||||
*/ |
||||
public class CompactionOptionsFIFO extends RocksObject { |
||||
|
||||
public CompactionOptionsFIFO() { |
||||
super(newCompactionOptionsFIFO()); |
||||
} |
||||
|
||||
/** |
||||
* Once the total sum of table files reaches this, we will delete the oldest |
||||
* table file |
||||
* |
||||
* Default: 1GB |
||||
* |
||||
* @param maxTableFilesSize The maximum size of the table files |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
public CompactionOptionsFIFO setMaxTableFilesSize( |
||||
final long maxTableFilesSize) { |
||||
setMaxTableFilesSize(nativeHandle_, maxTableFilesSize); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* Once the total sum of table files reaches this, we will delete the oldest |
||||
* table file |
||||
* |
||||
* Default: 1GB |
||||
* |
||||
* @return max table file size in bytes |
||||
*/ |
||||
public long maxTableFilesSize() { |
||||
return maxTableFilesSize(nativeHandle_); |
||||
} |
||||
|
||||
private native void setMaxTableFilesSize(long handle, long maxTableFilesSize); |
||||
private native long maxTableFilesSize(long handle); |
||||
|
||||
private native static long newCompactionOptionsFIFO(); |
||||
@Override protected final native void disposeInternal(final long handle); |
||||
} |
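A sketch of wiring these options into a column family. CompactionStyle.FIFO already exists in RocksJava; the setCompactionOptionsFIFO setter is assumed to be part of the wider options changes in this pull request, so it is only shown in a comment:

try (final CompactionOptionsFIFO fifoOpts = new CompactionOptionsFIFO()
         .setMaxTableFilesSize(2L * 1024 * 1024 * 1024)) {  // keep at most ~2GB of SST files
  // columnFamilyOptions.setCompactionStyle(CompactionStyle.FIFO)
  //                    .setCompactionOptionsFIFO(fifoOpts);
}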
@ -0,0 +1,273 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Options for Universal Compaction |
||||
*/ |
||||
public class CompactionOptionsUniversal extends RocksObject { |
||||
|
||||
public CompactionOptionsUniversal() { |
||||
super(newCompactionOptionsUniversal()); |
||||
} |
||||
|
||||
/** |
||||
* Percentage flexibility while comparing file size. If the candidate file(s) |
||||
* size is 1% smaller than the next file's size, then include next file into |
||||
* this candidate set. |
||||
* |
||||
* Default: 1 |
||||
* |
||||
* @param sizeRatio The size ratio to use |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
public CompactionOptionsUniversal setSizeRatio(final int sizeRatio) { |
||||
setSizeRatio(nativeHandle_, sizeRatio); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* Percentage flexibility while comparing file size. If the candidate file(s) |
||||
* size is 1% smaller than the next file's size, then include next file into |
||||
* this candidate set. |
||||
* |
||||
* Default: 1 |
||||
* |
||||
* @return The size ratio in use |
||||
*/ |
||||
public int sizeRatio() { |
||||
return sizeRatio(nativeHandle_); |
||||
} |
||||
|
||||
/** |
||||
* The minimum number of files in a single compaction run. |
||||
* |
||||
* Default: 2 |
||||
* |
||||
* @param minMergeWidth minimum number of files in a single compaction run |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
public CompactionOptionsUniversal setMinMergeWidth(final int minMergeWidth) { |
||||
setMinMergeWidth(nativeHandle_, minMergeWidth); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* The minimum number of files in a single compaction run. |
||||
* |
||||
* Default: 2 |
||||
* |
||||
* @return minimum number of files in a single compaction run |
||||
*/ |
||||
public int minMergeWidth() { |
||||
return minMergeWidth(nativeHandle_); |
||||
} |
||||
|
||||
/** |
||||
* The maximum number of files in a single compaction run. |
||||
* |
||||
* Default: {@link Long#MAX_VALUE} |
||||
* |
||||
* @param maxMergeWidth maximum number of files in a single compaction run |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
public CompactionOptionsUniversal setMaxMergeWidth(final int maxMergeWidth) { |
||||
setMaxMergeWidth(nativeHandle_, maxMergeWidth); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* The maximum number of files in a single compaction run. |
||||
* |
||||
* Default: {@link Long#MAX_VALUE} |
||||
* |
||||
* @return maximum number of files in a single compaction run |
||||
*/ |
||||
public int maxMergeWidth() { |
||||
return maxMergeWidth(nativeHandle_); |
||||
} |
||||
|
||||
/** |
||||
* The size amplification is defined as the amount (in percentage) of |
||||
* additional storage needed to store a single byte of data in the database. |
||||
* For example, a size amplification of 2% means that a database that |
||||
* contains 100 bytes of user-data may occupy up to 102 bytes of |
||||
* physical storage. By this definition, a fully compacted database has |
||||
* a size amplification of 0%. Rocksdb uses the following heuristic |
||||
* to calculate size amplification: it assumes that all files excluding |
||||
* the earliest file contribute to the size amplification. |
||||
* |
||||
* Default: 200, which means that a 100 byte database could require up to |
||||
* 300 bytes of storage. |
||||
* |
||||
* @param maxSizeAmplificationPercent the amount of additional storage needed |
||||
* (as a percentage) to store a single byte in the database |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
public CompactionOptionsUniversal setMaxSizeAmplificationPercent( |
||||
final int maxSizeAmplificationPercent) { |
||||
setMaxSizeAmplificationPercent(nativeHandle_, maxSizeAmplificationPercent); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* The size amplification is defined as the amount (in percentage) of |
||||
* additional storage needed to store a single byte of data in the database. |
||||
* For example, a size amplification of 2% means that a database that |
||||
* contains 100 bytes of user-data may occupy up to 102 bytes of |
||||
* physical storage. By this definition, a fully compacted database has |
||||
* a size amplification of 0%. Rocksdb uses the following heuristic |
||||
* to calculate size amplification: it assumes that all files excluding |
||||
* the earliest file contribute to the size amplification. |
||||
* |
||||
* Default: 200, which means that a 100 byte database could require up to |
||||
* 300 bytes of storage. |
||||
* |
||||
* @return the amount of additional storage needed (as a percentage) to store |
||||
* a single byte in the database |
||||
*/ |
||||
public int maxSizeAmplificationPercent() { |
||||
return maxSizeAmplificationPercent(nativeHandle_); |
||||
} |
||||
|
||||
/** |
||||
* If this option is set to -1 (the default value), all the output files |
||||
* will follow the compression type specified. |
||||
* |
||||
* If this option is not negative, we will try to make sure compressed |
||||
* size is just above this value. In normal cases, at least this percentage |
||||
* of data will be compressed. |
||||
* |
||||
* When we are compacting to a new file, here is the criterion for whether |
||||
* it needs to be compressed: assuming this is the list of files sorted |
||||
* by generation time: |
||||
* A1...An B1...Bm C1...Ct |
||||
* where A1 is the newest and Ct is the oldest, and we are going to compact |
||||
* B1...Bm, we calculate the total size of all the files as total_size, as |
||||
* well as the total size of C1...Ct as total_C, the compaction output file |
||||
* will be compressed iff |
||||
* total_C / total_size < this percentage |
||||
* |
||||
* Default: -1 |
||||
* |
||||
* @param compressionSizePercent percentage of size for compression |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
public CompactionOptionsUniversal setCompressionSizePercent( |
||||
final int compressionSizePercent) { |
||||
setCompressionSizePercent(nativeHandle_, compressionSizePercent); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* If this option is set to be -1 (the default value), all the output files |
||||
* will follow compression type specified. |
||||
* |
||||
* If this option is not negative, we will try to make sure compressed |
||||
* size is just above this value. In normal cases, at least this percentage |
||||
* of data will be compressed. |
||||
* |
||||
* When we are compacting to a new file, here is the criterion for whether |
||||
* it needs to be compressed: assuming this is the list of files sorted |
||||
* by generation time: |
||||
* A1...An B1...Bm C1...Ct |
||||
* where A1 is the newest and Ct is the oldest, and we are going to compact |
||||
* B1...Bm, we calculate the total size of all the files as total_size, as |
||||
* well as the total size of C1...Ct as total_C, the compaction output file |
||||
* will be compressed iff |
||||
* total_C / total_size < this percentage |
||||
* |
||||
* Default: -1 |
||||
* |
||||
* @return percentage of size for compression |
||||
*/ |
||||
public int compressionSizePercent() { |
||||
return compressionSizePercent(nativeHandle_); |
||||
} |
||||
|
||||
/** |
||||
* The algorithm used to stop picking files into a single compaction run |
||||
* |
||||
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} |
||||
* |
||||
* @param compactionStopStyle The compaction algorithm |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
public CompactionOptionsUniversal setStopStyle( |
||||
final CompactionStopStyle compactionStopStyle) { |
||||
setStopStyle(nativeHandle_, compactionStopStyle.getValue()); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* The algorithm used to stop picking files into a single compaction run |
||||
* |
||||
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} |
||||
* |
||||
* @return The compaction algorithm |
||||
*/ |
||||
public CompactionStopStyle stopStyle() { |
||||
return CompactionStopStyle.getCompactionStopStyle(stopStyle(nativeHandle_)); |
||||
} |
||||
|
||||
/** |
||||
* Option to optimize the universal multi level compaction by enabling |
||||
* trivial move for non overlapping files. |
||||
* |
||||
* Default: false |
||||
* |
||||
* @param allowTrivialMove true if trivial move is allowed |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
public CompactionOptionsUniversal setAllowTrivialMove( |
||||
final boolean allowTrivialMove) { |
||||
setAllowTrivialMove(nativeHandle_, allowTrivialMove); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* Option to optimize the universal multi level compaction by enabling |
||||
* trivial move for non overlapping files. |
||||
* |
||||
* Default: false |
||||
* |
||||
* @return true if trivial move is allowed |
||||
*/ |
||||
public boolean allowTrivialMove() { |
||||
return allowTrivialMove(nativeHandle_); |
||||
} |
||||
|
||||
private native static long newCompactionOptionsUniversal(); |
||||
@Override protected final native void disposeInternal(final long handle); |
||||
|
||||
private native void setSizeRatio(final long handle, final int sizeRatio); |
||||
private native int sizeRatio(final long handle); |
||||
private native void setMinMergeWidth( |
||||
final long handle, final int minMergeWidth); |
||||
private native int minMergeWidth(final long handle); |
||||
private native void setMaxMergeWidth( |
||||
final long handle, final int maxMergeWidth); |
||||
private native int maxMergeWidth(final long handle); |
||||
private native void setMaxSizeAmplificationPercent( |
||||
final long handle, final int maxSizeAmplificationPercent); |
||||
private native int maxSizeAmplificationPercent(final long handle); |
||||
private native void setCompressionSizePercent( |
||||
final long handle, final int compressionSizePercent); |
||||
private native int compressionSizePercent(final long handle); |
||||
private native void setStopStyle( |
||||
final long handle, final byte stopStyle); |
||||
private native byte stopStyle(final long handle); |
||||
private native void setAllowTrivialMove( |
||||
final long handle, final boolean allowTrivialMove); |
||||
private native boolean allowTrivialMove(final long handle); |
||||
} |
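As with the FIFO options, the object is configured once and then attached to the column family options; setCompactionOptionsUniversal is assumed to be provided elsewhere in this change, so it is only sketched in a comment:

try (final CompactionOptionsUniversal uniOpts = new CompactionOptionsUniversal()
         .setSizeRatio(5)
         .setMaxSizeAmplificationPercent(150)
         .setStopStyle(CompactionStopStyle.CompactionStopStyleSimilarSize)
         .setAllowTrivialMove(true)) {
  // columnFamilyOptions.setCompactionStyle(CompactionStyle.UNIVERSAL)
  //                    .setCompactionOptionsUniversal(uniOpts);
}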
@ -0,0 +1,73 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Compaction Priorities |
||||
*/ |
||||
public enum CompactionPriority { |
||||
|
||||
/** |
||||
* Slightly prioritize larger files by size compensated by #deletes |
||||
*/ |
||||
ByCompensatedSize((byte)0x0), |
||||
|
||||
/** |
||||
* First compact files whose data's latest update time is oldest. |
||||
* Try this if you only update some hot keys in small ranges. |
||||
*/ |
||||
OldestLargestSeqFirst((byte)0x1), |
||||
|
||||
/** |
||||
* First compact files whose range hasn't been compacted to the next level |
||||
* for the longest. If your updates are random across the key space, |
||||
* write amplification is slightly better with this option. |
||||
*/ |
||||
OldestSmallestSeqFirst((byte)0x2), |
||||
|
||||
/** |
||||
* First compact files whose ratio between overlapping size in next level |
||||
* and its size is the smallest. It in many cases can optimize write |
||||
* amplification. |
||||
*/ |
||||
MinOverlappingRatio((byte)0x3); |
||||
|
||||
|
||||
private final byte value; |
||||
|
||||
CompactionPriority(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* Returns the byte value of the enumerations value |
||||
* |
||||
* @return byte representation |
||||
*/ |
||||
public byte getValue() { |
||||
return value; |
||||
} |
||||
|
||||
/** |
||||
* Get CompactionPriority by byte value. |
||||
* |
||||
* @param value byte representation of CompactionPriority. |
||||
* |
||||
* @return {@link org.rocksdb.CompactionPriority} instance. |
||||
* @throws java.lang.IllegalArgumentException if an invalid |
||||
* value is provided. |
||||
*/ |
||||
public static CompactionPriority getCompactionPriority(final byte value) { |
||||
for (final CompactionPriority compactionPriority : |
||||
CompactionPriority.values()) { |
||||
if (compactionPriority.getValue() == value){ |
||||
return compactionPriority; |
||||
} |
||||
} |
||||
throw new IllegalArgumentException( |
||||
"Illegal value provided for CompactionPriority."); |
||||
} |
||||
} |
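The byte value is what crosses the JNI boundary; getCompactionPriority is the inverse of getValue. For example (the options setter in the last line is an assumption):

final byte b = CompactionPriority.MinOverlappingRatio.getValue();          // 0x3
assert CompactionPriority.getCompactionPriority(b)
    == CompactionPriority.MinOverlappingRatio;
// options.setCompactionPriority(CompactionPriority.MinOverlappingRatio);  // setter assumed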
@ -0,0 +1,54 @@ |
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Algorithm used to make a compaction request stop picking new files |
||||
* into a single compaction run |
||||
*/ |
||||
public enum CompactionStopStyle { |
||||
|
||||
/** |
||||
* Pick files of similar size |
||||
*/ |
||||
CompactionStopStyleSimilarSize((byte)0x0), |
||||
|
||||
/** |
||||
* Total size of picked files > next file |
||||
*/ |
||||
CompactionStopStyleTotalSize((byte)0x1); |
||||
|
||||
|
||||
private final byte value; |
||||
|
||||
CompactionStopStyle(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* Returns the byte value of the enumerations value |
||||
* |
||||
* @return byte representation |
||||
*/ |
||||
public byte getValue() { |
||||
return value; |
||||
} |
||||
|
||||
/** |
||||
* Get CompactionStopStyle by byte value. |
||||
* |
||||
* @param value byte representation of CompactionStopStyle. |
||||
* |
||||
* @return {@link org.rocksdb.CompactionStopStyle} instance. |
||||
* @throws java.lang.IllegalArgumentException if an invalid |
||||
* value is provided. |
||||
*/ |
||||
public static CompactionStopStyle getCompactionStopStyle(final byte value) { |
||||
for (final CompactionStopStyle compactionStopStyle : |
||||
CompactionStopStyle.values()) { |
||||
if (compactionStopStyle.getValue() == value){ |
||||
return compactionStopStyle; |
||||
} |
||||
} |
||||
throw new IllegalArgumentException( |
||||
"Illegal value provided for CompactionStopStyle."); |
||||
} |
||||
} |
@ -0,0 +1,85 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Options for Compression |
||||
*/ |
||||
public class CompressionOptions extends RocksObject { |
||||
|
||||
public CompressionOptions() { |
||||
super(newCompressionOptions()); |
||||
} |
||||
|
||||
public CompressionOptions setWindowBits(final int windowBits) { |
||||
setWindowBits(nativeHandle_, windowBits); |
||||
return this; |
||||
} |
||||
|
||||
public int windowBits() { |
||||
return windowBits(nativeHandle_); |
||||
} |
||||
|
||||
public CompressionOptions setLevel(final int level) { |
||||
setLevel(nativeHandle_, level); |
||||
return this; |
||||
} |
||||
|
||||
public int level() { |
||||
return level(nativeHandle_); |
||||
} |
||||
|
||||
public CompressionOptions setStrategy(final int strategy) { |
||||
setStrategy(nativeHandle_, strategy); |
||||
return this; |
||||
} |
||||
|
||||
public int strategy() { |
||||
return strategy(nativeHandle_); |
||||
} |
||||
|
||||
/** |
||||
* Maximum size of dictionary used to prime the compression library. Currently |
||||
* this dictionary will be constructed by sampling the first output file in a |
||||
* subcompaction when the target level is bottommost. This dictionary will be |
||||
* loaded into the compression library before compressing/uncompressing each |
||||
* data block of subsequent files in the subcompaction. Effectively, this |
||||
* improves compression ratios when there are repetitions across data blocks. |
||||
* |
||||
* A value of 0 indicates the feature is disabled. |
||||
* |
||||
* Default: 0. |
||||
* |
||||
* @param maxDictBytes Maximum bytes to use for the dictionary |
||||
* |
||||
* @return the reference to the current options |
||||
*/ |
||||
public CompressionOptions setMaxDictBytes(final int maxDictBytes) { |
||||
setMaxDictBytes(nativeHandle_, maxDictBytes); |
||||
return this; |
||||
} |
||||
|
||||
/** |
||||
* Maximum size of dictionary used to prime the compression library. |
||||
* |
||||
* @return The maximum bytes to use for the dictionary |
||||
*/ |
||||
public int maxDictBytes() { |
||||
return maxDictBytes(nativeHandle_); |
||||
} |
||||
|
||||
private native static long newCompressionOptions(); |
||||
@Override protected final native void disposeInternal(final long handle); |
||||
|
||||
private native void setWindowBits(final long handle, final int windowBits); |
||||
private native int windowBits(final long handle); |
||||
private native void setLevel(final long handle, final int level); |
||||
private native int level(final long handle); |
||||
private native void setStrategy(final long handle, final int strategy); |
||||
private native int strategy(final long handle); |
||||
private native void setMaxDictBytes(final long handle, final int maxDictBytes); |
||||
private native int maxDictBytes(final long handle); |
||||
} |
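windowBits, level and strategy mirror the zlib-style knobs of the native CompressionOptions struct, and maxDictBytes sizes the optional compression dictionary. A sketch; the setCompressionOptions setter on the options object is assumed to be added elsewhere in this change:

try (final CompressionOptions compOpts = new CompressionOptions()
         .setWindowBits(-14)             // raw-deflate window, the RocksDB default
         .setLevel(6)
         .setStrategy(0)
         .setMaxDictBytes(16 * 1024)) {  // 16KB dictionary; 0 disables the feature
  // columnFamilyOptions.setCompressionOptions(compOpts);
}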
File diff suppressed because it is too large
@ -0,0 +1,47 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import java.nio.file.Path; |
||||
|
||||
/** |
||||
* Tuple of database path and target size |
||||
*/ |
||||
public class DbPath { |
||||
final Path path; |
||||
final long targetSize; |
||||
|
||||
public DbPath(final Path path, final long targetSize) { |
||||
this.path = path; |
||||
this.targetSize = targetSize; |
||||
} |
||||
|
||||
@Override |
||||
public boolean equals(final Object o) { |
||||
if (this == o) { |
||||
return true; |
||||
} |
||||
|
||||
if (o == null || getClass() != o.getClass()) { |
||||
return false; |
||||
} |
||||
|
||||
final DbPath dbPath = (DbPath) o; |
||||
|
||||
if (targetSize != dbPath.targetSize) { |
||||
return false; |
||||
} |
||||
|
||||
return path != null ? path.equals(dbPath.path) : dbPath.path == null; |
||||
} |
||||
|
||||
@Override |
||||
public int hashCode() { |
||||
int result = path != null ? path.hashCode() : 0; |
||||
result = 31 * result + (int) (targetSize ^ (targetSize >>> 32)); |
||||
return result; |
||||
} |
||||
} |
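A sketch of building a path list for a database spread over two volumes; the java.nio/java.util imports are spelled out, the paths and sizes are made up for illustration, and the setDbPaths consumer on the options object is an assumption:

import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

final List<DbPath> dbPaths = Arrays.asList(
    new DbPath(Paths.get("/mnt/fast_ssd/db"), 100L * 1024 * 1024 * 1024),   // 100GB target
    new DbPath(Paths.get("/mnt/bulk_hdd/db"), 1024L * 1024 * 1024 * 1024)); // 1TB target
// options.setDbPaths(dbPaths);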
@ -0,0 +1,82 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Least Recently Used Cache |
||||
*/ |
||||
public class LRUCache extends Cache { |
||||
|
||||
/** |
||||
* Create a new cache with a fixed size capacity |
||||
* |
||||
* @param capacity The fixed size capacity of the cache |
||||
*/ |
||||
public LRUCache(final long capacity) { |
||||
this(capacity, -1, false, 0.0); |
||||
} |
||||
|
||||
/** |
||||
* Create a new cache with a fixed size capacity. The cache is sharded |
||||
* to 2^numShardBits shards, by hash of the key. The total capacity |
||||
* is divided and evenly assigned to each shard. |
||||
* numShardBits = -1 means it is automatically determined: every shard |
||||
* will be at least 512KB and number of shard bits will not exceed 6. |
||||
* |
||||
* @param capacity The fixed size capacity of the cache |
||||
* @param numShardBits The cache is sharded to 2^numShardBits shards, |
||||
* by hash of the key |
||||
*/ |
||||
public LRUCache(final long capacity, final int numShardBits) { |
||||
super(newLRUCache(capacity, numShardBits, false,0.0)); |
||||
} |
||||
|
||||
/** |
||||
* Create a new cache with a fixed size capacity. The cache is sharded |
||||
* to 2^numShardBits shards, by hash of the key. The total capacity |
||||
* is divided and evenly assigned to each shard. If strictCapacityLimit |
||||
* is set, inserts into the cache will fail when the cache is full. |
||||
* numShardBits = -1 means it is automatically determined: every shard |
||||
* will be at least 512KB and number of shard bits will not exceed 6. |
||||
* |
||||
* @param capacity The fixed size capacity of the cache |
||||
* @param numShardBits The cache is sharded to 2^numShardBits shards, |
||||
* by hash of the key |
||||
* @param strictCapacityLimit when true, inserts into the cache fail when the cache is full |
||||
*/ |
||||
public LRUCache(final long capacity, final int numShardBits, |
||||
final boolean strictCapacityLimit) { |
||||
super(newLRUCache(capacity, numShardBits, strictCapacityLimit,0.0)); |
||||
} |
||||
|
||||
/** |
||||
* Create a new cache with a fixed size capacity. The cache is sharded |
||||
* to 2^numShardBits shards, by hash of the key. The total capacity |
||||
* is divided and evenly assigned to each shard. If strictCapacityLimit |
||||
* is set, inserts into the cache will fail when the cache is full. The user can also |
||||
* set the percentage of the cache reserved for high priority entries via |
||||
* highPriPoolRatio. |
||||
* numShardBits = -1 means it is automatically determined: every shard |
||||
* will be at least 512KB and number of shard bits will not exceed 6. |
||||
* |
||||
* @param capacity The fixed size capacity of the cache |
||||
* @param numShardBits The cache is sharded to 2^numShardBits shards, |
||||
* by hash of the key |
||||
* @param strictCapacityLimit when true, inserts into the cache fail when the cache is full |
||||
* @param highPriPoolRatio percentage of the cache reserved for high priority |
||||
* entries |
||||
*/ |
||||
public LRUCache(final long capacity, final int numShardBits, |
||||
final boolean strictCapacityLimit, final double highPriPoolRatio) { |
||||
super(newLRUCache(capacity, numShardBits, strictCapacityLimit, |
||||
highPriPoolRatio)); |
||||
} |
||||
|
||||
private native static long newLRUCache(final long capacity, |
||||
final int numShardBits, final boolean strictCapacityLimit, |
||||
final double highPriPoolRatio); |
||||
@Override protected final native void disposeInternal(final long handle); |
||||
} |
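All four constructors end up in the same native newLRUCache call; highPriPoolRatio is the fraction of capacity reserved for high priority entries such as index and filter blocks. A minimal sketch using only the constructor shown above:

try (final Cache cache = new LRUCache(256 * 1024 * 1024, 8, false, 0.1)) {
  // share this cache between column families via their table options
}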
@ -0,0 +1,83 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* The WAL Recover Mode |
||||
*/ |
||||
public enum WALRecoveryMode { |
||||
|
||||
/** |
||||
* Original levelDB recovery |
||||
* |
||||
* We tolerate incomplete records in trailing data on all logs |
||||
* Use case : This is legacy behavior (default) |
||||
*/ |
||||
TolerateCorruptedTailRecords((byte)0x00), |
||||
|
||||
/** |
||||
* Recover from clean shutdown |
||||
* |
||||
* We don't expect to find any corruption in the WAL |
||||
* Use case : This is ideal for unit tests and rare applications that |
||||
* require a high consistency guarantee |
||||
*/ |
||||
AbsoluteConsistency((byte)0x01), |
||||
|
||||
/** |
||||
* Recover to point-in-time consistency |
||||
* We stop the WAL playback on discovering WAL inconsistency |
||||
* Use case : Ideal for systems that have disk controller cache like |
||||
* hard disks or SSDs without a super capacitor, that store related data |
||||
*/ |
||||
PointInTimeRecovery((byte)0x02), |
||||
|
||||
/** |
||||
* Recovery after a disaster |
||||
* We ignore any corruption in the WAL and try to salvage as much data as |
||||
* possible |
||||
* Use case : Ideal for last ditch effort to recover data or systems that |
||||
* operate with low grade unrelated data |
||||
*/ |
||||
SkipAnyCorruptedRecords((byte)0x03); |
||||
|
||||
private byte value; |
||||
|
||||
WALRecoveryMode(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* <p>Returns the byte value of the enumerations value.</p> |
||||
* |
||||
* @return byte representation |
||||
*/ |
||||
public byte getValue() { |
||||
return value; |
||||
} |
||||
|
||||
/** |
||||
* <p>Get the WALRecoveryMode enumeration value by |
||||
* passing the byte identifier to this method.</p> |
||||
* |
||||
* @param byteIdentifier of WALRecoveryMode. |
||||
* |
||||
* @return WALRecoveryMode instance. |
||||
* |
||||
* @throws IllegalArgumentException If WALRecoveryMode cannot be found for the |
||||
* provided byteIdentifier |
||||
*/ |
||||
public static WALRecoveryMode getWALRecoveryMode(final byte byteIdentifier) { |
||||
for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) { |
||||
if (walRecoveryMode.getValue() == byteIdentifier) { |
||||
return walRecoveryMode; |
||||
} |
||||
} |
||||
|
||||
throw new IllegalArgumentException( |
||||
"Illegal value provided for WALRecoveryMode."); |
||||
} |
||||
} |
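As with the other enums in this change, the byte value is the JNI representation and getWALRecoveryMode is its inverse; the setWalRecoveryMode setter on the options object is assumed and only shown in a comment:

final byte id = WALRecoveryMode.PointInTimeRecovery.getValue();  // 0x02
assert WALRecoveryMode.getWALRecoveryMode(id) == WALRecoveryMode.PointInTimeRecovery;
// options.setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery);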
@ -0,0 +1,26 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import org.junit.Test; |
||||
|
||||
public class ClockCacheTest { |
||||
|
||||
static { |
||||
RocksDB.loadLibrary(); |
||||
} |
||||
|
||||
@Test |
||||
public void newClockCache() { |
||||
final long capacity = 1000; |
||||
final int numShardBits = 16; |
||||
final boolean strictCapacityLimit = true; |
||||
try(final Cache clockCache = new ClockCache(capacity, |
||||
numShardBits, strictCapacityLimit)) { |
||||
//no op
|
||||
} |
||||
} |
||||
} |
@ -0,0 +1,26 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import org.junit.Test; |
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat; |
||||
|
||||
public class CompactionOptionsFIFOTest { |
||||
|
||||
static { |
||||
RocksDB.loadLibrary(); |
||||
} |
||||
|
||||
@Test |
||||
public void maxTableFilesSize() { |
||||
final long size = 500 * 1024 * 1026; |
||||
try(final CompactionOptionsFIFO opt = new CompactionOptionsFIFO()) { |
||||
opt.setMaxTableFilesSize(size); |
||||
assertThat(opt.maxTableFilesSize()).isEqualTo(size); |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,80 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import org.junit.Test; |
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat; |
||||
|
||||
public class CompactionOptionsUniversalTest { |
||||
|
||||
static { |
||||
RocksDB.loadLibrary(); |
||||
} |
||||
|
||||
@Test |
||||
public void sizeRatio() { |
||||
final int sizeRatio = 4; |
||||
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { |
||||
opt.setSizeRatio(sizeRatio); |
||||
assertThat(opt.sizeRatio()).isEqualTo(sizeRatio); |
||||
} |
||||
} |
||||
|
||||
@Test |
||||
public void minMergeWidth() { |
||||
final int minMergeWidth = 3; |
||||
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { |
||||
opt.setMinMergeWidth(minMergeWidth); |
||||
assertThat(opt.minMergeWidth()).isEqualTo(minMergeWidth); |
||||
} |
||||
} |
||||
|
||||
@Test |
||||
public void maxMergeWidth() { |
||||
final int maxMergeWidth = Integer.MAX_VALUE - 1234; |
||||
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { |
||||
opt.setMaxMergeWidth(maxMergeWidth); |
||||
assertThat(opt.maxMergeWidth()).isEqualTo(maxMergeWidth); |
||||
} |
||||
} |
||||
|
||||
@Test |
||||
public void maxSizeAmplificationPercent() { |
||||
final int maxSizeAmplificationPercent = 150; |
||||
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { |
||||
opt.setMaxSizeAmplificationPercent(maxSizeAmplificationPercent); |
||||
assertThat(opt.maxSizeAmplificationPercent()).isEqualTo(maxSizeAmplificationPercent); |
||||
} |
||||
} |
||||
|
||||
@Test |
||||
public void compressionSizePercent() { |
||||
final int compressionSizePercent = 500; |
||||
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { |
||||
opt.setCompressionSizePercent(compressionSizePercent); |
||||
assertThat(opt.compressionSizePercent()).isEqualTo(compressionSizePercent); |
||||
} |
||||
} |
||||
|
||||
@Test |
||||
public void stopStyle() { |
||||
final CompactionStopStyle stopStyle = CompactionStopStyle.CompactionStopStyleSimilarSize; |
||||
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { |
||||
opt.setStopStyle(stopStyle); |
||||
assertThat(opt.stopStyle()).isEqualTo(stopStyle); |
||||
} |
||||
} |
||||
|
||||
@Test |
||||
public void allowTrivialMove() { |
||||
final boolean allowTrivialMove = true; |
||||
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { |
||||
opt.setAllowTrivialMove(allowTrivialMove); |
||||
assertThat(opt.allowTrivialMove()).isEqualTo(allowTrivialMove); |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,31 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import org.junit.Test; |
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat; |
||||
|
||||
public class CompactionPriorityTest { |
||||
|
||||
@Test(expected = IllegalArgumentException.class) |
||||
public void failIfIllegalByteValueProvided() { |
||||
CompactionPriority.getCompactionPriority((byte) -1); |
||||
} |
||||
|
||||
@Test |
||||
public void getCompactionPriority() { |
||||
assertThat(CompactionPriority.getCompactionPriority( |
||||
CompactionPriority.OldestLargestSeqFirst.getValue())) |
||||
.isEqualTo(CompactionPriority.OldestLargestSeqFirst); |
||||
} |
||||
|
||||
@Test |
||||
public void valueOf() { |
||||
assertThat(CompactionPriority.valueOf("OldestSmallestSeqFirst")). |
||||
isEqualTo(CompactionPriority.OldestSmallestSeqFirst); |
||||
} |
||||
} |
@ -0,0 +1,31 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import org.junit.Test; |
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat; |
||||
|
||||
public class CompactionStopStyleTest { |
||||
|
||||
@Test(expected = IllegalArgumentException.class) |
||||
public void failIfIllegalByteValueProvided() { |
||||
CompactionStopStyle.getCompactionStopStyle((byte) -1); |
||||
} |
||||
|
||||
@Test |
||||
public void getCompactionStopStyle() { |
||||
assertThat(CompactionStopStyle.getCompactionStopStyle( |
||||
CompactionStopStyle.CompactionStopStyleTotalSize.getValue())) |
||||
.isEqualTo(CompactionStopStyle.CompactionStopStyleTotalSize); |
||||
} |
||||
|
||||
@Test |
||||
public void valueOf() { |
||||
assertThat(CompactionStopStyle.valueOf("CompactionStopStyleSimilarSize")). |
||||
isEqualTo(CompactionStopStyle.CompactionStopStyleSimilarSize); |
||||
} |
||||
} |
@ -0,0 +1,20 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import org.junit.Test; |
||||
|
||||
|
||||
public class CompressionTypesTest { |
||||
@Test |
||||
public void getCompressionType() { |
||||
for (final CompressionType compressionType : CompressionType.values()) { |
||||
String libraryName = compressionType.getLibraryName(); |
||||
compressionType.equals(CompressionType.getCompressionType( |
||||
libraryName)); |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,27 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import org.junit.Test; |
||||
|
||||
public class LRUCacheTest { |
||||
|
||||
static { |
||||
RocksDB.loadLibrary(); |
||||
} |
||||
|
||||
@Test |
||||
public void newLRUCache() { |
||||
final long capacity = 1000; |
||||
final int numShardBits = 16; |
||||
final boolean strictCapacityLimit = true; |
||||
final double highPriPoolRatio = 5; |
||||
try(final Cache lruCache = new LRUCache(capacity, |
||||
numShardBits, strictCapacityLimit, highPriPoolRatio)) { |
||||
//no op
|
||||
} |
||||
} |
||||
} |
@ -0,0 +1,22 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import org.junit.Test; |
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat; |
||||
|
||||
|
||||
public class WALRecoveryModeTest { |
||||
|
||||
@Test |
||||
public void getWALRecoveryMode() { |
||||
for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) { |
||||
assertThat(WALRecoveryMode.getWALRecoveryMode(walRecoveryMode.getValue())) |
||||
.isEqualTo(walRecoveryMode); |
||||
} |
||||
} |
||||
} |