Add missing functionality to RocksJava (#4833)
Summary: This is my latest round of changes to add missing items to RocksJava. More to come in future PRs.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/4833

Differential Revision: D14152266

Pulled By: sagar0

fbshipit-source-id: d6cff67e26da06c131491b5cf6911a8cd0db0775

parent 06f378d75e
commit bb474e9a02
@@ -0,0 +1,222 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionJobInfo.

#include <jni.h>

#include "include/org_rocksdb_CompactionJobInfo.h"
#include "rocksdb/listener.h"
#include "rocksjni/portal.h"

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    newCompactionJobInfo
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompactionJobInfo_newCompactionJobInfo(
    JNIEnv*, jclass) {
  auto* compact_job_info = new rocksdb::CompactionJobInfo();
  return reinterpret_cast<jlong>(compact_job_info);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionJobInfo_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  delete compact_job_info;
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    columnFamilyName
 * Signature: (J)[B
 */
jbyteArray Java_org_rocksdb_CompactionJobInfo_columnFamilyName(
    JNIEnv* env, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::JniUtil::copyBytes(
      env, compact_job_info->cf_name);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    status
 * Signature: (J)Lorg/rocksdb/Status;
 */
jobject Java_org_rocksdb_CompactionJobInfo_status(
    JNIEnv* env, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::StatusJni::construct(
      env, compact_job_info->status);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    threadId
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobInfo_threadId(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return static_cast<jlong>(compact_job_info->thread_id);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    jobId
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionJobInfo_jobId(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return static_cast<jint>(compact_job_info->job_id);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    baseInputLevel
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionJobInfo_baseInputLevel(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return static_cast<jint>(compact_job_info->base_input_level);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    outputLevel
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionJobInfo_outputLevel(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return static_cast<jint>(compact_job_info->output_level);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    inputFiles
 * Signature: (J)[Ljava/lang/String;
 */
jobjectArray Java_org_rocksdb_CompactionJobInfo_inputFiles(
    JNIEnv* env, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::JniUtil::toJavaStrings(
      env, &compact_job_info->input_files);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    outputFiles
 * Signature: (J)[Ljava/lang/String;
 */
jobjectArray Java_org_rocksdb_CompactionJobInfo_outputFiles(
    JNIEnv* env, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::JniUtil::toJavaStrings(
      env, &compact_job_info->output_files);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    tableProperties
 * Signature: (J)Ljava/util/Map;
 */
jobject Java_org_rocksdb_CompactionJobInfo_tableProperties(
    JNIEnv* env, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  auto* map = &compact_job_info->table_properties;

  jobject jhash_map = rocksdb::HashMapJni::construct(
      env, static_cast<uint32_t>(map->size()));
  if (jhash_map == nullptr) {
    // exception occurred
    return nullptr;
  }

  const rocksdb::HashMapJni::FnMapKV<
      const std::string, std::shared_ptr<const rocksdb::TableProperties>,
      jobject, jobject>
      fn_map_kv = [env](
          const std::pair<const std::string,
              std::shared_ptr<const rocksdb::TableProperties>>& kv) {
        jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
        if (env->ExceptionCheck()) {
          // an error occurred
          return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
        }

        jobject jtable_properties =
            rocksdb::TablePropertiesJni::fromCppTableProperties(
                env, *(kv.second.get()));
        if (env->ExceptionCheck()) {
          // an error occurred
          env->DeleteLocalRef(jkey);
          return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
        }

        return std::unique_ptr<std::pair<jobject, jobject>>(
            new std::pair<jobject, jobject>(
                static_cast<jobject>(jkey), jtable_properties));
      };

  if (!rocksdb::HashMapJni::putAll(
          env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
    // exception occurred
    return nullptr;
  }

  return jhash_map;
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    compactionReason
 * Signature: (J)B
 */
jbyte Java_org_rocksdb_CompactionJobInfo_compactionReason(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::CompactionReasonJni::toJavaCompactionReason(
      compact_job_info->compaction_reason);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    compression
 * Signature: (J)B
 */
jbyte Java_org_rocksdb_CompactionJobInfo_compression(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  return rocksdb::CompressionTypeJni::toJavaCompressionType(
      compact_job_info->compression);
}

/*
 * Class:     org_rocksdb_CompactionJobInfo
 * Method:    stats
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobInfo_stats(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_info =
      reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
  auto* stats = new rocksdb::CompactionJobStats();
  stats->Add(compact_job_info->stats);
  return reinterpret_cast<jlong>(stats);
}
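
For reference, the Java side that this bridge backs can be exercised roughly as follows. This is a sketch, not part of the diff: it assumes the `compactFiles` overload that takes a `CompactionJobInfo`, and the SST file names are placeholders.

import java.util.Arrays;
import org.rocksdb.*;

public class CompactionJobInfoExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/rocksjava-demo");
         final CompactionOptions compactionOptions = new CompactionOptions();
         final CompactionJobInfo jobInfo = new CompactionJobInfo()) {
      // compact two (placeholder) SST files into level 1; on return the
      // native side shown above fills in jobInfo
      db.compactFiles(compactionOptions, db.getDefaultColumnFamily(),
          Arrays.asList("000012.sst", "000013.sst"),
          1 /* outputLevel */, -1 /* outputPathId */, jobInfo);
      System.out.println("job " + jobInfo.jobId()
          + " compacted " + jobInfo.inputFiles()
          + " into " + jobInfo.outputFiles());
    }
  }
}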
@@ -0,0 +1,361 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionJobStats.

#include <jni.h>

#include "include/org_rocksdb_CompactionJobStats.h"
#include "rocksdb/compaction_job_stats.h"
#include "rocksjni/portal.h"

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    newCompactionJobStats
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompactionJobStats_newCompactionJobStats(
    JNIEnv*, jclass) {
  auto* compact_job_stats = new rocksdb::CompactionJobStats();
  return reinterpret_cast<jlong>(compact_job_stats);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionJobStats_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  delete compact_job_stats;
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    reset
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionJobStats_reset(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  compact_job_stats->Reset();
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    add
 * Signature: (JJ)V
 */
void Java_org_rocksdb_CompactionJobStats_add(
    JNIEnv*, jclass, jlong jhandle, jlong jother_handle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  auto* other_compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jother_handle);
  compact_job_stats->Add(*other_compact_job_stats);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    elapsedMicros
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_elapsedMicros(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(compact_job_stats->elapsed_micros);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numInputRecords
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numInputRecords(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(compact_job_stats->num_input_records);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numInputFiles
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numInputFiles(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(compact_job_stats->num_input_files);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numInputFilesAtOutputLevel
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numInputFilesAtOutputLevel(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_input_files_at_output_level);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numOutputRecords
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numOutputRecords(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_output_records);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numOutputFiles
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numOutputFiles(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_output_files);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    isManualCompaction
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_CompactionJobStats_isManualCompaction(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  if (compact_job_stats->is_manual_compaction) {
    return JNI_TRUE;
  } else {
    return JNI_FALSE;
  }
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    totalInputBytes
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_totalInputBytes(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->total_input_bytes);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    totalOutputBytes
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_totalOutputBytes(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->total_output_bytes);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numRecordsReplaced
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numRecordsReplaced(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_records_replaced);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    totalInputRawKeyBytes
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_totalInputRawKeyBytes(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->total_input_raw_key_bytes);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    totalInputRawValueBytes
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_totalInputRawValueBytes(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->total_input_raw_value_bytes);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numInputDeletionRecords
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numInputDeletionRecords(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_input_deletion_records);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numExpiredDeletionRecords
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numExpiredDeletionRecords(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_expired_deletion_records);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numCorruptKeys
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numCorruptKeys(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_corrupt_keys);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    fileWriteNanos
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_fileWriteNanos(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->file_write_nanos);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    fileRangeSyncNanos
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_fileRangeSyncNanos(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->file_range_sync_nanos);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    fileFsyncNanos
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_fileFsyncNanos(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->file_fsync_nanos);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    filePrepareWriteNanos
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_filePrepareWriteNanos(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->file_prepare_write_nanos);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    smallestOutputKeyPrefix
 * Signature: (J)[B
 */
jbyteArray Java_org_rocksdb_CompactionJobStats_smallestOutputKeyPrefix(
    JNIEnv* env, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return rocksdb::JniUtil::copyBytes(env,
      compact_job_stats->smallest_output_key_prefix);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    largestOutputKeyPrefix
 * Signature: (J)[B
 */
jbyteArray Java_org_rocksdb_CompactionJobStats_largestOutputKeyPrefix(
    JNIEnv* env, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return rocksdb::JniUtil::copyBytes(env,
      compact_job_stats->largest_output_key_prefix);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numSingleDelFallthru
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numSingleDelFallthru(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_single_del_fallthru);
}

/*
 * Class:     org_rocksdb_CompactionJobStats
 * Method:    numSingleDelMismatch
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionJobStats_numSingleDelMismatch(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_job_stats =
      reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
  return static_cast<jlong>(
      compact_job_stats->num_single_del_mismatch);
}
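
The Java class these natives back can also be used standalone. A minimal sketch of the aggregate/reset cycle; in practice the per-job values come from `CompactionJobInfo#stats()` rather than empty objects:

try (final CompactionJobStats total = new CompactionJobStats();
     final CompactionJobStats jobStats = new CompactionJobStats()) {
  total.add(jobStats);  // backed by Java_org_rocksdb_CompactionJobStats_add
  System.out.println("input records: " + total.numInputRecords()
      + ", output records: " + total.numOutputRecords()
      + ", elapsed micros: " + total.elapsedMicros());
  total.reset();        // backed by Java_org_rocksdb_CompactionJobStats_reset
}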
@@ -0,0 +1,116 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionOptions.

#include <jni.h>

#include "include/org_rocksdb_CompactionOptions.h"
#include "rocksdb/options.h"
#include "rocksjni/portal.h"

/*
 * Class:     org_rocksdb_CompactionOptions
 * Method:    newCompactionOptions
 * Signature: ()J
 */
jlong Java_org_rocksdb_CompactionOptions_newCompactionOptions(
    JNIEnv*, jclass) {
  auto* compact_opts = new rocksdb::CompactionOptions();
  return reinterpret_cast<jlong>(compact_opts);
}

/*
 * Class:     org_rocksdb_CompactionOptions
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_CompactionOptions_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  auto* compact_opts =
      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  delete compact_opts;
}

/*
 * Class:     org_rocksdb_CompactionOptions
 * Method:    compression
 * Signature: (J)B
 */
jbyte Java_org_rocksdb_CompactionOptions_compression(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_opts =
      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  return rocksdb::CompressionTypeJni::toJavaCompressionType(
      compact_opts->compression);
}

/*
 * Class:     org_rocksdb_CompactionOptions
 * Method:    setCompression
 * Signature: (JB)V
 */
void Java_org_rocksdb_CompactionOptions_setCompression(
    JNIEnv*, jclass, jlong jhandle, jbyte jcompression_type_value) {
  auto* compact_opts =
      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  compact_opts->compression =
      rocksdb::CompressionTypeJni::toCppCompressionType(
          jcompression_type_value);
}

/*
 * Class:     org_rocksdb_CompactionOptions
 * Method:    outputFileSizeLimit
 * Signature: (J)J
 */
jlong Java_org_rocksdb_CompactionOptions_outputFileSizeLimit(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_opts =
      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  return static_cast<jlong>(
      compact_opts->output_file_size_limit);
}

/*
 * Class:     org_rocksdb_CompactionOptions
 * Method:    setOutputFileSizeLimit
 * Signature: (JJ)V
 */
void Java_org_rocksdb_CompactionOptions_setOutputFileSizeLimit(
    JNIEnv*, jclass, jlong jhandle, jlong joutput_file_size_limit) {
  auto* compact_opts =
      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  compact_opts->output_file_size_limit =
      static_cast<uint64_t>(joutput_file_size_limit);
}

/*
 * Class:     org_rocksdb_CompactionOptions
 * Method:    maxSubcompactions
 * Signature: (J)I
 */
jint Java_org_rocksdb_CompactionOptions_maxSubcompactions(
    JNIEnv*, jclass, jlong jhandle) {
  auto* compact_opts =
      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  return static_cast<jint>(
      compact_opts->max_subcompactions);
}

/*
 * Class:     org_rocksdb_CompactionOptions
 * Method:    setMaxSubcompactions
 * Signature: (JI)V
 */
void Java_org_rocksdb_CompactionOptions_setMaxSubcompactions(
    JNIEnv*, jclass, jlong jhandle, jint jmax_subcompactions) {
  auto* compact_opts =
      reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
  compact_opts->max_subcompactions =
      static_cast<uint32_t>(jmax_subcompactions);
}
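
A sketch of configuring these options from Java for a manual `compactFiles` call; the values are illustrative, and setter chaining assumes the usual RocksJava fluent style:

try (final CompactionOptions compactionOptions = new CompactionOptions()) {
  compactionOptions
      .setCompression(CompressionType.LZ4_COMPRESSION)
      .setOutputFileSizeLimit(64 * 1024 * 1024)  // 64 MiB per output file
      .setMaxSubcompactions(4);
  // pass compactionOptions to RocksDB#compactFiles(...)
}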
@@ -0,0 +1,53 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::PersistentCache.

#include <jni.h>
#include <string>

#include "include/org_rocksdb_PersistentCache.h"
#include "rocksdb/persistent_cache.h"
#include "loggerjnicallback.h"
#include "portal.h"

/*
 * Class:     org_rocksdb_PersistentCache
 * Method:    newPersistentCache
 * Signature: (JLjava/lang/String;JJZ)J
 */
jlong Java_org_rocksdb_PersistentCache_newPersistentCache(
    JNIEnv* env, jclass, jlong jenv_handle, jstring jpath,
    jlong jsz, jlong jlogger_handle, jboolean joptimized_for_nvm) {
  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jenv_handle);
  jboolean has_exception = JNI_FALSE;
  std::string path =
      rocksdb::JniUtil::copyStdString(env, jpath, &has_exception);
  if (has_exception == JNI_TRUE) {
    return 0;
  }
  auto* logger =
      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(
          jlogger_handle);
  auto* cache = new std::shared_ptr<rocksdb::PersistentCache>(nullptr);
  rocksdb::Status s = rocksdb::NewPersistentCache(
      rocks_env, path, static_cast<uint64_t>(jsz), *logger,
      static_cast<bool>(joptimized_for_nvm), cache);
  if (!s.ok()) {
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
  }
  return reinterpret_cast<jlong>(cache);
}

/*
 * Class:     org_rocksdb_PersistentCache
 * Method:    disposeInternal
 * Signature: (J)V
 */
void Java_org_rocksdb_PersistentCache_disposeInternal(
    JNIEnv*, jobject, jlong jhandle) {
  auto* cache =
      reinterpret_cast<std::shared_ptr<rocksdb::PersistentCache>*>(jhandle);
  delete cache;  // delete std::shared_ptr
}
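
From Java, the cache is constructed with an `Env`, a path, a size and a `Logger`, mirroring the `(JLjava/lang/String;JJZ)J` constructor signature above. A sketch; the path and size are placeholders, and wiring it in via `BlockBasedTableConfig#setPersistentCache` is assumed from this changeset:

try (final Options options = new Options().setCreateIfMissing(true);
     final Logger logger = new Logger(options) {
       @Override
       protected void log(final InfoLogLevel level, final String msg) {
         System.out.println(level + ": " + msg);
       }
     };
     final PersistentCache cache = new PersistentCache(
         Env.getDefault(), "/tmp/rocks-pcache", 512 * 1024 * 1024L,
         logger, false /* optimizedForNvm */)) {
  options.setTableFormatConfig(
      new BlockBasedTableConfig().setPersistentCache(cache));
  // open the DB with these options; data blocks may then be served
  // from the persistent cache
}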
@@ -0,0 +1,25 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// org.rocksdb.AbstractTableFilter.

#include <jni.h>
#include <memory>

#include "include/org_rocksdb_AbstractTableFilter.h"
#include "rocksjni/table_filter_jnicallback.h"

/*
 * Class:     org_rocksdb_AbstractTableFilter
 * Method:    createNewTableFilter
 * Signature: ()J
 */
jlong Java_org_rocksdb_AbstractTableFilter_createNewTableFilter(
    JNIEnv* env, jobject jtable_filter) {
  auto* table_filter_jnicallback =
      new rocksdb::TableFilterJniCallback(env, jtable_filter);
  return reinterpret_cast<jlong>(table_filter_jnicallback);
}
@@ -0,0 +1,62 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::TableFilter.

#include "rocksjni/table_filter_jnicallback.h"
#include "rocksjni/portal.h"

namespace rocksdb {
TableFilterJniCallback::TableFilterJniCallback(
    JNIEnv* env, jobject jtable_filter)
    : JniCallback(env, jtable_filter) {
  m_jfilter_methodid =
      AbstractTableFilterJni::getFilterMethod(env);
  if (m_jfilter_methodid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }

  // create the function reference
  /*
     Note the JNI ENV must be obtained/released
     on each call to the function itself, as
     it may be called from multiple threads
  */
  m_table_filter_function =
      [this](const rocksdb::TableProperties& table_properties) {
    jboolean attached_thread = JNI_FALSE;
    JNIEnv* thread_env = getJniEnv(&attached_thread);
    assert(thread_env != nullptr);

    // create a Java TableProperties object
    jobject jtable_properties = TablePropertiesJni::fromCppTableProperties(
        thread_env, table_properties);
    if (jtable_properties == nullptr) {
      // exception thrown from fromCppTableProperties
      thread_env->ExceptionDescribe();  // print out exception to stderr
      releaseJniEnv(attached_thread);
      return false;
    }

    jboolean result = thread_env->CallBooleanMethod(
        m_jcallback_obj, m_jfilter_methodid, jtable_properties);
    if (thread_env->ExceptionCheck()) {
      // exception thrown from CallBooleanMethod
      thread_env->DeleteLocalRef(jtable_properties);
      thread_env->ExceptionDescribe();  // print out exception to stderr
      releaseJniEnv(attached_thread);
      return false;
    }

    // ok... cleanup and then return
    releaseJniEnv(attached_thread);
    return static_cast<bool>(result);
  };
}

std::function<bool(const rocksdb::TableProperties&)>
TableFilterJniCallback::GetTableFilterFunction() {
  return m_table_filter_function;
}

}  // namespace rocksdb
@@ -0,0 +1,34 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::TableFilter.

#ifndef JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
#define JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_

#include <jni.h>
#include <functional>
#include <memory>

#include "rocksdb/table_properties.h"
#include "rocksjni/jnicallback.h"

namespace rocksdb {

class TableFilterJniCallback : public JniCallback {
 public:
  TableFilterJniCallback(
      JNIEnv* env, jobject jtable_filter);
  std::function<bool(const rocksdb::TableProperties&)>
      GetTableFilterFunction();

 private:
  jmethodID m_jfilter_methodid;
  std::function<bool(const rocksdb::TableProperties&)> m_table_filter_function;
};

}  // namespace rocksdb

#endif  // JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
@@ -0,0 +1,121 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ and enables
// calling C++ rocksdb::ThreadStatus methods from the Java side.

#include <jni.h>

#include "portal.h"
#include "include/org_rocksdb_ThreadStatus.h"
#include "rocksdb/thread_status.h"

/*
 * Class:     org_rocksdb_ThreadStatus
 * Method:    getThreadTypeName
 * Signature: (B)Ljava/lang/String;
 */
jstring Java_org_rocksdb_ThreadStatus_getThreadTypeName(
    JNIEnv* env, jclass, jbyte jthread_type_value) {
  auto name = rocksdb::ThreadStatus::GetThreadTypeName(
      rocksdb::ThreadTypeJni::toCppThreadType(jthread_type_value));
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}

/*
 * Class:     org_rocksdb_ThreadStatus
 * Method:    getOperationName
 * Signature: (B)Ljava/lang/String;
 */
jstring Java_org_rocksdb_ThreadStatus_getOperationName(
    JNIEnv* env, jclass, jbyte joperation_type_value) {
  auto name = rocksdb::ThreadStatus::GetOperationName(
      rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value));
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}

/*
 * Class:     org_rocksdb_ThreadStatus
 * Method:    microsToStringNative
 * Signature: (J)Ljava/lang/String;
 */
jstring Java_org_rocksdb_ThreadStatus_microsToStringNative(
    JNIEnv* env, jclass, jlong jmicros) {
  auto str =
      rocksdb::ThreadStatus::MicrosToString(static_cast<uint64_t>(jmicros));
  return rocksdb::JniUtil::toJavaString(env, &str, true);
}

/*
 * Class:     org_rocksdb_ThreadStatus
 * Method:    getOperationStageName
 * Signature: (B)Ljava/lang/String;
 */
jstring Java_org_rocksdb_ThreadStatus_getOperationStageName(
    JNIEnv* env, jclass, jbyte joperation_stage_value) {
  auto name = rocksdb::ThreadStatus::GetOperationStageName(
      rocksdb::OperationStageJni::toCppOperationStage(joperation_stage_value));
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}

/*
 * Class:     org_rocksdb_ThreadStatus
 * Method:    getOperationPropertyName
 * Signature: (BI)Ljava/lang/String;
 */
jstring Java_org_rocksdb_ThreadStatus_getOperationPropertyName(
    JNIEnv* env, jclass, jbyte joperation_type_value, jint jindex) {
  auto name = rocksdb::ThreadStatus::GetOperationPropertyName(
      rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value),
      static_cast<int>(jindex));
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}

/*
 * Class:     org_rocksdb_ThreadStatus
 * Method:    interpretOperationProperties
 * Signature: (B[J)Ljava/util/Map;
 */
jobject Java_org_rocksdb_ThreadStatus_interpretOperationProperties(
    JNIEnv* env, jclass, jbyte joperation_type_value,
    jlongArray joperation_properties) {

  // convert joperation_properties
  const jsize len = env->GetArrayLength(joperation_properties);
  const std::unique_ptr<uint64_t[]> op_properties(new uint64_t[len]);
  jlong* jop = env->GetLongArrayElements(joperation_properties, nullptr);
  if (jop == nullptr) {
    // exception thrown: OutOfMemoryError
    return nullptr;
  }
  for (jsize i = 0; i < len; i++) {
    op_properties[i] = static_cast<uint64_t>(jop[i]);
  }
  env->ReleaseLongArrayElements(joperation_properties, jop, JNI_ABORT);

  // call the function
  auto result = rocksdb::ThreadStatus::InterpretOperationProperties(
      rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value),
      op_properties.get());
  jobject jresult = rocksdb::HashMapJni::fromCppMap(env, &result);
  if (env->ExceptionCheck()) {
    // exception occurred
    return nullptr;
  }

  return jresult;
}

/*
 * Class:     org_rocksdb_ThreadStatus
 * Method:    getStateName
 * Signature: (B)Ljava/lang/String;
 */
jstring Java_org_rocksdb_ThreadStatus_getStateName(
    JNIEnv* env, jclass, jbyte jstate_type_value) {
  auto name = rocksdb::ThreadStatus::GetStateName(
      rocksdb::StateTypeJni::toCppStateType(jstate_type_value));
  return rocksdb::JniUtil::toJavaString(env, &name, true);
}
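
On the Java side these static helpers decorate `ThreadStatus` objects. A sketch of dumping the background threads, assuming the `Env#getThreadList()` binding from this changeset; the instance getter names are assumptions mirroring the C++ struct fields:

for (final ThreadStatus ts : Env.getDefault().getThreadList()) {
  System.out.println(ts.getThreadId()
      + " [" + ThreadStatus.getThreadTypeName(ts.getThreadType()) + "] "
      + ThreadStatus.getOperationName(ts.getOperationType())
      + " on " + ts.getDbName() + "/" + ts.getCfName());
}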
@@ -0,0 +1,23 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::TraceWriter.

#include <jni.h>

#include "include/org_rocksdb_AbstractTraceWriter.h"
#include "rocksjni/trace_writer_jnicallback.h"

/*
 * Class:     org_rocksdb_AbstractTraceWriter
 * Method:    createNewTraceWriter
 * Signature: ()J
 */
jlong Java_org_rocksdb_AbstractTraceWriter_createNewTraceWriter(
    JNIEnv* env, jobject jobj) {
  auto* trace_writer = new rocksdb::TraceWriterJniCallback(env, jobj);
  return reinterpret_cast<jlong>(trace_writer);
}
@@ -0,0 +1,115 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::TraceWriter.

#include "rocksjni/trace_writer_jnicallback.h"
#include "rocksjni/portal.h"

namespace rocksdb {
TraceWriterJniCallback::TraceWriterJniCallback(
    JNIEnv* env, jobject jtrace_writer)
    : JniCallback(env, jtrace_writer) {
  m_jwrite_proxy_methodid =
      AbstractTraceWriterJni::getWriteProxyMethodId(env);
  if (m_jwrite_proxy_methodid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }

  m_jclose_writer_proxy_methodid =
      AbstractTraceWriterJni::getCloseWriterProxyMethodId(env);
  if (m_jclose_writer_proxy_methodid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }

  m_jget_file_size_methodid =
      AbstractTraceWriterJni::getGetFileSizeMethodId(env);
  if (m_jget_file_size_methodid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }
}

Status TraceWriterJniCallback::Write(const Slice& data) {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return Status::IOError("Unable to attach JNI Environment");
  }

  jshort jstatus = env->CallShortMethod(m_jcallback_obj,
      m_jwrite_proxy_methodid,
      &data);

  if (env->ExceptionCheck()) {
    // exception thrown from CallShortMethod
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return Status::IOError(
        "Unable to call AbstractTraceWriter#writeProxy(long)");
  }

  // unpack status code and status sub-code from jstatus
  jbyte jcode_value = (jstatus >> 8) & 0xFF;
  jbyte jsub_code_value = jstatus & 0xFF;
  std::unique_ptr<Status> s =
      StatusJni::toCppStatus(jcode_value, jsub_code_value);

  releaseJniEnv(attached_thread);

  return Status(*s);
}

Status TraceWriterJniCallback::Close() {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return Status::IOError("Unable to attach JNI Environment");
  }

  jshort jstatus = env->CallShortMethod(m_jcallback_obj,
      m_jclose_writer_proxy_methodid);

  if (env->ExceptionCheck()) {
    // exception thrown from CallShortMethod
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return Status::IOError(
        "Unable to call AbstractTraceWriter#closeWriterProxy()");
  }

  // unpack status code and status sub-code from jstatus
  jbyte code_value = (jstatus >> 8) & 0xFF;
  jbyte sub_code_value = jstatus & 0xFF;
  std::unique_ptr<Status> s =
      StatusJni::toCppStatus(code_value, sub_code_value);

  releaseJniEnv(attached_thread);

  return Status(*s);
}

uint64_t TraceWriterJniCallback::GetFileSize() {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return 0;
  }

  jlong jfile_size = env->CallLongMethod(m_jcallback_obj,
      m_jget_file_size_methodid);

  if (env->ExceptionCheck()) {
    // exception thrown from CallLongMethod
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return 0;
  }

  releaseJniEnv(attached_thread);

  return static_cast<uint64_t>(jfile_size);
}

}  // namespace rocksdb
@@ -0,0 +1,36 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::TraceWriter.

#ifndef JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
#define JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_

#include <jni.h>
#include <memory>

#include "rocksdb/trace_reader_writer.h"
#include "rocksjni/jnicallback.h"

namespace rocksdb {

class TraceWriterJniCallback : public JniCallback, public TraceWriter {
 public:
  TraceWriterJniCallback(
      JNIEnv* env, jobject jtrace_writer);
  virtual Status Write(const Slice& data);
  virtual Status Close();
  virtual uint64_t GetFileSize();

 private:
  jmethodID m_jwrite_proxy_methodid;
  jmethodID m_jclose_writer_proxy_methodid;
  jmethodID m_jget_file_size_methodid;
};

}  // namespace rocksdb

#endif  // JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
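
Putting the pieces together from Java: a do-nothing trace sink. This is a sketch that assumes the `RocksDB#trace(TraceOptions, AbstractTraceWriter)` and `endTrace()` bindings from this changeset, an already-open `db`, and Java method names (`write`, `closeWriter`, `getFileSize`) mirroring the proxy method ids resolved above:

final AbstractTraceWriter traceWriter = new AbstractTraceWriter() {
  @Override
  public void write(final Slice data) {
    // a real implementation would persist the slice somewhere durable
  }

  @Override
  public void closeWriter() {
    // flush/close the underlying sink here
  }

  @Override
  public long getFileSize() {
    return 0;  // bytes written so far
  }
};
db.trace(new TraceOptions(), traceWriter);
// ... issue the reads/writes to be traced ...
db.endTrace();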
@@ -0,0 +1,23 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::WalFilter.

#include <jni.h>

#include "include/org_rocksdb_AbstractWalFilter.h"
#include "rocksjni/wal_filter_jnicallback.h"

/*
 * Class:     org_rocksdb_AbstractWalFilter
 * Method:    createNewWalFilter
 * Signature: ()J
 */
jlong Java_org_rocksdb_AbstractWalFilter_createNewWalFilter(
    JNIEnv* env, jobject jobj) {
  auto* wal_filter = new rocksdb::WalFilterJniCallback(env, jobj);
  return reinterpret_cast<jlong>(wal_filter);
}
@@ -0,0 +1,144 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::WalFilter.

#include "rocksjni/wal_filter_jnicallback.h"
#include "rocksjni/portal.h"

namespace rocksdb {
WalFilterJniCallback::WalFilterJniCallback(
    JNIEnv* env, jobject jwal_filter)
    : JniCallback(env, jwal_filter) {
  // Note: The name of a WalFilter will not change during its lifetime,
  // so we cache it in a global var
  jmethodID jname_mid = AbstractWalFilterJni::getNameMethodId(env);
  if (jname_mid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }
  jstring jname = (jstring)env->CallObjectMethod(m_jcallback_obj, jname_mid);
  if (env->ExceptionCheck()) {
    // exception thrown
    return;
  }
  jboolean has_exception = JNI_FALSE;
  m_name = JniUtil::copyString(env, jname,
      &has_exception);  // also releases jname
  if (has_exception == JNI_TRUE) {
    // exception thrown
    return;
  }

  m_column_family_log_number_map_mid =
      AbstractWalFilterJni::getColumnFamilyLogNumberMapMethodId(env);
  if (m_column_family_log_number_map_mid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }

  m_log_record_found_proxy_mid =
      AbstractWalFilterJni::getLogRecordFoundProxyMethodId(env);
  if (m_log_record_found_proxy_mid == nullptr) {
    // exception thrown: NoSuchMethodException or OutOfMemoryError
    return;
  }
}

void WalFilterJniCallback::ColumnFamilyLogNumberMap(
    const std::map<uint32_t, uint64_t>& cf_lognumber_map,
    const std::map<std::string, uint32_t>& cf_name_id_map) {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return;
  }

  jobject jcf_lognumber_map =
      rocksdb::HashMapJni::fromCppMap(env, &cf_lognumber_map);
  if (jcf_lognumber_map == nullptr) {
    // exception occurred
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return;
  }

  jobject jcf_name_id_map =
      rocksdb::HashMapJni::fromCppMap(env, &cf_name_id_map);
  if (jcf_name_id_map == nullptr) {
    // exception occurred
    env->ExceptionDescribe();  // print out exception to stderr
    env->DeleteLocalRef(jcf_lognumber_map);
    releaseJniEnv(attached_thread);
    return;
  }

  env->CallVoidMethod(m_jcallback_obj,
      m_column_family_log_number_map_mid,
      jcf_lognumber_map,
      jcf_name_id_map);

  env->DeleteLocalRef(jcf_lognumber_map);
  env->DeleteLocalRef(jcf_name_id_map);

  if (env->ExceptionCheck()) {
    // exception thrown from CallVoidMethod
    env->ExceptionDescribe();  // print out exception to stderr
  }

  releaseJniEnv(attached_thread);
}

WalFilter::WalProcessingOption WalFilterJniCallback::LogRecordFound(
    unsigned long long log_number, const std::string& log_file_name,
    const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = getJniEnv(&attached_thread);
  if (env == nullptr) {
    return WalFilter::WalProcessingOption::kCorruptedRecord;
  }

  jstring jlog_file_name = JniUtil::toJavaString(env, &log_file_name);
  if (jlog_file_name == nullptr) {
    // exception occurred
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return WalFilter::WalProcessingOption::kCorruptedRecord;
  }

  jshort jlog_record_found_result = env->CallShortMethod(m_jcallback_obj,
      m_log_record_found_proxy_mid,
      static_cast<jlong>(log_number),
      jlog_file_name,
      reinterpret_cast<jlong>(&batch),
      reinterpret_cast<jlong>(new_batch));

  env->DeleteLocalRef(jlog_file_name);

  if (env->ExceptionCheck()) {
    // exception thrown from CallShortMethod
    env->ExceptionDescribe();  // print out exception to stderr
    releaseJniEnv(attached_thread);
    return WalFilter::WalProcessingOption::kCorruptedRecord;
  }

  // unpack WalProcessingOption and batch_changed from
  // jlog_record_found_result
  jbyte jwal_processing_option_value = (jlog_record_found_result >> 8) & 0xFF;
  jbyte jbatch_changed_value = jlog_record_found_result & 0xFF;

  releaseJniEnv(attached_thread);

  *batch_changed = jbatch_changed_value == JNI_TRUE;

  return WalProcessingOptionJni::toCppWalProcessingOption(
      jwal_processing_option_value);
}

const char* WalFilterJniCallback::Name() const {
  return m_name.get();
}

}  // namespace rocksdb
@@ -0,0 +1,42 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// This file implements the callback "bridge" between Java and C++ for
// rocksdb::WalFilter.

#ifndef JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
#define JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_

#include <jni.h>
#include <map>
#include <memory>
#include <string>

#include "rocksdb/wal_filter.h"
#include "rocksjni/jnicallback.h"

namespace rocksdb {

class WalFilterJniCallback : public JniCallback, public WalFilter {
 public:
  WalFilterJniCallback(
      JNIEnv* env, jobject jwal_filter);
  virtual void ColumnFamilyLogNumberMap(
      const std::map<uint32_t, uint64_t>& cf_lognumber_map,
      const std::map<std::string, uint32_t>& cf_name_id_map);
  virtual WalFilter::WalProcessingOption LogRecordFound(
      unsigned long long log_number, const std::string& log_file_name,
      const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed);
  virtual const char* Name() const;

 private:
  std::unique_ptr<const char[]> m_name;
  jmethodID m_column_family_log_number_map_mid;
  jmethodID m_log_record_found_proxy_mid;
};

}  // namespace rocksdb

#endif  // JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
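
A Java filter that keeps every WAL record unchanged, wired in via `Options#setWalFilter` before opening the DB. This is a sketch: the `LogRecordFoundResult` and `WalProcessingOption` names are assumptions mirroring the packed short unpacked in `LogRecordFound` above.

final AbstractWalFilter walFilter = new AbstractWalFilter() {
  @Override
  public void columnFamilyLogNumberMap(
      final Map<Integer, Long> cfLognumber,
      final Map<String, Integer> cfNameId) {
    // remember the mapping here if the filter needs it later
  }

  @Override
  public LogRecordFoundResult logRecordFound(
      final long logNumber, final String logFileName,
      final WriteBatch batch, final WriteBatch newBatch) {
    // pass every record through untouched
    return new LogRecordFoundResult(
        WalProcessingOption.CONTINUE_PROCESSING, false /* batchChanged */);
  }

  @Override
  public String name() {
    return "example-wal-filter";
  }
};
options.setWalFilter(walFilter);  // before opening the DB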
@ -0,0 +1,254 @@ |
||||
package org.rocksdb; |
||||
|
||||
import java.util.*; |
||||
|
||||
public abstract class AbstractMutableOptions { |
||||
|
||||
protected static final String KEY_VALUE_PAIR_SEPARATOR = ";"; |
||||
protected static final char KEY_VALUE_SEPARATOR = '='; |
||||
static final String INT_ARRAY_INT_SEPARATOR = ","; |
||||
|
||||
protected final String[] keys; |
||||
private final String[] values; |
||||
|
||||
/** |
||||
* User must use builder pattern, or parser. |
||||
* |
||||
* @param keys the keys |
||||
* @param values the values |
||||
*/ |
||||
protected AbstractMutableOptions(final String[] keys, final String[] values) { |
||||
this.keys = keys; |
||||
this.values = values; |
||||
} |
||||
|
||||
String[] getKeys() { |
||||
return keys; |
||||
} |
||||
|
||||
String[] getValues() { |
||||
return values; |
||||
} |
||||
|
||||
/** |
||||
* Returns a string representation of MutableOptions which |
||||
* is suitable for consumption by {@code #parse(String)}. |
||||
* |
||||
* @return String representation of MutableOptions |
||||
*/ |
||||
@Override |
||||
public String toString() { |
||||
final StringBuilder buffer = new StringBuilder(); |
||||
for(int i = 0; i < keys.length; i++) { |
||||
buffer |
||||
.append(keys[i]) |
||||
.append(KEY_VALUE_SEPARATOR) |
||||
.append(values[i]); |
||||
|
||||
if(i + 1 < keys.length) { |
||||
buffer.append(KEY_VALUE_PAIR_SEPARATOR); |
||||
} |
||||
} |
||||
return buffer.toString(); |
||||
} |
||||
|
||||
public static abstract class AbstractMutableOptionsBuilder< |
||||
T extends AbstractMutableOptions, |
||||
U extends AbstractMutableOptionsBuilder<T, U, K>, |
||||
K extends MutableOptionKey> { |
||||
|
||||
private final Map<K, MutableOptionValue<?>> options = new LinkedHashMap<>(); |
||||
|
||||
protected abstract U self(); |
||||
|
||||
/** |
||||
* Get all of the possible keys |
||||
* |
||||
* @return A map of all keys, indexed by name. |
||||
*/ |
||||
protected abstract Map<String, K> allKeys(); |
||||
|
||||
/** |
||||
* Construct a sub-class instance of {@link AbstractMutableOptions}. |
||||
* |
||||
* @param keys the keys |
||||
* @param values the values |
||||
* |
||||
* @return an instance of the options. |
||||
*/ |
||||
protected abstract T build(final String[] keys, final String[] values); |
||||
|
||||
public T build() { |
||||
final String keys[] = new String[options.size()]; |
||||
final String values[] = new String[options.size()]; |
||||
|
||||
int i = 0; |
||||
for (final Map.Entry<K, MutableOptionValue<?>> option : options.entrySet()) { |
||||
        keys[i] = option.getKey().name();
        values[i] = option.getValue().asString();
        i++;
      }

      return build(keys, values);
    }

    protected U setDouble(
        final K key, final double value) {
      if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) {
        throw new IllegalArgumentException(
            key + " does not accept a double value");
      }
      options.put(key, MutableOptionValue.fromDouble(value));
      return self();
    }

    protected double getDouble(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asDouble();
    }

    protected U setLong(
        final K key, final long value) {
      if (key.getValueType() != MutableOptionKey.ValueType.LONG) {
        throw new IllegalArgumentException(
            key + " does not accept a long value");
      }
      options.put(key, MutableOptionValue.fromLong(value));
      return self();
    }

    protected long getLong(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asLong();
    }

    protected U setInt(
        final K key, final int value) {
      if (key.getValueType() != MutableOptionKey.ValueType.INT) {
        throw new IllegalArgumentException(
            key + " does not accept an integer value");
      }
      options.put(key, MutableOptionValue.fromInt(value));
      return self();
    }

    protected int getInt(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asInt();
    }

    protected U setBoolean(
        final K key, final boolean value) {
      if (key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) {
        throw new IllegalArgumentException(
            key + " does not accept a boolean value");
      }
      options.put(key, MutableOptionValue.fromBoolean(value));
      return self();
    }

    protected boolean getBoolean(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asBoolean();
    }

    protected U setIntArray(
        final K key, final int[] value) {
      if (key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) {
        throw new IllegalArgumentException(
            key + " does not accept an int array value");
      }
      options.put(key, MutableOptionValue.fromIntArray(value));
      return self();
    }

    protected int[] getIntArray(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }
      return value.asIntArray();
    }

    protected <N extends Enum<N>> U setEnum(
        final K key, final N value) {
      if (key.getValueType() != MutableOptionKey.ValueType.ENUM) {
        throw new IllegalArgumentException(
            key + " does not accept an Enum value");
      }
      options.put(key, MutableOptionValue.fromEnum(value));
      return self();
    }

    protected <N extends Enum<N>> N getEnum(final K key)
        throws NoSuchElementException, NumberFormatException {
      final MutableOptionValue<?> value = options.get(key);
      if (value == null) {
        throw new NoSuchElementException(key.name() + " has not been set");
      }

      if (!(value instanceof MutableOptionValue.MutableOptionEnumValue)) {
        throw new NoSuchElementException(key.name() + " is not of Enum type");
      }

      return ((MutableOptionValue.MutableOptionEnumValue<N>) value).asObject();
    }

    public U fromString(
        final String keyStr, final String valueStr)
        throws IllegalArgumentException {
      Objects.requireNonNull(keyStr);
      Objects.requireNonNull(valueStr);

      final K key = allKeys().get(keyStr);
      if (key == null) {
        // guard against option names which are not known to this builder
        throw new IllegalArgumentException("Unknown option key: " + keyStr);
      }
      switch (key.getValueType()) {
        case DOUBLE:
          return setDouble(key, Double.parseDouble(valueStr));

        case LONG:
          return setLong(key, Long.parseLong(valueStr));

        case INT:
          return setInt(key, Integer.parseInt(valueStr));

        case BOOLEAN:
          return setBoolean(key, Boolean.parseBoolean(valueStr));

        case INT_ARRAY:
          final String[] strInts = valueStr
              .trim().split(INT_ARRAY_INT_SEPARATOR);
          if (strInts.length == 0) {
            throw new IllegalArgumentException(
                "int array value is not correctly formatted");
          }

          final int[] value = new int[strInts.length];
          int i = 0;
          for (final String strInt : strInts) {
            value[i++] = Integer.parseInt(strInt);
          }
          return setIntArray(key, value);
      }

      throw new IllegalStateException(
          key + " has unknown value type: " + key.getValueType());
    }
  }
}
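A minimal usage sketch of the typed accessors above (not part of this patch; it uses the MutableDBOptions keys defined later in this PR): the setters validate the key's declared ValueType, and fromString routes a raw string through the matching parser.

// Hypothetical sketch: fromString dispatches on the key's ValueType,
// so a non-numeric string for a LONG key fails in Long.parseLong.
final MutableDBOptions.MutableDBOptionsBuilder builder =
    MutableDBOptions.builder();
builder.setMaxOpenFiles(1000); // max_open_files is ValueType.INT
try {
  builder.fromString("max_total_wal_size", "notANumber"); // ValueType.LONG
} catch (final NumberFormatException e) {
  // Long.parseLong("notANumber") throws before setLong is reached
}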
@@ -0,0 +1,19 @@
package org.rocksdb;

/**
 * Base class for Table Filters.
 */
public abstract class AbstractTableFilter
    extends RocksCallbackObject implements TableFilter {

  protected AbstractTableFilter() {
    super();
  }

  @Override
  protected long initializeNative(final long... nativeParameterHandles) {
    return createNewTableFilter();
  }

  private native long createNewTableFilter();
}
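For illustration, a concrete subclass might look like the sketch below; it assumes the TableFilter interface added elsewhere in this PR exposes a single filter(ReadOptions, TableProperties) callback and that TableProperties offers getNumEntries().

// Sketch of a concrete table filter (assumptions noted above):
// returning true scans the table, false skips it during iteration.
public class SkipEmptyTablesFilter extends AbstractTableFilter {
  @Override
  public boolean filter(final ReadOptions readOptions,
      final TableProperties tableProperties) {
    return tableProperties.getNumEntries() > 0;
  }
}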
@@ -0,0 +1,70 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

/**
 * Base class for TraceWriters.
 */
public abstract class AbstractTraceWriter
    extends RocksCallbackObject implements TraceWriter {

  @Override
  protected long initializeNative(final long... nativeParameterHandles) {
    return createNewTraceWriter();
  }

  /**
   * Called from JNI, proxy for {@link TraceWriter#write(Slice)}.
   *
   * @param sliceHandle the native handle of the slice (which we do not own)
   *
   * @return short (2 bytes) where the first byte is the
   *     {@link Status.Code#getValue()} and the second byte is the
   *     {@link Status.SubCode#getValue()}.
   */
  private short writeProxy(final long sliceHandle) {
    try {
      write(new Slice(sliceHandle));
      return statusToShort(Status.Code.Ok, Status.SubCode.None);
    } catch (final RocksDBException e) {
      return statusToShort(e.getStatus());
    }
  }

  /**
   * Called from JNI, proxy for {@link TraceWriter#closeWriter()}.
   *
   * @return short (2 bytes) where the first byte is the
   *     {@link Status.Code#getValue()} and the second byte is the
   *     {@link Status.SubCode#getValue()}.
   */
  private short closeWriterProxy() {
    try {
      closeWriter();
      return statusToShort(Status.Code.Ok, Status.SubCode.None);
    } catch (final RocksDBException e) {
      return statusToShort(e.getStatus());
    }
  }

  private static short statusToShort(/*@Nullable*/ final Status status) {
    final Status.Code code = status != null && status.getCode() != null
        ? status.getCode()
        : Status.Code.IOError;
    final Status.SubCode subCode = status != null && status.getSubCode() != null
        ? status.getSubCode()
        : Status.SubCode.None;
    return statusToShort(code, subCode);
  }

  private static short statusToShort(final Status.Code code,
      final Status.SubCode subCode) {
    // pack the code into the high byte and the sub-code into the low byte
    final short result = (short) (code.getValue() << 8);
    return (short) (result | subCode.getValue());
  }

  private native long createNewTraceWriter();
}
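As a rough sketch of how the proxies above are exercised, a trace writer that buffers in memory could look like this (assuming TraceWriter declares write(Slice), closeWriter() and getFileSize(), as elsewhere in this PR):

// Sketch only; interface assumptions noted above.
public class InMemoryTraceWriter extends AbstractTraceWriter {
  private final java.io.ByteArrayOutputStream buffer =
      new java.io.ByteArrayOutputStream();

  @Override
  public void write(final Slice data) throws RocksDBException {
    final byte[] bytes = data.data();
    buffer.write(bytes, 0, bytes.length); // append the trace record
  }

  @Override
  public void closeWriter() throws RocksDBException {
    // nothing to flush for an in-memory buffer
  }

  @Override
  public long getFileSize() {
    return buffer.size();
  }
}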
@@ -0,0 +1,49 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

/**
 * Base class for WAL Filters.
 */
public abstract class AbstractWalFilter
    extends RocksCallbackObject implements WalFilter {

  @Override
  protected long initializeNative(final long... nativeParameterHandles) {
    return createNewWalFilter();
  }

  /**
   * Called from JNI, proxy for
   * {@link WalFilter#logRecordFound(long, String, WriteBatch, WriteBatch)}.
   *
   * @param logNumber the log handle.
   * @param logFileName the log file name
   * @param batchHandle the native handle of a WriteBatch (which we do not own)
   * @param newBatchHandle the native handle of a
   *     new WriteBatch (which we do not own)
   *
   * @return short (2 bytes) where the first byte is the
   *     {@link WalFilter.LogRecordFoundResult#walProcessingOption} and the
   *     second byte is the
   *     {@link WalFilter.LogRecordFoundResult#batchChanged} flag.
   */
  private short logRecordFoundProxy(final long logNumber,
      final String logFileName, final long batchHandle,
      final long newBatchHandle) {
    final LogRecordFoundResult logRecordFoundResult = logRecordFound(
        logNumber, logFileName, new WriteBatch(batchHandle),
        new WriteBatch(newBatchHandle));
    return logRecordFoundResultToShort(logRecordFoundResult);
  }

  private static short logRecordFoundResultToShort(
      final LogRecordFoundResult logRecordFoundResult) {
    // pack the processing option into the high byte and the
    // batch-changed flag into the low byte
    final short result =
        (short) (logRecordFoundResult.walProcessingOption.getValue() << 8);
    return (short) (result | (logRecordFoundResult.batchChanged ? 1 : 0));
  }

  private native long createNewWalFilter();
}
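For clarity, the packed short above is decoded on the native side; its layout can be demonstrated in isolation:

// Illustrative decode of the packing used by logRecordFoundResultToShort:
// high byte = WalProcessingOption value, low byte = batch-changed flag.
final short packed = (short) ((0x01 << 8) | 1);
final byte walProcessingOptionValue = (byte) ((packed >> 8) & 0xFF);
final boolean batchChanged = (packed & 0xFF) == 1;
assert walProcessingOptionValue == 0x01 && batchChanged;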
File diff suppressed because it is too large
@@ -0,0 +1,70 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

import java.util.Arrays;
import java.util.List;

/**
 * The metadata that describes a column family.
 */
public class ColumnFamilyMetaData {
  private final long size;
  private final long fileCount;
  private final byte[] name;
  private final LevelMetaData[] levels;

  /**
   * Called from JNI C++
   */
  private ColumnFamilyMetaData(
      final long size,
      final long fileCount,
      final byte[] name,
      final LevelMetaData[] levels) {
    this.size = size;
    this.fileCount = fileCount;
    this.name = name;
    this.levels = levels;
  }

  /**
   * The size of this column family in bytes, which is equal to the sum of
   * the file sizes of its {@link #levels()}.
   *
   * @return the size of this column family
   */
  public long size() {
    return size;
  }

  /**
   * The number of files in this column family.
   *
   * @return the number of files
   */
  public long fileCount() {
    return fileCount;
  }

  /**
   * The name of the column family.
   *
   * @return the name
   */
  public byte[] name() {
    return name;
  }

  /**
   * The metadata of all levels in this column family.
   *
   * @return the levels metadata
   */
  public List<LevelMetaData> levels() {
    return Arrays.asList(levels);
  }
}
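A short traversal sketch; it assumes the getColumnFamilyMetaData() accessor that the (suppressed) RocksDB.java diff in this PR wires up:

// Sketch, under the stated assumption about RocksDB#getColumnFamilyMetaData():
try (final RocksDB db = RocksDB.open("/tmp/testdb")) {
  final ColumnFamilyMetaData cfMeta = db.getColumnFamilyMetaData();
  System.out.println(new String(cfMeta.name()) + ": "
      + cfMeta.fileCount() + " files, " + cfMeta.size() + " bytes");
  for (final LevelMetaData level : cfMeta.levels()) {
    System.out.println("  level " + level.level() + ": "
        + level.files().size() + " files, " + level.size() + " bytes");
  }
} catch (final RocksDBException e) {
  e.printStackTrace();
}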
@@ -0,0 +1,159 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

public class CompactionJobInfo extends RocksObject {

  public CompactionJobInfo() {
    super(newCompactionJobInfo());
  }

  /**
   * Private as called from JNI C++
   */
  private CompactionJobInfo(final long nativeHandle) {
    super(nativeHandle);
  }

  /**
   * Get the name of the column family where the compaction happened.
   *
   * @return the name of the column family
   */
  public byte[] columnFamilyName() {
    return columnFamilyName(nativeHandle_);
  }

  /**
   * Get the status indicating whether the compaction was successful or not.
   *
   * @return the status
   */
  public Status status() {
    return status(nativeHandle_);
  }

  /**
   * Get the id of the thread that completed this compaction job.
   *
   * @return the id of the thread
   */
  public long threadId() {
    return threadId(nativeHandle_);
  }

  /**
   * Get the job id, which is unique in the same thread.
   *
   * @return the id of the job
   */
  public int jobId() {
    return jobId(nativeHandle_);
  }

  /**
   * Get the smallest input level of the compaction.
   *
   * @return the input level
   */
  public int baseInputLevel() {
    return baseInputLevel(nativeHandle_);
  }

  /**
   * Get the output level of the compaction.
   *
   * @return the output level
   */
  public int outputLevel() {
    return outputLevel(nativeHandle_);
  }

  /**
   * Get the names of the compaction input files.
   *
   * @return the names of the input files.
   */
  public List<String> inputFiles() {
    return Arrays.asList(inputFiles(nativeHandle_));
  }

  /**
   * Get the names of the compaction output files.
   *
   * @return the names of the output files.
   */
  public List<String> outputFiles() {
    return Arrays.asList(outputFiles(nativeHandle_));
  }

  /**
   * Get the table properties for the input and output tables.
   *
   * The map is keyed by values from {@link #inputFiles()} and
   * {@link #outputFiles()}.
   *
   * @return the table properties
   */
  public Map<String, TableProperties> tableProperties() {
    return tableProperties(nativeHandle_);
  }

  /**
   * Get the reason for running the compaction.
   *
   * @return the reason.
   */
  public CompactionReason compactionReason() {
    return CompactionReason.fromValue(compactionReason(nativeHandle_));
  }

  /**
   * Get the compression algorithm used for output files.
   *
   * @return the compression algorithm
   */
  public CompressionType compression() {
    return CompressionType.getCompressionType(compression(nativeHandle_));
  }

  /**
   * Get detailed information about this compaction.
   *
   * @return the detailed information, or null if not available.
   */
  public /* @Nullable */ CompactionJobStats stats() {
    final long statsHandle = stats(nativeHandle_);
    if (statsHandle == 0) {
      return null;
    }

    return new CompactionJobStats(statsHandle);
  }


  private static native long newCompactionJobInfo();
  @Override protected native void disposeInternal(final long handle);

  private static native byte[] columnFamilyName(final long handle);
  private static native Status status(final long handle);
  private static native long threadId(final long handle);
  private static native int jobId(final long handle);
  private static native int baseInputLevel(final long handle);
  private static native int outputLevel(final long handle);
  private static native String[] inputFiles(final long handle);
  private static native String[] outputFiles(final long handle);
  private static native Map<String, TableProperties> tableProperties(
      final long handle);
  private static native byte compactionReason(final long handle);
  private static native byte compression(final long handle);
  private static native long stats(final long handle);
}
@@ -0,0 +1,295 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

public class CompactionJobStats extends RocksObject {

  public CompactionJobStats() {
    super(newCompactionJobStats());
  }

  /**
   * Private as called from JNI C++
   */
  CompactionJobStats(final long nativeHandle) {
    super(nativeHandle);
  }

  /**
   * Reset the stats.
   */
  public void reset() {
    reset(nativeHandle_);
  }

  /**
   * Aggregate the CompactionJobStats from another instance with this one.
   *
   * @param compactionJobStats another instance of stats.
   */
  public void add(final CompactionJobStats compactionJobStats) {
    add(nativeHandle_, compactionJobStats.nativeHandle_);
  }

  /**
   * Get the elapsed time of this compaction in microseconds.
   *
   * @return the elapsed time of this compaction in microseconds.
   */
  public long elapsedMicros() {
    return elapsedMicros(nativeHandle_);
  }

  /**
   * Get the number of compaction input records.
   *
   * @return the number of compaction input records.
   */
  public long numInputRecords() {
    return numInputRecords(nativeHandle_);
  }

  /**
   * Get the number of compaction input files.
   *
   * @return the number of compaction input files.
   */
  public long numInputFiles() {
    return numInputFiles(nativeHandle_);
  }

  /**
   * Get the number of compaction input files at the output level.
   *
   * @return the number of compaction input files at the output level.
   */
  public long numInputFilesAtOutputLevel() {
    return numInputFilesAtOutputLevel(nativeHandle_);
  }

  /**
   * Get the number of compaction output records.
   *
   * @return the number of compaction output records.
   */
  public long numOutputRecords() {
    return numOutputRecords(nativeHandle_);
  }

  /**
   * Get the number of compaction output files.
   *
   * @return the number of compaction output files.
   */
  public long numOutputFiles() {
    return numOutputFiles(nativeHandle_);
  }

  /**
   * Determine if the compaction is a manual compaction.
   *
   * @return true if the compaction is a manual compaction, false otherwise.
   */
  public boolean isManualCompaction() {
    return isManualCompaction(nativeHandle_);
  }

  /**
   * Get the size of the compaction input in bytes.
   *
   * @return the size of the compaction input in bytes.
   */
  public long totalInputBytes() {
    return totalInputBytes(nativeHandle_);
  }

  /**
   * Get the size of the compaction output in bytes.
   *
   * @return the size of the compaction output in bytes.
   */
  public long totalOutputBytes() {
    return totalOutputBytes(nativeHandle_);
  }

  /**
   * Get the number of records replaced by a newer record with the same key.
   *
   * This could be a new value or a deletion entry for that key, so this field
   * sums up all updated and deleted keys.
   *
   * @return the number of records replaced by a newer record with the
   *     same key.
   */
  public long numRecordsReplaced() {
    return numRecordsReplaced(nativeHandle_);
  }

  /**
   * Get the sum of the uncompressed input keys in bytes.
   *
   * @return the sum of the uncompressed input keys in bytes.
   */
  public long totalInputRawKeyBytes() {
    return totalInputRawKeyBytes(nativeHandle_);
  }

  /**
   * Get the sum of the uncompressed input values in bytes.
   *
   * @return the sum of the uncompressed input values in bytes.
   */
  public long totalInputRawValueBytes() {
    return totalInputRawValueBytes(nativeHandle_);
  }

  /**
   * Get the number of deletion entries before compaction.
   *
   * Deletion entries can disappear after compaction because they have expired.
   *
   * @return the number of deletion entries before compaction.
   */
  public long numInputDeletionRecords() {
    return numInputDeletionRecords(nativeHandle_);
  }

  /**
   * Get the number of deletion records that were found obsolete and discarded
   * because it is not possible to delete any more keys with this entry
   * (i.e. all possible deletions resulting from it have been completed).
   *
   * @return the number of deletion records that were found obsolete and
   *     discarded.
   */
  public long numExpiredDeletionRecords() {
    return numExpiredDeletionRecords(nativeHandle_);
  }

  /**
   * Get the number of corrupt keys (ParseInternalKey returned false when
   * applied to the key) encountered and written out.
   *
   * @return the number of corrupt keys.
   */
  public long numCorruptKeys() {
    return numCorruptKeys(nativeHandle_);
  }

  /**
   * Get the time spent on the file's Append() call.
   *
   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
   *
   * @return the time spent on the file's Append() call.
   */
  public long fileWriteNanos() {
    return fileWriteNanos(nativeHandle_);
  }

  /**
   * Get the time spent on sync file range.
   *
   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
   *
   * @return the time spent on sync file range.
   */
  public long fileRangeSyncNanos() {
    return fileRangeSyncNanos(nativeHandle_);
  }

  /**
   * Get the time spent on file fsync.
   *
   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
   *
   * @return the time spent on file fsync.
   */
  public long fileFsyncNanos() {
    return fileFsyncNanos(nativeHandle_);
  }

  /**
   * Get the time spent on preparing file write (fallocate, etc.)
   *
   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
   *
   * @return the time spent on preparing file write (fallocate, etc.).
   */
  public long filePrepareWriteNanos() {
    return filePrepareWriteNanos(nativeHandle_);
  }

  /**
   * Get the smallest output key prefix.
   *
   * @return the smallest output key prefix.
   */
  public byte[] smallestOutputKeyPrefix() {
    return smallestOutputKeyPrefix(nativeHandle_);
  }

  /**
   * Get the largest output key prefix.
   *
   * @return the largest output key prefix.
   */
  public byte[] largestOutputKeyPrefix() {
    return largestOutputKeyPrefix(nativeHandle_);
  }

  /**
   * Get the number of single-deletes which do not meet a put.
   *
   * @return number of single-deletes which do not meet a put.
   */
  @Experimental("Performance optimization for a very specific workload")
  public long numSingleDelFallthru() {
    return numSingleDelFallthru(nativeHandle_);
  }

  /**
   * Get the number of single-deletes which meet something other than a put.
   *
   * @return the number of single-deletes which meet something other than
   *     a put.
   */
  @Experimental("Performance optimization for a very specific workload")
  public long numSingleDelMismatch() {
    return numSingleDelMismatch(nativeHandle_);
  }

  private static native long newCompactionJobStats();
  @Override protected native void disposeInternal(final long handle);


  private static native void reset(final long handle);
  private static native void add(final long handle,
      final long compactionJobStatsHandle);
  private static native long elapsedMicros(final long handle);
  private static native long numInputRecords(final long handle);
  private static native long numInputFiles(final long handle);
  private static native long numInputFilesAtOutputLevel(final long handle);
  private static native long numOutputRecords(final long handle);
  private static native long numOutputFiles(final long handle);
  private static native boolean isManualCompaction(final long handle);
  private static native long totalInputBytes(final long handle);
  private static native long totalOutputBytes(final long handle);
  private static native long numRecordsReplaced(final long handle);
  private static native long totalInputRawKeyBytes(final long handle);
  private static native long totalInputRawValueBytes(final long handle);
  private static native long numInputDeletionRecords(final long handle);
  private static native long numExpiredDeletionRecords(final long handle);
  private static native long numCorruptKeys(final long handle);
  private static native long fileWriteNanos(final long handle);
  private static native long fileRangeSyncNanos(final long handle);
  private static native long fileFsyncNanos(final long handle);
  private static native long filePrepareWriteNanos(final long handle);
  private static native byte[] smallestOutputKeyPrefix(final long handle);
  private static native byte[] largestOutputKeyPrefix(final long handle);
  private static native long numSingleDelFallthru(final long handle);
  private static native long numSingleDelMismatch(final long handle);
}
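Because add() folds one instance into another, per-job stats can be rolled up into a running total; a sketch (the source of the per-job instances is hypothetical, e.g. collected from CompactionJobInfo#stats()):

// Sketch: aggregate per-job stats into one total.
final CompactionJobStats total = new CompactionJobStats();
for (final CompactionJobStats perJob : perJobStats) { // hypothetical list
  total.add(perJob);
}
System.out.println("total input bytes: " + total.totalInputBytes());
total.reset(); // start a fresh aggregation window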
@@ -0,0 +1,121 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

import java.util.List;

/**
 * CompactionOptions are used in
 * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}
 * calls.
 */
public class CompactionOptions extends RocksObject {

  public CompactionOptions() {
    super(newCompactionOptions());
  }

  /**
   * Get the compaction output compression type.
   *
   * See {@link #setCompression(CompressionType)}.
   *
   * @return the compression type.
   */
  public CompressionType compression() {
    return CompressionType.getCompressionType(
        compression(nativeHandle_));
  }

  /**
   * Set the compaction output compression type.
   *
   * Default: snappy
   *
   * If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION},
   * RocksDB will choose the compression type according to the
   * {@link ColumnFamilyOptions#compressionType()}, taking into account
   * the output level if {@link ColumnFamilyOptions#compressionPerLevel()}
   * is specified.
   *
   * @param compression the compression type to use for compaction output.
   *
   * @return the instance of the current Options.
   */
  public CompactionOptions setCompression(final CompressionType compression) {
    setCompression(nativeHandle_, compression.getValue());
    return this;
  }

  /**
   * Get the compaction output file size limit.
   *
   * See {@link #setOutputFileSizeLimit(long)}.
   *
   * @return the file size limit.
   */
  public long outputFileSizeLimit() {
    return outputFileSizeLimit(nativeHandle_);
  }

  /**
   * Compaction will create files of size {@link #outputFileSizeLimit()}.
   *
   * Default: 2^64-1, which means that compaction will create a single file
   *
   * @param outputFileSizeLimit the size limit
   *
   * @return the instance of the current Options.
   */
  public CompactionOptions setOutputFileSizeLimit(
      final long outputFileSizeLimit) {
    setOutputFileSizeLimit(nativeHandle_, outputFileSizeLimit);
    return this;
  }

  /**
   * Get the maximum number of threads that will concurrently perform a
   * compaction job.
   *
   * @return the maximum number of threads.
   */
  public int maxSubcompactions() {
    return maxSubcompactions(nativeHandle_);
  }

  /**
   * This value represents the maximum number of threads that will
   * concurrently perform a compaction job by breaking it into multiple,
   * smaller ones that are run simultaneously.
   *
   * Default: 0 (i.e. no subcompactions)
   *
   * If > 0, it will replace the option in
   * {@link DBOptions#maxSubcompactions()} for this compaction.
   *
   * @param maxSubcompactions The maximum number of threads that will
   *     concurrently perform a compaction job
   *
   * @return the instance of the current Options.
   */
  public CompactionOptions setMaxSubcompactions(final int maxSubcompactions) {
    setMaxSubcompactions(nativeHandle_, maxSubcompactions);
    return this;
  }

  private static native long newCompactionOptions();
  @Override protected final native void disposeInternal(final long handle);

  private static native byte compression(final long handle);
  private static native void setCompression(final long handle,
      final byte compressionTypeValue);
  private static native long outputFileSizeLimit(final long handle);
  private static native void setOutputFileSizeLimit(final long handle,
      final long outputFileSizeLimit);
  private static native int maxSubcompactions(final long handle);
  private static native void setMaxSubcompactions(final long handle,
      final int maxSubcompactions);
}
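A usage sketch of the compactFiles overload referenced in the class Javadoc above (the input file names and the output path id are hypothetical placeholders):

try (final Options options = new Options().setCreateIfMissing(true);
     final RocksDB db = RocksDB.open(options, "/tmp/testdb");
     final CompactionOptions compactionOptions = new CompactionOptions()
         .setCompression(CompressionType.LZ4_COMPRESSION)
         .setMaxSubcompactions(2);
     final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
  db.compactFiles(compactionOptions, db.getDefaultColumnFamily(),
      java.util.Arrays.asList("/000010.sst", "/000011.sst"), // hypothetical
      1 /* output level */, -1 /* output path id */, compactionJobInfo);
  System.out.println("compaction reason: "
      + compactionJobInfo.compactionReason());
} catch (final RocksDBException e) {
  e.printStackTrace();
}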
@@ -0,0 +1,115 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

public enum CompactionReason {
  kUnknown((byte) 0x0),

  /**
   * [Level] number of L0 files > level0_file_num_compaction_trigger
   */
  kLevelL0FilesNum((byte) 0x1),

  /**
   * [Level] total size of level > MaxBytesForLevel()
   */
  kLevelMaxLevelSize((byte) 0x2),

  /**
   * [Universal] Compacting for size amplification
   */
  kUniversalSizeAmplification((byte) 0x3),

  /**
   * [Universal] Compacting for size ratio
   */
  kUniversalSizeRatio((byte) 0x4),

  /**
   * [Universal] number of sorted runs > level0_file_num_compaction_trigger
   */
  kUniversalSortedRunNum((byte) 0x5),

  /**
   * [FIFO] total size > max_table_files_size
   */
  kFIFOMaxSize((byte) 0x6),

  /**
   * [FIFO] reduce number of files.
   */
  kFIFOReduceNumFiles((byte) 0x7),

  /**
   * [FIFO] files with creation time < (current_time - interval)
   */
  kFIFOTtl((byte) 0x8),

  /**
   * Manual compaction
   */
  kManualCompaction((byte) 0x9),

  /**
   * DB::SuggestCompactRange() marked files for compaction
   */
  kFilesMarkedForCompaction((byte) 0x10),

  /**
   * [Level] Automatic compaction within bottommost level to cleanup duplicate
   * versions of same user key, usually due to a released snapshot.
   */
  kBottommostFiles((byte) 0x0A),

  /**
   * Compaction based on TTL
   */
  kTtl((byte) 0x0B),

  /**
   * According to the comments in flush_job.cc, RocksDB treats flush as
   * a level 0 compaction in internal stats.
   */
  kFlush((byte) 0x0C),

  /**
   * Compaction caused by external sst file ingestion
   */
  kExternalSstIngestion((byte) 0x0D);

  private final byte value;

  CompactionReason(final byte value) {
    this.value = value;
  }

  /**
   * Get the internal representation value.
   *
   * @return the internal representation value
   */
  byte getValue() {
    return value;
  }

  /**
   * Get the CompactionReason from the internal representation value.
   *
   * @return the compaction reason.
   *
   * @throws IllegalArgumentException if the value is unknown.
   */
  static CompactionReason fromValue(final byte value) {
    for (final CompactionReason compactionReason : CompactionReason.values()) {
      if (compactionReason.value == value) {
        return compactionReason;
      }
    }

    throw new IllegalArgumentException(
        "Illegal value provided for CompactionReason: " + value);
  }
}
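Since getValue() and fromValue(byte) are package-private, the round trip below only works inside org.rocksdb; it simply demonstrates that fromValue is a linear scan that fails fast on an unmapped byte:

// Round-trip demo (package-private API, so org.rocksdb scope only).
final CompactionReason reason = CompactionReason.fromValue((byte) 0x9);
assert reason == CompactionReason.kManualCompaction;
// An unmapped byte such as (byte) 0x7F throws IllegalArgumentException.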
@@ -0,0 +1,32 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;


/**
 * DataBlockIndexType used in conjunction with BlockBasedTable.
 */
public enum DataBlockIndexType {
  /**
   * traditional block type
   */
  kDataBlockBinarySearch((byte) 0x0),

  /**
   * additional hash index
   */
  kDataBlockBinaryAndHash((byte) 0x1);

  private final byte value;

  DataBlockIndexType(final byte value) {
    this.value = value;
  }

  byte getValue() {
    return value;
  }
}
@@ -0,0 +1,27 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

/**
 * HDFS environment.
 */
public class HdfsEnv extends Env {

  /**
   * <p>Creates a new environment for use with HDFS.</p>
   *
   * <p>The caller must delete the result when it is
   * no longer needed.</p>
   *
   * @param fsName the HDFS filesystem as a string in the form
   *     "hdfs://hostname:port/"
   */
  public HdfsEnv(final String fsName) {
    super(createHdfsEnv(fsName));
  }

  private static native long createHdfsEnv(final String fsName);
  @Override protected final native void disposeInternal(final long handle);
}
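A usage sketch (the namenode URI is hypothetical, and RocksDB must be built with HDFS support; Options#setEnv is an existing RocksJava API):

try (final HdfsEnv hdfsEnv = new HdfsEnv("hdfs://namenode:9000/"); // hypothetical URI
     final Options options = new Options()
         .setCreateIfMissing(true)
         .setEnv(hdfsEnv);
     final RocksDB db = RocksDB.open(options, "/rocksdb/testdb")) {
  db.put("key".getBytes(), "value".getBytes());
} catch (final RocksDBException e) {
  e.printStackTrace();
}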
@@ -0,0 +1,56 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

import java.util.Arrays;
import java.util.List;

/**
 * The metadata that describes a level.
 */
public class LevelMetaData {
  private final int level;
  private final long size;
  private final SstFileMetaData[] files;

  /**
   * Called from JNI C++
   */
  private LevelMetaData(final int level, final long size,
      final SstFileMetaData[] files) {
    this.level = level;
    this.size = size;
    this.files = files;
  }

  /**
   * The level which this metadata describes.
   *
   * @return the level
   */
  public int level() {
    return level;
  }

  /**
   * The size of this level in bytes, which is equal to the sum of
   * the file sizes of its {@link #files()}.
   *
   * @return the size
   */
  public long size() {
    return size;
  }

  /**
   * The metadata of all SST files in this level.
   *
   * @return the metadata of the files
   */
  public List<SstFileMetaData> files() {
    return Arrays.asList(files);
  }
}
@@ -0,0 +1,55 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

/**
 * The full set of metadata associated with each SST file.
 */
public class LiveFileMetaData extends SstFileMetaData {
  private final byte[] columnFamilyName;
  private final int level;

  /**
   * Called from JNI C++
   */
  private LiveFileMetaData(
      final byte[] columnFamilyName,
      final int level,
      final String fileName,
      final String path,
      final long size,
      final long smallestSeqno,
      final long largestSeqno,
      final byte[] smallestKey,
      final byte[] largestKey,
      final long numReadsSampled,
      final boolean beingCompacted,
      final long numEntries,
      final long numDeletions) {
    super(fileName, path, size, smallestSeqno, largestSeqno, smallestKey,
        largestKey, numReadsSampled, beingCompacted, numEntries, numDeletions);
    this.columnFamilyName = columnFamilyName;
    this.level = level;
  }

  /**
   * Get the name of the column family.
   *
   * @return the name of the column family
   */
  public byte[] columnFamilyName() {
    return columnFamilyName;
  }

  /**
   * Get the level at which this file resides.
   *
   * @return the level at which the file resides.
   */
  public int level() {
    return level;
  }
}
@@ -0,0 +1,75 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

public class LogFile {
  private final String pathName;
  private final long logNumber;
  private final WalFileType type;
  private final long startSequence;
  private final long sizeFileBytes;

  /**
   * Called from JNI C++
   */
  private LogFile(final String pathName, final long logNumber,
      final byte walFileTypeValue, final long startSequence,
      final long sizeFileBytes) {
    this.pathName = pathName;
    this.logNumber = logNumber;
    this.type = WalFileType.fromValue(walFileTypeValue);
    this.startSequence = startSequence;
    this.sizeFileBytes = sizeFileBytes;
  }

  /**
   * Returns the log file's pathname relative to the main db dir,
   * e.g. for a live-log-file: /000003.log;
   * for an archived-log-file: /archive/000003.log.
   *
   * @return log file's pathname
   */
  public String pathName() {
    return pathName;
  }

  /**
   * Primary identifier for the log file.
   * This is directly proportional to the creation time of the log file.
   *
   * @return the log number
   */
  public long logNumber() {
    return logNumber;
  }

  /**
   * A log file can be either alive or archived.
   *
   * @return the type of the log file.
   */
  public WalFileType type() {
    return type;
  }

  /**
   * Starting sequence number of the writebatch written in this log file.
   *
   * @return the starting sequence number
   */
  public long startSequence() {
    return startSequence;
  }

  /**
   * Size of the log file on disk in bytes.
   *
   * @return size of the log file
   */
  public long sizeFileBytes() {
    return sizeFileBytes;
  }
}
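A listing sketch, assuming the getSortedWalFiles() accessor from the (suppressed) RocksDB.java diff in this PR returns a List<LogFile>:

try (final RocksDB db = RocksDB.open("/tmp/testdb")) {
  for (final LogFile walFile : db.getSortedWalFiles()) { // assumed accessor
    System.out.println(walFile.pathName() + " #" + walFile.logNumber()
        + " (" + walFile.type() + ") from seq " + walFile.startSequence()
        + ", " + walFile.sizeFileBytes() + " bytes");
  }
} catch (final RocksDBException e) {
  e.printStackTrace();
}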
@@ -0,0 +1,286 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class MutableDBOptions extends AbstractMutableOptions {

  /**
   * User must use the builder pattern, or the parser.
   *
   * @param keys the keys
   * @param values the values
   *
   * See {@link #builder()} and {@link #parse(String)}.
   */
  private MutableDBOptions(final String[] keys, final String[] values) {
    super(keys, values);
  }

  /**
   * Creates a builder which allows you to set MutableDBOptions in a fluent
   * manner.
   *
   * @return A builder for MutableDBOptions
   */
  public static MutableDBOptionsBuilder builder() {
    return new MutableDBOptionsBuilder();
  }

  /**
   * Parses a String representation of MutableDBOptions
   *
   * The format is: key1=value1;key2=value2;key3=value3 etc
   *
   * For int[] values, each int should be separated by a comma, e.g.
   *
   * key1=value1;intArrayKey1=1,2,3
   *
   * @param str The string representation of the mutable db options
   *
   * @return A builder for the mutable db options
   */
  public static MutableDBOptionsBuilder parse(final String str) {
    Objects.requireNonNull(str);

    final MutableDBOptionsBuilder builder =
        new MutableDBOptionsBuilder();

    final String[] options = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
    for (final String option : options) {
      final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR);
      if (equalsOffset <= 0) {
        throw new IllegalArgumentException(
            "options string has an invalid key=value pair");
      }

      final String key = option.substring(0, equalsOffset);
      if (key.isEmpty()) {
        throw new IllegalArgumentException("options string is invalid");
      }

      final String value = option.substring(equalsOffset + 1);
      if (value.isEmpty()) {
        throw new IllegalArgumentException("options string is invalid");
      }

      builder.fromString(key, value);
    }

    return builder;
  }

  private interface MutableDBOptionKey extends MutableOptionKey {}

  public enum DBOption implements MutableDBOptionKey {
    max_background_jobs(ValueType.INT),
    base_background_compactions(ValueType.INT),
    max_background_compactions(ValueType.INT),
    avoid_flush_during_shutdown(ValueType.BOOLEAN),
    writable_file_max_buffer_size(ValueType.LONG),
    delayed_write_rate(ValueType.LONG),
    max_total_wal_size(ValueType.LONG),
    delete_obsolete_files_period_micros(ValueType.LONG),
    stats_dump_period_sec(ValueType.INT),
    max_open_files(ValueType.INT),
    bytes_per_sync(ValueType.LONG),
    wal_bytes_per_sync(ValueType.LONG),
    compaction_readahead_size(ValueType.LONG);

    private final ValueType valueType;

    DBOption(final ValueType valueType) {
      this.valueType = valueType;
    }

    @Override
    public ValueType getValueType() {
      return valueType;
    }
  }

  public static class MutableDBOptionsBuilder
      extends AbstractMutableOptionsBuilder<MutableDBOptions,
          MutableDBOptionsBuilder, MutableDBOptionKey>
      implements MutableDBOptionsInterface<MutableDBOptionsBuilder> {

    private static final Map<String, MutableDBOptionKey> ALL_KEYS_LOOKUP =
        new HashMap<>();
    static {
      for (final MutableDBOptionKey key : DBOption.values()) {
        ALL_KEYS_LOOKUP.put(key.name(), key);
      }
    }

    private MutableDBOptionsBuilder() {
      super();
    }

    @Override
    protected MutableDBOptionsBuilder self() {
      return this;
    }

    @Override
    protected Map<String, MutableDBOptionKey> allKeys() {
      return ALL_KEYS_LOOKUP;
    }

    @Override
    protected MutableDBOptions build(final String[] keys,
        final String[] values) {
      return new MutableDBOptions(keys, values);
    }

    @Override
    public MutableDBOptionsBuilder setMaxBackgroundJobs(
        final int maxBackgroundJobs) {
      return setInt(DBOption.max_background_jobs, maxBackgroundJobs);
    }

    @Override
    public int maxBackgroundJobs() {
      return getInt(DBOption.max_background_jobs);
    }

    @Override
    public void setBaseBackgroundCompactions(
        final int baseBackgroundCompactions) {
      setInt(DBOption.base_background_compactions,
          baseBackgroundCompactions);
    }

    @Override
    public int baseBackgroundCompactions() {
      return getInt(DBOption.base_background_compactions);
    }

    @Override
    public MutableDBOptionsBuilder setMaxBackgroundCompactions(
        final int maxBackgroundCompactions) {
      return setInt(DBOption.max_background_compactions,
          maxBackgroundCompactions);
    }

    @Override
    public int maxBackgroundCompactions() {
      return getInt(DBOption.max_background_compactions);
    }

    @Override
    public MutableDBOptionsBuilder setAvoidFlushDuringShutdown(
        final boolean avoidFlushDuringShutdown) {
      return setBoolean(DBOption.avoid_flush_during_shutdown,
          avoidFlushDuringShutdown);
    }

    @Override
    public boolean avoidFlushDuringShutdown() {
      return getBoolean(DBOption.avoid_flush_during_shutdown);
    }

    @Override
    public MutableDBOptionsBuilder setWritableFileMaxBufferSize(
        final long writableFileMaxBufferSize) {
      return setLong(DBOption.writable_file_max_buffer_size,
          writableFileMaxBufferSize);
    }

    @Override
    public long writableFileMaxBufferSize() {
      return getLong(DBOption.writable_file_max_buffer_size);
    }

    @Override
    public MutableDBOptionsBuilder setDelayedWriteRate(
        final long delayedWriteRate) {
      return setLong(DBOption.delayed_write_rate,
          delayedWriteRate);
    }

    @Override
    public long delayedWriteRate() {
      return getLong(DBOption.delayed_write_rate);
    }

    @Override
    public MutableDBOptionsBuilder setMaxTotalWalSize(
        final long maxTotalWalSize) {
      return setLong(DBOption.max_total_wal_size, maxTotalWalSize);
    }

    @Override
    public long maxTotalWalSize() {
      return getLong(DBOption.max_total_wal_size);
    }

    @Override
    public MutableDBOptionsBuilder setDeleteObsoleteFilesPeriodMicros(
        final long micros) {
      return setLong(DBOption.delete_obsolete_files_period_micros, micros);
    }

    @Override
    public long deleteObsoleteFilesPeriodMicros() {
      return getLong(DBOption.delete_obsolete_files_period_micros);
    }

    @Override
    public MutableDBOptionsBuilder setStatsDumpPeriodSec(
        final int statsDumpPeriodSec) {
      return setInt(DBOption.stats_dump_period_sec, statsDumpPeriodSec);
    }

    @Override
    public int statsDumpPeriodSec() {
      return getInt(DBOption.stats_dump_period_sec);
    }

    @Override
    public MutableDBOptionsBuilder setMaxOpenFiles(final int maxOpenFiles) {
      return setInt(DBOption.max_open_files, maxOpenFiles);
    }

    @Override
    public int maxOpenFiles() {
      return getInt(DBOption.max_open_files);
    }

    @Override
    public MutableDBOptionsBuilder setBytesPerSync(final long bytesPerSync) {
      return setLong(DBOption.bytes_per_sync, bytesPerSync);
    }

    @Override
    public long bytesPerSync() {
      return getLong(DBOption.bytes_per_sync);
    }

    @Override
    public MutableDBOptionsBuilder setWalBytesPerSync(
        final long walBytesPerSync) {
      return setLong(DBOption.wal_bytes_per_sync, walBytesPerSync);
    }

    @Override
    public long walBytesPerSync() {
      return getLong(DBOption.wal_bytes_per_sync);
    }

    @Override
    public MutableDBOptionsBuilder setCompactionReadaheadSize(
        final long compactionReadaheadSize) {
      return setLong(DBOption.compaction_readahead_size,
          compactionReadaheadSize);
    }

    @Override
    public long compactionReadaheadSize() {
      return getLong(DBOption.compaction_readahead_size);
    }
  }
}
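The two documented entry points, builder() and parse(String), produce equivalent results; a sketch:

final MutableDBOptions viaBuilder = MutableDBOptions.builder()
    .setMaxBackgroundJobs(4)
    .setBytesPerSync(1024 * 1024)
    .build();

// Equivalent, using the documented key1=value1;key2=value2 format:
final MutableDBOptions viaParser = MutableDBOptions
    .parse("max_background_jobs=4;bytes_per_sync=1048576")
    .build();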
@@ -0,0 +1,336 @@
||||
package org.rocksdb; |
||||
|
||||
public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface> { |
||||
|
||||
/** |
||||
* Specifies the maximum number of concurrent background jobs (both flushes |
||||
* and compactions combined). |
||||
* Default: 2 |
||||
* |
||||
* @param maxBackgroundJobs number of max concurrent background jobs |
||||
* @return the instance of the current object. |
||||
*/ |
||||
T setMaxBackgroundJobs(int maxBackgroundJobs); |
||||
|
||||
/** |
||||
* Returns the maximum number of concurrent background jobs (both flushes |
||||
* and compactions combined). |
||||
* Default: 2 |
||||
* |
||||
* @return the maximum number of concurrent background jobs. |
||||
*/ |
||||
int maxBackgroundJobs(); |
||||
|
||||
/** |
||||
* Suggested number of concurrent background compaction jobs, submitted to |
||||
* the default LOW priority thread pool. |
||||
* Default: 1 |
||||
* |
||||
* @param baseBackgroundCompactions Suggested number of background compaction |
||||
* jobs |
||||
* |
||||
* @deprecated Use {@link #setMaxBackgroundJobs(int)} |
||||
*/ |
||||
@Deprecated |
||||
void setBaseBackgroundCompactions(int baseBackgroundCompactions); |
||||
|
||||
/** |
||||
* Suggested number of concurrent background compaction jobs, submitted to |
||||
* the default LOW priority thread pool. |
||||
* Default: 1 |
||||
* |
||||
* @return Suggested number of background compaction jobs |
||||
*/ |
||||
int baseBackgroundCompactions(); |
||||
|
||||
/** |
||||
* Specifies the maximum number of concurrent background compaction jobs, |
||||
* submitted to the default LOW priority thread pool. |
||||
* If you're increasing this, also consider increasing number of threads in |
||||
* LOW priority thread pool. For more information, see |
||||
* Default: 1 |
||||
* |
||||
* @param maxBackgroundCompactions the maximum number of background |
||||
* compaction jobs. |
||||
* @return the instance of the current object. |
||||
* |
||||
* @see RocksEnv#setBackgroundThreads(int) |
||||
* @see RocksEnv#setBackgroundThreads(int, Priority) |
||||
* @see DBOptionsInterface#maxBackgroundFlushes() |
||||
*/ |
||||
T setMaxBackgroundCompactions(int maxBackgroundCompactions); |
||||
|
||||
/** |
||||
* Returns the maximum number of concurrent background compaction jobs, |
||||
* submitted to the default LOW priority thread pool. |
||||
* When increasing this number, we may also want to consider increasing |
||||
* number of threads in LOW priority thread pool. |
||||
* Default: 1 |
||||
* |
||||
* @return the maximum number of concurrent background compaction jobs. |
||||
* @see RocksEnv#setBackgroundThreads(int) |
||||
* @see RocksEnv#setBackgroundThreads(int, Priority) |
||||
* |
||||
* @deprecated Use {@link #setMaxBackgroundJobs(int)} |
||||
*/ |
||||
@Deprecated |
||||
int maxBackgroundCompactions(); |
||||
|
||||
/** |
||||
* By default RocksDB will flush all memtables on DB close if there are |
||||
* unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup |
||||
* DB close. Unpersisted data WILL BE LOST. |
||||
* |
||||
* DEFAULT: false |
||||
* |
||||
* Dynamically changeable through |
||||
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} |
||||
* API. |
||||
* |
||||
* @param avoidFlushDuringShutdown true if we should avoid flush during |
||||
* shutdown |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown); |
||||
|
||||
/** |
||||
* By default RocksDB will flush all memtables on DB close if there are |
||||
* unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup |
||||
* DB close. Unpersisted data WILL BE LOST. |
||||
* |
||||
* DEFAULT: false |
||||
* |
||||
* Dynamically changeable through |
||||
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} |
||||
* API. |
||||
* |
||||
* @return true if we should avoid flush during shutdown |
||||
*/ |
||||
boolean avoidFlushDuringShutdown(); |
||||
|
||||
/** |
||||
* This is the maximum buffer size that is used by WritableFileWriter. |
||||
* On Windows, we need to maintain an aligned buffer for writes. |
||||
* We allow the buffer to grow until it's size hits the limit. |
||||
* |
||||
* Default: 1024 * 1024 (1 MB) |
||||
* |
||||
* @param writableFileMaxBufferSize the maximum buffer size |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
T setWritableFileMaxBufferSize(long writableFileMaxBufferSize); |
||||
|
||||
/** |
||||
* This is the maximum buffer size that is used by WritableFileWriter. |
||||
* On Windows, we need to maintain an aligned buffer for writes. |
||||
* We allow the buffer to grow until it's size hits the limit. |
||||
* |
||||
* Default: 1024 * 1024 (1 MB) |
||||
* |
||||
* @return the maximum buffer size |
||||
*/ |
||||
long writableFileMaxBufferSize(); |
||||
|
||||
/** |
||||
* The limited write rate to DB if |
||||
* {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or |
||||
* {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, |
||||
* or we are writing to the last mem table allowed and we allow more than 3 |
||||
* mem tables. It is calculated using size of user write requests before |
||||
* compression. RocksDB may decide to slow down more if the compaction still |
||||
* gets behind further. |
||||
* |
||||
* Unit: bytes per second. |
||||
* |
||||
* Default: 16MB/s |
||||
* |
||||
* @param delayedWriteRate the rate in bytes per second |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
T setDelayedWriteRate(long delayedWriteRate); |
||||
|
||||
/** |
||||
* The limited write rate to DB if |
||||
* {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or |
||||
* {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, |
||||
* or we are writing to the last mem table allowed and we allow more than 3 |
||||
* mem tables. It is calculated using size of user write requests before |
||||
* compression. RocksDB may decide to slow down more if the compaction still |
||||
* gets behind further. |
||||
* |
||||
* Unit: bytes per second. |
||||
* |
||||
* Default: 16MB/s |
||||
* |
||||
* @return the rate in bytes per second |
||||
*/ |
||||
long delayedWriteRate(); |
||||
|
||||
/** |
||||
* <p>Once write-ahead logs exceed this size, we will start forcing the |
||||
* flush of column families whose memtables are backed by the oldest live |
||||
* WAL file (i.e. the ones that are causing all the space amplification). |
||||
* </p> |
||||
* <p>If set to 0 (default), we will dynamically choose the WAL size limit to |
||||
* be [sum of all write_buffer_size * max_write_buffer_number] * 2</p> |
||||
* <p>This option takes effect only when there are more than one column family as |
||||
* otherwise the wal size is dictated by the write_buffer_size.</p> |
||||
* <p>Default: 0</p> |
||||
* |
||||
* @param maxTotalWalSize max total wal size. |
||||
* @return the instance of the current object. |
||||
*/ |
||||
T setMaxTotalWalSize(long maxTotalWalSize); |
||||
|
||||
/** |
||||
* <p>Returns the max total wal size. Once write-ahead logs exceed this size, |
||||
* we will start forcing the flush of column families whose memtables are |
||||
* backed by the oldest live WAL file (i.e. the ones that are causing all |
||||
* the space amplification).</p> |
||||
* |
||||
* <p>If set to 0 (default), we will dynamically choose the WAL size limit |
||||
* to be [sum of all write_buffer_size * max_write_buffer_number] * 2 |
||||
* </p> |
||||
* |
||||
* @return max total wal size |
||||
*/ |
||||
long maxTotalWalSize(); |
||||
|
||||
/** |
||||
* The periodicity when obsolete files get deleted. The default |
||||
* value is 6 hours. The files that get out of scope by compaction |
||||
* process will still get automatically delete on every compaction, |
||||
* regardless of this setting |
||||
* |
||||
* @param micros the time interval in micros |
||||
* @return the instance of the current object. |
||||
*/ |
||||
T setDeleteObsoleteFilesPeriodMicros(long micros); |
||||
|
||||
/** |
||||
* The periodicity when obsolete files get deleted. The default |
||||
* value is 6 hours. The files that get out of scope by compaction |
||||
* process will still get automatically delete on every compaction, |
||||
* regardless of this setting |
||||
* |
||||
* @return the time interval in micros when obsolete files will be deleted. |
||||
*/ |
||||
long deleteObsoleteFilesPeriodMicros(); |
||||
|
||||
/** |
||||
* if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec |
||||
* Default: 600 (10 minutes) |
||||
* |
||||
* @param statsDumpPeriodSec time interval in seconds. |
||||
* @return the instance of the current object. |
||||
*/ |
||||
T setStatsDumpPeriodSec(int statsDumpPeriodSec); |
||||
|
||||
/** |
||||
* If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec |
||||
* Default: 600 (10 minutes) |
||||
* |
||||
* @return time interval in seconds. |
||||
*/ |
||||
int statsDumpPeriodSec(); |
||||
|
||||
/** |
||||
* Number of open files that can be used by the DB. You may need to |
||||
* increase this if your database has a large working set. Value -1 means |
||||
* files opened are always kept open. You can estimate number of files based |
||||
* on {@code target_file_size_base} and {@code target_file_size_multiplier} |
||||
* for level-based compaction. For universal-style compaction, you can usually |
||||
* set it to -1. |
||||
* Default: 5000 |
||||
* |
||||
* @param maxOpenFiles the maximum number of open files. |
||||
* @return the instance of the current object. |
||||
*/ |
||||
T setMaxOpenFiles(int maxOpenFiles); |
||||
|
||||
/** |
||||
* Number of open files that can be used by the DB. You may need to |
||||
* increase this if your database has a large working set. Value -1 means |
||||
* files opened are always kept open. You can estimate number of files based |
||||
* on {@code target_file_size_base} and {@code target_file_size_multiplier} |
||||
* for level-based compaction. For universal-style compaction, you can usually |
||||
* set it to -1. |
||||
* |
||||
* @return the maximum number of open files. |
||||
*/ |
||||
int maxOpenFiles(); |
||||
|
||||
/** |
||||
* Allows OS to incrementally sync files to disk while they are being |
||||
* written, asynchronously, in the background. |
||||
* Issue one request for every bytes_per_sync written. 0 turns it off. |
||||
* Default: 0 |
||||
* |
||||
* @param bytesPerSync size in bytes |
||||
* @return the instance of the current object. |
||||
*/ |
||||
T setBytesPerSync(long bytesPerSync); |
||||
|
||||
/** |
||||
* Allows OS to incrementally sync files to disk while they are being |
||||
* written, asynchronously, in the background. |
||||
* Issue one request for every bytes_per_sync written. 0 turns it off. |
||||
* Default: 0 |
||||
* |
||||
* @return size in bytes |
||||
*/ |
||||
long bytesPerSync(); |
||||
|
||||
/** |
||||
* Same as {@link #setBytesPerSync(long)}, but applies to WAL files. |
||||
* |
||||
* Default: 0, turned off |
||||
* |
||||
* @param walBytesPerSync size in bytes |
||||
* @return the instance of the current object. |
||||
*/ |
||||
T setWalBytesPerSync(long walBytesPerSync); |
||||
|
||||
/** |
||||
* Same as {@link #bytesPerSync()}, but applies to WAL files. |
||||
* |
||||
* Default: 0, turned off |
||||
* |
||||
* @return size in bytes |
||||
*/ |
||||
long walBytesPerSync(); |
||||
|
||||
|
||||
/** |
||||
* If non-zero, we perform bigger reads when doing compaction. If you're |
||||
* running RocksDB on spinning disks, you should set this to at least 2MB. |
||||
* |
||||
* That way RocksDB's compaction is doing sequential instead of random reads. |
||||
* When non-zero, we also force |
||||
* {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true. |
||||
* |
||||
* Default: 0 |
||||
* |
||||
* @param compactionReadaheadSize The compaction read-ahead size |
||||
* |
||||
* @return the reference to the current options. |
||||
*/ |
||||
T setCompactionReadaheadSize(final long compactionReadaheadSize); |
||||
|
||||
/** |
||||
* If non-zero, we perform bigger reads when doing compaction. If you're |
||||
* running RocksDB on spinning disks, you should set this to at least 2MB. |
||||
* |
||||
* That way RocksDB's compaction is doing sequential instead of random reads. |
||||
* When non-zero, we also force |
||||
* {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true. |
||||
* |
||||
* Default: 0 |
||||
* |
||||
* @return The compaction read-ahead size |
||||
*/ |
||||
long compactionReadaheadSize(); |
||||
} |
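As a rough usage sketch of the options above, assuming the stock org.rocksdb.DBOptions implementation of this interface (values are illustrative, not recommendations):

import org.rocksdb.DBOptions;

public class ThrottlingOptionsExample {
  public static void main(final String[] args) {
    try (final DBOptions options = new DBOptions()) {
      options.setDelayedWriteRate(16L * 1024 * 1024)     // throttle writes to 16 MB/s
          .setMaxTotalWalSize(512L * 1024 * 1024)        // force flushes past 512 MB of WAL
          .setDeleteObsoleteFilesPeriodMicros(6L * 60 * 60 * 1000 * 1000)  // 6 hours
          .setStatsDumpPeriodSec(600)                    // dump stats every 10 minutes
          .setMaxOpenFiles(5000)
          .setBytesPerSync(1024 * 1024)                  // background-sync SSTs every 1 MB
          .setWalBytesPerSync(1024 * 1024)               // same for WAL files
          .setCompactionReadaheadSize(2L * 1024 * 1024); // bigger reads for spinning disks
    }
  }
}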
@ -0,0 +1,15 @@ |
||||
package org.rocksdb; |
||||
|
||||
public interface MutableOptionKey { |
||||
enum ValueType { |
||||
DOUBLE, |
||||
LONG, |
||||
INT, |
||||
BOOLEAN, |
||||
INT_ARRAY, |
||||
ENUM |
||||
} |
||||
|
||||
String name(); |
||||
ValueType getValueType(); |
||||
} |
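A concrete key type is typically an enum, since an enum constant's built-in name() already satisfies the interface's name() method. A hypothetical sketch (ExampleOptionKey and its constants are illustrative, not part of the binding):

package org.rocksdb;

public enum ExampleOptionKey implements MutableOptionKey {
  MAX_OPEN_FILES(ValueType.INT),
  DELAYED_WRITE_RATE(ValueType.LONG),
  COMPRESSION_TYPE(ValueType.ENUM);

  private final ValueType valueType;

  ExampleOptionKey(final ValueType valueType) {
    this.valueType = valueType;
  }

  @Override
  public ValueType getValueType() {
    return valueType;
  }
}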
@ -0,0 +1,375 @@ |
||||
package org.rocksdb; |
||||
|
||||
import static org.rocksdb.AbstractMutableOptions.INT_ARRAY_INT_SEPARATOR; |
||||
|
||||
public abstract class MutableOptionValue<T> { |
||||
|
||||
abstract double asDouble() throws NumberFormatException; |
||||
abstract long asLong() throws NumberFormatException; |
||||
abstract int asInt() throws NumberFormatException; |
||||
abstract boolean asBoolean() throws IllegalStateException; |
||||
abstract int[] asIntArray() throws IllegalStateException; |
||||
abstract String asString(); |
||||
abstract T asObject(); |
||||
|
||||
private abstract static class MutableOptionValueObject<T> |
||||
extends MutableOptionValue<T> { |
||||
protected final T value; |
||||
|
||||
private MutableOptionValueObject(final T value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
@Override T asObject() { |
||||
return value; |
||||
} |
||||
} |
||||
|
||||
static MutableOptionValue<String> fromString(final String s) { |
||||
return new MutableOptionStringValue(s); |
||||
} |
||||
|
||||
static MutableOptionValue<Double> fromDouble(final double d) { |
||||
return new MutableOptionDoubleValue(d); |
||||
} |
||||
|
||||
static MutableOptionValue<Long> fromLong(final long d) { |
||||
return new MutableOptionLongValue(d); |
||||
} |
||||
|
||||
static MutableOptionValue<Integer> fromInt(final int i) { |
||||
return new MutableOptionIntValue(i); |
||||
} |
||||
|
||||
static MutableOptionValue<Boolean> fromBoolean(final boolean b) { |
||||
return new MutableOptionBooleanValue(b); |
||||
} |
||||
|
||||
static MutableOptionValue<int[]> fromIntArray(final int[] ix) { |
||||
return new MutableOptionIntArrayValue(ix); |
||||
} |
||||
|
||||
static <N extends Enum<N>> MutableOptionValue<N> fromEnum(final N value) { |
||||
return new MutableOptionEnumValue<>(value); |
||||
} |
||||
|
||||
static class MutableOptionStringValue |
||||
extends MutableOptionValueObject<String> { |
||||
MutableOptionStringValue(final String value) { |
||||
super(value); |
||||
} |
||||
|
||||
@Override |
||||
double asDouble() throws NumberFormatException { |
||||
return Double.parseDouble(value); |
||||
} |
||||
|
||||
@Override |
||||
long asLong() throws NumberFormatException { |
||||
return Long.parseLong(value); |
||||
} |
||||
|
||||
@Override |
||||
int asInt() throws NumberFormatException { |
||||
return Integer.parseInt(value); |
||||
} |
||||
|
||||
@Override |
||||
boolean asBoolean() throws IllegalStateException { |
||||
return Boolean.parseBoolean(value); |
||||
} |
||||
|
||||
@Override |
||||
int[] asIntArray() throws IllegalStateException { |
||||
throw new IllegalStateException("String is not applicable as int[]"); |
||||
} |
||||
|
||||
@Override |
||||
String asString() { |
||||
return value; |
||||
} |
||||
} |
||||
|
||||
static class MutableOptionDoubleValue |
||||
extends MutableOptionValue<Double> { |
||||
private final double value; |
||||
MutableOptionDoubleValue(final double value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
@Override |
||||
double asDouble() { |
||||
return value; |
||||
} |
||||
|
||||
@Override |
||||
long asLong() throws NumberFormatException { |
||||
return Double.valueOf(value).longValue(); |
||||
} |
||||
|
||||
@Override |
||||
int asInt() throws NumberFormatException { |
||||
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { |
||||
throw new NumberFormatException( |
||||
"double value lies outside the bounds of int"); |
||||
} |
||||
return Double.valueOf(value).intValue(); |
||||
} |
||||
|
||||
@Override |
||||
boolean asBoolean() throws IllegalStateException { |
||||
throw new IllegalStateException( |
||||
"double is not applicable as boolean"); |
||||
} |
||||
|
||||
@Override |
||||
int[] asIntArray() throws IllegalStateException { |
||||
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { |
||||
throw new NumberFormatException( |
||||
"double value lies outside the bounds of int"); |
||||
} |
||||
return new int[] { Double.valueOf(value).intValue() }; |
||||
} |
||||
|
||||
@Override |
||||
String asString() { |
||||
return String.valueOf(value); |
||||
} |
||||
|
||||
@Override |
||||
Double asObject() { |
||||
return value; |
||||
} |
||||
} |
||||
|
||||
static class MutableOptionLongValue |
||||
extends MutableOptionValue<Long> { |
||||
private final long value; |
||||
|
||||
MutableOptionLongValue(final long value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
@Override |
||||
double asDouble() { |
||||
// Any long is representable as a double (possibly losing precision), |
||||
// so no range check is needed here. |
||||
return (double) value; |
||||
} |
||||
|
||||
@Override |
||||
long asLong() throws NumberFormatException { |
||||
return value; |
||||
} |
||||
|
||||
@Override |
||||
int asInt() throws NumberFormatException { |
||||
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { |
||||
throw new NumberFormatException( |
||||
"long value lies outside the bounds of int"); |
||||
} |
||||
return Long.valueOf(value).intValue(); |
||||
} |
||||
|
||||
@Override |
||||
boolean asBoolean() throws IllegalStateException { |
||||
throw new IllegalStateException( |
||||
"long is not applicable as boolean"); |
||||
} |
||||
|
||||
@Override |
||||
int[] asIntArray() throws IllegalStateException { |
||||
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { |
||||
throw new NumberFormatException( |
||||
"long value lies outside the bounds of int"); |
||||
} |
||||
return new int[] { Long.valueOf(value).intValue() }; |
||||
} |
||||
|
||||
@Override |
||||
String asString() { |
||||
return String.valueOf(value); |
||||
} |
||||
|
||||
@Override |
||||
Long asObject() { |
||||
return value; |
||||
} |
||||
} |
||||
|
||||
static class MutableOptionIntValue |
||||
extends MutableOptionValue<Integer> { |
||||
private final int value; |
||||
|
||||
MutableOptionIntValue(final int value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
@Override |
||||
double asDouble() { |
||||
// Any int is exactly representable as a double, so no range check |
||||
// is needed here. |
||||
return (double) value; |
||||
} |
||||
|
||||
@Override |
||||
long asLong() throws NumberFormatException { |
||||
return value; |
||||
} |
||||
|
||||
@Override |
||||
int asInt() throws NumberFormatException { |
||||
return value; |
||||
} |
||||
|
||||
@Override |
||||
boolean asBoolean() throws IllegalStateException { |
||||
throw new IllegalStateException("int is not applicable as boolean"); |
||||
} |
||||
|
||||
@Override |
||||
int[] asIntArray() throws IllegalStateException { |
||||
return new int[] { value }; |
||||
} |
||||
|
||||
@Override |
||||
String asString() { |
||||
return String.valueOf(value); |
||||
} |
||||
|
||||
@Override |
||||
Integer asObject() { |
||||
return value; |
||||
} |
||||
} |
||||
|
||||
static class MutableOptionBooleanValue |
||||
extends MutableOptionValue<Boolean> { |
||||
private final boolean value; |
||||
|
||||
MutableOptionBooleanValue(final boolean value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
@Override |
||||
double asDouble() { |
||||
throw new NumberFormatException("boolean is not applicable as double"); |
||||
} |
||||
|
||||
@Override |
||||
long asLong() throws NumberFormatException { |
||||
throw new NumberFormatException("boolean is not applicable as Long"); |
||||
} |
||||
|
||||
@Override |
||||
int asInt() throws NumberFormatException { |
||||
throw new NumberFormatException("boolean is not applicable as int"); |
||||
} |
||||
|
||||
@Override |
||||
boolean asBoolean() { |
||||
return value; |
||||
} |
||||
|
||||
@Override |
||||
int[] asIntArray() throws IllegalStateException { |
||||
throw new IllegalStateException("boolean is not applicable as int[]"); |
||||
} |
||||
|
||||
@Override |
||||
String asString() { |
||||
return String.valueOf(value); |
||||
} |
||||
|
||||
@Override |
||||
Boolean asObject() { |
||||
return value; |
||||
} |
||||
} |
||||
|
||||
static class MutableOptionIntArrayValue |
||||
extends MutableOptionValueObject<int[]> { |
||||
MutableOptionIntArrayValue(final int[] value) { |
||||
super(value); |
||||
} |
||||
|
||||
@Override |
||||
double asDouble() { |
||||
throw new NumberFormatException("int[] is not applicable as double"); |
||||
} |
||||
|
||||
@Override |
||||
long asLong() throws NumberFormatException { |
||||
throw new NumberFormatException("int[] is not applicable as Long"); |
||||
} |
||||
|
||||
@Override |
||||
int asInt() throws NumberFormatException { |
||||
throw new NumberFormatException("int[] is not applicable as int"); |
||||
} |
||||
|
||||
@Override |
||||
boolean asBoolean() { |
||||
throw new NumberFormatException("int[] is not applicable as boolean"); |
||||
} |
||||
|
||||
@Override |
||||
int[] asIntArray() throws IllegalStateException { |
||||
return value; |
||||
} |
||||
|
||||
@Override |
||||
String asString() { |
||||
final StringBuilder builder = new StringBuilder(); |
||||
for(int i = 0; i < value.length; i++) { |
||||
builder.append(value[i]); |
||||
if(i + 1 < value.length) { |
||||
builder.append(INT_ARRAY_INT_SEPARATOR); |
||||
} |
||||
} |
||||
return builder.toString(); |
||||
} |
||||
} |
||||
|
||||
static class MutableOptionEnumValue<T extends Enum<T>> |
||||
extends MutableOptionValueObject<T> { |
||||
|
||||
MutableOptionEnumValue(final T value) { |
||||
super(value); |
||||
} |
||||
|
||||
@Override |
||||
double asDouble() throws NumberFormatException { |
||||
throw new NumberFormatException("Enum is not applicable as double"); |
||||
} |
||||
|
||||
@Override |
||||
long asLong() throws NumberFormatException { |
||||
throw new NumberFormatException("Enum is not applicable as long"); |
||||
} |
||||
|
||||
@Override |
||||
int asInt() throws NumberFormatException { |
||||
throw new NumberFormatException("Enum is not applicable as int"); |
||||
} |
||||
|
||||
@Override |
||||
boolean asBoolean() throws IllegalStateException { |
||||
throw new NumberFormatException("Enum is not applicable as boolean"); |
||||
} |
||||
|
||||
@Override |
||||
int[] asIntArray() throws IllegalStateException { |
||||
throw new NumberFormatException("Enum is not applicable as int[]"); |
||||
} |
||||
|
||||
@Override |
||||
String asString() { |
||||
return value.name(); |
||||
} |
||||
} |
||||
|
||||
} |
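A small sketch of how these wrappers normalise values across representations. The factories and accessors are package-private, so this would have to live in org.rocksdb:

package org.rocksdb;

public class MutableOptionValueDemo {
  public static void main(final String[] args) {
    final MutableOptionValue<Long> v = MutableOptionValue.fromLong(42L);
    System.out.println(v.asString());  // prints "42"
    System.out.println(v.asInt());     // prints 42; throws if outside int range
    // v.asBoolean() would throw IllegalStateException: a long is not a boolean
  }
}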
@ -0,0 +1,59 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* The operation stage. |
||||
*/ |
||||
public enum OperationStage { |
||||
STAGE_UNKNOWN((byte)0x0), |
||||
STAGE_FLUSH_RUN((byte)0x1), |
||||
STAGE_FLUSH_WRITE_L0((byte)0x2), |
||||
STAGE_COMPACTION_PREPARE((byte)0x3), |
||||
STAGE_COMPACTION_RUN((byte)0x4), |
||||
STAGE_COMPACTION_PROCESS_KV((byte)0x5), |
||||
STAGE_COMPACTION_INSTALL((byte)0x6), |
||||
STAGE_COMPACTION_SYNC_FILE((byte)0x7), |
||||
STAGE_PICK_MEMTABLES_TO_FLUSH((byte)0x8), |
||||
STAGE_MEMTABLE_ROLLBACK((byte)0x9), |
||||
STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte)0xA); |
||||
|
||||
private final byte value; |
||||
|
||||
OperationStage(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* Get the internal representation value. |
||||
* |
||||
* @return the internal representation value. |
||||
*/ |
||||
byte getValue() { |
||||
return value; |
||||
} |
||||
|
||||
/** |
||||
* Get the Operation stage from the internal representation value. |
||||
* |
||||
* @param value the internal representation value. |
||||
* |
||||
* @return the operation stage |
||||
* |
||||
* @throws IllegalArgumentException if the value does not match |
||||
* an OperationStage |
||||
*/ |
||||
static OperationStage fromValue(final byte value) |
||||
throws IllegalArgumentException { |
||||
for (final OperationStage threadType : OperationStage.values()) { |
||||
if (threadType.value == value) { |
||||
return threadType; |
||||
} |
||||
} |
||||
throw new IllegalArgumentException( |
||||
"Unknown value for OperationStage: " + value); |
||||
} |
||||
} |
@ -0,0 +1,54 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* The type used to refer to a thread operation. |
||||
* |
||||
* A thread operation describes the high-level action of a thread; |
||||
* examples include compaction and flush. |
||||
*/ |
||||
public enum OperationType { |
||||
OP_UNKNOWN((byte)0x0), |
||||
OP_COMPACTION((byte)0x1), |
||||
OP_FLUSH((byte)0x2); |
||||
|
||||
private final byte value; |
||||
|
||||
OperationType(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* Get the internal representation value. |
||||
* |
||||
* @return the internal representation value. |
||||
*/ |
||||
byte getValue() { |
||||
return value; |
||||
} |
||||
|
||||
/** |
||||
* Get the Operation type from the internal representation value. |
||||
* |
||||
* @param value the internal representation value. |
||||
* |
||||
* @return the operation type |
||||
* |
||||
* @throws IllegalArgumentException if the value does not match |
||||
* an OperationType |
||||
*/ |
||||
static OperationType fromValue(final byte value) |
||||
throws IllegalArgumentException { |
||||
for (final OperationType threadType : OperationType.values()) { |
||||
if (threadType.value == value) { |
||||
return threadType; |
||||
} |
||||
} |
||||
throw new IllegalArgumentException( |
||||
"Unknown value for OperationType: " + value); |
||||
} |
||||
} |
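OperationStage, OperationType, StateType, and ThreadType all share the same byte-mapping pattern; a round trip looks like this (package-private access, hence the org.rocksdb package):

package org.rocksdb;

public class OperationTypeDemo {
  public static void main(final String[] args) {
    final byte raw = OperationType.OP_COMPACTION.getValue();
    // fromValue() scans values() and throws IllegalArgumentException
    // for an unknown byte.
    final OperationType roundTripped = OperationType.fromValue(raw);
    System.out.println(roundTripped);  // prints OP_COMPACTION
  }
}

The linear scan over values() in fromValue() is a reasonable design here, given the handful of constants involved.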
@ -0,0 +1,26 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Persistent cache for caching IO pages on a persistent medium. The |
||||
* cache is specifically designed to serve as a persistent read cache. |
||||
*/ |
||||
public class PersistentCache extends RocksObject { |
||||
|
||||
public PersistentCache(final Env env, final String path, final long size, |
||||
final Logger logger, final boolean optimizedForNvm) |
||||
throws RocksDBException { |
||||
super(newPersistentCache(env.nativeHandle_, path, size, |
||||
logger.nativeHandle_, optimizedForNvm)); |
||||
} |
||||
|
||||
private static native long newPersistentCache(final long envHandle, |
||||
final String path, final long size, final long loggerHandle, |
||||
final boolean optimizedForNvm) throws RocksDBException; |
||||
|
||||
@Override protected final native void disposeInternal(final long handle); |
||||
} |
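A construction sketch, assuming the default Env and a trivial Logger subclass; how the cache is then attached to a table configuration is outside this file, and the path and size below are illustrative:

import org.rocksdb.*;

public class PersistentCacheExample {
  public static void main(final String[] args) throws RocksDBException {
    try (final Options options = new Options();
         final Logger logger = new Logger(options) {
           @Override
           protected void log(final InfoLogLevel infoLogLevel, final String logMsg) {
             System.out.println(logMsg);  // route cache log messages to stdout
           }
         };
         final PersistentCache cache = new PersistentCache(
             Env.getDefault(), "/tmp/rocksdb_read_cache",
             64L * 1024 * 1024, logger, false)) {
      // the cache is ready to be wired into a table config here
    }
  }
}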
@ -0,0 +1,49 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* The Thread Pool priority. |
||||
*/ |
||||
public enum Priority { |
||||
BOTTOM((byte) 0x0), |
||||
LOW((byte) 0x1), |
||||
HIGH((byte)0x2), |
||||
TOTAL((byte)0x3); |
||||
|
||||
private final byte value; |
||||
|
||||
Priority(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* <p>Returns the byte value of the enumerations value.</p> |
||||
* |
||||
* @return byte representation |
||||
*/ |
||||
byte getValue() { |
||||
return value; |
||||
} |
||||
|
||||
/** |
||||
* Get Priority by byte value. |
||||
* |
||||
* @param value byte representation of Priority. |
||||
* |
||||
* @return {@link org.rocksdb.Priority} instance. |
||||
* @throws java.lang.IllegalArgumentException if an invalid |
||||
* value is provided. |
||||
*/ |
||||
static Priority getPriority(final byte value) { |
||||
for (final Priority priority : Priority.values()) { |
||||
if (priority.getValue() == value) { |
||||
return priority; |
||||
} |
||||
} |
||||
throw new IllegalArgumentException("Illegal value provided for Priority."); |
||||
} |
||||
} |
@ -0,0 +1,19 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Range from start to limit. |
||||
*/ |
||||
public class Range { |
||||
final Slice start; |
||||
final Slice limit; |
||||
|
||||
public Range(final Slice start, final Slice limit) { |
||||
this.start = start; |
||||
this.limit = limit; |
||||
} |
||||
} |
@ -0,0 +1,30 @@ |
||||
package org.rocksdb; |
||||
|
||||
import java.util.List; |
||||
|
||||
/** |
||||
* Flags for |
||||
* {@link RocksDB#getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)} |
||||
* that specify whether memtable stats, file stats approximation, |
||||
* or both should be included. |
||||
*/ |
||||
public enum SizeApproximationFlag { |
||||
NONE((byte)0x0), |
||||
INCLUDE_MEMTABLES((byte)0x1), |
||||
INCLUDE_FILES((byte)0x2); |
||||
|
||||
private final byte value; |
||||
|
||||
SizeApproximationFlag(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* Get the internal byte representation. |
||||
* |
||||
* @return the internal representation. |
||||
*/ |
||||
byte getValue() { |
||||
return value; |
||||
} |
||||
} |
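Combining these flags with the Range class above, a size query might look like the following sketch. It assumes an open db handle; the long[] return type and the exact parameter list of getApproximateSizes are assumptions based on the documented signature and the C++ API:

import java.util.Arrays;

// Sketch: key bounds and column family choice are illustrative.
static long approximateRangeSize(final RocksDB db) {
  try (final Slice start = new Slice("user:0000");
       final Slice limit = new Slice("user:9999")) {
    final Range range = new Range(start, limit);
    final long[] sizes = db.getApproximateSizes(
        db.getDefaultColumnFamily(), Arrays.asList(range),
        SizeApproximationFlag.INCLUDE_FILES,
        SizeApproximationFlag.INCLUDE_MEMTABLES);
    return sizes[0];  // one entry per Range, in order
  }
}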
@ -0,0 +1,150 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* The metadata that describes an SST file. |
||||
*/ |
||||
public class SstFileMetaData { |
||||
private final String fileName; |
||||
private final String path; |
||||
private final long size; |
||||
private final long smallestSeqno; |
||||
private final long largestSeqno; |
||||
private final byte[] smallestKey; |
||||
private final byte[] largestKey; |
||||
private final long numReadsSampled; |
||||
private final boolean beingCompacted; |
||||
private final long numEntries; |
||||
private final long numDeletions; |
||||
|
||||
/** |
||||
* Called from C++ via JNI. |
||||
*/ |
||||
protected SstFileMetaData( |
||||
final String fileName, |
||||
final String path, |
||||
final long size, |
||||
final long smallestSeqno, |
||||
final long largestSeqno, |
||||
final byte[] smallestKey, |
||||
final byte[] largestKey, |
||||
final long numReadsSampled, |
||||
final boolean beingCompacted, |
||||
final long numEntries, |
||||
final long numDeletions) { |
||||
this.fileName = fileName; |
||||
this.path = path; |
||||
this.size = size; |
||||
this.smallestSeqno = smallestSeqno; |
||||
this.largestSeqno = largestSeqno; |
||||
this.smallestKey = smallestKey; |
||||
this.largestKey = largestKey; |
||||
this.numReadsSampled = numReadsSampled; |
||||
this.beingCompacted = beingCompacted; |
||||
this.numEntries = numEntries; |
||||
this.numDeletions = numDeletions; |
||||
} |
||||
|
||||
/** |
||||
* Get the name of the file. |
||||
* |
||||
* @return the name of the file. |
||||
*/ |
||||
public String fileName() { |
||||
return fileName; |
||||
} |
||||
|
||||
/** |
||||
* Get the full path where the file is located. |
||||
* |
||||
* @return the full path |
||||
*/ |
||||
public String path() { |
||||
return path; |
||||
} |
||||
|
||||
/** |
||||
* Get the file size in bytes. |
||||
* |
||||
* @return file size |
||||
*/ |
||||
public long size() { |
||||
return size; |
||||
} |
||||
|
||||
/** |
||||
* Get the smallest sequence number in the file. |
||||
* |
||||
* @return the smallest sequence number |
||||
*/ |
||||
public long smallestSeqno() { |
||||
return smallestSeqno; |
||||
} |
||||
|
||||
/** |
||||
* Get the largest sequence number in the file. |
||||
* |
||||
* @return the largest sequence number |
||||
*/ |
||||
public long largestSeqno() { |
||||
return largestSeqno; |
||||
} |
||||
|
||||
/** |
||||
* Get the smallest user defined key in the file. |
||||
* |
||||
* @return the smallest user defined key |
||||
*/ |
||||
public byte[] smallestKey() { |
||||
return smallestKey; |
||||
} |
||||
|
||||
/** |
||||
* Get the largest user defined key in the file. |
||||
* |
||||
* @return the largest user defined key |
||||
*/ |
||||
public byte[] largestKey() { |
||||
return largestKey; |
||||
} |
||||
|
||||
/** |
||||
* Get the number of times the file has been read. |
||||
* |
||||
* @return the number of times the file has been read |
||||
*/ |
||||
public long numReadsSampled() { |
||||
return numReadsSampled; |
||||
} |
||||
|
||||
/** |
||||
* Returns true if the file is currently being compacted. |
||||
* |
||||
* @return true if the file is currently being compacted, false otherwise. |
||||
*/ |
||||
public boolean beingCompacted() { |
||||
return beingCompacted; |
||||
} |
||||
|
||||
/** |
||||
* Get the number of entries. |
||||
* |
||||
* @return the number of entries. |
||||
*/ |
||||
public long numEntries() { |
||||
return numEntries; |
||||
} |
||||
|
||||
/** |
||||
* Get the number of deletions. |
||||
* |
||||
* @return the number of deletions. |
||||
*/ |
||||
public long numDeletions() { |
||||
return numDeletions; |
||||
} |
||||
} |
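Given such a metadata object (e.g. from a live-files query elsewhere in the API), a consumer might flag large, frequently read files; the thresholds below are purely illustrative assumptions:

static boolean isLargeHotFile(final SstFileMetaData meta) {
  return meta.size() > 256L * 1024 * 1024   // larger than 256 MB
      && meta.numReadsSampled() > 1_000;    // sampled as frequently read
}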
@ -0,0 +1,53 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* The type used to refer to a thread state. |
||||
* |
||||
* A state describes the lower-level action of a thread, |
||||
* such as reading/writing a file or waiting for a mutex. |
||||
*/ |
||||
public enum StateType { |
||||
STATE_UNKNOWN((byte)0x0), |
||||
STATE_MUTEX_WAIT((byte)0x1); |
||||
|
||||
private final byte value; |
||||
|
||||
StateType(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* Get the internal representation value. |
||||
* |
||||
* @return the internal representation value. |
||||
*/ |
||||
byte getValue() { |
||||
return value; |
||||
} |
||||
|
||||
/** |
||||
* Get the State type from the internal representation value. |
||||
* |
||||
* @param value the internal representation value. |
||||
* |
||||
* @return the state type |
||||
* |
||||
* @throws IllegalArgumentException if the value does not match |
||||
* a StateType |
||||
*/ |
||||
static StateType fromValue(final byte value) |
||||
throws IllegalArgumentException { |
||||
for (final StateType threadType : StateType.values()) { |
||||
if (threadType.value == value) { |
||||
return threadType; |
||||
} |
||||
} |
||||
throw new IllegalArgumentException( |
||||
"Unknown value for StateType: " + value); |
||||
} |
||||
} |
@ -0,0 +1,20 @@ |
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Filter for iterating a table. |
||||
*/ |
||||
public interface TableFilter { |
||||
|
||||
/** |
||||
* A callback to determine whether relevant keys for this scan exist in a |
||||
* given table based on the table's properties. The callback is passed the |
||||
* properties of each table during iteration. If the callback returns false, |
||||
* the table will not be scanned. This option only affects Iterators and has |
||||
* no impact on point lookups. |
||||
* |
||||
* @param tableProperties the table properties. |
||||
* |
||||
* @return true if the table should be scanned, false otherwise. |
||||
*/ |
||||
boolean filter(final TableProperties tableProperties); |
||||
} |
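For example, a filter that only scans tables created within the last hour. How the filter is registered on a scan (e.g. via ReadOptions) is not shown in this diff, so only the interface implementation is sketched:

final TableFilter recentTablesOnly = new TableFilter() {
  @Override
  public boolean filter(final TableProperties tableProperties) {
    final long cutoff = (System.currentTimeMillis() / 1000) - 3600;
    // A creation time of 0 means unknown, so scan such tables anyway.
    return tableProperties.getCreationTime() == 0
        || tableProperties.getCreationTime() >= cutoff;
  }
};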
@ -0,0 +1,365 @@ |
||||
package org.rocksdb; |
||||
|
||||
import java.util.Map; |
||||
|
||||
/** |
||||
* TableProperties contains read-only properties of its associated |
||||
* table. |
||||
*/ |
||||
public class TableProperties { |
||||
private final long dataSize; |
||||
private final long indexSize; |
||||
private final long indexPartitions; |
||||
private final long topLevelIndexSize; |
||||
private final long indexKeyIsUserKey; |
||||
private final long indexValueIsDeltaEncoded; |
||||
private final long filterSize; |
||||
private final long rawKeySize; |
||||
private final long rawValueSize; |
||||
private final long numDataBlocks; |
||||
private final long numEntries; |
||||
private final long numDeletions; |
||||
private final long numMergeOperands; |
||||
private final long numRangeDeletions; |
||||
private final long formatVersion; |
||||
private final long fixedKeyLen; |
||||
private final long columnFamilyId; |
||||
private final long creationTime; |
||||
private final long oldestKeyTime; |
||||
private final byte[] columnFamilyName; |
||||
private final String filterPolicyName; |
||||
private final String comparatorName; |
||||
private final String mergeOperatorName; |
||||
private final String prefixExtractorName; |
||||
private final String propertyCollectorsNames; |
||||
private final String compressionName; |
||||
private final Map<String, String> userCollectedProperties; |
||||
private final Map<String, String> readableProperties; |
||||
private final Map<String, Long> propertiesOffsets; |
||||
|
||||
/** |
||||
* Access is private as this will only be constructed from |
||||
* C++ via JNI. |
||||
*/ |
||||
private TableProperties(final long dataSize, final long indexSize, |
||||
final long indexPartitions, final long topLevelIndexSize, |
||||
final long indexKeyIsUserKey, final long indexValueIsDeltaEncoded, |
||||
final long filterSize, final long rawKeySize, final long rawValueSize, |
||||
final long numDataBlocks, final long numEntries, final long numDeletions, |
||||
final long numMergeOperands, final long numRangeDeletions, |
||||
final long formatVersion, final long fixedKeyLen, |
||||
final long columnFamilyId, final long creationTime, |
||||
final long oldestKeyTime, final byte[] columnFamilyName, |
||||
final String filterPolicyName, final String comparatorName, |
||||
final String mergeOperatorName, final String prefixExtractorName, |
||||
final String propertyCollectorsNames, final String compressionName, |
||||
final Map<String, String> userCollectedProperties, |
||||
final Map<String, String> readableProperties, |
||||
final Map<String, Long> propertiesOffsets) { |
||||
this.dataSize = dataSize; |
||||
this.indexSize = indexSize; |
||||
this.indexPartitions = indexPartitions; |
||||
this.topLevelIndexSize = topLevelIndexSize; |
||||
this.indexKeyIsUserKey = indexKeyIsUserKey; |
||||
this.indexValueIsDeltaEncoded = indexValueIsDeltaEncoded; |
||||
this.filterSize = filterSize; |
||||
this.rawKeySize = rawKeySize; |
||||
this.rawValueSize = rawValueSize; |
||||
this.numDataBlocks = numDataBlocks; |
||||
this.numEntries = numEntries; |
||||
this.numDeletions = numDeletions; |
||||
this.numMergeOperands = numMergeOperands; |
||||
this.numRangeDeletions = numRangeDeletions; |
||||
this.formatVersion = formatVersion; |
||||
this.fixedKeyLen = fixedKeyLen; |
||||
this.columnFamilyId = columnFamilyId; |
||||
this.creationTime = creationTime; |
||||
this.oldestKeyTime = oldestKeyTime; |
||||
this.columnFamilyName = columnFamilyName; |
||||
this.filterPolicyName = filterPolicyName; |
||||
this.comparatorName = comparatorName; |
||||
this.mergeOperatorName = mergeOperatorName; |
||||
this.prefixExtractorName = prefixExtractorName; |
||||
this.propertyCollectorsNames = propertyCollectorsNames; |
||||
this.compressionName = compressionName; |
||||
this.userCollectedProperties = userCollectedProperties; |
||||
this.readableProperties = readableProperties; |
||||
this.propertiesOffsets = propertiesOffsets; |
||||
} |
||||
|
||||
/** |
||||
* Get the total size of all data blocks. |
||||
* |
||||
* @return the total size of all data blocks. |
||||
*/ |
||||
public long getDataSize() { |
||||
return dataSize; |
||||
} |
||||
|
||||
/** |
||||
* Get the size of index block. |
||||
* |
||||
* @return the size of index block. |
||||
*/ |
||||
public long getIndexSize() { |
||||
return indexSize; |
||||
} |
||||
|
||||
/** |
||||
* Get the total number of index partitions |
||||
* if {@link IndexType#kTwoLevelIndexSearch} is used. |
||||
* |
||||
* @return the total number of index partitions. |
||||
*/ |
||||
public long getIndexPartitions() { |
||||
return indexPartitions; |
||||
} |
||||
|
||||
/** |
||||
* Size of the top-level index |
||||
* if {@link IndexType#kTwoLevelIndexSearch} is used. |
||||
* |
||||
* @return the size of the top-level index. |
||||
*/ |
||||
public long getTopLevelIndexSize() { |
||||
return topLevelIndexSize; |
||||
} |
||||
|
||||
/** |
||||
* Whether the index key is the user key; otherwise it includes |
||||
* the 8-byte sequence number added by the internal key format. |
||||
* |
||||
* @return non-zero if the index key is the user key, zero otherwise. |
||||
*/ |
||||
public long getIndexKeyIsUserKey() { |
||||
return indexKeyIsUserKey; |
||||
} |
||||
|
||||
/** |
||||
* Whether delta encoding is used to encode the index values. |
||||
* |
||||
* @return whether delta encoding is used to encode the index values. |
||||
*/ |
||||
public long getIndexValueIsDeltaEncoded() { |
||||
return indexValueIsDeltaEncoded; |
||||
} |
||||
|
||||
/** |
||||
* Get the size of filter block. |
||||
* |
||||
* @return the size of filter block. |
||||
*/ |
||||
public long getFilterSize() { |
||||
return filterSize; |
||||
} |
||||
|
||||
/** |
||||
* Get the total raw key size. |
||||
* |
||||
* @return the total raw key size. |
||||
*/ |
||||
public long getRawKeySize() { |
||||
return rawKeySize; |
||||
} |
||||
|
||||
/** |
||||
* Get the total raw value size. |
||||
* |
||||
* @return the total raw value size. |
||||
*/ |
||||
public long getRawValueSize() { |
||||
return rawValueSize; |
||||
} |
||||
|
||||
/** |
||||
* Get the number of blocks in this table. |
||||
* |
||||
* @return the number of blocks in this table. |
||||
*/ |
||||
public long getNumDataBlocks() { |
||||
return numDataBlocks; |
||||
} |
||||
|
||||
/** |
||||
* Get the number of entries in this table. |
||||
* |
||||
* @return the number of entries in this table. |
||||
*/ |
||||
public long getNumEntries() { |
||||
return numEntries; |
||||
} |
||||
|
||||
/** |
||||
* Get the number of deletions in the table. |
||||
* |
||||
* @return the number of deletions in the table. |
||||
*/ |
||||
public long getNumDeletions() { |
||||
return numDeletions; |
||||
} |
||||
|
||||
/** |
||||
* Get the number of merge operands in the table. |
||||
* |
||||
* @return the number of merge operands in the table. |
||||
*/ |
||||
public long getNumMergeOperands() { |
||||
return numMergeOperands; |
||||
} |
||||
|
||||
/** |
||||
* Get the number of range deletions in this table. |
||||
* |
||||
* @return the number of range deletions in this table. |
||||
*/ |
||||
public long getNumRangeDeletions() { |
||||
return numRangeDeletions; |
||||
} |
||||
|
||||
/** |
||||
* Get the format version, reserved for backward compatibility. |
||||
* |
||||
* @return the format version. |
||||
*/ |
||||
public long getFormatVersion() { |
||||
return formatVersion; |
||||
} |
||||
|
||||
/** |
||||
* Get the length of the keys. |
||||
* |
||||
* @return 0 when the key is variable length, otherwise number of |
||||
* bytes for each key. |
||||
*/ |
||||
public long getFixedKeyLen() { |
||||
return fixedKeyLen; |
||||
} |
||||
|
||||
/** |
||||
* Get the ID of column family for this SST file, |
||||
* corresponding to the column family identified by |
||||
* {@link #getColumnFamilyName()}. |
||||
* |
||||
* @return the id of the column family. |
||||
*/ |
||||
public long getColumnFamilyId() { |
||||
return columnFamilyId; |
||||
} |
||||
|
||||
/** |
||||
* The time when the SST file was created. |
||||
* Since SST files are immutable, this is equivalent |
||||
* to last modified time. |
||||
* |
||||
* @return the created time. |
||||
*/ |
||||
public long getCreationTime() { |
||||
return creationTime; |
||||
} |
||||
|
||||
/** |
||||
* Get the timestamp of the earliest key. |
||||
* |
||||
* @return 0 means unknown, otherwise the timestamp. |
||||
*/ |
||||
public long getOldestKeyTime() { |
||||
return oldestKeyTime; |
||||
} |
||||
|
||||
/** |
||||
* Get the name of the column family with which this |
||||
* SST file is associated. |
||||
* |
||||
* @return the name of the column family, or null if the |
||||
* column family is unknown. |
||||
*/ |
||||
/*@Nullable*/ public byte[] getColumnFamilyName() { |
||||
return columnFamilyName; |
||||
} |
||||
|
||||
/** |
||||
* Get the name of the filter policy used in this table. |
||||
* |
||||
* @return the name of the filter policy, or null if |
||||
* no filter policy is used. |
||||
*/ |
||||
/*@Nullable*/ public String getFilterPolicyName() { |
||||
return filterPolicyName; |
||||
} |
||||
|
||||
/** |
||||
* Get the name of the comparator used in this table. |
||||
* |
||||
* @return the name of the comparator. |
||||
*/ |
||||
public String getComparatorName() { |
||||
return comparatorName; |
||||
} |
||||
|
||||
/** |
||||
* Get the name of the merge operator used in this table. |
||||
* |
||||
* @return the name of the merge operator, or null if no merge operator |
||||
* is used. |
||||
*/ |
||||
/*@Nullable*/ public String getMergeOperatorName() { |
||||
return mergeOperatorName; |
||||
} |
||||
|
||||
/** |
||||
* Get the name of the prefix extractor used in this table. |
||||
* |
||||
* @return the name of the prefix extractor, or null if no prefix |
||||
* extractor is used. |
||||
*/ |
||||
/*@Nullable*/ public String getPrefixExtractorName() { |
||||
return prefixExtractorName; |
||||
} |
||||
|
||||
/** |
||||
* Get the names of the property collector factories used in this table. |
||||
* |
||||
* @return the names of the property collector factories separated |
||||
* by commas, e.g. {collector_name[1]},{collector_name[2]},... |
||||
*/ |
||||
public String getPropertyCollectorsNames() { |
||||
return propertyCollectorsNames; |
||||
} |
||||
|
||||
/** |
||||
* Get the name of the compression algorithm used to compress the SST files. |
||||
* |
||||
* @return the name of the compression algorithm. |
||||
*/ |
||||
public String getCompressionName() { |
||||
return compressionName; |
||||
} |
||||
|
||||
/** |
||||
* Get the user collected properties. |
||||
* |
||||
* @return the user collected properties. |
||||
*/ |
||||
public Map<String, String> getUserCollectedProperties() { |
||||
return userCollectedProperties; |
||||
} |
||||
|
||||
/** |
||||
* Get the readable properties. |
||||
* |
||||
* @return the readable properties. |
||||
*/ |
||||
public Map<String, String> getReadableProperties() { |
||||
return readableProperties; |
||||
} |
||||
|
||||
/** |
||||
* The offset of the value of each property in the file. |
||||
* |
||||
* @return the offset of each property. |
||||
*/ |
||||
public Map<String, Long> getPropertiesOffsets() { |
||||
return propertiesOffsets; |
||||
} |
||||
} |
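A consumer of a TableProperties instance (for example inside a TableFilter, as above) might summarise a table like this:

static void printTableSummary(final TableProperties props) {
  System.out.printf("cf=%d entries=%d deletions=%d data=%dB index=%dB filter=%dB%n",
      props.getColumnFamilyId(), props.getNumEntries(), props.getNumDeletions(),
      props.getDataSize(), props.getIndexSize(), props.getFilterSize());
}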
@ -0,0 +1,224 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import java.util.Map; |
||||
|
||||
public class ThreadStatus { |
||||
private final long threadId; |
||||
private final ThreadType threadType; |
||||
private final String dbName; |
||||
private final String cfName; |
||||
private final OperationType operationType; |
||||
private final long operationElapsedTime; // microseconds
|
||||
private final OperationStage operationStage; |
||||
private final long[] operationProperties; |
||||
private final StateType stateType; |
||||
|
||||
/** |
||||
* Invoked from C++ via JNI |
||||
*/ |
||||
private ThreadStatus(final long threadId, |
||||
final byte threadTypeValue, |
||||
final String dbName, |
||||
final String cfName, |
||||
final byte operationTypeValue, |
||||
final long operationElapsedTime, |
||||
final byte operationStageValue, |
||||
final long[] operationProperties, |
||||
final byte stateTypeValue) { |
||||
this.threadId = threadId; |
||||
this.threadType = ThreadType.fromValue(threadTypeValue); |
||||
this.dbName = dbName; |
||||
this.cfName = cfName; |
||||
this.operationType = OperationType.fromValue(operationTypeValue); |
||||
this.operationElapsedTime = operationElapsedTime; |
||||
this.operationStage = OperationStage.fromValue(operationStageValue); |
||||
this.operationProperties = operationProperties; |
||||
this.stateType = StateType.fromValue(stateTypeValue); |
||||
} |
||||
|
||||
/** |
||||
* Get the unique ID of the thread. |
||||
* |
||||
* @return the thread id |
||||
*/ |
||||
public long getThreadId() { |
||||
return threadId; |
||||
} |
||||
|
||||
/** |
||||
* Get the type of the thread. |
||||
* |
||||
* @return the type of the thread. |
||||
*/ |
||||
public ThreadType getThreadType() { |
||||
return threadType; |
||||
} |
||||
|
||||
/** |
||||
* The name of the DB instance that the thread is currently |
||||
* involved with. |
||||
* |
||||
* @return the name of the db, or null if the thread is not involved |
||||
* in any DB operation. |
||||
*/ |
||||
/* @Nullable */ public String getDbName() { |
||||
return dbName; |
||||
} |
||||
|
||||
/** |
||||
* The name of the Column Family that the thread is currently |
||||
* involved with. |
||||
* |
||||
* @return the name of the column family, or null if the thread is not |
||||
* involved in any column family operation. |
||||
*/ |
||||
/* @Nullable */ public String getCfName() { |
||||
return cfName; |
||||
} |
||||
|
||||
/** |
||||
* Get the operation (high-level action) that the current thread is involved |
||||
* with. |
||||
* |
||||
* @return the operation |
||||
*/ |
||||
public OperationType getOperationType() { |
||||
return operationType; |
||||
} |
||||
|
||||
/** |
||||
* Get the elapsed time of the current thread operation in microseconds. |
||||
* |
||||
* @return the elapsed time |
||||
*/ |
||||
public long getOperationElapsedTime() { |
||||
return operationElapsedTime; |
||||
} |
||||
|
||||
/** |
||||
* Get the current stage where the thread is involved in the current |
||||
* operation. |
||||
* |
||||
* @return the current stage of the current operation |
||||
*/ |
||||
public OperationStage getOperationStage() { |
||||
return operationStage; |
||||
} |
||||
|
||||
/** |
||||
* Get the list of properties that describe some details about the current |
||||
* operation. |
||||
* |
||||
* Each field might have a different meaning for different operations. |
||||
* |
||||
* @return the properties |
||||
*/ |
||||
public long[] getOperationProperties() { |
||||
return operationProperties; |
||||
} |
||||
|
||||
/** |
||||
* Get the state (lower-level action) that the current thread is involved |
||||
* with. |
||||
* |
||||
* @return the state |
||||
*/ |
||||
public StateType getStateType() { |
||||
return stateType; |
||||
} |
||||
|
||||
/** |
||||
* Get the name of the thread type. |
||||
* |
||||
* @param threadType the thread type |
||||
* |
||||
* @return the name of the thread type. |
||||
*/ |
||||
public static String getThreadTypeName(final ThreadType threadType) { |
||||
return getThreadTypeName(threadType.getValue()); |
||||
} |
||||
|
||||
/** |
||||
* Get the name of an operation given its type. |
||||
* |
||||
* @param operationType the type of operation. |
||||
* |
||||
* @return the name of the operation. |
||||
*/ |
||||
public static String getOperationName(final OperationType operationType) { |
||||
return getOperationName(operationType.getValue()); |
||||
} |
||||
|
||||
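/** |
||||
* Convert an elapsed operation time in microseconds into |
||||
* a human-readable string. |
||||
* |
||||
* @param operationElapsedTime the elapsed time, in microseconds. |
||||
* |
||||
* @return a human-readable string describing the elapsed time. |
||||
*/ |
||||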
public static String microsToString(final long operationElapsedTime) { |
||||
return microsToStringNative(operationElapsedTime); |
||||
} |
||||
|
||||
/** |
||||
* Obtain a human-readable string describing the specified operation stage. |
||||
* |
||||
* @param operationStage the stage of the operation. |
||||
* |
||||
* @return the description of the operation stage. |
||||
*/ |
||||
public static String getOperationStageName( |
||||
final OperationStage operationStage) { |
||||
return getOperationStageName(operationStage.getValue()); |
||||
} |
||||
|
||||
/** |
||||
* Obtain the name of the "i"th operation property of the |
||||
* specified operation. |
||||
* |
||||
* @param operationType the operation type. |
||||
* @param i the index of the operation property. |
||||
* |
||||
* @return the name of the operation property |
||||
*/ |
||||
public static String getOperationPropertyName( |
||||
final OperationType operationType, final int i) { |
||||
return getOperationPropertyName(operationType.getValue(), i); |
||||
} |
||||
|
||||
/** |
||||
* Translate the properties of the specified operation into |
||||
* a map of property name to value. |
||||
* |
||||
* @param operationType the operation type. |
||||
* @param operationProperties the operation properties. |
||||
* |
||||
* @return the property values. |
||||
*/ |
||||
public static Map<String, Long> interpretOperationProperties( |
||||
final OperationType operationType, final long[] operationProperties) { |
||||
return interpretOperationProperties(operationType.getValue(), |
||||
operationProperties); |
||||
} |
||||
|
||||
/** |
||||
* Obtain the name of a state given its type. |
||||
* |
||||
* @param stateType the state type. |
||||
* |
||||
* @return the name of the state. |
||||
*/ |
||||
public static String getStateName(final StateType stateType) { |
||||
return getStateName(stateType.getValue()); |
||||
} |
||||
|
||||
private static native String getThreadTypeName(final byte threadTypeValue); |
||||
private static native String getOperationName(final byte operationTypeValue); |
||||
private static native String microsToStringNative( |
||||
final long operationElapsedTime); |
||||
private static native String getOperationStageName( |
||||
final byte operationStageTypeValue); |
||||
private static native String getOperationPropertyName( |
||||
final byte operationTypeValue, final int i); |
||||
private static native Map<String, Long> interpretOperationProperties( |
||||
final byte operationTypeValue, final long[] operationProperties); |
||||
private static native String getStateName(final byte stateTypeValue); |
||||
} |
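Using only the accessors and static helpers above, a status line for monitoring could be built like this (how ThreadStatus instances are obtained, e.g. from a thread-list query, is outside this class):

static String describe(final ThreadStatus ts) {
  return String.format("%s thread %d: %s (%s) on db=%s cf=%s, elapsed %s",
      ThreadStatus.getThreadTypeName(ts.getThreadType()),
      ts.getThreadId(),
      ThreadStatus.getOperationName(ts.getOperationType()),
      ThreadStatus.getOperationStageName(ts.getOperationStage()),
      ts.getDbName(), ts.getCfName(),
      ThreadStatus.microsToString(ts.getOperationElapsedTime()));
}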
@ -0,0 +1,65 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* The type of a thread. |
||||
*/ |
||||
public enum ThreadType { |
||||
/** |
||||
* RocksDB BG thread in high-pri thread pool. |
||||
*/ |
||||
HIGH_PRIORITY((byte)0x0), |
||||
|
||||
/** |
||||
* RocksDB BG thread in low-pri thread pool. |
||||
*/ |
||||
LOW_PRIORITY((byte)0x1), |
||||
|
||||
/** |
||||
* User thread (Non-RocksDB BG thread). |
||||
*/ |
||||
USER((byte)0x2), |
||||
|
||||
/** |
||||
* RocksDB BG thread in bottom-pri thread pool. |
||||
*/ |
||||
BOTTOM_PRIORITY((byte)0x3); |
||||
|
||||
private final byte value; |
||||
|
||||
ThreadType(final byte value) { |
||||
this.value = value; |
||||
} |
||||
|
||||
/** |
||||
* Get the internal representation value. |
||||
* |
||||
* @return the internal representation value. |
||||
*/ |
||||
byte getValue() { |
||||
return value; |
||||
} |
||||
|
||||
/** |
||||
* Get the Thread type from the internal representation value. |
||||
* |
||||
* @param value the internal representation value. |
||||
* |
||||
* @return the thread type |
||||
* |
||||
* @throws IllegalArgumentException if the value does not match a ThreadType |
||||
*/ |
||||
static ThreadType fromValue(final byte value) |
||||
throws IllegalArgumentException { |
||||
for (final ThreadType threadType : ThreadType.values()) { |
||||
if (threadType.value == value) { |
||||
return threadType; |
||||
} |
||||
} |
||||
throw new IllegalArgumentException("Unknown value for ThreadType: " + value); |
||||
} |
||||
} |
@ -0,0 +1,30 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Timed environment. |
||||
*/ |
||||
public class TimedEnv extends Env { |
||||
|
||||
/** |
||||
* <p>Creates a new environment that measures function call times for |
||||
* filesystem operations, reporting results to variables in PerfContext.</p> |
||||
* |
||||
* <p>The caller must close the result when it is |
||||
* no longer needed.</p> |
||||
* |
||||
* @param baseEnv the base environment, |
||||
* must remain live while the result is in use. |
||||
*/ |
||||
public TimedEnv(final Env baseEnv) { |
||||
super(createTimedEnv(baseEnv.nativeHandle_)); |
||||
} |
||||
|
||||
private static native long createTimedEnv(final long baseEnvHandle); |
||||
@Override protected final native void disposeInternal(final long handle); |
||||
} |
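A usage sketch: wrap the default Env and hand it to the DB's options so filesystem timings land in PerfContext counters (the path and option values are illustrative):

import org.rocksdb.*;

public class TimedEnvExample {
  public static void main(final String[] args) throws RocksDBException {
    try (final TimedEnv timedEnv = new TimedEnv(Env.getDefault());
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setEnv(timedEnv);
         final RocksDB db = RocksDB.open(options, "/tmp/timed-env-db")) {
      db.put("key".getBytes(), "value".getBytes());  // this write is now timed
    }
  }
}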
@ -0,0 +1,32 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* TraceOptions is used for |
||||
* {@link RocksDB#startTrace(TraceOptions, AbstractTraceWriter)}. |
||||
*/ |
||||
public class TraceOptions { |
||||
private final long maxTraceFileSize; |
||||
|
||||
public TraceOptions() { |
||||
this.maxTraceFileSize = 64L * 1024 * 1024 * 1024; // 64 GB (long arithmetic to avoid int overflow)
|
||||
} |
||||
|
||||
public TraceOptions(final long maxTraceFileSize) { |
||||
this.maxTraceFileSize = maxTraceFileSize; |
||||
} |
||||
|
||||
/** |
||||
* To avoid the trace file growing larger than the available storage |
||||
* space, the user can set the max trace file size in bytes. Default: 64 GB. |
||||
* |
||||
* @return the max trace size |
||||
*/ |
||||
public long getMaxTraceFileSize() { |
||||
return maxTraceFileSize; |
||||
} |
||||
} |
@ -0,0 +1,36 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* TraceWriter allows exporting RocksDB traces to any system, |
||||
* one operation at a time. |
||||
*/ |
||||
public interface TraceWriter { |
||||
|
||||
/** |
||||
* Write the data. |
||||
* |
||||
* @param data the data |
||||
* |
||||
* @throws RocksDBException if an error occurs whilst writing. |
||||
*/ |
||||
void write(final Slice data) throws RocksDBException; |
||||
|
||||
/** |
||||
* Close the writer. |
||||
* |
||||
* @throws RocksDBException if an error occurs whilst closing the writer. |
||||
*/ |
||||
void closeWriter() throws RocksDBException; |
||||
|
||||
/** |
||||
* Get the size of the file that this writer is writing to. |
||||
* |
||||
* @return the file size |
||||
*/ |
||||
long getFileSize(); |
||||
} |
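A hypothetical file-backed implementation of this interface; note that Slice#data() and the String-message RocksDBException constructor are assumed to be available here, and that startTrace() is documented above as taking an AbstractTraceWriter adapter rather than this interface directly:

import java.io.FileOutputStream;
import java.io.IOException;
import org.rocksdb.*;

public class FileTraceWriter implements TraceWriter {
  private final FileOutputStream out;
  private long bytesWritten = 0;

  public FileTraceWriter(final String path) throws IOException {
    this.out = new FileOutputStream(path);
  }

  @Override
  public void write(final Slice data) throws RocksDBException {
    try {
      final byte[] bytes = data.data();
      out.write(bytes);
      bytesWritten += bytes.length;  // track size for getFileSize()
    } catch (final IOException e) {
      throw new RocksDBException(e.getMessage());
    }
  }

  @Override
  public void closeWriter() throws RocksDBException {
    try {
      out.close();
    } catch (final IOException e) {
      throw new RocksDBException(e.getMessage());
    }
  }

  @Override
  public long getFileSize() {
    return bytesWritten;
  }
}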