[Java] Fixed 32-bit overflow issue when converting jlong to size_t

Summary:
Fixed a 32-bit overflow issue when converting jlong to size_t by
capping the converted value at std::numeric_limits<size_t>::max().

Test Plan:
make rocksdbjava
make jtest

Reviewers: ankgup87, ljin, sdong, igor

Reviewed By: igor

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D23511
Branch: main
Author: Yueh-Hsuan Chiang (10 years ago)
Parent: f090575e43
Commit: 94e43a1dfe
Changed files:
  1. java/rocksjni/memtablejni.cc (7 lines changed)
  2. java/rocksjni/options.cc (19 lines changed)
  3. java/rocksjni/portal.h (7 lines changed)
  4. java/rocksjni/write_batch.cc (2 lines changed)
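
The truncation this change guards against only shows up on 32-bit builds, where
size_t is narrower than jlong. A minimal standalone sketch of the old behavior
(not part of this commit; jlong_t and the variable name are illustrative
stand-ins, since the real jlong comes from <jni.h>):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for JNI's jlong, which is a 64-bit signed integer.
using jlong_t = int64_t;

int main() {
  // A Java caller requests an 8 GB write buffer, which does not fit in a
  // 32-bit size_t.
  jlong_t jwrite_buffer_size = jlong_t{1} << 33;

  // The old conversion keeps only the low bits, so on a 32-bit build
  // (sizeof(size_t) == 4) the 8 GB request silently becomes 0. On a 64-bit
  // build the value fits and nothing is lost.
  size_t converted = static_cast<size_t>(jwrite_buffer_size);

  std::printf("requested=%lld converted=%zu\n",
              static_cast<long long>(jwrite_buffer_size), converted);
  return 0;
}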

java/rocksjni/memtablejni.cc

@@ -5,6 +5,7 @@
 //
 // This file implements the "bridge" between Java and C++ for MemTables.
+#include "rocksjni/portal.h"
 #include "include/org_rocksdb_HashSkipListMemTableConfig.h"
 #include "include/org_rocksdb_HashLinkedListMemTableConfig.h"
 #include "include/org_rocksdb_VectorMemTableConfig.h"
@@ -20,7 +21,7 @@ jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
     JNIEnv* env, jobject jobj, jlong jbucket_count,
     jint jheight, jint jbranching_factor) {
   return reinterpret_cast<jlong>(rocksdb::NewHashSkipListRepFactory(
-      static_cast<size_t>(jbucket_count),
+      rocksdb::jlong_to_size_t(jbucket_count),
       static_cast<int32_t>(jheight),
       static_cast<int32_t>(jbranching_factor)));
 }
@@ -33,7 +34,7 @@ jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
 jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
     JNIEnv* env, jobject jobj, jlong jbucket_count) {
   return reinterpret_cast<jlong>(rocksdb::NewHashLinkListRepFactory(
-      static_cast<size_t>(jbucket_count)));
+      rocksdb::jlong_to_size_t(jbucket_count)));
 }
 
 /*
@@ -44,7 +45,7 @@ jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
 jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
     JNIEnv* env, jobject jobj, jlong jreserved_size) {
   return reinterpret_cast<jlong>(new rocksdb::VectorRepFactory(
-      static_cast<size_t>(jreserved_size)));
+      rocksdb::jlong_to_size_t(jreserved_size)));
 }
 
 /*

java/rocksjni/options.cc

@@ -70,7 +70,7 @@ jboolean Java_org_rocksdb_Options_createIfMissing(
 void Java_org_rocksdb_Options_setWriteBufferSize(
     JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_buffer_size) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->write_buffer_size =
-      static_cast<size_t>(jwrite_buffer_size);
+      rocksdb::jlong_to_size_t(jwrite_buffer_size);
 }
@@ -362,7 +362,7 @@ jlong Java_org_rocksdb_Options_maxLogFileSize(
 void Java_org_rocksdb_Options_setMaxLogFileSize(
     JNIEnv* env, jobject jobj, jlong jhandle, jlong max_log_file_size) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size =
-      static_cast<size_t>(max_log_file_size);
+      rocksdb::jlong_to_size_t(max_log_file_size);
 }
 
 /*
@@ -383,7 +383,7 @@ jlong Java_org_rocksdb_Options_logFileTimeToRoll(
 void Java_org_rocksdb_Options_setLogFileTimeToRoll(
     JNIEnv* env, jobject jobj, jlong jhandle, jlong log_file_time_to_roll) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll =
-      static_cast<size_t>(log_file_time_to_roll);
+      rocksdb::jlong_to_size_t(log_file_time_to_roll);
 }
 
 /*
@@ -404,7 +404,7 @@ jlong Java_org_rocksdb_Options_keepLogFileNum(
 void Java_org_rocksdb_Options_setKeepLogFileNum(
     JNIEnv* env, jobject jobj, jlong jhandle, jlong keep_log_file_num) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num =
-      static_cast<size_t>(keep_log_file_num);
+      rocksdb::jlong_to_size_t(keep_log_file_num);
 }
 
 /*
@@ -509,7 +509,8 @@ void Java_org_rocksdb_Options_setTableCacheRemoveScanCountLimit(
 void Java_org_rocksdb_Options_useFixedLengthPrefixExtractor(
     JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
-      rocksdb::NewFixedPrefixTransform(static_cast<size_t>(jprefix_length)));
+      rocksdb::NewFixedPrefixTransform(
+          rocksdb::jlong_to_size_t(jprefix_length)));
 }
 
 /*
@@ -573,7 +574,7 @@ jlong Java_org_rocksdb_Options_manifestPreallocationSize(
 void Java_org_rocksdb_Options_setManifestPreallocationSize(
     JNIEnv* env, jobject jobj, jlong jhandle, jlong preallocation_size) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->manifest_preallocation_size =
-      static_cast<size_t>(preallocation_size);
+      rocksdb::jlong_to_size_t(preallocation_size);
 }
 
 /*
@@ -1245,7 +1246,7 @@ jlong Java_org_rocksdb_Options_arenaBlockSize(
 void Java_org_rocksdb_Options_setArenaBlockSize(
     JNIEnv* env, jobject jobj, jlong jhandle, jlong jarena_block_size) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size =
-      static_cast<size_t>(jarena_block_size);
+      rocksdb::jlong_to_size_t(jarena_block_size);
 }
 
 /*
@@ -1410,7 +1411,7 @@ void Java_org_rocksdb_Options_setInplaceUpdateNumLocks(
     jlong jinplace_update_num_locks) {
   reinterpret_cast<rocksdb::Options*>(
       jhandle)->inplace_update_num_locks =
-          static_cast<size_t>(jinplace_update_num_locks);
+          rocksdb::jlong_to_size_t(jinplace_update_num_locks);
 }
 
 /*
@@ -1501,7 +1502,7 @@ void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
     JNIEnv* env, jobject jobj, jlong jhandle,
     jlong jmax_successive_merges) {
   reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges =
-      static_cast<size_t>(jmax_successive_merges);
+      rocksdb::jlong_to_size_t(jmax_successive_merges);
 }
 
 /*

java/rocksjni/portal.h

@@ -11,12 +11,19 @@
 #define JAVA_ROCKSJNI_PORTAL_H_
 
 #include <jni.h>
+#include <limits>
 #include "rocksdb/db.h"
 #include "rocksdb/filter_policy.h"
 #include "rocksdb/utilities/backupable_db.h"
 
 namespace rocksdb {
 
+inline size_t jlong_to_size_t(const jlong& jvalue) {
+  return static_cast<uint64_t>(jvalue) <=
+      static_cast<uint64_t>(std::numeric_limits<size_t>::max()) ?
+      static_cast<size_t>(jvalue) : std::numeric_limits<size_t>::max();
+}
+
 // The portal class for org.rocksdb.RocksDB
 class RocksDBJni {
  public:

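A quick standalone sketch of how the new helper behaves at the size_t boundary
(hypothetical, not part of this commit; int64_t stands in for jlong so it
compiles without <jni.h>, and jlong_to_size_t_sketch mirrors the helper above):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>

using jlong_t = int64_t;  // stand-in for JNI's jlong

inline size_t jlong_to_size_t_sketch(const jlong_t& jvalue) {
  return static_cast<uint64_t>(jvalue) <=
      static_cast<uint64_t>(std::numeric_limits<size_t>::max()) ?
      static_cast<size_t>(jvalue) : std::numeric_limits<size_t>::max();
}

int main() {
  // Values that fit in size_t pass through unchanged.
  assert(jlong_to_size_t_sketch(4096) == static_cast<size_t>(4096));

  // A value above 2^32 - 1 caps at SIZE_MAX on a 32-bit build instead of
  // wrapping around; on a 64-bit build it simply fits.
  jlong_t big = jlong_t{1} << 40;
  if (sizeof(size_t) == 4) {
    assert(jlong_to_size_t_sketch(big) == std::numeric_limits<size_t>::max());
  } else {
    assert(jlong_to_size_t_sketch(big) == static_cast<size_t>(big));
  }
  return 0;
}

Because the comparison is done after converting jvalue to uint64_t, a negative
jlong is treated as a very large unsigned value, so on a 32-bit build it is
capped at SIZE_MAX rather than being truncated to an arbitrary small number.
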
java/rocksjni/write_batch.cc

@@ -30,7 +30,7 @@
 void Java_org_rocksdb_WriteBatch_newWriteBatch(
     JNIEnv* env, jobject jobj, jint jreserved_bytes) {
   rocksdb::WriteBatch* wb = new rocksdb::WriteBatch(
-      static_cast<size_t>(jreserved_bytes));
+      rocksdb::jlong_to_size_t(jreserved_bytes));
 
   rocksdb::WriteBatchJni::setHandle(env, jobj, wb);
 }
