Conflicts:
	Makefile
	java/Makefile
	java/org/rocksdb/Options.java
	java/rocksjni/portal.h

commit dc291f5bf0 (branch: main)
Author: Ankit Gupta
32 changed files (lines changed in parentheses):

   1. HISTORY.md (2)
   2. Makefile (6)
   3. db/db_impl.cc (4)
   4. db/db_impl_readonly.cc (2)
   5. db/table_cache.cc (46)
   6. db/version_edit.h (5)
   7. db/version_set.cc (63)
   8. db/version_set.h (9)
   9. java/Makefile (7)
  10. java/org/rocksdb/BackupableDB.java (84)
  11. java/org/rocksdb/BackupableDBOptions.java (52)
  12. java/org/rocksdb/Options.java (930)
  13. java/org/rocksdb/RocksDB.java (22)
  14. java/org/rocksdb/test/BackupableDBTest.java (41)
  15. java/org/rocksdb/test/OptionsTest.java (201)
  16. java/rocksjni/backupablejni.cc (85)
  17. java/rocksjni/options.cc (611)
  18. java/rocksjni/portal.h (34)
  19. table/block_based_table_reader.cc (27)
  20. table/block_based_table_reader.h (5)
  21. table/meta_blocks.cc (2)
  22. table/table_properties.cc (2)
  23. tools/shell/DBClientProxy.cpp (271)
  24. tools/shell/DBClientProxy.h (64)
  25. tools/shell/LeveldbShell.cpp (8)
  26. tools/shell/ShellContext.cpp (104)
  27. tools/shell/ShellContext.h (51)
  28. tools/shell/ShellState.cpp (139)
  29. tools/shell/ShellState.h (87)
  30. tools/shell/test/DBClientProxyTest.cpp (182)
  31. util/sync_point.cc (2)
  32. util/sync_point.h (9)

--- a/HISTORY.md
+++ b/HISTORY.md
@@ -24,7 +24,7 @@
* Added Env::GetThreadPoolQueueLen(), which returns the waiting queue length of thread pools
* Added a command "checkconsistency" in ldb tool, which checks
  if file system state matches DB state (file existence and file sizes)
- * Separate options related to block based table to a new struct BlockBasedTableOptions
* Separate options related to block based table to a new struct BlockBasedTableOptions.
* WriteBatch has a new function Count() to return total size in the batch, and Data() now returns a reference instead of a copy
* Add more counters to perf context.
* Supports several more DB properties: compaction-pending, background-errors and cur-size-active-mem-table.
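The WriteBatch entry above also surfaces in the Java binding touched by this merge. A minimal sketch, assuming the org.rocksdb.WriteBatch wrapper exercised by WriteBatchTest (see java/Makefile below) exposes put() and count() and that the JNI library is on java.library.path; the class name here is illustrative:

// Sketch: counting entries in a WriteBatch through the Java binding.
// Run with -ea so the assert fires, as the Makefile's test target does.
import org.rocksdb.WriteBatch;

public class WriteBatchCountSketch {
  public static void main(String[] args) {
    WriteBatch batch = new WriteBatch();
    batch.put("k1".getBytes(), "v1".getBytes());
    batch.put("k2".getBytes(), "v2".getBytes());
    // count() reports the number of entries accumulated in the batch.
    assert batch.count() == 2;
  }
}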

--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,9 @@ ifeq ($(MAKECMDGOALS),shared_lib)
PLATFORM_SHARED_LDFLAGS=-fPIC
OPT += -DNDEBUG
endif
ifeq ($(MAKECMDGOALS),static_lib)
PLATFORM_SHARED_LDFLAGS=-fPIC
OPT += -DNDEBUG
endif
@@ -420,7 +422,11 @@ ldb: tools/ldb.o $(LIBOBJECTS)
# ---------------------------------------------------------------------------
# Jni stuff
# ---------------------------------------------------------------------------
<<<<<<< HEAD
JNI_NATIVE_SOURCES = ./java/rocksjni/rocksjni.cc ./java/rocksjni/options.cc ./java/rocksjni/write_batch.cc ./java/rocksjni/statistics.cc
=======
JNI_NATIVE_SOURCES = ./java/rocksjni/*.cc
>>>>>>> 1a8abe72768b2b5cea800aa390c28e5ace6a552e
JAVA_INCLUDE = -I/usr/lib/jvm/java-openjdk/include/ -I/usr/lib/jvm/java-openjdk/include/linux
ROCKSDBJNILIB = ./java/librocksdbjni.so

--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -3219,7 +3219,7 @@ Status DBImpl::GetImpl(const ReadOptions& options,
    PERF_TIMER_START(get_from_output_files_time);
    sv->current->Get(options, lkey, value, &s, &merge_context, &stats,
-                    *cfd->options(), value_found);
                     value_found);
    have_stat_update = true;
    PERF_TIMER_STOP(get_from_output_files_time);
    RecordTick(options_.statistics.get(), MEMTABLE_MISS);
@@ -3334,7 +3334,7 @@ std::vector<Status> DBImpl::MultiGet(
        // Done
      } else {
        super_version->current->Get(options, lkey, value, &s, &merge_context,
-                                    &mgd->stats, *cfd->options());
                                     &mgd->stats);
        mgd->have_stat_update = true;
      }

--- a/db/db_impl_readonly.cc
+++ b/db/db_impl_readonly.cc
@@ -67,7 +67,7 @@ Status DBImplReadOnly::Get(const ReadOptions& options,
  } else {
    Version::GetStats stats;
    super_version->current->Get(options, lkey, value, &s, &merge_context,
-                                &stats, *cfd->options());
                                 &stats);
  }
  return s;
}

--- a/db/table_cache.cc
+++ b/db/table_cache.cc
@@ -106,19 +106,20 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
  if (table_reader_ptr != nullptr) {
    *table_reader_ptr = nullptr;
  }
- Cache::Handle* handle = file_meta.table_reader_handle;
  TableReader* table_reader = file_meta.table_reader;
  Cache::Handle* handle = nullptr;
  Status s;
- if (!handle) {
  if (table_reader == nullptr) {
    s = FindTable(toptions, icomparator, file_meta.number, file_meta.file_size,
                  &handle, nullptr, options.read_tier == kBlockCacheTier);
- }
- if (!s.ok()) {
-   return NewErrorIterator(s);
    if (!s.ok()) {
      return NewErrorIterator(s);
    }
    table_reader = GetTableReaderFromHandle(handle);
  }
- TableReader* table_reader = GetTableReaderFromHandle(handle);
  Iterator* result = table_reader->NewIterator(options);
- if (!file_meta.table_reader_handle) {
  if (handle != nullptr) {
    result->RegisterCleanup(&UnrefEntry, cache_, handle);
  }
  if (table_reader_ptr != nullptr) {
@@ -138,17 +139,20 @@ Status TableCache::Get(const ReadOptions& options,
                       bool (*saver)(void*, const ParsedInternalKey&,
                                     const Slice&, bool),
                       bool* table_io, void (*mark_key_may_exist)(void*)) {
- Cache::Handle* handle = file_meta.table_reader_handle;
  TableReader* t = file_meta.table_reader;
  Status s;
- if (!handle) {
  Cache::Handle* handle = nullptr;
  if (!t) {
    s = FindTable(storage_options_, internal_comparator, file_meta.number,
                  file_meta.file_size, &handle, table_io,
                  options.read_tier == kBlockCacheTier);
    if (s.ok()) {
      t = GetTableReaderFromHandle(handle);
    }
  }
  if (s.ok()) {
-   TableReader* t = GetTableReaderFromHandle(handle);
    s = t->Get(options, k, arg, saver, mark_key_may_exist);
-   if (!file_meta.table_reader_handle) {
    if (handle != nullptr) {
      ReleaseHandle(handle);
    }
  } else if (options.read_tier && s.IsIncomplete()) {
@@ -164,15 +168,16 @@ Status TableCache::GetTableProperties(
    const FileMetaData& file_meta,
    std::shared_ptr<const TableProperties>* properties, bool no_io) {
  Status s;
- auto table_handle = file_meta.table_reader_handle;
  auto table_reader = file_meta.table_reader;
  // table already been pre-loaded?
- if (table_handle) {
-   auto table = GetTableReaderFromHandle(table_handle);
-   *properties = table->GetTableProperties();
  if (table_reader) {
    *properties = table_reader->GetTableProperties();
    return s;
  }
  bool table_io;
  Cache::Handle* table_handle = nullptr;
  s = FindTable(toptions, internal_comparator, file_meta.number,
                file_meta.file_size, &table_handle, &table_io, no_io);
  if (!s.ok()) {
@@ -190,20 +195,21 @@ bool TableCache::PrefixMayMatch(const ReadOptions& options,
                                const FileMetaData& file_meta,
                                const Slice& internal_prefix, bool* table_io) {
  bool may_match = true;
- auto table_handle = file_meta.table_reader_handle;
- if (table_handle == nullptr) {
  auto table_reader = file_meta.table_reader;
  Cache::Handle* table_handle = nullptr;
  if (table_reader == nullptr) {
    // Need to get table handle from file number
    Status s = FindTable(storage_options_, icomparator, file_meta.number,
                         file_meta.file_size, &table_handle, table_io);
    if (!s.ok()) {
      return may_match;
    }
    table_reader = GetTableReaderFromHandle(table_handle);
  }
- auto table = GetTableReaderFromHandle(table_handle);
- may_match = table->PrefixMayMatch(internal_prefix);
- if (file_meta.table_reader_handle == nullptr) {
  may_match = table_reader->PrefixMayMatch(internal_prefix);
  if (table_handle != nullptr) {
    // Need to release handle if it is generated from here.
    ReleaseHandle(table_handle);
  }
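All three functions above follow the same new pattern: use the pre-loaded TableReader carried by FileMetaData when present, and only acquire (and later release) a cache handle when it is not. An illustrative Java-flavored model of that pattern; every name below is a hypothetical stand-in for the C++ types in this diff, not part of it:

// Hypothetical model of the TableCache lookup pattern introduced above.
final class TableReader {
  String get(String key) { return "value-for-" + key; }
}

final class CacheHandle {
  final TableReader reader = new TableReader();
}

final class TableCacheSketch {
  CacheHandle find() { return new CacheHandle(); }           // stands in for FindTable
  TableReader fromHandle(CacheHandle h) { return h.reader; } // GetTableReaderFromHandle
  void release(CacheHandle h) { /* un-ref the cache entry */ }

  // Mirrors TableCache::Get after this change: prefer the pre-loaded
  // reader; only acquire (and later release) a handle when it is absent.
  String get(TableReader preloaded, String key) {
    TableReader reader = preloaded;
    CacheHandle handle = null;
    if (reader == null) {
      handle = find();
      reader = fromHandle(handle);
    }
    try {
      return reader.get(key);
    } finally {
      if (handle != null) {
        release(handle); // pre-loaded readers are owned elsewhere
      }
    }
  }

  public static void main(String[] args) {
    TableCacheSketch cache = new TableCacheSketch();
    System.out.println(cache.get(null, "k"));               // cache-lookup path
    System.out.println(cache.get(new TableReader(), "k"));  // pre-loaded path
  }
}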

--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -32,6 +32,8 @@ struct FileMetaData {
  // Needs to be disposed when refs becomes 0.
  Cache::Handle* table_reader_handle;
  // Table reader in table_reader_handle
  TableReader* table_reader;

  FileMetaData(uint64_t number, uint64_t file_size)
      : refs(0),
@@ -39,7 +41,8 @@ struct FileMetaData {
        number(number),
        file_size(file_size),
        being_compacted(false),
-       table_reader_handle(nullptr) {}
        table_reader_handle(nullptr),
        table_reader(nullptr) {}

  FileMetaData() : FileMetaData(0, 0) {}
};

--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -151,7 +151,7 @@
namespace {
struct EncodedFileMetaData {
  uint64_t number;     // file number
  uint64_t file_size;  // file size
- Cache::Handle* table_reader_handle;  // cached table reader's handler
  TableReader* table_reader;  // cached table reader
};
}  // namespace
@@ -199,7 +199,7 @@ class Version::LevelFileNumIterator : public Iterator {
    auto* file_meta = (*flist_)[index_];
    current_value_.number = file_meta->number;
    current_value_.file_size = file_meta->file_size;
-   current_value_.table_reader_handle = file_meta->table_reader_handle;
    current_value_.table_reader = file_meta->table_reader;
    return Slice(reinterpret_cast<const char*>(&current_value_),
                 sizeof(EncodedFileMetaData));
  }
@@ -231,7 +231,7 @@ static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
  const EncodedFileMetaData* encoded_meta =
      reinterpret_cast<const EncodedFileMetaData*>(file_value.data());
  FileMetaData meta(encoded_meta->number, encoded_meta->file_size);
- meta.table_reader_handle = encoded_meta->table_reader_handle;
  meta.table_reader = encoded_meta->table_reader;
  return cache->NewIterator(
      options.prefix ? options_copy : options, soptions, icomparator, meta,
      nullptr /* don't need reference to table*/, for_compaction);
@@ -257,7 +257,7 @@ bool Version::PrefixMayMatch(const ReadOptions& options,
      reinterpret_cast<const EncodedFileMetaData*>(
          level_iter->value().data());
  FileMetaData meta(encoded_meta->number, encoded_meta->file_size);
- meta.table_reader_handle = encoded_meta->table_reader_handle;
  meta.table_reader = encoded_meta->table_reader;
  may_match = cfd_->table_cache()->PrefixMayMatch(
      options, cfd_->internal_comparator(), meta, internal_prefix, nullptr);
}
@@ -483,6 +483,17 @@ bool BySmallestKey(FileMetaData* a, FileMetaData* b,
Version::Version(ColumnFamilyData* cfd, VersionSet* vset,
                 uint64_t version_number)
    : cfd_(cfd),
      internal_comparator_((cfd == nullptr) ? nullptr
                                            : &cfd->internal_comparator()),
      user_comparator_((cfd == nullptr)
                           ? nullptr
                           : internal_comparator_->user_comparator()),
      table_cache_((cfd == nullptr) ? nullptr : cfd->table_cache()),
      merge_operator_((cfd == nullptr) ? nullptr
                                       : cfd->options()->merge_operator.get()),
      info_log_((cfd == nullptr) ? nullptr : cfd->options()->info_log.get()),
      db_statistics_((cfd == nullptr) ? nullptr
                                      : cfd->options()->statistics.get()),
      vset_(vset),
      next_(this),
      prev_(this),
@@ -504,27 +515,22 @@ void Version::Get(const ReadOptions& options,
                  Status* status,
                  MergeContext* merge_context,
                  GetStats* stats,
-                 const Options& db_options,
                  bool* value_found) {
  Slice ikey = k.internal_key();
  Slice user_key = k.user_key();
- const Comparator* ucmp = cfd_->internal_comparator().user_comparator();
- auto merge_operator = db_options.merge_operator.get();
- auto logger = db_options.info_log.get();

  assert(status->ok() || status->IsMergeInProgress());

  Saver saver;
  saver.state = status->ok()? kNotFound : kMerge;
- saver.ucmp = ucmp;
  saver.ucmp = user_comparator_;
  saver.user_key = user_key;
  saver.value_found = value_found;
  saver.value = value;
- saver.merge_operator = merge_operator;
  saver.merge_operator = merge_operator_;
  saver.merge_context = merge_context;
- saver.logger = logger;
  saver.logger = info_log_;
  saver.didIO = false;
- saver.statistics = db_options.statistics.get();
  saver.statistics = db_statistics_;

  stats->seek_file = nullptr;
  stats->seek_file_level = -1;
@@ -555,7 +561,7 @@ void Version::Get(const ReadOptions& options,
    // On Level-n (n>=1), files are sorted.
    // Binary search to find earliest index whose largest key >= ikey.
    // We will also stop when the file no longer overlaps ikey
-   start_index = FindFile(cfd_->internal_comparator(), files_[level], ikey);
    start_index = FindFile(*internal_comparator_, files_[level], ikey);
  }

  // Traverse each relevant file to find the desired key
@@ -564,8 +570,10 @@
#endif
  for (uint32_t i = start_index; i < num_files; ++i) {
    FileMetaData* f = files[i];
-   if (ucmp->Compare(user_key, f->smallest.user_key()) < 0 ||
-       ucmp->Compare(user_key, f->largest.user_key()) > 0) {
    // Skip key range filtering for level 0 if there are few level 0 files.
    if ((level > 0 || num_files > 2) &&
        (user_comparator_->Compare(user_key, f->smallest.user_key()) < 0 ||
         user_comparator_->Compare(user_key, f->largest.user_key()) > 0)) {
      // Only process overlapping files.
      if (level > 0) {
        // If on Level-n (n>=1) then the files are sorted.
@@ -581,8 +589,8 @@
      // Sanity check to make sure that the files are correctly sorted
      if (prev_file) {
        if (level != 0) {
-         int comp_sign = cfd_->internal_comparator().Compare(
-             prev_file->largest, f->smallest);
          int comp_sign =
              internal_comparator_->Compare(prev_file->largest, f->smallest);
          assert(comp_sign < 0);
        } else {
          // level == 0, the current file cannot be newer than the previous one.
@@ -596,9 +604,8 @@
      prev_file = f;
#endif
      bool tableIO = false;
-     *status = cfd_->table_cache()->Get(options, cfd_->internal_comparator(),
-                                        *f, ikey, &saver, SaveValue, &tableIO,
-                                        MarkKeyMayExist);
      *status = table_cache_->Get(options, *internal_comparator_, *f, ikey,
                                  &saver, SaveValue, &tableIO, MarkKeyMayExist);
      // TODO: examine the behavior for corrupted key
      if (!status->ok()) {
        return;
@@ -643,12 +650,12 @@
  if (kMerge == saver.state) {
    // merge_operands are in saver and we hit the beginning of the key history
    // do a final merge of nullptr and operands;
-   if (merge_operator->FullMerge(user_key, nullptr,
-                                 saver.merge_context->GetOperands(),
-                                 value, logger)) {
    if (merge_operator_->FullMerge(user_key, nullptr,
                                   saver.merge_context->GetOperands(), value,
                                   info_log_)) {
      *status = Status::OK();
    } else {
-     RecordTick(db_options.statistics.get(), NUMBER_MERGE_FAILURES);
      RecordTick(db_statistics_, NUMBER_MERGE_FAILURES);
      *status = Status::Corruption("could not perform end-of-key merge for ",
                                   user_key);
    }
@@ -1458,6 +1465,12 @@ class VersionSet::Builder {
            base_->vset_->storage_options_, cfd_->internal_comparator(),
            file_meta->number, file_meta->file_size,
            &file_meta->table_reader_handle, &table_io, false);
        if (file_meta->table_reader_handle != nullptr) {
          // Load table_reader
          file_meta->table_reader =
              cfd_->table_cache()->GetTableReaderFromHandle(
                  file_meta->table_reader_handle);
        }
      }
    }
  }

--- a/db/version_set.h
+++ b/db/version_set.h
@@ -88,8 +88,7 @@ class Version {
    int seek_file_level;
  };
  void Get(const ReadOptions&, const LookupKey& key, std::string* val,
-          Status* status, MergeContext* merge_context,
-          GetStats* stats, const Options& db_option,
           Status* status, MergeContext* merge_context, GetStats* stats,
           bool* value_found = nullptr);

  // Adds "stats" into the current state. Returns true if a new
@@ -230,6 +229,12 @@ class Version {
  void UpdateFilesBySize();

  ColumnFamilyData* cfd_;  // ColumnFamilyData to which this Version belongs
  const InternalKeyComparator* internal_comparator_;
  const Comparator* user_comparator_;
  TableCache* table_cache_;
  const MergeOperator* merge_operator_;
  Logger* info_log_;
  Statistics* db_statistics_;
  VersionSet* vset_;   // VersionSet to which this Version belongs
  Version* next_;      // Next version in linked list
  Version* prev_;      // Previous version in linked list

--- a/java/Makefile
+++ b/java/Makefile
@@ -1,4 +1,8 @@
<<<<<<< HEAD
NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.Statistics
=======
NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions
>>>>>>> 1a8abe72768b2b5cea800aa390c28e5ace6a552e
NATIVE_INCLUDE = ./include
ROCKSDB_JAR = rocksdbjni.jar
@@ -21,7 +25,10 @@ sample: java
	@rm -rf /tmp/rocksdbjni_not_found

test: java
	javac org/rocksdb/test/*.java
	java -ea -Djava.library.path=.:../ -cp "$(ROCKSDB_JAR):.:./*" org.rocksdb.WriteBatchTest
	java -ea -Djava.library.path=.:../ -cp "$(ROCKSDB_JAR):.:./*" org.rocksdb.test.BackupableDBTest
	java -ea -Djava.library.path=.:../ -cp "$(ROCKSDB_JAR):.:./*" org.rocksdb.test.OptionsTest

db_bench: java
	javac org/rocksdb/benchmark/*.java

--- /dev/null
+++ b/java/org/rocksdb/BackupableDB.java
@@ -0,0 +1,84 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* A subclass of RocksDB which supports backup-related operations.
*
* @see BackupableDBOptions
*/
public class BackupableDB extends RocksDB {
/**
* Open a BackupableDB under the specified path.
* Note that the backup path should be set properly in the
* input BackupableDBOptions.
*
* @param opt options for db.
* @param bopt backup related options.
* @param db_path the path for storing data. The path for storing
*   backups should be specified in the BackupableDBOptions.
* @return reference to the opened BackupableDB.
*/
public static BackupableDB open(
Options opt, BackupableDBOptions bopt, String db_path)
throws RocksDBException {
// since BackupableDB c++ will handle the life cycle of
// the returned RocksDB of RocksDB.open(), here we store
// it as a BackupableDB member variable to avoid GC.
BackupableDB bdb = new BackupableDB(RocksDB.open(opt, db_path));
bdb.open(bdb.db_.nativeHandle_, bopt.nativeHandle_);
return bdb;
}
/**
* Captures the state of the database in the latest backup.
* Note that this function is not thread-safe.
*
* @param flushBeforeBackup if true, then all data will be flushed
* before creating backup.
*/
public void createNewBackup(boolean flushBeforeBackup) {
createNewBackup(nativeHandle_, flushBeforeBackup);
}
/**
* Close the BackupableDB instance and release resource.
*
* Internally, BackupableDB owns the rocksdb::DB pointer to its
* associated RocksDB. The release of that RocksDB pointer is
* handled in the destructor of the c++ rocksdb::BackupableDB and
* should be transparent to Java developers.
*/
@Override public synchronized void close() {
if (isOpened()) {
super.close0();
}
}
/**
* A protected constructor that will be used by the static factory
* method BackupableDB.open().
*/
protected BackupableDB(RocksDB db) {
super();
db_ = db;
}
@Override protected void finalize() {
close();
}
private boolean isOpened() {
return nativeHandle_ != 0;
}
protected native void open(long rocksDBHandle, long backupDBOptionsHandle);
protected native void createNewBackup(long handle, boolean flag);
private final RocksDB db_;
}

--- /dev/null
+++ b/java/org/rocksdb/BackupableDBOptions.java
@@ -0,0 +1,52 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* BackupableDBOptions to control the behavior of a backupable database.
* It will be used during the creation of a BackupableDB.
*
* Note that dispose() must be called before a BackupableDBOptions
* instance becomes out-of-scope to release the allocated memory in C++.
*/
public class BackupableDBOptions {
public BackupableDBOptions(String path) {
newBackupableDBOptions(path);
}
/**
* Returns the path to the BackupableDB directory.
*
* @return the path to the BackupableDB directory.
*/
public String backupDir() {
assert(isInitialized());
return backupDir(nativeHandle_);
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
public synchronized void dispose() {
if (isInitialized()) {
dispose(nativeHandle_);
}
}
@Override protected void finalize() {
dispose();
}
boolean isInitialized() {
return nativeHandle_ != 0;
}
private native void newBackupableDBOptions(String path);
private native String backupDir(long handle);
private native void dispose(long handle);
long nativeHandle_;
}
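Taken together, the two new classes above are used roughly as follows. A minimal sketch, assuming the JNI library is loadable and Options.setCreateIfMissing() from the pre-existing binding; both paths are illustrative:

// Sketch: open a BackupableDB, write a key, and take a backup.
import org.rocksdb.BackupableDB;
import org.rocksdb.BackupableDBOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDBException;

public class BackupSketch {
  public static void main(String[] args) throws RocksDBException {
    Options opt = new Options().setCreateIfMissing(true);
    // Backups live in their own directory, separate from the data dir.
    BackupableDBOptions bopt = new BackupableDBOptions("/tmp/rocksdb_backup");

    BackupableDB db = BackupableDB.open(opt, bopt, "/tmp/rocksdb_data");
    db.put("hello".getBytes(), "world".getBytes());

    // Flush memtables first so the backup captures everything written so far.
    db.createNewBackup(true);

    db.close();
    bopt.dispose();
    opt.dispose();
  }
}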

--- a/java/org/rocksdb/Options.java
+++ b/java/org/rocksdb/Options.java
@@ -173,32 +173,341 @@ public class Options {
return disableSeekCompaction(nativeHandle_);
}
- /*
- * Maximum number of concurrent background jobs, submitted to
- * the default LOW priority thread pool.
- * Default: 1
- *
- * @param maxBackgroundCompactions the maximum number of concurrent
- * background jobs.
- * @return the instance of the current Options.
/**
* Set the amount of cache in bytes that will be used by RocksDB.
* If cacheSize is non-positive, then cache will not be used.
*
* DEFAULT: 8M
*/
public Options setCacheSize(long cacheSize) {
cacheSize_ = cacheSize;
return this;
}
/**
* @return the amount of cache in bytes that will be used by RocksDB.
*/
public long cacheSize() {
return cacheSize_;
}
/**
* If true, an error will be thrown during RocksDB.open() if the
* database already exists.
*
* @return if true, an error is raised when the specified database
* already exists before open.
*/
public boolean errorIfExists() {
assert(isInitialized());
return errorIfExists(nativeHandle_);
}
private native boolean errorIfExists(long handle);
/**
* If true, an error will be thrown during RocksDB.open() if the
* database already exists.
* Default: false
*
* @param errorIfExists if true, an exception will be thrown
* during RocksDB.open() if the database already exists.
* @return the reference to the current option.
* @see RocksDB.open()
*/
- public Options setMaxBackgroundCompactions(int maxBackgroundCompactions) {
public Options setErrorIfExists(boolean errorIfExists) {
assert(isInitialized());
- setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
setErrorIfExists(nativeHandle_, errorIfExists);
return this;
}
private native void setErrorIfExists(long handle, boolean errorIfExists);
- /*
- * Returns maximum number of background concurrent jobs.
/**
* If true, the implementation will do aggressive checking of the
* data it is processing and will stop early if it detects any
* errors. This may have unforeseen ramifications: for example, a
* corruption of one DB entry may cause a large number of entries to
* become unreadable or for the entire DB to become unopenable.
* If any of the writes to the database fails (Put, Delete, Merge, Write),
* the database will switch to read-only mode and fail all other
* Write operations.
*
* @return a boolean indicating whether paranoid-check is on.
*/
public boolean paranoidChecks() {
assert(isInitialized());
return paranoidChecks(nativeHandle_);
}
private native boolean paranoidChecks(long handle);
/**
* If true, the implementation will do aggressive checking of the
* data it is processing and will stop early if it detects any
* errors. This may have unforeseen ramifications: for example, a
* corruption of one DB entry may cause a large number of entries to
* become unreadable or for the entire DB to become unopenable.
* If any of the writes to the database fails (Put, Delete, Merge, Write),
* the database will switch to read-only mode and fail all other
* Write operations.
* Default: true
*
* @param paranoidChecks a flag to indicate whether paranoid-check
* is on.
* @return the reference to the current option.
*/
public Options setParanoidChecks(boolean paranoidChecks) {
assert(isInitialized());
setParanoidChecks(nativeHandle_, paranoidChecks);
return this;
}
private native void setParanoidChecks(
long handle, boolean paranoidChecks);
/**
* Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based
* on target_file_size_base and target_file_size_multiplier for level-based
* compaction. For universal-style compaction, you can usually set it to -1.
*
* @return the maximum number of open files.
*/
public int maxOpenFiles() {
assert(isInitialized());
return maxOpenFiles(nativeHandle_);
}
private native int maxOpenFiles(long handle);
/**
* Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based
* on target_file_size_base and target_file_size_multiplier for level-based
* compaction. For universal-style compaction, you can usually set it to -1.
* Default: 5000
* *
- * @return maximum number of background concurrent jobs.
- * @see setMaxBackgroundCompactions
* @param maxOpenFiles the maximum number of open files.
* @return the reference to the current option.
*/
public Options setMaxOpenFiles(int maxOpenFiles) {
assert(isInitialized());
setMaxOpenFiles(nativeHandle_, maxOpenFiles);
return this;
}
private native void setMaxOpenFiles(long handle, int maxOpenFiles);
/**
* If true, then the contents of data files are not synced
* to stable storage. Their contents remain in the OS buffers till the
* OS decides to flush them. This option is good for bulk-loading
* of data. Once the bulk-loading is complete, please issue a
* sync to the OS to flush all dirty buffers to stable storage.
*
* @return if true, then data-sync is disabled.
*/
public boolean disableDataSync() {
assert(isInitialized());
return disableDataSync(nativeHandle_);
}
private native boolean disableDataSync(long handle);
/**
* If true, then the contents of data files are not synced
* to stable storage. Their contents remain in the OS buffers till the
* OS decides to flush them. This option is good for bulk-loading
* of data. Once the bulk-loading is complete, please issue a
* sync to the OS to flush all dirty buffers to stable storage.
* Default: false
*
* @param disableDataSync a boolean flag to specify whether to
* disable data sync.
* @return the reference to the current option.
*/
public Options setDisableDataSync(boolean disableDataSync) {
assert(isInitialized());
setDisableDataSync(nativeHandle_, disableDataSync);
return this;
}
private native void setDisableDataSync(long handle, boolean disableDataSync);
/**
* If true, then every store to stable storage will issue a fsync.
* If false, then every store to stable storage will issue a fdatasync.
* This parameter should be set to true while storing data to
* filesystem like ext3 that can lose files after a reboot.
*
* @return true if fsync is used.
*/
public boolean useFsync() {
assert(isInitialized());
return useFsync(nativeHandle_);
}
private native boolean useFsync(long handle);
/**
* If true, then every store to stable storage will issue a fsync.
* If false, then every store to stable storage will issue a fdatasync.
* This parameter should be set to true while storing data to
* filesystem like ext3 that can lose files after a reboot.
* Default: false
*
* @param useFsync a boolean flag to specify whether to use fsync
* @return the reference to the current option.
*/
public Options setUseFsync(boolean useFsync) {
assert(isInitialized());
setUseFsync(nativeHandle_, useFsync);
return this;
}
private native void setUseFsync(long handle, boolean useFsync);
/**
* The time interval in seconds between each two consecutive stats logs.
* This number controls how often a new scribe log about
* db deploy stats is written out.
* -1 indicates no logging at all.
*
* @return the time interval in seconds between each two consecutive
* stats logs.
*/
public int dbStatsLogInterval() {
assert(isInitialized());
return dbStatsLogInterval(nativeHandle_);
}
private native int dbStatsLogInterval(long handle);
/**
* The time interval in seconds between each two consecutive stats logs.
* This number controls how often a new scribe log about
* db deploy stats is written out.
* -1 indicates no logging at all.
* Default value is 1800 (half an hour).
*
* @param dbStatsLogInterval the time interval in seconds between each
* two consecutive stats logs.
* @return the reference to the current option.
*/
public Options setDbStatsLogInterval(int dbStatsLogInterval) {
assert(isInitialized());
setDbStatsLogInterval(nativeHandle_, dbStatsLogInterval);
return this;
}
private native void setDbStatsLogInterval(
long handle, int dbStatsLogInterval);
/**
* Returns the directory of info log.
*
* If it is empty, the log files will be in the same dir as data.
* If it is non empty, the log files will be in the specified dir,
* and the db data dir's absolute path will be used as the log file
* name's prefix.
*
* @return the path to the info log directory
*/
public String dbLogDir() {
assert(isInitialized());
return dbLogDir(nativeHandle_);
}
private native String dbLogDir(long handle);
/**
* This specifies the info LOG dir.
* If it is empty, the log files will be in the same dir as data.
* If it is non empty, the log files will be in the specified dir,
* and the db data dir's absolute path will be used as the log file
* name's prefix.
*
* @param dbLogDir the path to the info log directory
* @return the reference to the current option.
*/
public Options setDbLogDir(String dbLogDir) {
assert(isInitialized());
setDbLogDir(nativeHandle_, dbLogDir);
return this;
}
private native void setDbLogDir(long handle, String dbLogDir);
/**
* Returns the path to the write-ahead-logs (WAL) directory.
*
* If it is empty, the log files will be in the same dir as data,
* dbname is used as the data dir by default
* If it is non empty, the log files will be kept in the specified dir.
* When destroying the db,
* all log files in wal_dir and the dir itself are deleted
*
* @return the path to the write-ahead-logs (WAL) directory.
*/
public String walDir() {
assert(isInitialized());
return walDir(nativeHandle_);
}
private native String walDir(long handle);
/**
* This specifies the absolute dir path for write-ahead logs (WAL).
* If it is empty, the log files will be in the same dir as data,
* dbname is used as the data dir by default
* If it is non empty, the log files will be kept in the specified dir.
* When destroying the db,
* all log files in wal_dir and the dir itself are deleted
*
* @param walDir the path to the write-ahead-log directory.
* @return the reference to the current option.
*/
public Options setWalDir(String walDir) {
assert(isInitialized());
setWalDir(nativeHandle_, walDir);
return this;
}
private native void setWalDir(long handle, String walDir);
/**
* The periodicity when obsolete files get deleted. The default
* value is 6 hours. The files that get out of scope by compaction
* process will still get automatically deleted on every compaction,
* regardless of this setting
*
* @return the time interval in micros when obsolete files will be deleted.
*/
public long deleteObsoleteFilesPeriodMicros() {
assert(isInitialized());
return deleteObsoleteFilesPeriodMicros(nativeHandle_);
}
private native long deleteObsoleteFilesPeriodMicros(long handle);
/**
* The periodicity when obsolete files get deleted. The default
* value is 6 hours. The files that get out of scope by compaction
* process will still get automatically deleted on every compaction,
* regardless of this setting
*
* @param micros the time interval in micros
* @return the reference to the current option.
*/
public Options setDeleteObsoleteFilesPeriodMicros(long micros) {
assert(isInitialized());
setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros);
return this;
}
private native void setDeleteObsoleteFilesPeriodMicros(
long handle, long micros);
/**
* Returns the maximum number of concurrent background compaction jobs,
* submitted to the default LOW priority thread pool.
* When increasing this number, we may also want to consider increasing
* number of threads in LOW priority thread pool.
* Default: 1
*
* @return the maximum number of concurrent background compaction jobs.
* @see Env.setBackgroundThreads()
*/
public int maxBackgroundCompactions() {
assert(isInitialized());
return maxBackgroundCompactions(nativeHandle_);
}
private native int maxBackgroundCompactions(long handle);

/*
* Creates statistics object which collects metrics about database operations.
@@ -234,22 +543,594 @@
}

/**
- * Set the amount of cache in bytes that will be used by RocksDB.
- * If cacheSize is non-positive, then cache will not be used.
* Specifies the maximum number of concurrent background compaction jobs,
* submitted to the default LOW priority thread pool.
* If you're increasing this, also consider increasing number of threads in
* LOW priority thread pool. For more information, see
* Default: 1
*
- * DEFAULT: 8M
* @param maxBackgroundCompactions the maximum number of background
* compaction jobs.
* @return the reference to the current option.
*
* @see Env.setBackgroundThreads()
* @see maxBackgroundFlushes()
*/
- public Options setCacheSize(long cacheSize) {
public Options setMaxBackgroundCompactions(int maxBackgroundCompactions) {
- cacheSize_ = cacheSize;
assert(isInitialized());
setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
return this;
}
private native void setMaxBackgroundCompactions(
long handle, int maxBackgroundCompactions);

/**
- * @return the amount of cache in bytes that will be used by RocksDB.
* Returns the maximum number of concurrent background flush jobs.
* If you're increasing this, also consider increasing number of threads in
* HIGH priority thread pool. For more information, see
* Default: 1
*
* @return the maximum number of concurrent background flush jobs.
* @see Env.setBackgroundThreads()
*/
- public long cacheSize() {
public int maxBackgroundFlushes() {
- return cacheSize_;
assert(isInitialized());
return maxBackgroundFlushes(nativeHandle_);
}
private native int maxBackgroundFlushes(long handle);
/**
* Specifies the maximum number of concurrent background flush jobs.
* If you're increasing this, also consider increasing number of threads in
* HIGH priority thread pool. For more information, see
* Default: 1
*
* @param maxBackgroundFlushes the maximum number of concurrent background flush jobs.
* @return the reference to the current option.
*
* @see Env.setBackgroundThreads()
* @see maxBackgroundCompactions()
*/
public Options setMaxBackgroundFlushes(int maxBackgroundFlushes) {
assert(isInitialized());
setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes);
return this;
}
private native void setMaxBackgroundFlushes(
long handle, int maxBackgroundFlushes);
/**
* Returns the maximum size of an info log file. If the current log file
* is larger than this size, a new info log file will be created.
* If 0, all logs will be written to one log file.
*
* @return the maximum size of the info log file.
*/
public long maxLogFileSize() {
assert(isInitialized());
return maxLogFileSize(nativeHandle_);
}
private native long maxLogFileSize(long handle);
/**
* Specifies the maximum size of an info log file. If the current log file
* is larger than `max_log_file_size`, a new info log file will
* be created.
* If 0, all logs will be written to one log file.
*
* @param maxLogFileSize the maximum size of an info log file.
* @return the reference to the current option.
*/
public Options setMaxLogFileSize(long maxLogFileSize) {
assert(isInitialized());
setMaxLogFileSize(nativeHandle_, maxLogFileSize);
return this;
}
private native void setMaxLogFileSize(long handle, long maxLogFileSize);
/**
* Returns the time interval for the info log file to roll (in seconds).
* If specified with non-zero value, log file will be rolled
* if it has been active longer than `log_file_time_to_roll`.
* Default: 0 (disabled)
*
* @return the time interval in seconds.
*/
public long logFileTimeToRoll() {
assert(isInitialized());
return logFileTimeToRoll(nativeHandle_);
}
private native long logFileTimeToRoll(long handle);
/**
* Specifies the time interval for the info log file to roll (in seconds).
* If specified with non-zero value, log file will be rolled
* if it has been active longer than `log_file_time_to_roll`.
* Default: 0 (disabled)
*
* @param logFileTimeToRoll the time interval in seconds.
* @return the reference to the current option.
*/
public Options setLogFileTimeToRoll(long logFileTimeToRoll) {
assert(isInitialized());
setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll);
return this;
}
private native void setLogFileTimeToRoll(
long handle, long logFileTimeToRoll);
/**
* Returns the maximum number of info log files to be kept.
* Default: 1000
*
* @return the maximum number of info log files to be kept.
*/
public long keepLogFileNum() {
assert(isInitialized());
return keepLogFileNum(nativeHandle_);
}
private native long keepLogFileNum(long handle);
/**
* Specifies the maximum number of info log files to be kept.
* Default: 1000
*
* @param keepLogFileNum the maximum number of info log files to be kept.
* @return the reference to the current option.
*/
public Options setKeepLogFileNum(long keepLogFileNum) {
assert(isInitialized());
setKeepLogFileNum(nativeHandle_, keepLogFileNum);
return this;
}
private native void setKeepLogFileNum(long handle, long keepLogFileNum);
/**
* Manifest file is rolled over on reaching this limit.
* The older manifest file will be deleted.
* The default value is MAX_INT so that roll-over does not take place.
*
* @return the size limit of a manifest file.
*/
public long maxManifestFileSize() {
assert(isInitialized());
return maxManifestFileSize(nativeHandle_);
}
private native long maxManifestFileSize(long handle);
/**
* Manifest file is rolled over on reaching this limit.
* The older manifest file will be deleted.
* The default value is MAX_INT so that roll-over does not take place.
*
* @param maxManifestFileSize the size limit of a manifest file.
* @return the reference to the current option.
*/
public Options setMaxManifestFileSize(long maxManifestFileSize) {
assert(isInitialized());
setMaxManifestFileSize(nativeHandle_, maxManifestFileSize);
return this;
}
private native void setMaxManifestFileSize(
long handle, long maxManifestFileSize);
/**
* Number of shards used for table cache.
*
* @return the number of shards used for table cache.
*/
public int tableCacheNumshardbits() {
assert(isInitialized());
return tableCacheNumshardbits(nativeHandle_);
}
private native int tableCacheNumshardbits(long handle);
/**
* Number of shards used for table cache.
*
* @param tableCacheNumshardbits the number of shards
* @return the reference to the current option.
*/
public Options setTableCacheNumshardbits(int tableCacheNumshardbits) {
assert(isInitialized());
setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits);
return this;
}
private native void setTableCacheNumshardbits(
long handle, int tableCacheNumshardbits);
/**
* During data eviction of table's LRU cache, it would be inefficient
* to strictly follow LRU because this piece of memory will not really
* be released unless its refcount falls to zero. Instead, make two
* passes: the first pass will release items with refcount = 1,
* and if not enough space releases after scanning the number of
* elements specified by this parameter, we will remove items in LRU
* order.
*
* @return scan count limit
*/
public int tableCacheRemoveScanCountLimit() {
assert(isInitialized());
return tableCacheRemoveScanCountLimit(nativeHandle_);
}
private native int tableCacheRemoveScanCountLimit(long handle);
/**
* During data eviction of table's LRU cache, it would be inefficient
* to strictly follow LRU because this piece of memory will not really
* be released unless its refcount falls to zero. Instead, make two
* passes: the first pass will release items with refcount = 1,
* and if not enough space releases after scanning the number of
* elements specified by this parameter, we will remove items in LRU
* order.
*
* @param limit scan count limit
* @return the reference to the current option.
*/
public Options setTableCacheRemoveScanCountLimit(int limit) {
assert(isInitialized());
setTableCacheRemoveScanCountLimit(nativeHandle_, limit);
return this;
}
private native void setTableCacheRemoveScanCountLimit(
long handle, int limit);
/**
* The following two fields affect how archived logs will be deleted.
* 1. If both set to 0, logs will be deleted asap and will not get into
* the archive.
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* than WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted.
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_seconds / 2 and those that
* are older than WAL_ttl_seconds will be deleted.
* 4. If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.
*
* @return the wal-ttl seconds
*/
public long walTtlSeconds() {
assert(isInitialized());
return walTtlSeconds(nativeHandle_);
}
private native long walTtlSeconds(long handle);
/**
* The following two fields affect how archived logs will be deleted.
* 1. If both set to 0, logs will be deleted asap and will not get into
* the archive.
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* than WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted.
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_seconds / 2 and those that
* are older than WAL_ttl_seconds will be deleted.
* 4. If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.
*
* @param walTtlSeconds the ttl seconds
* @return the reference to the current option.
*/
public Options setWALTtlSeconds(long walTtlSeconds) {
assert(isInitialized());
setWALTtlSeconds(nativeHandle_, walTtlSeconds);
return this;
}
private native void setWALTtlSeconds(long handle, long walTtlSeconds);
/**
* Number of bytes to preallocate (via fallocate) the manifest
* files. Default is 4mb, which is reasonable to reduce random IO
* as well as prevent overallocation for mounts that preallocate
* large amounts of data (such as xfs's allocsize option).
*
* @return size in bytes.
*/
public long manifestPreallocationSize() {
assert(isInitialized());
return manifestPreallocationSize(nativeHandle_);
}
private native long manifestPreallocationSize(long handle);
/**
* Number of bytes to preallocate (via fallocate) the manifest
* files. Default is 4mb, which is reasonable to reduce random IO
* as well as prevent overallocation for mounts that preallocate
* large amounts of data (such as xfs's allocsize option).
*
* @param size the size in byte
* @return the reference to the current option.
*/
public Options setManifestPreallocationSize(long size) {
assert(isInitialized());
setManifestPreallocationSize(nativeHandle_, size);
return this;
}
private native void setManifestPreallocationSize(
long handle, long size);
/**
* Data being read from file storage may be buffered in the OS
* Default: true
*
* @return if true, then OS buffering is allowed.
*/
public boolean allowOsBuffer() {
assert(isInitialized());
return allowOsBuffer(nativeHandle_);
}
private native boolean allowOsBuffer(long handle);
/**
* Data being read from file storage may be buffered in the OS
* Default: true
*
* @param allowOsBuffer if true, then OS buffering is allowed.
* @return the reference to the current option.
*/
public Options setAllowOsBuffer(boolean allowOsBuffer) {
assert(isInitialized());
setAllowOsBuffer(nativeHandle_, allowOsBuffer);
return this;
}
private native void setAllowOsBuffer(
long handle, boolean allowOsBuffer);
/**
* Allow the OS to mmap file for reading sst tables.
* Default: false
*
* @return true if mmap reads are allowed.
*/
public boolean allowMmapReads() {
assert(isInitialized());
return allowMmapReads(nativeHandle_);
}
private native boolean allowMmapReads(long handle);
/**
* Allow the OS to mmap file for reading sst tables.
* Default: false
*
* @param allowMmapReads true if mmap reads are allowed.
* @return the reference to the current option.
*/
public Options setAllowMmapReads(boolean allowMmapReads) {
assert(isInitialized());
setAllowMmapReads(nativeHandle_, allowMmapReads);
return this;
}
private native void setAllowMmapReads(
long handle, boolean allowMmapReads);
/**
* Allow the OS to mmap file for writing. Default: false
*
* @return true if mmap writes are allowed.
*/
public boolean allowMmapWrites() {
assert(isInitialized());
return allowMmapWrites(nativeHandle_);
}
private native boolean allowMmapWrites(long handle);
/**
* Allow the OS to mmap file for writing. Default: false
*
* @param allowMmapWrites true if mmap writes are allowed.
* @return the reference to the current option.
*/
public Options setAllowMmapWrites(boolean allowMmapWrites) {
assert(isInitialized());
setAllowMmapWrites(nativeHandle_, allowMmapWrites);
return this;
}
private native void setAllowMmapWrites(
long handle, boolean allowMmapWrites);
/**
* Disable child process inherit open files. Default: true
*
* @return true if child process inheriting open files is disabled.
*/
public boolean isFdCloseOnExec() {
assert(isInitialized());
return isFdCloseOnExec(nativeHandle_);
}
private native boolean isFdCloseOnExec(long handle);
/**
* Disable child process inherit open files. Default: true
*
* @param isFdCloseOnExec true if child process inheriting open
* files is disabled.
* @return the reference to the current option.
*/
public Options setIsFdCloseOnExec(boolean isFdCloseOnExec) {
assert(isInitialized());
setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec);
return this;
}
private native void setIsFdCloseOnExec(
long handle, boolean isFdCloseOnExec);
/**
* Skip log corruption error on recovery (If client is ok with
* losing most recent changes)
* Default: false
*
* @return true if log corruption errors are skipped during recovery.
*/
public boolean skipLogErrorOnRecovery() {
assert(isInitialized());
return skipLogErrorOnRecovery(nativeHandle_);
}
private native boolean skipLogErrorOnRecovery(long handle);
/**
* Skip log corruption error on recovery (If client is ok with
* losing most recent changes)
* Default: false
*
* @param skip true if log corruption errors are skipped during recovery.
* @return the reference to the current option.
*/
public Options setSkipLogErrorOnRecovery(boolean skip) {
assert(isInitialized());
setSkipLogErrorOnRecovery(nativeHandle_, skip);
return this;
}
private native void setSkipLogErrorOnRecovery(
long handle, boolean skip);
/**
* If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
* Default: 3600 (1 hour)
*
* @return time interval in seconds.
*/
public int statsDumpPeriodSec() {
assert(isInitialized());
return statsDumpPeriodSec(nativeHandle_);
}
private native int statsDumpPeriodSec(long handle);
/**
* If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
* Default: 3600 (1 hour)
*
* @param statsDumpPeriodSec time interval in seconds.
* @return the reference to the current option.
*/
public Options setStatsDumpPeriodSec(int statsDumpPeriodSec) {
assert(isInitialized());
setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec);
return this;
}
private native void setStatsDumpPeriodSec(
long handle, int statsDumpPeriodSec);
/**
* If set true, will hint the underlying file system that the file
* access pattern is random, when a sst file is opened.
* Default: true
*
* @return true if hinting random access is on.
*/
public boolean adviseRandomOnOpen() {
return adviseRandomOnOpen(nativeHandle_);
}
private native boolean adviseRandomOnOpen(long handle);
/**
* If set true, will hint the underlying file system that the file
* access pattern is random, when a sst file is opened.
* Default: true
*
* @param adviseRandomOnOpen true if hinting random access is on.
* @return the reference to the current option.
*/
public Options setAdviseRandomOnOpen(boolean adviseRandomOnOpen) {
assert(isInitialized());
setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen);
return this;
}
private native void setAdviseRandomOnOpen(
long handle, boolean adviseRandomOnOpen);
/**
* Use adaptive mutex, which spins in the user space before resorting
* to kernel. This could reduce context switch when the mutex is not
* heavily contended. However, if the mutex is hot, we could end up
* wasting spin time.
* Default: false
*
* @return true if adaptive mutex is used.
*/
public boolean useAdaptiveMutex() {
assert(isInitialized());
return useAdaptiveMutex(nativeHandle_);
}
private native boolean useAdaptiveMutex(long handle);
/**
* Use adaptive mutex, which spins in the user space before resorting
* to kernel. This could reduce context switch when the mutex is not
* heavily contended. However, if the mutex is hot, we could end up
* wasting spin time.
* Default: false
*
* @param useAdaptiveMutex true if adaptive mutex is used.
* @return the reference to the current option.
*/
public Options setUseAdaptiveMutex(boolean useAdaptiveMutex) {
assert(isInitialized());
setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
return this;
}
private native void setUseAdaptiveMutex(
long handle, boolean useAdaptiveMutex);
/**
* Allows OS to incrementally sync files to disk while they are being
* written, asynchronously, in the background.
* Issue one request for every bytes_per_sync written. 0 turns it off.
* Default: 0
*
* @return size in bytes
*/
public long bytesPerSync() {
return bytesPerSync(nativeHandle_);
}
private native long bytesPerSync(long handle);
/**
* Allows OS to incrementally sync files to disk while they are being
* written, asynchronously, in the background.
* Issue one request for every bytes_per_sync written. 0 turns it off.
* Default: 0
*
* @param bytesPerSync size in bytes
* @return the reference to the current option.
*/
public Options setBytesPerSync(long bytesPerSync) {
assert(isInitialized());
setBytesPerSync(nativeHandle_, bytesPerSync);
return this;
}
private native void setBytesPerSync(
long handle, long bytesPerSync);
/**
* Allow RocksDB to use thread local storage to optimize performance.
* Default: true
*
* @return true if thread-local storage is allowed
*/
public boolean allowThreadLocal() {
assert(isInitialized());
return allowThreadLocal(nativeHandle_);
}
private native boolean allowThreadLocal(long handle);
/**
* Allow RocksDB to use thread local storage to optimize performance.
* Default: true
*
* @param allowThreadLocal true if thread-local storage is allowed.
* @return the reference to the current option.
*/
public Options setAllowThreadLocal(boolean allowThreadLocal) {
assert(isInitialized());
setAllowThreadLocal(nativeHandle_, allowThreadLocal);
return this;
}
private native void setAllowThreadLocal(
long handle, boolean allowThreadLocal);
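Every setter above returns this, so the new knobs can be chained fluently. A minimal sketch of how an application might exercise them; the class name and values here are illustrative, not part of this commit, and the rocksdbjni native library must be loadable:

import org.rocksdb.Options;

public class TuningExample {
  static {
    System.loadLibrary("rocksdbjni");  // must be on java.library.path
  }

  public static void main(String[] args) {
    Options opt = new Options();
    try {
      // Each setter returns the Options instance, so calls chain.
      opt.setAdviseRandomOnOpen(true)
         .setUseAdaptiveMutex(false)
         .setBytesPerSync(1024 * 1024)   // one sync request per 1MB written
         .setAllowThreadLocal(true);
      assert opt.bytesPerSync() == 1024 * 1024;
    } finally {
      opt.dispose();  // release the native rocksdb::Options
    }
  }
}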
/**
 * Release the memory allocated for the current instance
@@ -261,6 +1142,10 @@ public class Options {
  }
}

@Override protected void finalize() {
  dispose();
}

private boolean isInitialized() {
  return (nativeHandle_ != 0);
}
@@ -279,11 +1164,14 @@ public class Options {
private native void setDisableSeekCompaction(
    long handle, boolean disableSeekCompaction);
private native boolean disableSeekCompaction(long handle);
private native void setMaxBackgroundCompactions(
    long handle, int maxBackgroundCompactions);
private native int maxBackgroundCompactions(long handle);
private native void createStatistics(long optHandle);
private native long statisticsPtr(long optHandle);

long nativeHandle_;
long cacheSize_;
@@ -144,33 +144,33 @@ public class RocksDB {
/**
 * Private constructor.
 */
-private RocksDB() {
protected RocksDB() {
  nativeHandle_ = 0;
}

// native methods
-private native void open(
protected native void open(
    long optionsHandle, long cacheSize, String path) throws RocksDBException;
-private native void put(
protected native void put(
    long handle, byte[] key, int keyLen,
    byte[] value, int valueLen) throws RocksDBException;
-private native void put(
protected native void put(
    long handle, long writeOptHandle,
    byte[] key, int keyLen,
    byte[] value, int valueLen) throws RocksDBException;
-private native void write(
protected native void write(
    long writeOptHandle, long batchHandle) throws RocksDBException;
-private native int get(
protected native int get(
    long handle, byte[] key, int keyLen,
    byte[] value, int valueLen) throws RocksDBException;
-private native byte[] get(
protected native byte[] get(
    long handle, byte[] key, int keyLen) throws RocksDBException;
-private native void remove(
protected native void remove(
    long handle, byte[] key, int keyLen) throws RocksDBException;
-private native void remove(
protected native void remove(
    long handle, long writeOptHandle,
    byte[] key, int keyLen) throws RocksDBException;
-private native void close0();
protected native void close0();
-private long nativeHandle_;
protected long nativeHandle_;
}
@@ -0,0 +1,41 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb.test;
import org.rocksdb.*;
public class BackupableDBTest {
static final String db_path = "/tmp/backupablejni_db";
static final String backup_path = "/tmp/backupablejni_db_backup";
static {
System.loadLibrary("rocksdbjni");
}
public static void main(String[] args) {
Options opt = new Options();
opt.setCreateIfMissing(true);
BackupableDBOptions bopt = new BackupableDBOptions(backup_path);
BackupableDB bdb = null;
try {
bdb = BackupableDB.open(opt, bopt, db_path);
bdb.put("hello".getBytes(), "BackupableDB".getBytes());
bdb.createNewBackup(true);
byte[] value = bdb.get("hello".getBytes());
assert(new String(value).equals("BackupableDB"));
} catch (RocksDBException e) {
System.err.format("[ERROR]: %s%n", e);
e.printStackTrace();
} finally {
opt.dispose();
bopt.dispose();
if (bdb != null) {
bdb.close();
}
}
}
}

@@ -0,0 +1,201 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb.test;
import java.util.Random;
import org.rocksdb.Options;
public class OptionsTest {
static {
System.loadLibrary("rocksdbjni");
}
public static void main(String[] args) {
Options opt = new Options();
Random rand = new Random();
{ // CreateIfMissing test
boolean boolValue = rand.nextBoolean();
opt.setCreateIfMissing(boolValue);
assert(opt.createIfMissing() == boolValue);
}
{ // ErrorIfExists test
boolean boolValue = rand.nextBoolean();
opt.setErrorIfExists(boolValue);
assert(opt.errorIfExists() == boolValue);
}
{ // ParanoidChecks test
boolean boolValue = rand.nextBoolean();
opt.setParanoidChecks(boolValue);
assert(opt.paranoidChecks() == boolValue);
}
{ // MaxOpenFiles test
int intValue = rand.nextInt();
opt.setMaxOpenFiles(intValue);
assert(opt.maxOpenFiles() == intValue);
}
{ // DisableDataSync test
boolean boolValue = rand.nextBoolean();
opt.setDisableDataSync(boolValue);
assert(opt.disableDataSync() == boolValue);
}
{ // UseFsync test
boolean boolValue = rand.nextBoolean();
opt.setUseFsync(boolValue);
assert(opt.useFsync() == boolValue);
}
{ // DbStatsLogInterval test
int intValue = rand.nextInt();
opt.setDbStatsLogInterval(intValue);
assert(opt.dbStatsLogInterval() == intValue);
}
{ // DbLogDir test
String str = "path/to/DbLogDir";
opt.setDbLogDir(str);
assert(opt.dbLogDir().equals(str));
}
{ // WalDir test
String str = "path/to/WalDir";
opt.setWalDir(str);
assert(opt.walDir().equals(str));
}
{ // DeleteObsoleteFilesPeriodMicros test
long longValue = rand.nextLong();
opt.setDeleteObsoleteFilesPeriodMicros(longValue);
assert(opt.deleteObsoleteFilesPeriodMicros() == longValue);
}
{ // MaxBackgroundCompactions test
int intValue = rand.nextInt();
opt.setMaxBackgroundCompactions(intValue);
assert(opt.maxBackgroundCompactions() == intValue);
}
{ // MaxBackgroundFlushes test
int intValue = rand.nextInt();
opt.setMaxBackgroundFlushes(intValue);
assert(opt.maxBackgroundFlushes() == intValue);
}
{ // MaxLogFileSize test
long longValue = rand.nextLong();
opt.setMaxLogFileSize(longValue);
assert(opt.maxLogFileSize() == longValue);
}
{ // LogFileTimeToRoll test
long longValue = rand.nextLong();
opt.setLogFileTimeToRoll(longValue);
assert(opt.logFileTimeToRoll() == longValue);
}
{ // KeepLogFileNum test
long longValue = rand.nextLong();
opt.setKeepLogFileNum(longValue);
assert(opt.keepLogFileNum() == longValue);
}
{ // MaxManifestFileSize test
long longValue = rand.nextLong();
opt.setMaxManifestFileSize(longValue);
assert(opt.maxManifestFileSize() == longValue);
}
{ // TableCacheNumshardbits test
int intValue = rand.nextInt();
opt.setTableCacheNumshardbits(intValue);
assert(opt.tableCacheNumshardbits() == intValue);
}
{ // TableCacheRemoveScanCountLimit test
int intValue = rand.nextInt();
opt.setTableCacheRemoveScanCountLimit(intValue);
assert(opt.tableCacheRemoveScanCountLimit() == intValue);
}
{ // WALTtlSeconds test
long longValue = rand.nextLong();
opt.setWALTtlSeconds(longValue);
assert(opt.walTtlSeconds() == longValue);
}
{ // ManifestPreallocationSize test
long longValue = rand.nextLong();
opt.setManifestPreallocationSize(longValue);
assert(opt.manifestPreallocationSize() == longValue);
}
{ // AllowOsBuffer test
boolean boolValue = rand.nextBoolean();
opt.setAllowOsBuffer(boolValue);
assert(opt.allowOsBuffer() == boolValue);
}
{ // AllowMmapReads test
boolean boolValue = rand.nextBoolean();
opt.setAllowMmapReads(boolValue);
assert(opt.allowMmapReads() == boolValue);
}
{ // AllowMmapWrites test
boolean boolValue = rand.nextBoolean();
opt.setAllowMmapWrites(boolValue);
assert(opt.allowMmapWrites() == boolValue);
}
{ // IsFdCloseOnExec test
boolean boolValue = rand.nextBoolean();
opt.setIsFdCloseOnExec(boolValue);
assert(opt.isFdCloseOnExec() == boolValue);
}
{ // SkipLogErrorOnRecovery test
boolean boolValue = rand.nextBoolean();
opt.setSkipLogErrorOnRecovery(boolValue);
assert(opt.skipLogErrorOnRecovery() == boolValue);
}
{ // StatsDumpPeriodSec test
int intValue = rand.nextInt();
opt.setStatsDumpPeriodSec(intValue);
assert(opt.statsDumpPeriodSec() == intValue);
}
{ // AdviseRandomOnOpen test
boolean boolValue = rand.nextBoolean();
opt.setAdviseRandomOnOpen(boolValue);
assert(opt.adviseRandomOnOpen() == boolValue);
}
{ // UseAdaptiveMutex test
boolean boolValue = rand.nextBoolean();
opt.setUseAdaptiveMutex(boolValue);
assert(opt.useAdaptiveMutex() == boolValue);
}
{ // BytesPerSync test
long longValue = rand.nextLong();
opt.setBytesPerSync(longValue);
assert(opt.bytesPerSync() == longValue);
}
{ // AllowThreadLocal test
boolean boolValue = rand.nextBoolean();
opt.setAllowThreadLocal(boolValue);
assert(opt.allowThreadLocal() == boolValue);
}
opt.dispose();
System.out.println("Passed OptionsTest");
}
}
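Both test drivers above validate with assert, which the JVM silently ignores unless launched with -ea. A hedged sketch of an equivalent check that fails regardless of JVM flags; the class name and value are illustrative:

import org.rocksdb.Options;

public class OptionsSmokeTest {
  static {
    System.loadLibrary("rocksdbjni");
  }

  public static void main(String[] args) {
    Options opt = new Options();
    opt.setMaxOpenFiles(1234);
    if (opt.maxOpenFiles() != 1234) {  // round-trips through the native side
      throw new IllegalStateException("maxOpenFiles did not round-trip");
    }
    opt.dispose();
    System.out.println("Passed OptionsSmokeTest");
  }
}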
@@ -0,0 +1,85 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ and enables
// calling c++ rocksdb::DB methods from Java side.
#include <stdio.h>
#include <stdlib.h>
#include <jni.h>
#include <string>
#include "include/org_rocksdb_BackupableDB.h"
#include "include/org_rocksdb_BackupableDBOptions.h"
#include "rocksjni/portal.h"
#include "utilities/backupable_db.h"
/*
* Class: org_rocksdb_BackupableDB
* Method: open
* Signature: (JJ)V
*/
void Java_org_rocksdb_BackupableDB_open(
JNIEnv* env, jobject jbdb, jlong jdb_handle, jlong jopt_handle) {
auto db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto opt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jopt_handle);
auto bdb = new rocksdb::BackupableDB(db, *opt);
// as BackupableDB extends RocksDB on the java side, we can reuse
// the RocksDB portal here.
rocksdb::RocksDBJni::setHandle(env, jbdb, bdb);
}
/*
* Class: org_rocksdb_BackupableDB
* Method: createNewBackup
* Signature: (JZ)V
*/
void Java_org_rocksdb_BackupableDB_createNewBackup(
JNIEnv* env, jobject jbdb, jlong jhandle, jboolean jflag) {
reinterpret_cast<rocksdb::BackupableDB*>(jhandle)->CreateNewBackup(jflag);
}
///////////////////////////////////////////////////////////////////////////
// BackupDBOptions
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: newBackupableDBOptions
* Signature: (Ljava/lang/String;)V
*/
void Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
JNIEnv* env, jobject jobj, jstring jpath) {
const char* cpath = env->GetStringUTFChars(jpath, 0);
auto bopt = new rocksdb::BackupableDBOptions(cpath);
env->ReleaseStringUTFChars(jpath, cpath);
rocksdb::BackupableDBOptionsJni::setHandle(env, jobj, bopt);
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: backupDir
* Signature: (J)Ljava/lang/String;
*/
jstring Java_org_rocksdb_BackupableDBOptions_backupDir(
    JNIEnv* env, jobject jopt, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return env->NewStringUTF(bopt->backup_dir.c_str());
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: dispose
* Signature: (J)V
*/
void Java_org_rocksdb_BackupableDBOptions_dispose(
JNIEnv* env, jobject jopt, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
assert(bopt);
delete bopt;
rocksdb::BackupableDBOptionsJni::setHandle(env, jopt, nullptr);
}
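The JNI functions above assume a particular Java-side shape: BackupableDB extends RocksDB, so RocksDBJni::setHandle() can store the rocksdb::BackupableDB* in the inherited nativeHandle_ field. The commit's actual BackupableDB.java is not reproduced in this view; the sketch below is reconstructed from the native signatures above, so the method bodies are assumptions:

package org.rocksdb;

// Sketch only: shape inferred from Java_org_rocksdb_BackupableDB_open
// and Java_org_rocksdb_BackupableDB_createNewBackup above.
public class BackupableDB extends RocksDB {
  public static BackupableDB open(Options opt, BackupableDBOptions bopt,
      String db_path) throws RocksDBException {
    RocksDB db = RocksDB.open(opt, db_path);        // open the plain DB first
    BackupableDB bdb = new BackupableDB();
    bdb.open(db.nativeHandle_, bopt.nativeHandle_); // wrap it; resets nativeHandle_
    return bdb;
  }

  public void createNewBackup(boolean flushBeforeBackup) {
    createNewBackup(nativeHandle_, flushBeforeBackup);
  }

  protected BackupableDB() {
    super();
  }

  protected native void open(long rocksDBHandle, long backupDBOptionsHandle);
  protected native void createNewBackup(long handle, boolean flag);
}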
@@ -170,14 +170,197 @@ jboolean Java_org_rocksdb_Options_disableSeekCompaction(
/*
 * Class: org_rocksdb_Options
- * Method: setMaxBackgroundCompactions
 * Method: errorIfExists
 * Signature: (J)Z
 */
jboolean Java_org_rocksdb_Options_errorIfExists(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->error_if_exists;
}
/*
* Class: org_rocksdb_Options
* Method: setErrorIfExists
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setErrorIfExists(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean error_if_exists) {
reinterpret_cast<rocksdb::Options*>(jhandle)->error_if_exists =
static_cast<bool>(error_if_exists);
}
/*
* Class: org_rocksdb_Options
* Method: paranoidChecks
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_paranoidChecks(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_checks;
}
/*
* Class: org_rocksdb_Options
* Method: setParanoidChecks
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setParanoidChecks(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean paranoid_checks) {
reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_checks =
static_cast<bool>(paranoid_checks);
}
/*
* Class: org_rocksdb_Options
* Method: maxOpenFiles
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_maxOpenFiles(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_open_files;
}
/*
* Class: org_rocksdb_Options
* Method: setMaxOpenFiles
 * Signature: (JI)V
 */
void Java_org_rocksdb_Options_setMaxOpenFiles(
    JNIEnv* env, jobject jobj, jlong jhandle, jint max_open_files) {
  reinterpret_cast<rocksdb::Options*>(jhandle)->max_open_files =
      static_cast<int>(max_open_files);
}
-void Java_org_rocksdb_Options_setMaxBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jmax_background_compactions) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_compactions =
-      jmax_background_compactions;
-}
/*
* Class: org_rocksdb_Options
* Method: disableDataSync
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_disableDataSync(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->disableDataSync;
}
/*
* Class: org_rocksdb_Options
* Method: setDisableDataSync
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setDisableDataSync(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean disableDataSync) {
reinterpret_cast<rocksdb::Options*>(jhandle)->disableDataSync =
static_cast<bool>(disableDataSync);
}
/*
* Class: org_rocksdb_Options
* Method: useFsync
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_useFsync(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->use_fsync;
}
/*
* Class: org_rocksdb_Options
* Method: setUseFsync
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setUseFsync(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_fsync) {
reinterpret_cast<rocksdb::Options*>(jhandle)->use_fsync =
static_cast<bool>(use_fsync);
}
/*
* Class: org_rocksdb_Options
* Method: dbStatsLogInterval
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_dbStatsLogInterval(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->db_stats_log_interval;
}
/*
* Class: org_rocksdb_Options
* Method: setDbStatsLogInterval
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setDbStatsLogInterval(
JNIEnv* env, jobject jobj, jlong jhandle, jint db_stats_log_interval) {
reinterpret_cast<rocksdb::Options*>(jhandle)->db_stats_log_interval =
static_cast<int>(db_stats_log_interval);
}
/*
* Class: org_rocksdb_Options
* Method: dbLogDir
* Signature: (J)Ljava/lang/String
*/
jstring Java_org_rocksdb_Options_dbLogDir(
JNIEnv* env, jobject jobj, jlong jhandle) {
return env->NewStringUTF(
reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.c_str());
}
/*
* Class: org_rocksdb_Options
* Method: setDbLogDir
* Signature: (JLjava/lang/String)V
*/
void Java_org_rocksdb_Options_setDbLogDir(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) {
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, 0);
reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.assign(log_dir);
env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
}
/*
* Class: org_rocksdb_Options
* Method: walDir
* Signature: (J)Ljava/lang/String
*/
jstring Java_org_rocksdb_Options_walDir(
JNIEnv* env, jobject jobj, jlong jhandle) {
return env->NewStringUTF(
reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.c_str());
}
/*
* Class: org_rocksdb_Options
* Method: setWalDir
* Signature: (JLjava/lang/String)V
*/
void Java_org_rocksdb_Options_setWalDir(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) {
const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0);
reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.assign(wal_dir);
env->ReleaseStringUTFChars(jwal_dir, wal_dir);
}
/*
* Class: org_rocksdb_Options
* Method: deleteObsoleteFilesPeriodMicros
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->delete_obsolete_files_period_micros;
}
/*
* Class: org_rocksdb_Options
* Method: setDeleteObsoleteFilesPeriodMicros
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros(
JNIEnv* env, jobject jobj, jlong jhandle, jlong micros) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->delete_obsolete_files_period_micros =
static_cast<int64_t>(micros);
}
/*
@@ -187,10 +370,422 @@ void Java_org_rocksdb_Options_setMaxBackgroundCompactions(
 */
jint Java_org_rocksdb_Options_maxBackgroundCompactions(
    JNIEnv* env, jobject jobj, jlong jhandle) {
  return reinterpret_cast<rocksdb::Options*>(
      jhandle)->max_background_compactions;
}
/*
* Class: org_rocksdb_Options
* Method: setMaxBackgroundCompactions
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMaxBackgroundCompactions(
JNIEnv* env, jobject jobj, jlong jhandle, jint max) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->max_background_compactions = static_cast<int>(max);
}
/*
* Class: org_rocksdb_Options
* Method: maxBackgroundFlushes
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_maxBackgroundFlushes(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_flushes;
}
/*
* Class: org_rocksdb_Options
* Method: setMaxBackgroundFlushes
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMaxBackgroundFlushes(
JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_flushes) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_flushes =
static_cast<int>(max_background_flushes);
}
/*
* Class: org_rocksdb_Options
* Method: maxLogFileSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_maxLogFileSize(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size;
}
/*
* Class: org_rocksdb_Options
* Method: setMaxLogFileSize
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMaxLogFileSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong max_log_file_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size =
static_cast<size_t>(max_log_file_size);
}
/*
* Class: org_rocksdb_Options
* Method: logFileTimeToRoll
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_logFileTimeToRoll(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll;
}
/*
* Class: org_rocksdb_Options
* Method: setLogFileTimeToRoll
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setLogFileTimeToRoll(
JNIEnv* env, jobject jobj, jlong jhandle, jlong log_file_time_to_roll) {
reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll =
static_cast<size_t>(log_file_time_to_roll);
}
/*
* Class: org_rocksdb_Options
* Method: keepLogFileNum
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_keepLogFileNum(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num;
}
/*
* Class: org_rocksdb_Options
* Method: setKeepLogFileNum
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setKeepLogFileNum(
JNIEnv* env, jobject jobj, jlong jhandle, jlong keep_log_file_num) {
reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num =
static_cast<size_t>(keep_log_file_num);
}
/*
* Class: org_rocksdb_Options
* Method: maxManifestFileSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_maxManifestFileSize(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size;
}
/*
* Class: org_rocksdb_Options
* Method: setMaxManifestFileSize
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMaxManifestFileSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong max_manifest_file_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size =
static_cast<int64_t>(max_manifest_file_size);
}
/*
* Class: org_rocksdb_Options
* Method: tableCacheNumshardbits
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_tableCacheNumshardbits(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits;
}
/*
* Class: org_rocksdb_Options
* Method: setTableCacheNumshardbits
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setTableCacheNumshardbits(
JNIEnv* env, jobject jobj, jlong jhandle, jint table_cache_numshardbits) {
reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits =
static_cast<int>(table_cache_numshardbits);
}
/*
* Class: org_rocksdb_Options
* Method: tableCacheRemoveScanCountLimit
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_tableCacheRemoveScanCountLimit(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->table_cache_remove_scan_count_limit;
}
/*
* Class: org_rocksdb_Options
* Method: setTableCacheRemoveScanCountLimit
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setTableCacheRemoveScanCountLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jint limit) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->table_cache_remove_scan_count_limit = static_cast<int>(limit);
}
/*
* Class: org_rocksdb_Options
* Method: walTtlSeconds
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_walTtlSeconds(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_ttl_seconds;
}
/*
* Class: org_rocksdb_Options
* Method: setWALTtlSeconds
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setWALTtlSeconds(
JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_ttl_seconds) {
reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_ttl_seconds =
static_cast<int64_t>(WAL_ttl_seconds);
}
/*
* Class: org_rocksdb_Options
* Method: manifestPreallocationSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_manifestPreallocationSize(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->manifest_preallocation_size;
}
/*
* Class: org_rocksdb_Options
* Method: setManifestPreallocationSize
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setManifestPreallocationSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong preallocation_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->manifest_preallocation_size =
static_cast<size_t>(preallocation_size);
}
/*
* Class: org_rocksdb_Options
* Method: allowOsBuffer
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_allowOsBuffer(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_os_buffer;
}
/*
* Class: org_rocksdb_Options
* Method: setAllowOsBuffer
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAllowOsBuffer(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_os_buffer) {
reinterpret_cast<rocksdb::Options*>(jhandle)->allow_os_buffer =
static_cast<bool>(allow_os_buffer);
}
/*
* Class: org_rocksdb_Options
* Method: allowMmapReads
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_allowMmapReads(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_reads;
}
/*
* Class: org_rocksdb_Options
* Method: setAllowMmapReads
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAllowMmapReads(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_reads) {
reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_reads =
static_cast<bool>(allow_mmap_reads);
}
/*
* Class: org_rocksdb_Options
* Method: allowMmapWrites
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_allowMmapWrites(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_writes;
}
/*
* Class: org_rocksdb_Options
* Method: setAllowMmapWrites
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAllowMmapWrites(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_writes) {
reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_writes =
static_cast<bool>(allow_mmap_writes);
}
/*
* Class: org_rocksdb_Options
* Method: isFdCloseOnExec
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_isFdCloseOnExec(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->is_fd_close_on_exec;
}
/*
* Class: org_rocksdb_Options
* Method: setIsFdCloseOnExec
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setIsFdCloseOnExec(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean is_fd_close_on_exec) {
reinterpret_cast<rocksdb::Options*>(jhandle)->is_fd_close_on_exec =
static_cast<bool>(is_fd_close_on_exec);
}
/*
* Class: org_rocksdb_Options
* Method: skipLogErrorOnRecovery
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_skipLogErrorOnRecovery(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->skip_log_error_on_recovery;
}
/*
* Class: org_rocksdb_Options
* Method: setSkipLogErrorOnRecovery
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setSkipLogErrorOnRecovery(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean skip) {
reinterpret_cast<rocksdb::Options*>(jhandle)->skip_log_error_on_recovery =
static_cast<bool>(skip);
}
/*
* Class: org_rocksdb_Options
* Method: statsDumpPeriodSec
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_statsDumpPeriodSec(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->stats_dump_period_sec;
}
/*
* Class: org_rocksdb_Options
* Method: setStatsDumpPeriodSec
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setStatsDumpPeriodSec(
JNIEnv* env, jobject jobj, jlong jhandle, jint stats_dump_period_sec) {
reinterpret_cast<rocksdb::Options*>(jhandle)->stats_dump_period_sec =
static_cast<int>(stats_dump_period_sec);
}
/*
* Class: org_rocksdb_Options
* Method: adviseRandomOnOpen
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_adviseRandomOnOpen(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->advise_random_on_open;
}
/*
* Class: org_rocksdb_Options
* Method: setAdviseRandomOnOpen
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAdviseRandomOnOpen(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean advise_random_on_open) {
reinterpret_cast<rocksdb::Options*>(jhandle)->advise_random_on_open =
static_cast<bool>(advise_random_on_open);
}
/*
* Class: org_rocksdb_Options
* Method: useAdaptiveMutex
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_useAdaptiveMutex(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->use_adaptive_mutex;
}
/*
* Class: org_rocksdb_Options
* Method: setUseAdaptiveMutex
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setUseAdaptiveMutex(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_adaptive_mutex) {
reinterpret_cast<rocksdb::Options*>(jhandle)->use_adaptive_mutex =
static_cast<bool>(use_adaptive_mutex);
}
/*
* Class: org_rocksdb_Options
* Method: bytesPerSync
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_bytesPerSync(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->bytes_per_sync;
}
/*
* Class: org_rocksdb_Options
* Method: setBytesPerSync
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setBytesPerSync(
JNIEnv* env, jobject jobj, jlong jhandle, jlong bytes_per_sync) {
reinterpret_cast<rocksdb::Options*>(jhandle)->bytes_per_sync =
static_cast<int64_t>(bytes_per_sync);
}
/*
* Class: org_rocksdb_Options
* Method: allowThreadLocal
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_allowThreadLocal(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_thread_local;
}
/*
* Class: org_rocksdb_Options
* Method: setAllowThreadLocal
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAllowThreadLocal(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_thread_local) {
reinterpret_cast<rocksdb::Options*>(jhandle)->allow_thread_local =
static_cast<bool>(allow_thread_local);
}
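Every getter/setter pair above follows one template: Java passes nativeHandle_ down, and the C++ side reinterpret_casts it back to rocksdb::Options* and reads or writes a single field. From Java the new knobs therefore behave like plain properties; a small sketch with an illustrative class name and arbitrary values:

import org.rocksdb.Options;

public class NativeOptionsRoundTrip {
  static {
    System.loadLibrary("rocksdbjni");
  }

  public static void main(String[] args) {
    Options opt = new Options();
    opt.setWALTtlSeconds(60 * 60);                     // keep WAL files for an hour
    opt.setAllowMmapReads(true);                       // read sst files via mmap
    opt.setManifestPreallocationSize(4 * 1024 * 1024); // preallocate 4MB for MANIFEST
    // Each getter crosses back into C++ and reads the rocksdb::Options field.
    assert opt.walTtlSeconds() == 60 * 60;
    opt.dispose();
  }
}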
//////////////////////////////////////////////////////////////////////////////
// WriteOptions

@@ -12,6 +12,7 @@
#include <jni.h>
#include "rocksdb/db.h"
#include "utilities/backupable_db.h"
namespace rocksdb {
@@ -171,6 +172,7 @@ class WriteBatchJni {
  }
};

class HistogramDataJni {
 public:
  static jmethodID getConstructorMethodId(JNIEnv* env, jclass jclazz) {
@@ -178,6 +180,38 @@ class HistogramDataJni {
        jclazz, "<init>", "(DDDDD)V");
    assert(mid != nullptr);
    return mid;
  }
};

class BackupableDBOptionsJni {
 public:
  // Get the java class id of org.rocksdb.BackupableDBOptions.
  static jclass getJClass(JNIEnv* env) {
    static jclass jclazz = env->FindClass("org/rocksdb/BackupableDBOptions");
    assert(jclazz != nullptr);
    return jclazz;
  }

  // Get the field id of the member variable of org.rocksdb.BackupableDBOptions
  // that stores the pointer to rocksdb::BackupableDBOptions.
  static jfieldID getHandleFieldID(JNIEnv* env) {
    static jfieldID fid = env->GetFieldID(
        getJClass(env), "nativeHandle_", "J");
    assert(fid != nullptr);
    return fid;
  }

  // Get the pointer to rocksdb::BackupableDBOptions.
  static rocksdb::BackupableDBOptions* getHandle(JNIEnv* env, jobject jobj) {
    return reinterpret_cast<rocksdb::BackupableDBOptions*>(
        env->GetLongField(jobj, getHandleFieldID(env)));
  }

  // Pass the rocksdb::BackupableDBOptions pointer to the java side.
  static void setHandle(
      JNIEnv* env, jobject jobj, rocksdb::BackupableDBOptions* op) {
    env->SetLongField(
        jobj, getHandleFieldID(env),
        reinterpret_cast<jlong>(op));
  }
};
} // namespace rocksdb
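BackupableDBOptionsJni repeats the handle-passing pattern of the other portal classes: look up the Java object's long nativeHandle_ field once, then read or write the native pointer through it. The Java class it mirrors needs only that field plus the native methods implemented in backupablejni.cc; a sketch of that shape (the commit's real BackupableDBOptions.java is not shown in this view, so the details are assumptions):

package org.rocksdb;

// Sketch only: field and native method names taken from portal.h and
// backupablejni.cc above; everything else is assumed.
public class BackupableDBOptions {
  long nativeHandle_;  // read and written by BackupableDBOptionsJni over JNI

  public BackupableDBOptions(String path) {
    newBackupableDBOptions(path);  // native ctor stores a BackupableDBOptions*
  }

  public String backupDir() {
    return backupDir(nativeHandle_);
  }

  public synchronized void dispose() {
    if (nativeHandle_ != 0) {
      dispose(nativeHandle_);  // deletes the native object and nulls the handle
    }
  }

  private native void newBackupableDBOptions(String path);
  private native String backupDir(long handle);
  private native void dispose(long handle);
}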
@@ -365,8 +365,20 @@ Status BlockBasedTable::Open(const Options& options, const EnvOptions& soptions,
s = ReadMetaBlock(rep, &meta, &meta_iter);

// Read the properties
bool found_properties_block = true;
meta_iter->Seek(kPropertiesBlock);
-if (meta_iter->Valid() && meta_iter->key() == kPropertiesBlock) {
if (meta_iter->status().ok() &&
(!meta_iter->Valid() || meta_iter->key() != kPropertiesBlock)) {
meta_iter->Seek(kPropertiesBlockOldName);
if (meta_iter->status().ok() &&
(!meta_iter->Valid() || meta_iter->key() != kPropertiesBlockOldName)) {
found_properties_block = false;
Log(WARN_LEVEL, rep->options.info_log,
"Cannot find Properties block from file.");
}
}
if (found_properties_block) {
s = meta_iter->status();
TableProperties* table_properties = nullptr;
if (s.ok()) {
@@ -1018,13 +1030,7 @@ bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader) {
// Some old versions of block-based tables don't have the index type present
// in the table properties. If that's the case, we can safely use kBinarySearch.
-auto index_type = BlockBasedTableOptions::kBinarySearch;
-auto& props = rep_->table_properties->user_collected_properties;
-auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
-if (pos != props.end()) {
-  index_type = static_cast<BlockBasedTableOptions::IndexType>(
-      DecodeFixed32(pos->second.c_str()));
-}
auto index_type = rep_->index_type;
auto file = rep_->file.get();
const auto& index_handle = rep_->index_handle;
@@ -1082,7 +1088,10 @@ uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key) {
// key is past the last key in the file. If table_properties is not
// available, approximate the offset by returning the offset of the
// metaindex block (which is right near the end of the file).
-result = rep_->table_properties->data_size;
result = 0;
if (rep_->table_properties) {
  result = rep_->table_properties->data_size;
}
// table_properties is not present in the table.
if (result == 0) {
  result = rep_->metaindex_handle.offset();
@@ -12,6 +12,7 @@
#include <stdint.h>
#include <memory>
#include <utility>
#include <string>
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
@@ -198,4 +199,8 @@ class BlockBasedTable : public TableReader {
void operator=(const TableReader&) = delete;
};

// Backward-compatible properties block name. Limited to block-based tables.
extern const std::string kPropertiesBlockOldName;
} // namespace rocksdb
@@ -244,6 +244,8 @@ Status ReadTableProperties(RandomAccessFile* file, uint64_t file_size,
metaindex_block.NewIterator(BytewiseComparator()));
// -- Read property block
// This function is not used by BlockBasedTable, so we don't have to
// worry about the old properties block name.
meta_iter->Seek(kPropertiesBlock);
TableProperties table_properties;
if (meta_iter->Valid() &&
@@ -91,5 +91,7 @@ const std::string TablePropertiesNames::kFixedKeyLen =
"rocksdb.fixed.key.length";
extern const std::string kPropertiesBlock = "rocksdb.properties";
// Old property block name for backward compatibility
extern const std::string kPropertiesBlockOldName = "rocksdb.stats";
} // namespace rocksdb
@@ -1,271 +0,0 @@
#include <boost/shared_ptr.hpp>
#include "DBClientProxy.h"
#include "thrift/lib/cpp/protocol/TBinaryProtocol.h"
#include "thrift/lib/cpp/transport/TSocket.h"
#include "thrift/lib/cpp/transport/TTransportUtils.h"
using namespace std;
using namespace boost;
using namespace Tleveldb;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
namespace rocksdb {
DBClientProxy::DBClientProxy(const string & host, int port) :
host_(host),
port_(port),
dbToHandle_(),
dbClient_() {
}
DBClientProxy::~DBClientProxy() {
cleanUp();
}
void DBClientProxy::connect(void) {
cleanUp();
printf("Connecting to %s:%d\n", host_.c_str(), port_);
try {
boost::shared_ptr<TSocket> socket(new TSocket(host_, port_));
boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
boost::shared_ptr<TBinaryProtocol> protocol(new TBinaryProtocol(transport));
dbClient_.reset(new DBClient(protocol));
transport->open();
} catch (const std::exception & e) {
dbClient_.reset();
throw;
}
}
void DBClientProxy::cleanUp(void) {
if(dbClient_.get()) {
for(map<string, DBHandle>::iterator itor = dbToHandle_.begin();
itor != dbToHandle_.end();
++itor) {
dbClient_->Close(itor->second, itor->first);
}
dbClient_.reset();
}
dbToHandle_.clear();
}
void DBClientProxy::open(const string & db) {
if(!dbClient_.get()) {
printf("please connect() first\n");
return;
}
// printf("opening database : %s\n", db.c_str());
// we use default DBOptions here
DBOptions opt;
DBHandle handle;
try {
dbClient_->Open(handle, db, opt);
} catch (const LeveldbException & e) {
printf("%s\n", e.message.c_str());
if(kIOError == e.errorCode) {
printf("no such database : %s\n", db.c_str());
return;
} else {
printf("Unknown error : %d\n", e.errorCode);
return;
}
}
dbToHandle_[db] = handle;
}
bool DBClientProxy::create(const string & db) {
if(!dbClient_.get()) {
printf("please connect() first\n");
return false;
}
printf("creating database : %s\n", db.c_str());
DBOptions opt;
opt.create_if_missing = true;
opt.error_if_exists = true;
DBHandle handle;
try {
dbClient_->Open(handle, db, opt);
} catch (const LeveldbException & e) {
printf("%s\n", e.message.c_str());
printf("error code : %d\n", e.errorCode);
if(kNotFound == e.errorCode) {
printf("no such database : %s\n", db.c_str());
return false;
} else {
printf("Unknown error : %d\n", e.errorCode);
return false;
}
}
dbToHandle_[db] = handle;
return true;
}
map<string, DBHandle>::iterator
DBClientProxy::getHandle(const string & db) {
map<string, DBHandle>::iterator itor = dbToHandle_.find(db);
if(dbToHandle_.end() == itor) {
open(db);
itor = dbToHandle_.find(db);
}
return itor;
}
bool DBClientProxy::get(const string & db,
const string & key,
string & value) {
if(!dbClient_.get()) {
printf("please connect() first\n");
return false;
}
map<string, DBHandle>::iterator itor = getHandle(db);
if(dbToHandle_.end() == itor) {
return false;
}
ResultItem ret;
Slice k;
k.data = key;
k.size = key.size();
// we use default values of options here
ReadOptions opt;
dbClient_->Get(ret,
itor->second,
k,
opt);
if(kOk == ret.status) {
value = ret.value.data;
return true;
} else if(kNotFound == ret.status) {
printf("no such key : %s\n", key.c_str());
return false;
} else {
printf("get data error : %d\n", ret.status);
return false;
}
}
bool DBClientProxy::put(const string & db,
const string & key,
const string & value) {
if(!dbClient_.get()) {
printf("please connect() first\n");
return false;
}
map<string, DBHandle>::iterator itor = getHandle(db);
if(dbToHandle_.end() == itor) {
return false;
}
kv temp;
temp.key.data = key;
temp.key.size = key.size();
temp.value.data = value;
temp.value.size = value.size();
WriteOptions opt;
opt.sync = true;
Code code;
code = dbClient_->Put(itor->second,
temp,
opt);
if(kOk == code) {
// printf("set value finished\n");
return true;
} else {
printf("put data error : %d\n", code);
return false;
}
}
bool DBClientProxy::scan(const string & db,
const string & start_key,
const string & end_key,
const string & limit,
vector<pair<string, string> > & kvs) {
if(!dbClient_.get()) {
printf("please connect() first\n");
return false;
}
int limitInt = -1;
limitInt = atoi(limit.c_str());
if(limitInt <= 0) {
printf("Error while parse limit : %s\n", limit.c_str());
return false;
}
if(start_key > end_key) {
printf("empty range.\n");
return false;
}
map<string, DBHandle>::iterator itor = getHandle(db);
if(dbToHandle_.end() == itor) {
return false;
}
ResultIterator ret;
// we use the default values of options here
ReadOptions opt;
Slice k;
k.data = start_key;
k.size = start_key.size();
dbClient_->NewIterator(ret,
itor->second,
opt,
seekToKey,
k);
Iterator it;
if(kOk == ret.status) {
it = ret.iterator;
} else {
printf("get iterator error : %d\n", ret.status);
return false;
}
int idx = 0;
string ck = start_key;
while(idx < limitInt && ck < end_key) {
ResultPair retPair;
dbClient_->GetNext(retPair, itor->second, it);
if(kOk == retPair.status) {
++idx;
ck = retPair.keyvalue.key.data;
if (ck < end_key) {
kvs.push_back(make_pair(retPair.keyvalue.key.data,
retPair.keyvalue.value.data));
}
} else if(kEnd == retPair.status) {
printf("not enough values\n");
return true;
} else {
printf("GetNext() error : %d\n", retPair.status);
return false;
}
}
return true;
}
} // namespace

@@ -1,64 +0,0 @@
#ifndef TOOLS_SHELL_DBCLIENTPROXY
#define TOOLS_SHELL_DBCLIENTPROXY
#include <vector>
#include <map>
#include <string>
#include <boost/utility.hpp>
#include <boost/shared_ptr.hpp>
#include "DB.h"
/*
* class DBClientProxy maintains:
* 1. a connection to rocksdb service
* 2. a map from db names to opened db handles
*
 * It is the client code's responsibility to catch all possible exceptions.
*/
namespace rocksdb {
class DBClientProxy : private boost::noncopyable {
public:
// connect to host_:port_
void connect(void);
// return true on success, false otherwise
bool get(const std::string & db,
const std::string & key,
std::string & value);
// return true on success, false otherwise
bool put(const std::string & db,
const std::string & key,
const std::string & value);
// return true on success, false otherwise
bool scan(const std::string & db,
const std::string & start_key,
const std::string & end_key,
const std::string & limit,
std::vector<std::pair<std::string, std::string> > & kvs);
// return true on success, false otherwise
bool create(const std::string & db);
DBClientProxy(const std::string & host, int port);
~DBClientProxy();
private:
// some internal help functions
void cleanUp(void);
void open(const std::string & db);
std::map<std::string, Trocksdb::DBHandle>::iterator getHandle(const std::string & db);
const std::string host_;
const int port_;
std::map<std::string, Trocksdb::DBHandle> dbToHandle_;
boost::shared_ptr<Trocksdb::DBClient> dbClient_;
};
} // namespace
#endif

@@ -1,8 +0,0 @@
#include "ShellContext.h"
int main(int argc, char ** argv) {
ShellContext c(argc, argv);
c.run();
}

@@ -1,104 +0,0 @@
#include <iostream>
#include <boost/shared_ptr.hpp>
#include "ShellContext.h"
#include "ShellState.h"
#include "thrift/lib/cpp/protocol/TBinaryProtocol.h"
#include "thrift/lib/cpp/transport/TSocket.h"
#include "thrift/lib/cpp/transport/TTransportUtils.h"
using namespace std;
using namespace boost;
using namespace Tleveldb;
using namespace rocksdb;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
void ShellContext::changeState(ShellState * pState) {
pShellState_ = pState;
}
void ShellContext::stop(void) {
exit_ = true;
}
bool ShellContext::ParseInput(void) {
if(argc_ != 3) {
printf("leveldb_shell host port\n");
return false;
}
port_ = atoi(argv_[2]);
if(port_ <= 0) {
printf("Error while parse port : %s\n", argv_[2]);
return false;
}
clientProxy_.reset(new DBClientProxy(argv_[1], port_));
if(!clientProxy_.get()) {
return false;
} else {
return true;
}
}
void ShellContext::connect(void) {
clientProxy_->connect();
}
void ShellContext::create(const string & db) {
if (clientProxy_->create(db)) {
printf("%s created\n", db.c_str());
}
}
void ShellContext::get(const string & db,
const string & key) {
string v;
if (clientProxy_->get(db, key, v)) {
printf("%s\n", v.c_str());
}
}
void ShellContext::put(const string & db,
const string & key,
const string & value) {
if (clientProxy_->put(db, key, value)) {
printf("(%s, %s) has been set\n", key.c_str(), value.c_str());
}
}
void ShellContext::scan(const string & db,
const string & start_key,
const string & end_key,
const string & limit) {
vector<pair<string, string> > kvs;
if (clientProxy_->scan(db, start_key, end_key, limit, kvs)) {
for(unsigned int i = 0; i < kvs.size(); ++i) {
printf("%d (%s, %s)\n", i, kvs[i].first.c_str(), kvs[i].second.c_str());
}
}
}
void ShellContext::run(void) {
while(!exit_) {
pShellState_->run(this);
}
}
ShellContext::ShellContext(int argc, char ** argv) :
pShellState_(ShellStateStart::getInstance()),
exit_(false),
argc_(argc),
argv_(argv),
port_(-1),
clientProxy_() {
}

@@ -1,51 +0,0 @@
#ifndef TOOLS_SHELL_SHELLCONTEXT
#define TOOLS_SHELL_SHELLCONTEXT
#include <map>
#include <string>
#include <boost/utility.hpp>
#include <boost/shared_ptr.hpp>
#include "DB.h"
#include "DBClientProxy.h"
class ShellState;
class ShellContext : private boost::noncopyable {
public:
void changeState(ShellState * pState);
void stop(void);
bool ParseInput(void);
void connect(void);
void get(const std::string & db,
const std::string & key);
void put(const std::string & db,
const std::string & key,
const std::string & value);
void scan(const std::string & db,
const std::string & start_key,
const std::string & end_key,
const std::string & limit);
void create(const std::string & db);
void run(void);
ShellContext(int argc, char ** argv);
private:
ShellState * pShellState_;
bool exit_;
int argc_;
char ** argv_;
int port_;
boost::shared_ptr<rocksdb::DBClientProxy> clientProxy_;
};
#endif

@@ -1,139 +0,0 @@
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
#include "ShellState.h"
#include "ShellContext.h"
#include "transport/TTransportException.h"
using namespace std;
using namespace apache::thrift::transport;
const char * PMT = ">> ";
void ShellStateStart::run(ShellContext * c) {
if(!c->ParseInput()) {
c->changeState(ShellStateStop::getInstance());
} else {
c->changeState(ShellStateConnecting::getInstance());
}
}
void ShellStateStop::run(ShellContext * c) {
c->stop();
}
void ShellStateConnecting::run(ShellContext * c) {
try {
c->connect();
} catch (const TTransportException & e) {
cout << e.what() << endl;
c->changeState(ShellStateStop::getInstance());
return;
}
c->changeState(ShellStateConnected::getInstance());
}
void ShellStateConnected::unknownCmd(void) {
cout << "Unknown command!" << endl;
cout << "Use help to list all available commands" << endl;
}
void ShellStateConnected::helpMsg(void) {
cout << "Currently supported commands:" << endl;
cout << "create db" << endl;
cout << "get db key" << endl;
cout << "scan db start_key end_key limit" << endl;
cout << "put db key value" << endl;
cout << "exit/quit" << endl;
}
void ShellStateConnected::handleConError(ShellContext * c) {
cout << "Connection down" << endl;
cout << "Reconnect ? (y/n) :" << endl;
string s;
while(getline(cin, s)) {
if("y" == s) {
c->changeState(ShellStateConnecting::getInstance());
break;
} else if("n" == s) {
c->changeState(ShellStateStop::getInstance());
break;
} else {
cout << "Reconnect ? (y/n) :" << endl;
}
}
}
void ShellStateConnected::run(ShellContext * c) {
string line;
cout << PMT;
getline(cin, line);
istringstream is(line);
vector<string> params;
string param;
while(is >> param) {
params.push_back(param);
}
// empty input line
if(params.empty())
return;
if("quit" == params[0] || "exit" == params[0]) {
c->changeState(ShellStateStop::getInstance());
} else if("get" == params[0]) {
if(params.size() == 3) {
try {
c->get(params[1], params[2]);
} catch (const TTransportException & e) {
cout << e.what() << endl;
handleConError(c);
}
} else {
unknownCmd();
}
} else if("create" == params[0]) {
if(params.size() == 2) {
try {
c->create(params[1]);
} catch (const TTransportException & e) {
cout << e.what() << endl;
handleConError(c);
}
} else {
unknownCmd();
}
}else if("put" == params[0]) {
if(params.size() == 4) {
try {
c->put(params[1], params[2], params[3]);
} catch (const TTransportException & e) {
cout << e.what() << endl;
handleConError(c);
}
} else {
unknownCmd();
}
} else if("scan" == params[0]) {
if(params.size() == 5) {
try {
c->scan(params[1], params[2], params[3], params[4]);
} catch (const TTransportException & e) {
cout << e.what() << endl;
handleConError(c);
}
} else {
unknownCmd();
}
} else if("help" == params[0]) {
helpMsg();
} else {
unknownCmd();
}
}

@@ -1,87 +0,0 @@
#ifndef TOOLS_SHELL_SHELLSTATE
#define TOOLS_SHELL_SHELLSTATE
class ShellContext;
/*
* Currently, there are four types of state in total
* 1. start state: the first state the program enters
 * 2. connecting state: the program tries to connect to a rocksdb server; its
 *    previous state could be the "start" or "connected" state
 * 3. connected state: the program has already connected to a server and is
 *    processing user commands
 * 4. stop state: the last state the program enters, doing some cleanup
*/
class ShellState {
public:
virtual void run(ShellContext *) = 0;
virtual ~ShellState() {}
};
class ShellStateStart : public ShellState {
public:
static ShellStateStart * getInstance(void) {
static ShellStateStart instance;
return &instance;
}
virtual void run(ShellContext *);
private:
ShellStateStart() {}
virtual ~ShellStateStart() {}
};
class ShellStateStop : public ShellState {
public:
static ShellStateStop * getInstance(void) {
static ShellStateStop instance;
return &instance;
}
virtual void run(ShellContext *);
private:
ShellStateStop() {}
virtual ~ShellStateStop() {}
};
class ShellStateConnecting : public ShellState {
public:
static ShellStateConnecting * getInstance(void) {
static ShellStateConnecting instance;
return &instance;
}
virtual void run(ShellContext *);
private:
ShellStateConnecting() {}
virtual ~ShellStateConnecting() {}
};
class ShellStateConnected : public ShellState {
public:
static ShellStateConnected * getInstance(void) {
static ShellStateConnected instance;
return &instance;
}
virtual void run(ShellContext *);
private:
ShellStateConnected() {}
virtual ~ShellStateConnected() {}
void unknownCmd();
void handleConError(ShellContext *);
void helpMsg();
};
#endif

@@ -1,182 +0,0 @@
/**
* Tests for DBClientProxy class for leveldb
* @author Bo Liu (newpoo.liu@gmail.com)
* Copyright 2012 Facebook
*/
#include <algorithm>
#include <vector>
#include <string>
#include <protocol/TBinaryProtocol.h>
#include <transport/TSocket.h>
#include <transport/TBufferTransports.h>
#include <util/testharness.h>
#include <DB.h>
#include <AssocService.h>
#include <leveldb_types.h>
#include "server_options.h"
#include "../DBClientProxy.h"
using namespace rocksdb;
using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using boost::shared_ptr;
using namespace Tleveldb;
using namespace std;
extern "C" void startServer(int argc, char**argv);
extern "C" void stopServer(int port);
extern ServerOptions server_options;
static const string db1("db1");
static void testDBClientProxy(DBClientProxy & dbcp) {
bool flag;
const int NOK = 100;
const int BUFSIZE = 16;
int testcase = 0;
vector<string> keys, values;
vector<pair<string, string> > kvs, correctKvs;
string k, v;
for(int i = 0; i < NOK; ++i) {
char bufKey[BUFSIZE];
char bufValue[BUFSIZE];
snprintf(bufKey, BUFSIZE, "key%d", i);
snprintf(bufValue, BUFSIZE, "value%d", i);
keys.push_back(bufKey);
values.push_back(bufValue);
correctKvs.push_back((make_pair(string(bufKey), string(bufValue))));
}
sort(correctKvs.begin(), correctKvs.end());
// can not do get(), put(), scan() or create() before connected.
flag = dbcp.get(db1, keys[0], v);
ASSERT_TRUE(false == flag);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
flag = dbcp.put(db1, keys[0], keys[1]);
ASSERT_TRUE(false == flag);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
flag = dbcp.scan(db1, "a", "w", "100", kvs);
ASSERT_TRUE(false == flag);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
flag = dbcp.create(db1);
ASSERT_TRUE(false == flag);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
dbcp.connect();
// create a database
flag = dbcp.create(db1);
ASSERT_TRUE(true == flag);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
// no such key
flag = dbcp.get(db1, keys[0], v);
ASSERT_TRUE(false == flag);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
// scan() success with empty returned key-value pairs
kvs.clear();
flag = dbcp.scan(db1, "a", "w", "100", kvs);
ASSERT_TRUE(true == flag);
ASSERT_TRUE(kvs.empty());
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
// put()
for(int i = 0; i < NOK; ++i) {
flag = dbcp.put(db1, keys[i], values[i]);
ASSERT_TRUE(true == flag);
}
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
// scan all of key-value pairs
kvs.clear();
flag = dbcp.scan(db1, "a", "w", "100", kvs);
ASSERT_TRUE(true == flag);
ASSERT_TRUE(kvs == correctKvs);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
// scan the first 20 key-value pairs
{
kvs.clear();
flag = dbcp.scan(db1, "a", "w", "20", kvs);
ASSERT_TRUE(true == flag);
vector<pair<string, string> > tkvs(correctKvs.begin(), correctKvs.begin() + 20);
ASSERT_TRUE(kvs == tkvs);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
}
// scan key[10] to key[50]
{
kvs.clear();
flag = dbcp.scan(db1, correctKvs[10].first, correctKvs[50].first, "100", kvs);
ASSERT_TRUE(true == flag);
vector<pair<string, string> > tkvs(correctKvs.begin() + 10, correctKvs.begin() + 50);
ASSERT_TRUE(kvs == tkvs);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
}
// scan "key10" to "key40" by limit constraint
{
kvs.clear();
flag = dbcp.scan(db1, correctKvs[10].first.c_str(), "w", "30", kvs);
ASSERT_TRUE(true == flag);
vector<pair<string, string> > tkvs(correctKvs.begin() + 10, correctKvs.begin() + 40);
ASSERT_TRUE(kvs == tkvs);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
}
// get()
flag = dbcp.get(db1, "unknownKey", v);
ASSERT_TRUE(false == flag);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
flag = dbcp.get(db1, keys[0], v);
ASSERT_TRUE(true == flag);
ASSERT_TRUE(v == values[0]);
printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase);
}
static void cleanupDir(std::string dir) {
// remove old data, if any
char* cleanup = new char[100];
snprintf(cleanup, 100, "rm -rf %s", dir.c_str());
system(cleanup);
}
int main(int argc, char **argv) {
// create a server
startServer(argc, argv);
printf("Server thread created.\n");
// give some time to the server to initialize itself
while (server_options.getPort() == 0) {
sleep(1);
}
cleanupDir(server_options.getDataDirectory(db1));
DBClientProxy dbcp("localhost", server_options.getPort());
testDBClientProxy(dbcp);
}

@@ -5,6 +5,7 @@
#include "util/sync_point.h" #include "util/sync_point.h"
#ifndef NDEBUG
namespace rocksdb { namespace rocksdb {
SyncPoint* SyncPoint::GetInstance() { SyncPoint* SyncPoint::GetInstance() {
@ -60,3 +61,4 @@ void SyncPoint::Process(const std::string& point) {
} }
} // namespace rocksdb } // namespace rocksdb
#endif // NDEBUG

@@ -11,6 +11,10 @@
#include <unordered_map>
#include <vector>

#ifdef NDEBUG
#define TEST_SYNC_POINT(x)
#else
namespace rocksdb {
// This class provides a facility to reproduce race conditions deterministically
@@ -72,8 +76,5 @@ class SyncPoint {
// utilized to re-produce race conditions between threads.
// See TransactionLogIteratorRace in db_test.cc for an example use case.
// TEST_SYNC_POINT is a no-op in release builds.
-#ifdef NDEBUG
-#define TEST_SYNC_POINT(x)
-#else
#define TEST_SYNC_POINT(x) rocksdb::SyncPoint::GetInstance()->Process(x)
#endif // NDEBUG