From a213971d8a59d2b0533eed898df1d867bbd8a868 Mon Sep 17 00:00:00 2001
From: Nik Bougalis
Date: Tue, 30 Sep 2014 17:32:37 -0700
Subject: [PATCH 01/11] Don't return (or dereference) dangling pointer
---
db/flush_scheduler.cc | 1 +
1 file changed, 1 insertion(+)
diff --git a/db/flush_scheduler.cc b/db/flush_scheduler.cc
index 636ff5a98..56816159e 100644
--- a/db/flush_scheduler.cc
+++ b/db/flush_scheduler.cc
@@ -28,6 +28,7 @@ ColumnFamilyData* FlushScheduler::GetNextColumnFamily() {
if (cfd->IsDropped()) {
if (cfd->Unref()) {
delete cfd;
+ cfd = nullptr;
}
} else {
break;
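The hunk is one line, but the hazard deserves a note: if the dropped column family is the last entry in the scheduler's queue, the loop exits and the function returns the pointer it just deleted. A self-contained sketch of the pattern, using a toy stand-in type rather than the real FlushScheduler internals:

    #include <deque>

    // Toy stand-in for rocksdb::ColumnFamilyData (illustrative only).
    struct CFD {
      bool dropped = false;
      int refs = 1;
      bool IsDropped() const { return dropped; }
      bool Unref() { return --refs == 0; }  // true: caller owns the delete
    };

    CFD* GetNext(std::deque<CFD*>* queue) {
      CFD* cfd = nullptr;
      while (!queue->empty()) {
        cfd = queue->front();
        queue->pop_front();
        if (cfd->IsDropped()) {
          if (cfd->Unref()) {
            delete cfd;
            cfd = nullptr;  // the fix: without this, an empty queue would
                            // hand the caller a pointer to freed memory
          }
        } else {
          break;
        }
      }
      return cfd;
    }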
From f4086a88b4e5bafe9978f805a22a0b01e634c342 Mon Sep 17 00:00:00 2001
From: sdong
Date: Thu, 2 Oct 2014 17:02:30 -0700
Subject: [PATCH 02/11] perf_context.get_from_output_files_time is set for
MultiGet() and ReadOnly DB too.
Summary: perf_context.get_from_output_files_time is currently only set in a writable DB's DB::Get(). Extend it to MultiGet() and read-only DB.
Test Plan:
make all check
Fix perf_context_test and extend it to cover MultiGet(), as well as read-only DB. Run it and watch the results.
Reviewers: ljin, yhchiang, igor
Reviewed By: igor
Subscribers: rven, leveldb
Differential Revision: https://reviews.facebook.net/D24207
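For context, a minimal sketch of how the counter is consumed, assuming the 2014-era perf-context API that perf_context_test.cc uses (thread-local rocksdb::perf_context, SetPerfLevel()):

    #include <cstdint>
    #include <string>
    #include <vector>

    #include "rocksdb/db.h"
    #include "rocksdb/perf_context.h"

    uint64_t TimeSpentInSstFiles(rocksdb::DB* db) {
      rocksdb::SetPerfLevel(rocksdb::kEnableTime);
      rocksdb::perf_context.Reset();
      std::vector<rocksdb::Slice> keys = {"k1", "k2"};
      std::vector<std::string> values;
      // Before this patch the timer below stayed at 0 for MultiGet() and
      // for DBs opened with OpenForReadOnly(); only a writable DB's Get()
      // populated it.
      db->MultiGet(rocksdb::ReadOptions(), keys, &values);
      return rocksdb::perf_context.get_from_output_files_time;  // nanos
    }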
---
Makefile | 2 +-
db/db_impl.cc | 1 +
db/db_impl_readonly.cc | 2 +
db/perf_context_test.cc | 195 ++++++++++++++++++++++++++++++++++++----
4 files changed, 182 insertions(+), 18 deletions(-)
diff --git a/Makefile b/Makefile
index c9d12415b..ad9082d31 100644
--- a/Makefile
+++ b/Makefile
@@ -154,7 +154,7 @@ TOOLS = \
options_test \
blob_store_bench
-PROGRAMS = db_bench signal_test table_reader_bench log_and_apply_bench cache_bench $(TOOLS)
+PROGRAMS = db_bench signal_test table_reader_bench log_and_apply_bench cache_bench perf_context_test $(TOOLS)
# The library name is configurable since we are maintaining libraries of both
# debug/release mode.
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 680a22cb3..85dab9f9e 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -3602,6 +3602,7 @@ std::vector<Status> DBImpl::MultiGet(
} else if (super_version->imm->Get(lkey, value, &s, &merge_context)) {
// Done
} else {
+ PERF_TIMER_GUARD(get_from_output_files_time);
super_version->current->Get(options, lkey, value, &s, &merge_context);
}
diff --git a/db/db_impl_readonly.cc b/db/db_impl_readonly.cc
index 9faebd8c2..31ebdbedd 100644
--- a/db/db_impl_readonly.cc
+++ b/db/db_impl_readonly.cc
@@ -8,6 +8,7 @@
#include "db/db_impl.h"
#include "db/merge_context.h"
#include "db/db_iter.h"
+#include "util/perf_context_imp.h"
namespace rocksdb {
@@ -34,6 +35,7 @@ Status DBImplReadOnly::Get(const ReadOptions& read_options,
LookupKey lkey(key, snapshot);
if (super_version->mem->Get(lkey, value, &s, &merge_context)) {
} else {
+ PERF_TIMER_GUARD(get_from_output_files_time);
super_version->current->Get(read_options, lkey, value, &s, &merge_context);
}
return s;
diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc
index a182fb521..9d34409c3 100644
--- a/db/perf_context_test.cc
+++ b/db/perf_context_test.cc
@@ -6,7 +6,6 @@
#include
#include
#include
-#include "/usr/include/valgrind/callgrind.h"
#include "rocksdb/db.h"
#include "rocksdb/perf_context.h"
@@ -29,7 +28,7 @@ const std::string kDbName = rocksdb::test::TmpDir() + "/perf_context_test";
namespace rocksdb {
-std::shared_ptr<DB> OpenDb() {
+std::shared_ptr<DB> OpenDb(bool read_only = false) {
DB* db;
Options options;
options.create_if_missing = true;
@@ -39,12 +38,16 @@ std::shared_ptr OpenDb() {
FLAGS_min_write_buffer_number_to_merge;
if (FLAGS_use_set_based_memetable) {
- auto prefix_extractor = rocksdb::NewFixedPrefixTransform(0);
- options.memtable_factory.reset(
- NewHashSkipListRepFactory(prefix_extractor));
+ options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(0));
+ options.memtable_factory.reset(NewHashSkipListRepFactory());
}
- Status s = DB::Open(options, kDbName, &db);
+ Status s;
+ if (!read_only) {
+ s = DB::Open(options, kDbName, &db);
+ } else {
+ s = DB::OpenForReadOnly(options, kDbName, &db);
+ }
ASSERT_OK(s);
return std::shared_ptr<DB>(db);
}
@@ -76,7 +79,8 @@ TEST(PerfContextTest, SeekIntoDeletion) {
std::string value;
perf_context.Reset();
- StopWatchNano timer(Env::Default(), true);
+ StopWatchNano timer(Env::Default());
+ timer.Start();
auto status = db->Get(read_options, key, &value);
auto elapsed_nanos = timer.ElapsedNanos();
ASSERT_TRUE(status.IsNotFound());
@@ -149,11 +153,12 @@ TEST(PerfContextTest, StopWatchNanoOverhead) {
TEST(PerfContextTest, StopWatchOverhead) {
// profile the timer cost by itself!
const int kTotalIterations = 1000000;
+ uint64_t elapsed = 0;
std::vector<uint64_t> timings(kTotalIterations);
- StopWatch timer(Env::Default());
+ StopWatch timer(Env::Default(), nullptr, 0, &elapsed);
for (auto& timing : timings) {
- timing = timer.ElapsedMicros();
+ timing = elapsed;
}
HistogramImpl histogram;
@@ -166,7 +171,7 @@ TEST(PerfContextTest, StopWatchOverhead) {
std::cout << histogram.ToString();
}
-void ProfileKeyComparison() {
+void ProfileQueries(bool enabled_time = false) {
DestroyDB(kDbName, Options()); // Start this test with a fresh DB
auto db = OpenDb();
@@ -175,11 +180,21 @@ void ProfileKeyComparison() {
ReadOptions read_options;
HistogramImpl hist_put;
+
HistogramImpl hist_get;
HistogramImpl hist_get_snapshot;
HistogramImpl hist_get_memtable;
+ HistogramImpl hist_get_files;
HistogramImpl hist_get_post_process;
HistogramImpl hist_num_memtable_checked;
+
+ HistogramImpl hist_mget;
+ HistogramImpl hist_mget_snapshot;
+ HistogramImpl hist_mget_memtable;
+ HistogramImpl hist_mget_files;
+ HistogramImpl hist_mget_post_process;
+ HistogramImpl hist_mget_num_memtable_checked;
+
HistogramImpl hist_write_pre_post;
HistogramImpl hist_write_wal_time;
HistogramImpl hist_write_memtable_time;
@@ -187,8 +202,13 @@ void ProfileKeyComparison() {
std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
std::vector<int> keys;
+ const int kFlushFlag = -1;
for (int i = 0; i < FLAGS_total_keys; ++i) {
keys.push_back(i);
+ if (i == FLAGS_total_keys / 2) {
+ // Issuing a flush in the middle.
+ keys.push_back(kFlushFlag);
+ }
}
if (FLAGS_random_key) {
@@ -196,27 +216,54 @@ void ProfileKeyComparison() {
}
for (const int i : keys) {
+ if (i == kFlushFlag) {
+ FlushOptions fo;
+ db->Flush(fo);
+ continue;
+ }
std::string key = "k" + std::to_string(i);
std::string value = "v" + std::to_string(i);
+ std::vector<Slice> keys = {Slice(key)};
+ std::vector<std::string> values;
+
perf_context.Reset();
db->Put(write_options, key, value);
hist_write_pre_post.Add(perf_context.write_pre_and_post_process_time);
hist_write_wal_time.Add(perf_context.write_wal_time);
hist_write_memtable_time.Add(perf_context.write_memtable_time);
hist_put.Add(perf_context.user_key_comparison_count);
+ }
+
+ for (const int i : keys) {
+ std::string key = "k" + std::to_string(i);
+ std::string value = "v" + std::to_string(i);
+
+ std::vector<Slice> keys = {Slice(key)};
+ std::vector<std::string> values;
perf_context.Reset();
db->Get(read_options, key, &value);
hist_get_snapshot.Add(perf_context.get_snapshot_time);
hist_get_memtable.Add(perf_context.get_from_memtable_time);
+ hist_get_files.Add(perf_context.get_from_output_files_time);
hist_num_memtable_checked.Add(perf_context.get_from_memtable_count);
hist_get_post_process.Add(perf_context.get_post_process_time);
hist_get.Add(perf_context.user_key_comparison_count);
+
+ perf_context.Reset();
+ db->MultiGet(read_options, keys, &values);
+ hist_mget_snapshot.Add(perf_context.get_snapshot_time);
+ hist_mget_memtable.Add(perf_context.get_from_memtable_time);
+ hist_mget_files.Add(perf_context.get_from_output_files_time);
+ hist_mget_num_memtable_checked.Add(perf_context.get_from_memtable_count);
+ hist_mget_post_process.Add(perf_context.get_post_process_time);
+ hist_mget.Add(perf_context.user_key_comparison_count);
}
std::cout << "Put uesr key comparison: \n" << hist_put.ToString()
- << "Get uesr key comparison: \n" << hist_get.ToString();
+ << "Get uesr key comparison: \n" << hist_get.ToString()
+ << "MultiGet uesr key comparison: \n" << hist_get.ToString();
std::cout << "Put(): Pre and Post Process Time: \n"
<< hist_write_pre_post.ToString()
<< " Writing WAL time: \n"
@@ -224,25 +271,139 @@ void ProfileKeyComparison() {
<< " Writing Mem Table time: \n"
<< hist_write_memtable_time.ToString() << "\n";
- std::cout << "Get(): Time to get snapshot: \n"
+ std::cout << "Get(): Time to get snapshot: \n" << hist_get_snapshot.ToString()
+ << " Time to get value from memtables: \n"
+ << hist_get_memtable.ToString() << "\n"
+ << " Time to get value from output files: \n"
+ << hist_get_files.ToString() << "\n"
+ << " Number of memtables checked: \n"
+ << hist_num_memtable_checked.ToString() << "\n"
+ << " Time to post process: \n" << hist_get_post_process.ToString()
+ << "\n";
+
+ std::cout << "MultiGet(): Time to get snapshot: \n"
+ << hist_mget_snapshot.ToString()
+ << " Time to get value from memtables: \n"
+ << hist_mget_memtable.ToString() << "\n"
+ << " Time to get value from output files: \n"
+ << hist_mget_files.ToString() << "\n"
+ << " Number of memtables checked: \n"
+ << hist_mget_num_memtable_checked.ToString() << "\n"
+ << " Time to post process: \n" << hist_mget_post_process.ToString()
+ << "\n";
+
+ if (enabled_time) {
+ ASSERT_GT(hist_get.Average(), 0);
+ ASSERT_GT(hist_get_snapshot.Average(), 0);
+ ASSERT_GT(hist_get_memtable.Average(), 0);
+ ASSERT_GT(hist_get_files.Average(), 0);
+ ASSERT_GT(hist_get_post_process.Average(), 0);
+ ASSERT_GT(hist_num_memtable_checked.Average(), 0);
+
+ ASSERT_GT(hist_mget.Average(), 0);
+ ASSERT_GT(hist_mget_snapshot.Average(), 0);
+ ASSERT_GT(hist_mget_memtable.Average(), 0);
+ ASSERT_GT(hist_mget_files.Average(), 0);
+ ASSERT_GT(hist_mget_post_process.Average(), 0);
+ ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
+ }
+
+ db.reset();
+ db = OpenDb(true);
+
+ hist_get.Clear();
+ hist_get_snapshot.Clear();
+ hist_get_memtable.Clear();
+ hist_get_files.Clear();
+ hist_get_post_process.Clear();
+ hist_num_memtable_checked.Clear();
+
+ hist_mget.Clear();
+ hist_mget_snapshot.Clear();
+ hist_mget_memtable.Clear();
+ hist_mget_files.Clear();
+ hist_mget_post_process.Clear();
+ hist_mget_num_memtable_checked.Clear();
+
+ for (const int i : keys) {
+ std::string key = "k" + std::to_string(i);
+ std::string value = "v" + std::to_string(i);
+
+ std::vector<Slice> keys = {Slice(key)};
+ std::vector<std::string> values;
+
+ perf_context.Reset();
+ db->Get(read_options, key, &value);
+ hist_get_snapshot.Add(perf_context.get_snapshot_time);
+ hist_get_memtable.Add(perf_context.get_from_memtable_time);
+ hist_get_files.Add(perf_context.get_from_output_files_time);
+ hist_num_memtable_checked.Add(perf_context.get_from_memtable_count);
+ hist_get_post_process.Add(perf_context.get_post_process_time);
+ hist_get.Add(perf_context.user_key_comparison_count);
+
+ perf_context.Reset();
+ db->MultiGet(read_options, keys, &values);
+ hist_mget_snapshot.Add(perf_context.get_snapshot_time);
+ hist_mget_memtable.Add(perf_context.get_from_memtable_time);
+ hist_mget_files.Add(perf_context.get_from_output_files_time);
+ hist_mget_num_memtable_checked.Add(perf_context.get_from_memtable_count);
+ hist_mget_post_process.Add(perf_context.get_post_process_time);
+ hist_mget.Add(perf_context.user_key_comparison_count);
+ }
+
+ std::cout << "ReadOnly Get uesr key comparison: \n" << hist_get.ToString()
+ << "ReadOnly MultiGet uesr key comparison: \n"
+ << hist_mget.ToString();
+
+ std::cout << "ReadOnly Get(): Time to get snapshot: \n"
<< hist_get_snapshot.ToString()
<< " Time to get value from memtables: \n"
<< hist_get_memtable.ToString() << "\n"
+ << " Time to get value from output files: \n"
+ << hist_get_files.ToString() << "\n"
<< " Number of memtables checked: \n"
<< hist_num_memtable_checked.ToString() << "\n"
- << " Time to post process: \n"
- << hist_get_post_process.ToString() << "\n";
+ << " Time to post process: \n" << hist_get_post_process.ToString()
+ << "\n";
+
+ std::cout << "ReadOnly MultiGet(): Time to get snapshot: \n"
+ << hist_mget_snapshot.ToString()
+ << " Time to get value from memtables: \n"
+ << hist_mget_memtable.ToString() << "\n"
+ << " Time to get value from output files: \n"
+ << hist_mget_files.ToString() << "\n"
+ << " Number of memtables checked: \n"
+ << hist_mget_num_memtable_checked.ToString() << "\n"
+ << " Time to post process: \n" << hist_mget_post_process.ToString()
+ << "\n";
+
+ if (enabled_time) {
+ ASSERT_GT(hist_get.Average(), 0);
+ ASSERT_GT(hist_get_memtable.Average(), 0);
+ ASSERT_GT(hist_get_files.Average(), 0);
+ ASSERT_GT(hist_num_memtable_checked.Average(), 0);
+ // In read-only mode Get(), no super version operation is needed
+ ASSERT_EQ(hist_get_post_process.Average(), 0);
+ ASSERT_EQ(hist_get_snapshot.Average(), 0);
+
+ ASSERT_GT(hist_mget.Average(), 0);
+ ASSERT_GT(hist_mget_snapshot.Average(), 0);
+ ASSERT_GT(hist_mget_memtable.Average(), 0);
+ ASSERT_GT(hist_mget_files.Average(), 0);
+ ASSERT_GT(hist_mget_post_process.Average(), 0);
+ ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
+ }
}
TEST(PerfContextTest, KeyComparisonCount) {
SetPerfLevel(kEnableCount);
- ProfileKeyComparison();
+ ProfileQueries();
SetPerfLevel(kDisable);
- ProfileKeyComparison();
+ ProfileQueries();
SetPerfLevel(kEnableTime);
- ProfileKeyComparison();
+ ProfileQueries(true);
}
// make perf_context_test
From 8ea232b9e3163ece812b1d8c2ae3653f9d0a7f13 Mon Sep 17 00:00:00 2001
From: sdong
Date: Thu, 2 Oct 2014 12:00:09 -0700
Subject: [PATCH 03/11] Add number of records dropped in compaction summary
Summary:
Add two stats to compaction summary:
1. Total input records from previous level
2. Total number of records dropped after compaction
Test Plan: See outputs of printing when running locally
Reviewers: ljin, igor, MarkCallaghan
Reviewed By: MarkCallaghan
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D24411
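The new accounting reduces to a subtraction; a hypothetical helper mirroring what DoCompactionWork() now computes (names are illustrative, not RocksDB API):

    #include <cstdint>
    #include <vector>

    // input_file_entries: num_entries of every level-n and level-(n+1)
    // input file; num_output_records: entries the compaction wrote out.
    // Records drop when they are shadowed by newer Puts/Deletes or removed
    // by a compaction filter.
    int CountDroppedRecords(const std::vector<uint64_t>& input_file_entries,
                            int num_output_records) {
      uint64_t num_input_records = 0;
      for (uint64_t n : input_file_entries) {
        num_input_records += n;
      }
      return static_cast<int>(num_input_records) - num_output_records;
    }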
---
db/db_impl.cc | 21 +++++++++++++++++++--
db/db_impl.h | 1 +
db/internal_stats.cc | 10 +++++++---
db/internal_stats.h | 15 +++++++++++++++
4 files changed, 42 insertions(+), 5 deletions(-)
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 85dab9f9e..5d6eaf197 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -2694,7 +2694,10 @@ Status DBImpl::ProcessKeyValueCompaction(
Iterator* input,
CompactionState* compact,
bool is_compaction_v2,
+ int* num_output_records,
LogBuffer* log_buffer) {
+ assert(num_output_records != nullptr);
+
size_t combined_idx = 0;
Status status;
std::string compaction_filter_value;
@@ -2965,6 +2968,7 @@ Status DBImpl::ProcessKeyValueCompaction(
}
compact->current_output()->largest.DecodeFrom(newkey);
compact->builder->Add(newkey, value);
+ (*num_output_records)++;
compact->current_output()->largest_seqno =
std::max(compact->current_output()->largest_seqno, seqno);
@@ -3140,6 +3144,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
mutex_.Unlock();
log_buffer->FlushBufferToLog();
+ int num_output_records = 0;
const uint64_t start_micros = env_->NowMicros();
unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
input->SeekToFirst();
@@ -3168,6 +3173,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
input.get(),
compact,
false,
+ &num_output_records,
log_buffer);
} else {
// temp_backup_input always point to the start of the current buffer
@@ -3249,6 +3255,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
input.get(),
compact,
true,
+ &num_output_records,
log_buffer);
if (!status.ok()) {
@@ -3286,6 +3293,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
input.get(),
compact,
true,
+ &num_output_records,
log_buffer);
compact->CleanupBatchBuffer();
@@ -3309,6 +3317,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
input.get(),
compact,
true,
+ &num_output_records,
log_buffer);
} // checking for compaction filter v2
@@ -3342,17 +3351,24 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
}
stats.files_out_levelnp1 = num_output_files;
+ uint64_t num_input_records = 0;
+
for (int i = 0; i < compact->compaction->num_input_files(0); i++) {
stats.bytes_readn += compact->compaction->input(0, i)->fd.GetFileSize();
+ stats.num_input_records += compact->compaction->input(0, i)->num_entries;
+ num_input_records += compact->compaction->input(0, i)->num_entries;
}
for (int i = 0; i < compact->compaction->num_input_files(1); i++) {
stats.bytes_readnp1 += compact->compaction->input(1, i)->fd.GetFileSize();
+ num_input_records += compact->compaction->input(1, i)->num_entries;
}
for (int i = 0; i < num_output_files; i++) {
stats.bytes_written += compact->outputs[i].file_size;
}
+ stats.num_dropped_records =
+ static_cast<int>(num_input_records) - num_output_records;
RecordCompactionIOStats();
@@ -3375,7 +3391,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
"[%s] compacted to: %s, MB/sec: %.1f rd, %.1f wr, level %d, "
"files in(%d, %d) out(%d) "
"MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
- "write-amplify(%.1f) %s\n",
+ "write-amplify(%.1f) %s, records in: %d, records dropped: %d\n",
cfd->GetName().c_str(), cfd->current()->LevelSummary(&tmp),
(stats.bytes_readn + stats.bytes_readnp1) /
static_cast<double>(stats.micros),
@@ -3387,7 +3403,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
(stats.bytes_written + stats.bytes_readnp1 + stats.bytes_readn) /
(double)stats.bytes_readn,
stats.bytes_written / (double)stats.bytes_readn,
- status.ToString().c_str());
+ status.ToString().c_str(), stats.num_input_records,
+ stats.num_dropped_records);
return status;
}
diff --git a/db/db_impl.h b/db/db_impl.h
index f1a81e00c..622df4293 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -419,6 +419,7 @@ class DBImpl : public DB {
Iterator* input,
CompactionState* compact,
bool is_compaction_v2,
+ int* num_output_records,
LogBuffer* log_buffer);
// Call compaction_filter_v2->Filter() on kv-pairs in compact
diff --git a/db/internal_stats.cc b/db/internal_stats.cc
index c9f9306e2..3f60d72ce 100644
--- a/db/internal_stats.cc
+++ b/db/internal_stats.cc
@@ -30,7 +30,7 @@ void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name) {
"Level Files Size(MB) Score Read(GB) Rn(GB) Rnp1(GB) "
"Write(GB) Wnew(GB) RW-Amp W-Amp Rd(MB/s) Wr(MB/s) Rn(cnt) "
"Rnp1(cnt) Wnp1(cnt) Wnew(cnt) Comp(sec) Comp(cnt) Avg(sec) "
- "Stall(sec) Stall(cnt) Avg(ms)\n"
+ "Stall(sec) Stall(cnt) Avg(ms) RecordIn RecordDrop\n"
"--------------------------------------------------------------------"
"--------------------------------------------------------------------"
"--------------------------------------------------------------------\n",
@@ -65,7 +65,9 @@ void PrintLevelStats(char* buf, size_t len, const std::string& name,
"%8.3f " /* Avg(sec) */
"%10.2f " /* Stall(sec) */
"%10" PRIu64 " " /* Stall(cnt) */
- "%7.2f\n" /* Avg(ms) */,
+ "%7.2f" /* Avg(ms) */
+ "%8d " /* input entries */
+ "%10d\n" /* number of records reduced */,
name.c_str(), num_files, being_compacted, total_file_size / kMB, score,
bytes_read / kGB,
stats.bytes_readn / kGB,
@@ -85,7 +87,9 @@ void PrintLevelStats(char* buf, size_t len, const std::string& name,
stats.count == 0 ? 0 : stats.micros / 1000000.0 / stats.count,
stall_us / 1000000.0,
stalls,
- stalls == 0 ? 0 : stall_us / 1000.0 / stalls);
+ stalls == 0 ? 0 : stall_us / 1000.0 / stalls,
+ stats.num_input_records,
+ stats.num_dropped_records);
}
diff --git a/db/internal_stats.h b/db/internal_stats.h
index 2e04f24e7..18d67de5c 100644
--- a/db/internal_stats.h
+++ b/db/internal_stats.h
@@ -123,6 +123,13 @@ class InternalStats {
// Files written during compaction between levels N and N+1
int files_out_levelnp1;
+ // Total incoming entries during compaction between levels N and N+1
+ int num_input_records;
+
+ // Accumulated difference in number of entries
+ // (num input entries - num output entries) for compaction levels N and N+1
+ int num_dropped_records;
+
// Number of compactions done
int count;
@@ -134,6 +141,8 @@ class InternalStats {
files_in_leveln(0),
files_in_levelnp1(0),
files_out_levelnp1(0),
+ num_input_records(0),
+ num_dropped_records(0),
count(count) {}
explicit CompactionStats(const CompactionStats& c)
@@ -144,6 +153,8 @@ class InternalStats {
files_in_leveln(c.files_in_leveln),
files_in_levelnp1(c.files_in_levelnp1),
files_out_levelnp1(c.files_out_levelnp1),
+ num_input_records(c.num_input_records),
+ num_dropped_records(c.num_dropped_records),
count(c.count) {}
void Add(const CompactionStats& c) {
@@ -154,6 +165,8 @@ class InternalStats {
this->files_in_leveln += c.files_in_leveln;
this->files_in_levelnp1 += c.files_in_levelnp1;
this->files_out_levelnp1 += c.files_out_levelnp1;
+ this->num_input_records += c.num_input_records;
+ this->num_dropped_records += c.num_dropped_records;
this->count += c.count;
}
@@ -165,6 +178,8 @@ class InternalStats {
this->files_in_leveln -= c.files_in_leveln;
this->files_in_levelnp1 -= c.files_in_levelnp1;
this->files_out_levelnp1 -= c.files_out_levelnp1;
+ this->num_input_records -= c.num_input_records;
+ this->num_dropped_records -= c.num_dropped_records;
this->count -= c.count;
}
};
From 0e516a75da6b59e1c5894955478d44089785328e Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang
Date: Fri, 3 Oct 2014 00:10:58 -0700
Subject: [PATCH 04/11] Fix lint errors in java/rocksjni/options.cc
Summary:
Fix lint errors in java/rocksjni/options.cc
Test Plan:
make rocksdbjava
---
java/rocksjni/options.cc | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc
index 50416ef81..a8be5af8b 100644
--- a/java/rocksjni/options.cc
+++ b/java/rocksjni/options.cc
@@ -73,11 +73,13 @@ void Java_org_rocksdb_Options_setBuiltinComparator(
JNIEnv* env, jobject jobj, jlong jhandle, jint builtinComparator) {
switch (builtinComparator){
case 1:
- reinterpret_cast<rocksdb::Options*>(jhandle)->comparator = rocksdb::ReverseBytewiseComparator();
- break;
+ reinterpret_cast<rocksdb::Options*>(jhandle)->comparator =
+ rocksdb::ReverseBytewiseComparator();
+ break;
default:
- reinterpret_cast<rocksdb::Options*>(jhandle)->comparator = rocksdb::BytewiseComparator();
- break;
+ reinterpret_cast<rocksdb::Options*>(jhandle)->comparator =
+ rocksdb::BytewiseComparator();
+ break;
}
}
From 56dfd363fd51aa10c7f1d9d965c8bbbefffa6c30 Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang
Date: Fri, 3 Oct 2014 00:25:27 -0700
Subject: [PATCH 05/11] Fix a check in database shutdown or Column family drop
during flush.
Summary:
Fix a check in database shutdown or Column family drop during flush.
Special thanks to Maurice Barnum, who spotted the problem :)
Test Plan: db_test
Reviewers: ljin, igor, sdong
Reviewed By: sdong
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D24273
---
db/db_impl.cc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 5d6eaf197..7463f749b 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1614,7 +1614,7 @@ Status DBImpl::FlushMemTableToOutputFile(
Status s = WriteLevel0Table(cfd, mutable_cf_options, mems, edit,
&file_number, log_buffer);
- if (s.ok() && shutting_down_.Acquire_Load() && cfd->IsDropped()) {
+ if (s.ok() && (shutting_down_.Acquire_Load() || cfd->IsDropped())) {
s = Status::ShutdownInProgress(
"Database shutdown or Column family drop during flush");
}
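Spelling the predicate out, since the small-looking change flips the logic entirely; a sketch outside the DBImpl context:

    // Old: s.ok() && shutting_down_.Acquire_Load() && cfd->IsDropped()
    //      aborted the flush only when a shutdown AND a column family drop
    //      coincided. Either event alone should abort it.
    bool ShouldAbortFlush(bool level0_write_ok, bool shutting_down,
                          bool cf_dropped) {
      return level0_write_ok && (shutting_down || cf_dropped);
    }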
From 4eb5a40f7db18500b4f639698eb176eeecb150fc Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang
Date: Fri, 3 Oct 2014 02:10:00 -0700
Subject: [PATCH 06/11] [Java] Fixed link error on library loading on Mac.
Summary:
Fixed link error on library loading on Mac.
Test Plan:
make rocksdbjava
make jtest
---
Makefile | 1 -
1 file changed, 1 deletion(-)
diff --git a/Makefile b/Makefile
index ad9082d31..705b7ff97 100644
--- a/Makefile
+++ b/Makefile
@@ -517,7 +517,6 @@ ROCKSDBJNILIB = librocksdbjni.so
ROCKSDB_JAR = rocksdbjni.jar
ifeq ($(PLATFORM), OS_MACOSX)
-ROCKSDBJNILIB = librocksdbjni.jnilib
JAVA_INCLUDE = -I/System/Library/Frameworks/JavaVM.framework/Headers/
endif
From df3373fbf7dcd7af0b0a27779bda0d6f37c05b9b Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang
Date: Fri, 3 Oct 2014 02:14:43 -0700
Subject: [PATCH 07/11] [Java] Fix compile error on DbBenchmark.java
Summary:
Fix compile error on DbBenchmark.java
Test Plan:
make rocksdbjava
make jdb_bench
---
java/org/rocksdb/benchmark/DbBenchmark.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/java/org/rocksdb/benchmark/DbBenchmark.java b/java/org/rocksdb/benchmark/DbBenchmark.java
index 686d39445..612fdaf28 100644
--- a/java/org/rocksdb/benchmark/DbBenchmark.java
+++ b/java/org/rocksdb/benchmark/DbBenchmark.java
@@ -523,8 +523,8 @@ public class DbBenchmark {
BlockBasedTableConfig table_options = new BlockBasedTableConfig();
table_options.setBlockSize((Long)flags_.get(Flag.block_size))
.setBlockCacheSize((Long)flags_.get(Flag.cache_size))
- .setFilterBitsPerKey((Integer)flags_.get(Flag.bloom_bits))
- .setCacheNumShardBits((Integer)flags_.get(Flag.cache_numshardbits));
+ .setCacheNumShardBits(
+ (Integer)flags_.get(Flag.cache_numshardbits));
options.setTableFormatConfig(table_options);
}
options.setWriteBufferSize(
From a5757ff3c274215a97dee0de705c5b4220b23bc2 Mon Sep 17 00:00:00 2001
From: fyrz
Date: Fri, 3 Oct 2014 11:50:40 +0200
Subject: [PATCH 08/11] Listing of changes
- JavaDoc readability improvements for RocksObject
- JavaDoc improvements BlockBasedTableConfig, GenericRateLimiterConfig, RocksDB
- JavaDoc improvements MemTableConfig
- JavaDoc improvements RocksObject
- JavaDoc improvements GenericRateLimiterConfig
- JavaDoc improvements ReadOptions
- JavaDoc improvements RateLimiterConfig
- JavaDoc improvements RestoreOptions
- JavaDoc improvements RestoreBackupableDB
- JavaDoc improvements BlockBasedTableConfig
- JavaDoc improvements Options
- JavaDoc improvements BackupableDB and BackupableDBOptions
---
java/org/rocksdb/BackupableDB.java | 23 +-
java/org/rocksdb/BackupableDBOptions.java | 50 +++--
java/org/rocksdb/BlockBasedTableConfig.java | 23 +-
.../org/rocksdb/GenericRateLimiterConfig.java | 34 ++-
java/org/rocksdb/MemTableConfig.java | 2 +-
java/org/rocksdb/Options.java | 204 +++++++++---------
java/org/rocksdb/RateLimiterConfig.java | 9 +-
java/org/rocksdb/ReadOptions.java | 5 +-
java/org/rocksdb/RestoreBackupableDB.java | 21 +-
java/org/rocksdb/RestoreOptions.java | 14 +-
java/org/rocksdb/RocksDB.java | 9 +-
java/org/rocksdb/RocksObject.java | 77 ++++---
12 files changed, 275 insertions(+), 196 deletions(-)
diff --git a/java/org/rocksdb/BackupableDB.java b/java/org/rocksdb/BackupableDB.java
index 108c4deb5..3ee29b347 100644
--- a/java/org/rocksdb/BackupableDB.java
+++ b/java/org/rocksdb/BackupableDB.java
@@ -8,19 +8,19 @@ package org.rocksdb;
/**
* A subclass of RocksDB which supports backup-related operations.
*
- * @see BackupableDBOptions
+ * @see org.rocksdb.BackupableDBOptions
*/
public class BackupableDB extends RocksDB {
/**
- * Open a BackupableDB under the specified path.
+ * Open a {@code BackupableDB} under the specified path.
* Note that the backup path should be set properly in the
* input BackupableDBOptions.
*
- * @param opt options for db.
- * @param bopt backup related options.
- * @param the db path for storing data. The path for storing
- * backup should be specified in the BackupableDBOptions.
- * @return reference to the opened BackupableDB.
+ * @param opt {@link org.rocksdb.Options} to set for the database.
+ * @param bopt {@link org.rocksdb.BackupableDBOptions} to use.
+ * @param db_path Path to store data to. The path for storing the backup should be
+ * specified in the {@link org.rocksdb.BackupableDBOptions}.
+ * @return BackupableDB reference to the opened database.
*/
public static BackupableDB open(
Options opt, BackupableDBOptions bopt, String db_path)
@@ -61,10 +61,9 @@ public class BackupableDB extends RocksDB {
/**
* Close the BackupableDB instance and release resource.
*
- * Internally, BackupableDB owns the rocksdb::DB pointer to its
- * associated RocksDB. The release of that RocksDB pointer is
- * handled in the destructor of the c++ rocksdb::BackupableDB and
- * should be transparent to Java developers.
+ * Internally, BackupableDB owns the {@code rocksdb::DB} pointer to its associated
+ * {@link org.rocksdb.RocksDB}. The release of that RocksDB pointer is handled in the destructor
+ * of the c++ {@code rocksdb::BackupableDB} and should be transparent to Java developers.
*/
@Override public synchronized void close() {
if (isInitialized()) {
@@ -74,7 +73,7 @@ public class BackupableDB extends RocksDB {
/**
* A protected construction that will be used in the static factory
- * method BackupableDB.open().
+ * method {@link #open(Options, BackupableDBOptions, String)}.
*/
protected BackupableDB() {
super();
diff --git a/java/org/rocksdb/BackupableDBOptions.java b/java/org/rocksdb/BackupableDBOptions.java
index 2c5047f77..07751a64d 100644
--- a/java/org/rocksdb/BackupableDBOptions.java
+++ b/java/org/rocksdb/BackupableDBOptions.java
@@ -7,33 +7,41 @@ package org.rocksdb;
/**
* BackupableDBOptions to control the behavior of a backupable database.
- * It will be used during the creation of a BackupableDB.
+ * It will be used during the creation of a {@link org.rocksdb.BackupableDB}.
*
* Note that dispose() must be called before an Options instance
* become out-of-scope to release the allocated memory in c++.
*
- * @param path Where to keep the backup files. Has to be different than dbname.
- Best to set this to dbname_ + "/backups"
- * @param shareTableFiles If share_table_files == true, backup will assume that
- * table files with same name have the same contents. This enables
- * incremental backups and avoids unnecessary data copies. If
- * share_table_files == false, each backup will be on its own and will not
- * share any data with other backups. default: true
- * @param sync If sync == true, we can guarantee you'll get consistent backup
- * even on a machine crash/reboot. Backup process is slower with sync
- * enabled. If sync == false, we don't guarantee anything on machine reboot.
- * However, chances are some of the backups are consistent. Default: true
- * @param destroyOldData If true, it will delete whatever backups there are
- * already. Default: false
- * @param backupLogFiles If false, we won't backup log files. This option can be
- * useful for backing up in-memory databases where log file are persisted,
- * but table files are in memory. Default: true
- * @param backupRateLimit Max bytes that can be transferred in a second during
- * backup. If 0 or negative, then go as fast as you can. Default: 0
- * @param restoreRateLimit Max bytes that can be transferred in a second during
- * restore. If 0 or negative, then go as fast as you can. Default: 0
+ * @see org.rocksdb.BackupableDB
*/
public class BackupableDBOptions extends RocksObject {
+
+ /**
+ * BackupableDBOptions constructor
+ *
+ * @param path Where to keep the backup files. Has to be different than db name.
+ * Best to set this to {@code db name_ + "/backups"}
+ * @param shareTableFiles If {@code share_table_files == true}, backup will assume
+ * that table files with same name have the same contents. This enables incremental
+ * backups and avoids unnecessary data copies. If {@code share_table_files == false},
+ * each backup will be on its own and will not share any data with other backups.
+ * Default: true
+ * @param sync If {@code sync == true}, we can guarantee you'll get consistent backup
+ * even on a machine crash/reboot. Backup process is slower with sync enabled.
+ * If {@code sync == false}, we don't guarantee anything on machine reboot.
+ * However, chances are some of the backups are consistent.
+ * Default: true
+ * @param destroyOldData If true, it will delete whatever backups there are already.
+ * Default: false
+ * @param backupLogFiles If false, we won't backup log files. This option can be
+ * useful for backing up in-memory databases where log files are persisted, but table
+ * files are in memory.
+ * Default: true
+ * @param backupRateLimit Max bytes that can be transferred in a second during backup.
+ * If 0 or negative, then go as fast as you can. Default: 0
+ * @param restoreRateLimit Max bytes that can be transferred in a second during restore.
+ * If 0 or negative, then go as fast as you can. Default: 0
+ */
public BackupableDBOptions(String path, boolean shareTableFiles, boolean sync,
boolean destroyOldData, boolean backupLogFiles, long backupRateLimit,
long restoreRateLimit) {
diff --git a/java/org/rocksdb/BlockBasedTableConfig.java b/java/org/rocksdb/BlockBasedTableConfig.java
index 9a6967a95..2f9f0ac64 100644
--- a/java/org/rocksdb/BlockBasedTableConfig.java
+++ b/java/org/rocksdb/BlockBasedTableConfig.java
@@ -27,7 +27,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Disable block cache. If this is set to true,
* then no block cache should be used, and the block_cache should
- * point to a nullptr object.
+ * point to a {@code nullptr} object.
* Default: false
*
* @param noBlockCache if use block cache
@@ -69,7 +69,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* Controls the number of shards for the block cache.
* This is applied only if cacheSize is set to non-negative.
*
- * @param numShardBits the number of shard bits. The resulting
+ * @param blockCacheNumShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings."
* @return the reference to the current option.
@@ -176,13 +176,14 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Use the specified filter policy to reduce disk reads.
*
- * Filter should not be disposed before options instances using this filter is
- * disposed. If dispose() function is not called, then filter object will be
- * GC'd automatically.
+ * {@link org.rocksdb.Filter} should not be disposed before options instances
+ * using this filter is disposed. If {@link Filter#dispose()} function is not
+ * called, then filter object will be GC'd automatically.
*
- * Filter instance can be re-used in multiple options instances.
+ * {@link org.rocksdb.Filter} instance can be re-used in multiple options
+ * instances.
*
- * @param Filter Filter Policy java instance.
+ * @param filter {@link org.rocksdb.Filter} Filter Policy java instance.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setFilter(Filter filter) {
@@ -206,7 +207,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
If not specified, each "table reader" object will pre-load index/filter
block during table initialization.
*
- * @param index and filter blocks should be put in block cache.
+ * @param cacheIndexAndFilterBlocks whether index and filter blocks should be put in the block cache.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
@@ -233,7 +234,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
if true, does not store prefix and allows prefix hash collision
(less memory consumption)
*
- * @param if hash collisions should be allowed.
+ * @param hashIndexAllowCollision whether hash collisions should be allowed.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setHashIndexAllowCollision(
@@ -256,7 +257,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* Size of compressed block cache. If 0, then block_cache_compressed is set
* to null.
*
- * @param size of compressed block cache.
+ * @param blockCacheCompressedSize size of the compressed block cache.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setBlockCacheCompressedSize(
@@ -281,7 +282,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* Controls the number of shards for the block compressed cache.
* This is applied only if blockCompressedCacheSize is set to non-negative.
*
- * @param numShardBits the number of shard bits. The resulting
+ * @param blockCacheCompressedNumShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings."
* @return the reference to the current option.
diff --git a/java/org/rocksdb/GenericRateLimiterConfig.java b/java/org/rocksdb/GenericRateLimiterConfig.java
index 78b8b37ec..2a2e7b657 100644
--- a/java/org/rocksdb/GenericRateLimiterConfig.java
+++ b/java/org/rocksdb/GenericRateLimiterConfig.java
@@ -7,18 +7,48 @@ package org.rocksdb;
/**
* Config for rate limiter, which is used to control write rate of flush and
* compaction.
+ *
+ * @see RateLimiterConfig
*/
public class GenericRateLimiterConfig extends RateLimiterConfig {
private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000);
private static final int DEFAULT_FAIRNESS = 10;
-
+
+ /**
+ * GenericRateLimiterConfig constructor
+ *
+ * @param rateBytesPerSecond this is the only parameter you want to set
+ * most of the time. It controls the total write rate of compaction
+ * and flush in bytes per second. Currently, RocksDB does not enforce
+ * rate limit for anything other than flush and compaction, e.g. write to WAL.
+ * @param refillPeriodMicros this controls how often tokens are refilled. For example,
+ * when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
+ * 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
+ * burstier writes while smaller value introduces more CPU overhead.
+ * The default should work for most cases.
+ * @param fairness RateLimiter accepts high-pri requests and low-pri requests.
+ * A low-pri request is usually blocked in favor of hi-pri request. Currently,
+ * RocksDB assigns low-pri to request from compaction and high-pri to request
+ * from flush. Low-pri requests can get blocked if flush requests come in
+ * continuously. This fairness parameter grants low-pri requests permission by
+ * 1/fairness chance even though high-pri requests exist to avoid starvation.
+ * You should be good by leaving it at default 10.
+ */
public GenericRateLimiterConfig(long rateBytesPerSecond,
long refillPeriodMicros, int fairness) {
rateBytesPerSecond_ = rateBytesPerSecond;
refillPeriodMicros_ = refillPeriodMicros;
fairness_ = fairness;
}
-
+
+ /**
+ * GenericRateLimiterConfig constructor
+ *
+ * @param rateBytesPerSecond this is the only parameter you want to set
+ * most of the time. It controls the total write rate of compaction
+ * and flush in bytes per second. Currently, RocksDB does not enforce
+ * rate limit for anything other than flush and compaction, e.g. write to WAL.
+ */
public GenericRateLimiterConfig(long rateBytesPerSecond) {
this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS);
}
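These parameters mirror the C++ side's generic rate limiter; a minimal C++ usage sketch, assuming the 2014-era NewGenericRateLimiter() factory and the shared_ptr rate_limiter field on Options:

    #include "rocksdb/options.h"
    #include "rocksdb/rate_limiter.h"

    rocksdb::Options MakeRateLimitedOptions() {
      rocksdb::Options options;
      // 10 MB/s shared by flush and compaction; tokens refilled every
      // 100ms; fairness 10 (the defaults documented above).
      options.rate_limiter.reset(rocksdb::NewGenericRateLimiter(
          10 * 1024 * 1024, 100 * 1000, 10));
      return options;
    }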
diff --git a/java/org/rocksdb/MemTableConfig.java b/java/org/rocksdb/MemTableConfig.java
index a473c2585..904aa37b5 100644
--- a/java/org/rocksdb/MemTableConfig.java
+++ b/java/org/rocksdb/MemTableConfig.java
@@ -21,7 +21,7 @@ public abstract class MemTableConfig {
* which will create a c++ shared-pointer to the c++ MemTableRepFactory
* that associated with the Java MemTableConfig.
*
- * @see Options.setMemTableFactory()
+ * @see Options#setMemTableConfig(MemTableConfig)
*/
abstract protected long newMemTableFactoryHandle();
}
diff --git a/java/org/rocksdb/Options.java b/java/org/rocksdb/Options.java
index 7ccc74834..642c6c4dd 100644
--- a/java/org/rocksdb/Options.java
+++ b/java/org/rocksdb/Options.java
@@ -7,10 +7,10 @@ package org.rocksdb;
/**
* Options to control the behavior of a database. It will be used
- * during the creation of a RocksDB (i.e., RocksDB.open()).
+ * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
*
- * If dispose() function is not called, then it will be GC'd automatically and
- * native resources will be released as part of the process.
+ * If {@link #dispose()} function is not called, then it will be GC'd automatically
+ * and native resources will be released as part of the process.
*/
public class Options extends RocksObject {
static {
@@ -30,7 +30,7 @@ public class Options extends RocksObject {
* Construct options for opening a RocksDB.
*
* This constructor will create (by allocating a block of memory)
- * an rocksdb::Options in the c++ side.
+ * an {@code rocksdb::Options} in the c++ side.
*/
public Options() {
super();
@@ -42,13 +42,14 @@ public class Options extends RocksObject {
/**
* If this value is set to true, then the database will be created
- * if it is missing during RocksDB.open().
+ * if it is missing during {@code RocksDB.open()}.
* Default: false
*
* @param flag a flag indicating whether to create a database the
- * specified database in RocksDB.open() operation is missing.
- * @return the instance of the current Options.
- * @see RocksDB.open()
+ * specified database in {@link org.rocksdb.RocksDB#open(Options, String)} operation
+ * is missing.
+ * @return the instance of the current Options
+ * @see org.rocksdb.RocksDB#open(Options, String)
*/
public Options setCreateIfMissing(boolean flag) {
assert(isInitialized());
@@ -59,7 +60,7 @@ public class Options extends RocksObject {
/**
* Use the specified object to interact with the environment,
* e.g. to read/write files, schedule background work, etc.
- * Default: RocksEnv.getDefault()
+ * Default: {@link RocksEnv#getDefault()}
*/
public Options setEnv(RocksEnv env) {
assert(isInitialized());
@@ -79,7 +80,7 @@ public class Options extends RocksObject {
* If true, the database will be created if it is missing.
*
* @return true if the createIfMissing option is set to true.
- * @see setCreateIfMissing()
+ * @see #setCreateIfMissing(boolean)
*/
public boolean createIfMissing() {
assert(isInitialized());
@@ -87,12 +88,12 @@ public class Options extends RocksObject {
}
/**
- * Set BuiltinComparator to be used with RocksDB.
+ * Set {@link org.rocksdb.Options.BuiltinComparator} to be used with RocksDB.
*
* Note: Comparator can be set once upon database creation.
*
* Default: BytewiseComparator.
- * @param builtinComparator a BuiltinComparator type.
+ * @param builtinComparator a {@link org.rocksdb.Options.BuiltinComparator} type.
*/
public void setBuiltinComparator(BuiltinComparator builtinComparator) {
assert(isInitialized());
@@ -106,7 +107,7 @@ public class Options extends RocksObject {
* on disk) before converting to a sorted on-disk file.
*
* Larger values increase performance, especially during bulk loads.
- * Up to max_write_buffer_number write buffers may be held in memory
+ * Up to {@code max_write_buffer_number} write buffers may be held in memory
* at the same time, so you may wish to adjust this parameter
* to control memory usage.
*
@@ -116,7 +117,7 @@ public class Options extends RocksObject {
* Default: 4MB
* @param writeBufferSize the size of write buffer.
* @return the instance of the current Options.
- * @see RocksDB.open()
+ * @see org.rocksdb.RocksDB#open(Options, String)
*/
public Options setWriteBufferSize(long writeBufferSize) {
assert(isInitialized());
@@ -128,7 +129,7 @@ public class Options extends RocksObject {
* Return size of write buffer size.
*
* @return size of write buffer.
- * @see setWriteBufferSize()
+ * @see #setWriteBufferSize(long)
*/
public long writeBufferSize() {
assert(isInitialized());
@@ -143,7 +144,7 @@ public class Options extends RocksObject {
*
* @param maxWriteBufferNumber maximum number of write buffers.
* @return the instance of the current Options.
- * @see RocksDB.open()
+ * @see org.rocksdb.RocksDB#open(Options, String)
*/
public Options setMaxWriteBufferNumber(int maxWriteBufferNumber) {
assert(isInitialized());
@@ -155,7 +156,7 @@ public class Options extends RocksObject {
* Returns maximum number of write buffers.
*
* @return maximum number of write buffers.
- * @see setMaxWriteBufferNumber()
+ * @see #setMaxWriteBufferNumber(int)
*/
public int maxWriteBufferNumber() {
assert(isInitialized());
@@ -181,9 +182,9 @@ public class Options extends RocksObject {
* Default: false
*
* @param errorIfExists if true, an exception will be thrown
- * during RocksDB.open() if the database already exists.
+ * during {@code RocksDB.open()} if the database already exists.
* @return the reference to the current option.
- * @see RocksDB.open()
+ * @see org.rocksdb.RocksDB#open(Options, String)
*/
public Options setErrorIfExists(boolean errorIfExists) {
assert(isInitialized());
@@ -237,8 +238,9 @@ public class Options extends RocksObject {
* Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based
- * on target_file_size_base and target_file_size_multiplier for level-based
- * compaction. For universal-style compaction, you can usually set it to -1.
+ * on {@code target_file_size_base} and {@code target_file_size_multiplier}
+ * for level-based compaction. For universal-style compaction, you can usually
+ * set it to -1.
*
* @return the maximum number of open files.
*/
@@ -252,8 +254,9 @@ public class Options extends RocksObject {
* Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based
- * on target_file_size_base and target_file_size_multiplier for level-based
- * compaction. For universal-style compaction, you can usually set it to -1.
+ * on {@code target_file_size_base} and {@code target_file_size_multiplier}
+ * for level-based compaction. For universal-style compaction, you can usually
+ * set it to -1.
* Default: 5000
*
* @param maxOpenFiles the maximum number of open files.
@@ -271,7 +274,7 @@ public class Options extends RocksObject {
* to stable storage. Their contents remain in the OS buffers till the
* OS decides to flush them. This option is good for bulk-loading
* of data. Once the bulk-loading is complete, please issue a
- * sync to the OS to flush all dirty buffesrs to stable storage.
+ * sync to the OS to flush all dirty buffers to stable storage.
*
* @return if true, then data-sync is disabled.
*/
@@ -286,7 +289,7 @@ public class Options extends RocksObject {
* to stable storage. Their contents remain in the OS buffers till the
* OS decides to flush them. This option is good for bulk-loading
* of data. Once the bulk-loading is complete, please issue a
- * sync to the OS to flush all dirty buffesrs to stable storage.
+ * sync to the OS to flush all dirty buffers to stable storage.
* Default: false
*
* @param disableDataSync a boolean flag to specify whether to
@@ -306,7 +309,7 @@ public class Options extends RocksObject {
* This parameter should be set to true while storing data to
* filesystem like ext3 that can lose files after a reboot.
*
- * @return true if fsync is used.
+ * @return boolean value indicating if fsync is used.
*/
public boolean useFsync() {
assert(isInitialized());
@@ -438,7 +441,8 @@ public class Options extends RocksObject {
* Default: 1
*
* @return the maximum number of concurrent background compaction jobs.
- * @see Env.setBackgroundThreads()
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
*/
public int maxBackgroundCompactions() {
assert(isInitialized());
@@ -451,7 +455,7 @@ public class Options extends RocksObject {
it does not use any locks to prevent concurrent updates.
*
* @return the instance of the current Options.
- * @see RocksDB.open()
+ * @see org.rocksdb.RocksDB#open(Options, String)
*/
public Options createStatistics() {
assert(isInitialized());
@@ -460,11 +464,11 @@ public class Options extends RocksObject {
}
/**
- * Returns statistics object. Calls createStatistics() if
- * C++ returns NULL pointer for statistics.
+ * Returns statistics object. Calls {@link #createStatistics()} if
+ * C++ returns {@code nullptr} for statistics.
*
* @return the instance of the statistics object.
- * @see createStatistics()
+ * @see #createStatistics()
*/
public Statistics statisticsPtr() {
assert(isInitialized());
@@ -489,8 +493,9 @@ public class Options extends RocksObject {
* compaction jobs.
* @return the reference to the current option.
*
- * @see Env.setBackgroundThreads()
- * @see maxBackgroundFlushes()
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
+ * @see #maxBackgroundFlushes()
*/
public Options setMaxBackgroundCompactions(int maxBackgroundCompactions) {
assert(isInitialized());
@@ -505,7 +510,8 @@ public class Options extends RocksObject {
* Default: 1
*
* @return the maximum number of concurrent background flush jobs.
- * @see Env.setBackgroundThreads()
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
*/
public int maxBackgroundFlushes() {
assert(isInitialized());
@@ -519,11 +525,12 @@ public class Options extends RocksObject {
* HIGH priority thread pool. For more information, see
* Default: 1
*
- * @param maxBackgroundFlushes
+ * @param maxBackgroundFlushes number of max concurrent flush jobs
* @return the reference to the current option.
*
- * @see Env.setBackgroundThreads()
- * @see maxBackgroundCompactions()
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
+ * @see #maxBackgroundCompactions()
*/
public Options setMaxBackgroundFlushes(int maxBackgroundFlushes) {
assert(isInitialized());
@@ -713,20 +720,22 @@ public class Options extends RocksObject {
/**
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
* will be deleted.
- * 1. If both set to 0, logs will be deleted asap and will not get into
- * the archive.
- * 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
- * WAL files will be checked every 10 min and if total size is greater
- * then WAL_size_limit_MB, they will be deleted starting with the
- * earliest until size_limit is met. All empty files will be deleted.
- * 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
- * WAL files will be checked every WAL_ttl_secondsi / 2 and those that
- * are older than WAL_ttl_seconds will be deleted.
- * 4. If both are not 0, WAL files will be checked every 10 min and both
- * checks will be performed with ttl being first.
+ *
+ * - If both set to 0, logs will be deleted asap and will not get into
+ * the archive.
+ * - If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ * WAL files will be checked every 10 min and if total size is greater
+ * then WAL_size_limit_MB, they will be deleted starting with the
+ * earliest until size_limit is met. All empty files will be deleted.
+ * - If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+ * WAL files will be checked every WAL_ttl_seconds / 2 and those that
+ * are older than WAL_ttl_seconds will be deleted.
+ * - If both are not 0, WAL files will be checked every 10 min and both
+ * checks will be performed with ttl being first.
+ *
*
* @return the wal-ttl seconds
- * @see walSizeLimitMB()
+ * @see #walSizeLimitMB()
*/
public long walTtlSeconds() {
assert(isInitialized());
@@ -735,23 +744,24 @@ public class Options extends RocksObject {
private native long walTtlSeconds(long handle);
/**
- * WalTtlSeconds() and walSizeLimitMB() affect how archived logs
+ * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
* will be deleted.
- * 1. If both set to 0, logs will be deleted asap and will not get into
- * the archive.
- * 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ *
+ * - If both set to 0, logs will be deleted asap and will not get into
+ * the archive.
+ * - If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* then WAL_size_limit_MB, they will be deleted starting with the
- * earliest until size_limit is met. All empty files will be deleted.
- * 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+ * earliest until size_limit is met. All empty files will be deleted.
+ * - If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
- * are older than WAL_ttl_seconds will be deleted.
- * 4. If both are not 0, WAL files will be checked every 10 min and both
- * checks will be performed with ttl being first.
+ * are older than WAL_ttl_seconds will be deleted.
+ * - If both are not 0, WAL files will be checked every 10 min and both
+ * checks will be performed with ttl being first.
*
* @param walTtlSeconds the ttl seconds
* @return the reference to the current option.
- * @see setWalSizeLimitMB()
+ * @see #setWalSizeLimitMB(long)
*/
public Options setWalTtlSeconds(long walTtlSeconds) {
assert(isInitialized());
@@ -761,22 +771,23 @@ public class Options extends RocksObject {
private native void setWalTtlSeconds(long handle, long walTtlSeconds);
/**
- * WalTtlSeconds() and walSizeLimitMB() affect how archived logs
+ * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
* will be deleted.
- * 1. If both set to 0, logs will be deleted asap and will not get into
- * the archive.
- * 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ *
+ * - If both set to 0, logs will be deleted asap and will not get into
+ * the archive.
+ * - If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* then WAL_size_limit_MB, they will be deleted starting with the
- * earliest until size_limit is met. All empty files will be deleted.
- * 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
- * WAL files will be checked every WAL_ttl_secondsi / 2 and those that
- * are older than WAL_ttl_seconds will be deleted.
- * 4. If both are not 0, WAL files will be checked every 10 min and both
- * checks will be performed with ttl being first.
- *
+ * earliest until size_limit is met. All empty files will be deleted.
+ * - If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+ * WAL files will be checked every WAL_ttl_seconds / 2 and those that
+ * are older than WAL_ttl_seconds will be deleted.
+ * - If both are not 0, WAL files will be checked every 10 min and both
+ * checks will be performed with ttl being first.
+ *
* @return size limit in mega-bytes.
- * @see walSizeLimitMB()
+ * @see #walSizeLimitMB()
*/
public long walSizeLimitMB() {
assert(isInitialized());
@@ -787,21 +798,22 @@ public class Options extends RocksObject {
/**
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
* will be deleted.
- * 1. If both set to 0, logs will be deleted asap and will not get into
- * the archive.
- * 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ *
+ * - If both set to 0, logs will be deleted asap and will not get into
+ * the archive.
+ * - If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* then WAL_size_limit_MB, they will be deleted starting with the
- * earliest until size_limit is met. All empty files will be deleted.
- * 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+ * earliest until size_limit is met. All empty files will be deleted.
+ * - If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
- * are older than WAL_ttl_seconds will be deleted.
- * 4. If both are not 0, WAL files will be checked every 10 min and both
- * checks will be performed with ttl being first.
+ * are older than WAL_ttl_seconds will be deleted.
+ * - If both are not 0, WAL files will be checked every 10 min and both
+ * checks will be performed with ttl being first.
*
* @param sizeLimitMB size limit in mega-bytes.
* @return the reference to the current option.
- * @see setWalSizeLimitMB()
+ * @see #setWalSizeLimitMB(long)
*/
public Options setWalSizeLimitMB(long sizeLimitMB) {
assert(isInitialized());
@@ -857,7 +869,7 @@ public class Options extends RocksObject {
* Data being read from file storage may be buffered in the OS
* Default: true
*
- * @param allowOsBufferif true, then OS buffering is allowed.
+ * @param allowOsBuffer if true, then OS buffering is allowed.
* @return the reference to the current option.
*/
public Options setAllowOsBuffer(boolean allowOsBuffer) {
@@ -1122,7 +1134,7 @@ public class Options extends RocksObject {
* Memtable format can be set using setTableFormatConfig.
*
* @return the name of the currently-used memtable factory.
- * @see setTableFormatConfig()
+ * @see #setTableFormatConfig(TableFormatConfig)
*/
public String memTableFactoryName() {
assert(isInitialized());
@@ -1273,7 +1285,7 @@ public class Options extends RocksObject {
long handle, int numLevels);
/**
- * The number of files in leve 0 to trigger compaction from level-0 to
+ * The number of files in level 0 to trigger compaction from level-0 to
* level-1. A value < 0 means that level-0 compaction will not be
* triggered by number of files at all.
* Default: 4
@@ -1400,7 +1412,7 @@ public class Options extends RocksObject {
*
* @return the target size of a level-0 file.
*
- * @see targetFileSizeMultiplier()
+ * @see #targetFileSizeMultiplier()
*/
public int targetFileSizeBase() {
return targetFileSizeBase(nativeHandle_);
@@ -1421,7 +1433,7 @@ public class Options extends RocksObject {
* @param targetFileSizeBase the target size of a level-0 file.
* @return the reference to the current option.
*
- * @see setTargetFileSizeMultiplier()
+ * @see #setTargetFileSizeMultiplier(int)
*/
public Options setTargetFileSizeBase(int targetFileSizeBase) {
setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
@@ -1471,7 +1483,7 @@ public class Options extends RocksObject {
* by default 'maxBytesForLevelBase' is 10MB.
*
 * @return the upper-bound of the total size of level-1 files in bytes.
- * @see maxBytesForLevelMultiplier()
+ * @see #maxBytesForLevelMultiplier()
*/
public long maxBytesForLevelBase() {
return maxBytesForLevelBase(nativeHandle_);
@@ -1491,7 +1503,7 @@ public class Options extends RocksObject {
 * @param maxBytesForLevelBase the upper-bound of the total size of
 * level-1 files in bytes.
* @return the reference to the current option.
- * @see setMaxBytesForLevelMultiplier()
+ * @see #setMaxBytesForLevelMultiplier(int)
*/
public Options setMaxBytesForLevelBase(long maxBytesForLevelBase) {
setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase);
@@ -1507,7 +1519,7 @@ public class Options extends RocksObject {
*
* @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L.
- * @see maxBytesForLevelBase()
+ * @see #maxBytesForLevelBase()
*/
public int maxBytesForLevelMultiplier() {
return maxBytesForLevelMultiplier(nativeHandle_);
@@ -1522,7 +1534,7 @@ public class Options extends RocksObject {
* @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L.
* @return the reference to the current option.
- * @see setMaxBytesForLevelBase()
+ * @see #setMaxBytesForLevelBase(long)
*/
public Options setMaxBytesForLevelMultiplier(int multiplier) {
setMaxBytesForLevelMultiplier(nativeHandle_, multiplier);
@@ -1538,7 +1550,7 @@ public class Options extends RocksObject {
* (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
*
* @return the maximum number of bytes in all compacted files.
- * @see sourceCompactionFactor()
+ * @see #sourceCompactionFactor()
*/
public int expandedCompactionFactor() {
return expandedCompactionFactor(nativeHandle_);
@@ -1554,7 +1566,7 @@ public class Options extends RocksObject {
* @param expandedCompactionFactor the maximum number of bytes in all
* compacted files.
* @return the reference to the current option.
- * @see setSourceCompactionFactor()
+ * @see #setSourceCompactionFactor(int)
*/
public Options setExpandedCompactionFactor(int expandedCompactionFactor) {
setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor);
@@ -1573,7 +1585,7 @@ public class Options extends RocksObject {
* a compaction.
*
 * @return the maximum number of bytes in all source files to be compacted.
- * @see expendedCompactionFactor()
+ * @see #expandedCompactionFactor()
*/
public int sourceCompactionFactor() {
return sourceCompactionFactor(nativeHandle_);
@@ -1592,7 +1604,7 @@ public class Options extends RocksObject {
* @param sourceCompactionFactor the maximum number of bytes in all
* source files to be compacted in a single compaction run.
* @return the reference to the current option.
- * @see setExpendedCompactionFactor()
+ * @see #setExpandedCompactionFactor(int)
*/
public Options setSourceCompactionFactor(int sourceCompactionFactor) {
setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor);
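The hunks above all belong to one family of level-compaction knobs. A minimal sketch of how they compose, using illustrative values rather than recommendations; all setters are the ones documented in the surrounding hunks:

    Options options = new Options();
    // Level-1 holds at most 10 MB in total; each deeper level holds
    // maxBytesForLevelMultiplier times more than the level above it.
    options.setMaxBytesForLevelBase(10 * 1024 * 1024);
    options.setMaxBytesForLevelMultiplier(10);
    // Bound a single compaction run: it may expand to at most
    // expandedCompactionFactor * targetFileSizeLevel() bytes, and
    // sourceCompactionFactor limits the bytes of source files it picks up.
    options.setExpandedCompactionFactor(25);
    options.setSourceCompactionFactor(1);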
@@ -1979,7 +1991,7 @@ public class Options extends RocksObject {
* This value will be used only when a prefix-extractor is specified.
*
* @return the number of bloom-bits.
- * @see useFixedLengthPrefixExtractor()
+ * @see #useFixedLengthPrefixExtractor(int)
*/
public int memtablePrefixBloomBits() {
return memtablePrefixBloomBits(nativeHandle_);
@@ -2037,7 +2049,7 @@ public class Options extends RocksObject {
* Default: 0
*
* @return the level of locality of bloom-filter probes.
- * @see setMemTablePrefixBloomProbes
+ * @see #setMemtablePrefixBloomProbes(int)
*/
public int bloomLocality() {
return bloomLocality(nativeHandle_);
@@ -2149,7 +2161,7 @@ public class Options extends RocksObject {
*
* Default: 2
*
- * @return
+   * @return the minimum number of partial merge operands.
*/
public int minPartialMergeOperands() {
return minPartialMergeOperands(nativeHandle_);
diff --git a/java/org/rocksdb/RateLimiterConfig.java b/java/org/rocksdb/RateLimiterConfig.java
index 22de65921..1b309e6c9 100644
--- a/java/org/rocksdb/RateLimiterConfig.java
+++ b/java/org/rocksdb/RateLimiterConfig.java
@@ -10,11 +10,12 @@ package org.rocksdb;
*/
public abstract class RateLimiterConfig {
/**
- * This function should only be called by Options.setRateLimiter(),
- * which will create a c++ shared-pointer to the c++ RateLimiter
- * that is associated with the Java RateLimtierConifg.
+ * This function should only be called by
+ * {@link org.rocksdb.Options#setRateLimiter(long, long)}, which will
+ * create a c++ shared-pointer to the c++ {@code RateLimiter} that is associated
+ * with a Java RateLimiterConfig.
*
- * @see Options.setRateLimiter()
+ * @see org.rocksdb.Options#setRateLimiter(long, long)
*/
abstract protected long newRateLimiterHandle();
}
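For orientation: a concrete RateLimiterConfig is created by the user and attached to Options, which is then the only caller of newRateLimiterHandle(). A hedged sketch; the patch itself only links the native setRateLimiter(long, long), so the Java-level wrapper name below is an assumption:

    // GenericRateLimiterConfig is the concrete subclass shipped with
    // rocksjni (its diff appears later in this series).
    RateLimiterConfig limiterConfig =
        new GenericRateLimiterConfig(10 * 1024 * 1024);  // 10 MB/s
    Options options = new Options();
    options.setRateLimiterConfig(limiterConfig);  // assumed wrapper name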
diff --git a/java/org/rocksdb/ReadOptions.java b/java/org/rocksdb/ReadOptions.java
index 97c47c7d6..3590a1a87 100644
--- a/java/org/rocksdb/ReadOptions.java
+++ b/java/org/rocksdb/ReadOptions.java
@@ -64,7 +64,7 @@ public class ReadOptions extends RocksObject {
private native boolean fillCache(long handle);
/**
- * Fill the cache when loading the block-based sst formated db.
+ * Fill the cache when loading the block-based sst formatted db.
* Callers may wish to set this field to false for bulk scans.
* Default: true
*
@@ -86,7 +86,8 @@ public class ReadOptions extends RocksObject {
* added data) and is optimized for sequential reads. It will return records
* that were inserted into the database after the creation of the iterator.
* Default: false
- * Not supported in ROCKSDB_LITE mode!
+ *
+ * Not supported in {@code ROCKSDB_LITE} mode!
*
* @return true if tailing iterator is enabled.
*/
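A short illustration of the two ReadOptions fields documented above; the setter names are assumed from the getter names, and the values are illustrative only:

    ReadOptions readOptions = new ReadOptions();
    // Bulk scan: avoid filling the block cache with blocks that will be
    // read exactly once.
    readOptions.setFillCache(false);
    // Tailing iterator: also returns records inserted after the iterator
    // was created; not supported in ROCKSDB_LITE builds.
    readOptions.setTailing(true);  // assumed setter name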
diff --git a/java/org/rocksdb/RestoreBackupableDB.java b/java/org/rocksdb/RestoreBackupableDB.java
index dbde447a0..5bc8dfbec 100644
--- a/java/org/rocksdb/RestoreBackupableDB.java
+++ b/java/org/rocksdb/RestoreBackupableDB.java
@@ -11,9 +11,13 @@ package org.rocksdb;
 * Note that dispose() must be called before this instance becomes out-of-scope
* to release the allocated memory in c++.
*
- * @param options Instance of BackupableDBOptions.
*/
public class RestoreBackupableDB extends RocksObject {
+ /**
+ * Constructor
+ *
+ * @param options {@link org.rocksdb.BackupableDBOptions} instance
+ */
public RestoreBackupableDB(BackupableDBOptions options) {
super();
nativeHandle_ = newRestoreBackupableDB(options.nativeHandle_);
@@ -30,6 +34,12 @@ public class RestoreBackupableDB extends RocksObject {
* database will diverge from backups 4 and 5 and the new backup will fail.
* If you want to create new backup, you will first have to delete backups 4
* and 5.
+ *
+ * @param backupId id pointing to backup
+ * @param dbDir database directory to restore to
+ * @param walDir directory where wal files are located
+ * @param restoreOptions {@link org.rocksdb.RestoreOptions} instance
+   * @throws RocksDBException thrown if an error occurs in the underlying native library
*/
public void restoreDBFromBackup(long backupId, String dbDir, String walDir,
RestoreOptions restoreOptions) throws RocksDBException {
@@ -39,6 +49,11 @@ public class RestoreBackupableDB extends RocksObject {
/**
* Restore from the latest backup.
+ *
+ * @param dbDir database directory to restore to
+ * @param walDir directory where wal files are located
+ * @param restoreOptions {@link org.rocksdb.RestoreOptions} instance
+   * @throws RocksDBException thrown if an error occurs in the underlying native library
*/
public void restoreDBFromLatestBackup(String dbDir, String walDir,
RestoreOptions restoreOptions) throws RocksDBException {
@@ -49,7 +64,7 @@ public class RestoreBackupableDB extends RocksObject {
/**
* Deletes old backups, keeping latest numBackupsToKeep alive.
*
- * @param Number of latest backups to keep
+   * @param numBackupsToKeep number of latest backups to keep
*/
public void purgeOldBackups(int numBackupsToKeep) throws RocksDBException {
purgeOldBackups0(nativeHandle_, numBackupsToKeep);
@@ -58,7 +73,7 @@ public class RestoreBackupableDB extends RocksObject {
/**
* Deletes a specific backup.
*
- * @param ID of backup to delete.
+   * @param backupId ID of backup to delete.
*/
public void deleteBackup(long backupId) throws RocksDBException {
deleteBackup0(nativeHandle_, backupId);
diff --git a/java/org/rocksdb/RestoreOptions.java b/java/org/rocksdb/RestoreOptions.java
index 77a2b99bc..2325c8f6c 100644
--- a/java/org/rocksdb/RestoreOptions.java
+++ b/java/org/rocksdb/RestoreOptions.java
@@ -11,13 +11,17 @@ package org.rocksdb;
 * Note that dispose() must be called before this instance becomes out-of-scope
* to release the allocated memory in c++.
*
- * @param If true, restore won't overwrite the existing log files in wal_dir. It
- * will also move all log files from archive directory to wal_dir. Use this
- * option in combination with BackupableDBOptions::backup_log_files = false
- * for persisting in-memory databases.
- * Default: false
*/
public class RestoreOptions extends RocksObject {
+ /**
+ * Constructor
+ *
+ * @param keepLogFiles If true, restore won't overwrite the existing log files in wal_dir. It
+ * will also move all log files from archive directory to wal_dir. Use this
+ * option in combination with BackupableDBOptions::backup_log_files = false
+ * for persisting in-memory databases.
+ * Default: false
+ */
public RestoreOptions(boolean keepLogFiles) {
super();
nativeHandle_ = newRestoreOptions(keepLogFiles);
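The two classes above work together during a restore. A minimal sketch of the flow; paths are placeholders, and the BackupableDBOptions constructor argument, taken here to be the backup directory, is an assumption:

    void restoreLatest() throws RocksDBException {
      RestoreBackupableDB restore = new RestoreBackupableDB(
          new BackupableDBOptions("/path/to/backups"));  // assumed ctor arg
      try {
        restore.restoreDBFromLatestBackup(
            "/path/to/db",              // dbDir: target data directory
            "/path/to/db",              // walDir: target wal directory
            new RestoreOptions(false)); // keepLogFiles = false
        restore.purgeOldBackups(2);     // keep only the two newest backups
      } finally {
        restore.dispose();              // release the native handle
      }
    }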
diff --git a/java/org/rocksdb/RocksDB.java b/java/org/rocksdb/RocksDB.java
index a16586551..3fa2079a8 100644
--- a/java/org/rocksdb/RocksDB.java
+++ b/java/org/rocksdb/RocksDB.java
@@ -17,7 +17,7 @@ import org.rocksdb.NativeLibraryLoader;
* A RocksDB is a persistent ordered map from keys to values. It is safe for
* concurrent access from multiple threads without any external synchronization.
* All methods of this class could potentially throw RocksDBException, which
- * indicates sth wrong at the rocksdb library side and the call failed.
+ * indicates something went wrong at the RocksDB library side and the call failed.
*/
public class RocksDB extends RocksObject {
public static final int NOT_FOUND = -1;
@@ -95,12 +95,11 @@ public class RocksDB extends RocksObject {
* set to true.
*
* @param path the path to the rocksdb.
- * @param status an out value indicating the status of the Open().
* @return a rocksdb instance on success, null if the specified rocksdb can
* not be opened.
*
- * @see Options.setCreateIfMissing()
- * @see Options.createIfMissing()
+ * @see Options#setCreateIfMissing(boolean)
+ * @see org.rocksdb.Options#createIfMissing()
*/
public static RocksDB open(String path) throws RocksDBException {
RocksDB db = new RocksDB();
@@ -280,8 +279,8 @@ public class RocksDB extends RocksObject {
/**
* Returns a map of keys for which values were found in DB.
*
- * @param List of keys for which values need to be retrieved.
* @param opt Read options.
+   * @param keys List of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
* entry is the corresponding value in DB.
*
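For illustration (not part of this patch), the variant documented above can be exercised as below; java.util.List, ArrayList, and Map imports are assumed, and the surrounding method must declare throws RocksDBException:

    List<byte[]> keys = new ArrayList<byte[]>();
    keys.add("key1".getBytes());
    keys.add("key2".getBytes());
    // Only keys actually found in the DB appear in the returned map.
    Map<byte[], byte[]> found = db.multiGet(new ReadOptions(), keys);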
diff --git a/java/org/rocksdb/RocksObject.java b/java/org/rocksdb/RocksObject.java
index 353918d2e..828bb4f3c 100644
--- a/java/org/rocksdb/RocksObject.java
+++ b/java/org/rocksdb/RocksObject.java
@@ -7,16 +7,22 @@ package org.rocksdb;
/**
* RocksObject is the base-class of all RocksDB classes that has a pointer to
- * some c++ rocksdb object.
+ * some c++ {@code rocksdb} object.
*
- * RocksObject has dispose() function, which releases its associated c++ resource.
+ *
+ * RocksObject has a {@code dispose()} function, which releases its associated c++
+ * resource.
+ *
+ *
 * This function can either be called manually, or called automatically
- * during the regular Java GC process. However, since Java may wrongly assume a
+ * during the regular Java GC process. However, since Java may wrongly assume a
* RocksObject only contains a long member variable and think it is small in size,
- * Java may give RocksObject low priority in the GC process. For this, it is
- * suggested to call dispose() manually. However, it is safe to let RocksObject go
- * out-of-scope without manually calling dispose() as dispose() will be called
- * in the finalizer during the regular GC process.
+ *
+ * Java may give {@code RocksObject} low priority in the GC process. For this reason, it is
+ * suggested to call {@code dispose()} manually. However, it is safe to let
+ * {@code RocksObject} go out-of-scope without manually calling {@code dispose()}
+ * as {@code dispose()} will be called in the finalizer during the
+ * regular GC process.
*/
public abstract class RocksObject {
protected RocksObject() {
@@ -26,16 +32,18 @@ public abstract class RocksObject {
/**
* Release the c++ object manually pointed by the native handle.
- *
- * Note that dispose() will also be called during the GC process
- * if it was not called before its RocksObject went out-of-scope.
+ *
+ * Note that {@code dispose()} will also be called during the GC process
+ * if it was not called before its {@code RocksObject} went out-of-scope.
 * However, since Java may wrongly assume those objects are
 * small in that they seem to only hold a long variable. As a result,
* they might have low priority in the GC process. To prevent this,
- * it is suggested to call dispose() manually.
- *
- * Note that once an instance of RocksObject has been disposed,
+ * it is suggested to call {@code dispose()} manually.
+ *
+ *
+ * Note that once an instance of {@code RocksObject} has been disposed,
 * calling its function will lead to undefined behavior.
+ *
*/
public final synchronized void dispose() {
if (isOwningNativeHandle() && isInitialized()) {
@@ -46,40 +54,41 @@ public abstract class RocksObject {
}
/**
- * The helper function of dispose() which all subclasses of RocksObject
- * must implement to release their associated C++ resource.
+ * The helper function of {@code dispose()} which all subclasses of
+ * {@code RocksObject} must implement to release their associated
+ * C++ resource.
*/
protected abstract void disposeInternal();
/**
* Revoke ownership of the native object.
- *
+ *
* This will prevent the object from attempting to delete the underlying
* native object in its finalizer. This must be used when another object
* takes over ownership of the native object or both will attempt to delete
* the underlying object when garbage collected.
- *
- * When disOwnNativeHandle() is called, dispose() will simply set nativeHandle_
- * to 0 without releasing its associated C++ resource. As a result,
- * incorrectly use this function may cause memory leak, and this function call
- * will not affect the return value of isInitialized().
- *
- * @see dispose()
- * @see isInitialized()
+ *
+ * When {@code disOwnNativeHandle()} is called, {@code dispose()} will simply set
+ * {@code nativeHandle_} to 0 without releasing its associated C++ resource.
+ * As a result, incorrectly using this function may cause a memory leak, and this
+ * function call will not affect the return value of {@code isInitialized()}.
+ *
+ * @see #dispose()
+ * @see #isInitialized()
*/
protected void disOwnNativeHandle() {
owningHandle_ = false;
}
/**
- * Returns true if the current RocksObject is responsable to release its
- * native handle.
+ * Returns true if the current {@code RocksObject} is responsible for releasing
+ * its native handle.
*
- * @return true if the current RocksObject is responsible to release its
- * native handle.
+ * @return true if the current {@code RocksObject} is responsible for releasing
+ * its native handle.
*
- * @see disOwnNativeHandle()
- * @see dispose()
+ * @see #disOwnNativeHandle()
+ * @see #dispose()
*/
protected boolean isOwningNativeHandle() {
return owningHandle_;
@@ -90,14 +99,14 @@ public abstract class RocksObject {
*
* @return true if the associated native handle has been initialized.
*
- * @see dispose()
+ * @see #dispose()
*/
protected boolean isInitialized() {
return (nativeHandle_ != 0);
}
/**
- * Simply calls dispose() and release its c++ resource if it has not
+ * Simply calls {@code dispose()} and releases its c++ resource if it has not
* yet released.
*/
@Override protected void finalize() {
@@ -110,8 +119,8 @@ public abstract class RocksObject {
protected long nativeHandle_;
/**
- * A flag indicating whether the current RocksObject is responsible to
- * release the c++ object stored in its nativeHandle_.
+ * A flag indicating whether the current {@code RocksObject} is responsible for
+ * releasing the c++ object stored in its {@code nativeHandle_}.
*/
private boolean owningHandle_;
}
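The ownership rules above reduce to a simple usage pattern. A hedged sketch using only methods documented elsewhere in this series (RocksDB.open(String), put, dispose); the path is a placeholder:

    void writeOnce() throws RocksDBException {
      RocksDB db = RocksDB.open("/path/to/db");  // RocksDB is a RocksObject
      try {
        db.put("key".getBytes(), "value".getBytes());
      } finally {
        // Deterministic release of the native handle; the finalizer is
        // only a fallback and may run late.
        db.dispose();
      }
    }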
From da8ff9ff896ee4af400c4083c9f78eaf86576457 Mon Sep 17 00:00:00 2001
From: fyrz
Date: Thu, 2 Oct 2014 21:31:19 +0200
Subject: [PATCH 09/11] Fixed Findbugs issues
- BackupableDB: missing call to super.finalize() (major)
- WriteBatchTest: inefficient String usage (minor)
- RocksDB: local dead variable store (medium)
---
java/org/rocksdb/BackupableDB.java | 1 +
java/org/rocksdb/RocksDB.java | 2 --
java/org/rocksdb/WriteBatchTest.java | 28 ++++++++++++++--------------
3 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/java/org/rocksdb/BackupableDB.java b/java/org/rocksdb/BackupableDB.java
index 108c4deb5..1c8e3dc53 100644
--- a/java/org/rocksdb/BackupableDB.java
+++ b/java/org/rocksdb/BackupableDB.java
@@ -82,6 +82,7 @@ public class BackupableDB extends RocksDB {
@Override protected void finalize() {
close();
+ super.finalize();
}
protected native void open(long rocksDBHandle, long backupDBOptionsHandle);
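The missing super.finalize() call is the FindBugs pattern FI_MISSING_SUPER_CALL: a finalizer that does not chain to its superclass can skip inherited cleanup. The general, defensive shape of the fix, sketched outside this codebase (the patch's own finalize() simply calls close() and then super.finalize()):

    class NativeResource {
      void close() { /* release this class's own native resources */ }

      @Override
      protected void finalize() throws Throwable {
        try {
          close();           // clean up this class first
        } finally {
          super.finalize();  // then always chain to the superclass
        }
      }
    }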
diff --git a/java/org/rocksdb/RocksDB.java b/java/org/rocksdb/RocksDB.java
index a16586551..8985bc3b5 100644
--- a/java/org/rocksdb/RocksDB.java
+++ b/java/org/rocksdb/RocksDB.java
@@ -103,8 +103,6 @@ public class RocksDB extends RocksObject {
* @see Options.createIfMissing()
*/
public static RocksDB open(String path) throws RocksDBException {
- RocksDB db = new RocksDB();
-
// This allows to use the rocksjni default Options instead of
// the c++ one.
Options options = new Options();
diff --git a/java/org/rocksdb/WriteBatchTest.java b/java/org/rocksdb/WriteBatchTest.java
index 03a866313..770cd85b8 100644
--- a/java/org/rocksdb/WriteBatchTest.java
+++ b/java/org/rocksdb/WriteBatchTest.java
@@ -53,9 +53,9 @@ public class WriteBatchTest {
WriteBatchInternal.setSequence(batch, 100);
assert(100 == WriteBatchInternal.sequence(batch));
assert(3 == batch.count());
- assert(new String("Put(baz, boo)@102" +
- "Delete(box)@101" +
- "Put(foo, bar)@100")
+ assert(("Put(baz, boo)@102" +
+ "Delete(box)@101" +
+ "Put(foo, bar)@100")
.equals(new String(getContents(batch), "US-ASCII")));
} catch (UnsupportedEncodingException e) {
System.err.println(e);
@@ -79,16 +79,16 @@ public class WriteBatchTest {
b2.clear();
b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII"));
WriteBatchInternal.append(b1, b2);
- assert(new String("Put(a, va)@200" +
- "Put(b, vb)@201")
+ assert(("Put(a, va)@200" +
+ "Put(b, vb)@201")
.equals(new String(getContents(b1), "US-ASCII")));
assert(2 == b1.count());
b2.remove("foo".getBytes("US-ASCII"));
WriteBatchInternal.append(b1, b2);
- assert(new String("Put(a, va)@200" +
- "Put(b, vb)@202" +
- "Put(b, vb)@201" +
- "Delete(foo)@203")
+ assert(("Put(a, va)@200" +
+ "Put(b, vb)@202" +
+ "Put(b, vb)@201" +
+ "Delete(foo)@203")
.equals(new String(getContents(b1), "US-ASCII")));
assert(4 == b1.count());
} catch (UnsupportedEncodingException e) {
@@ -108,11 +108,11 @@ public class WriteBatchTest {
batch.putLogData("blob2".getBytes("US-ASCII"));
batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
assert(5 == batch.count());
- assert(new String("Merge(foo, bar)@4" +
- "Put(k1, v1)@0" +
- "Delete(k2)@3" +
- "Put(k2, v2)@1" +
- "Put(k3, v3)@2")
+ assert(("Merge(foo, bar)@4" +
+ "Put(k1, v1)@0" +
+ "Delete(k2)@3" +
+ "Put(k2, v2)@1" +
+ "Put(k3, v3)@2")
.equals(new String(getContents(batch), "US-ASCII")));
} catch (UnsupportedEncodingException e) {
System.err.println(e);
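The WriteBatchTest hunks fix the FindBugs inefficiency DM_STRING_CTOR: wrapping a compile-time string constant in new String(...) allocates a redundant copy, and concatenating string literals is already folded into a single constant by the compiler. In isolation:

    String copied  = new String("Put(a, va)@200"); // needless extra allocation
    String literal = "Put(a, va)@200";             // reuses the interned constant
    // copied.equals(literal) is true, but copied == literal is false;
    // literal == "Put(a, va)@200" is true because constants are interned.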
From 05204bb1199bb736495bdcd8a5898514fc5c2dca Mon Sep 17 00:00:00 2001
From: fyrz
Date: Fri, 3 Oct 2014 21:43:47 +0200
Subject: [PATCH 10/11] Lint changes
---
java/org/rocksdb/BackupableDB.java | 4 ++--
java/org/rocksdb/CompactionStyle.java | 2 +-
java/org/rocksdb/CompressionType.java | 2 +-
java/org/rocksdb/GenericRateLimiterConfig.java | 4 ++--
java/org/rocksdb/MemTableConfig.java | 2 +-
java/org/rocksdb/Options.java | 4 ++--
java/org/rocksdb/RocksDB.java | 8 ++++----
java/org/rocksdb/StatisticsCollector.java | 10 +++++-----
java/org/rocksdb/StatisticsCollectorCallback.java | 8 ++++----
java/org/rocksdb/StatsCollectorInput.java | 8 ++++----
10 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/java/org/rocksdb/BackupableDB.java b/java/org/rocksdb/BackupableDB.java
index 3ee29b347..f8669fff4 100644
--- a/java/org/rocksdb/BackupableDB.java
+++ b/java/org/rocksdb/BackupableDB.java
@@ -47,10 +47,10 @@ public class BackupableDB extends RocksDB {
public void createNewBackup(boolean flushBeforeBackup) {
createNewBackup(nativeHandle_, flushBeforeBackup);
}
-
+
/**
* Deletes old backups, keeping latest numBackupsToKeep alive.
- *
+ *
* @param numBackupsToKeep Number of latest backups to keep.
*/
public void purgeOldBackups(int numBackupsToKeep) {
diff --git a/java/org/rocksdb/CompactionStyle.java b/java/org/rocksdb/CompactionStyle.java
index 5c41dfdd2..ade48358e 100644
--- a/java/org/rocksdb/CompactionStyle.java
+++ b/java/org/rocksdb/CompactionStyle.java
@@ -9,7 +9,7 @@ public enum CompactionStyle {
LEVEL((byte) 0),
UNIVERSAL((byte) 1),
FIFO((byte) 2);
-
+
private final byte value_;
private CompactionStyle(byte value) {
diff --git a/java/org/rocksdb/CompressionType.java b/java/org/rocksdb/CompressionType.java
index c5d6253a9..f29eccb9b 100644
--- a/java/org/rocksdb/CompressionType.java
+++ b/java/org/rocksdb/CompressionType.java
@@ -12,7 +12,7 @@ public enum CompressionType {
BZLIB2_COMPRESSION((byte) 3),
LZ4_COMPRESSION((byte) 4),
LZ4HC_COMPRESSION((byte) 5);
-
+
private final byte value_;
private CompressionType(byte value) {
diff --git a/java/org/rocksdb/GenericRateLimiterConfig.java b/java/org/rocksdb/GenericRateLimiterConfig.java
index 2a2e7b657..5023822a6 100644
--- a/java/org/rocksdb/GenericRateLimiterConfig.java
+++ b/java/org/rocksdb/GenericRateLimiterConfig.java
@@ -52,12 +52,12 @@ public class GenericRateLimiterConfig extends RateLimiterConfig {
public GenericRateLimiterConfig(long rateBytesPerSecond) {
this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS);
}
-
+
@Override protected long newRateLimiterHandle() {
return newRateLimiterHandle(rateBytesPerSecond_, refillPeriodMicros_,
fairness_);
}
-
+
private native long newRateLimiterHandle(long rateBytesPerSecond,
long refillPeriodMicros, int fairness);
private final long rateBytesPerSecond_;
diff --git a/java/org/rocksdb/MemTableConfig.java b/java/org/rocksdb/MemTableConfig.java
index 904aa37b5..a69b1008f 100644
--- a/java/org/rocksdb/MemTableConfig.java
+++ b/java/org/rocksdb/MemTableConfig.java
@@ -21,7 +21,7 @@ public abstract class MemTableConfig {
* which will create a c++ shared-pointer to the c++ MemTableRepFactory
* that associated with the Java MemTableConfig.
*
- * @see Options#setMemTableConfig(MemTableConfig)
+ * @see Options#setMemTableConfig(MemTableConfig)
*/
abstract protected long newMemTableFactoryHandle();
}
diff --git a/java/org/rocksdb/Options.java b/java/org/rocksdb/Options.java
index 642c6c4dd..b0989363b 100644
--- a/java/org/rocksdb/Options.java
+++ b/java/org/rocksdb/Options.java
@@ -20,7 +20,7 @@ public class Options extends RocksObject {
static final int DEFAULT_NUM_SHARD_BITS = -1;
/**
- * Builtin RocksDB comparators
+ * Builtin RocksDB comparators
*/
public enum BuiltinComparator {
BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR;
@@ -1115,7 +1115,7 @@ public class Options extends RocksObject {
setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle());
return this;
}
-
+
/**
* Use to control write rate of flush and compaction. Flush has higher
* priority than compaction. Rate limiting is disabled if nullptr.
diff --git a/java/org/rocksdb/RocksDB.java b/java/org/rocksdb/RocksDB.java
index 3fa2079a8..facd2914b 100644
--- a/java/org/rocksdb/RocksDB.java
+++ b/java/org/rocksdb/RocksDB.java
@@ -323,16 +323,16 @@ public class RocksDB extends RocksObject {
throws RocksDBException {
remove(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
}
-
+
/**
* DB implementations can export properties about their state
via this method. If "property" is a valid property understood by this
DB implementation, fills "*value" with its current value and returns
true. Otherwise returns false.
-
-
+
+
Valid property names include:
-
+
"rocksdb.num-files-at-level" - return the number of files at level ,
where is an ASCII representation of a level number (e.g. "0").
"rocksdb.stats" - returns a multi-line string that describes statistics
diff --git a/java/org/rocksdb/StatisticsCollector.java b/java/org/rocksdb/StatisticsCollector.java
index 29815c46d..965637697 100644
--- a/java/org/rocksdb/StatisticsCollector.java
+++ b/java/org/rocksdb/StatisticsCollector.java
@@ -29,9 +29,9 @@ public class StatisticsCollector {
/**
* Constructor for statistics collector.
- *
+ *
* @param statsCollectorInputList List of statistics collector input.
- * @param statsCollectionIntervalInMilliSeconds Statistics collection time
+ * @param statsCollectionIntervalInMilliSeconds Statistics collection time
* period (specified in milliseconds).
*/
 public StatisticsCollector(List<StatsCollectorInput> statsCollectorInputList,
@@ -48,7 +48,7 @@ public class StatisticsCollector {
/**
* Shuts down statistics collector.
- *
+ *
* @param shutdownTimeout Time in milli-seconds to wait for shutdown before
* killing the collection process.
*/
@@ -70,13 +70,13 @@ public class StatisticsCollector {
try {
if(Thread.currentThread().isInterrupted()) {
break;
- }
+ }
for(StatsCollectorInput statsCollectorInput :
_statsCollectorInputList) {
Statistics statistics = statsCollectorInput.getStatistics();
StatisticsCollectorCallback statsCallback =
statsCollectorInput.getCallback();
-
+
// Collect ticker data
for(TickerType ticker : TickerType.values()) {
long tickerValue = statistics.getTickerCount(ticker);
diff --git a/java/org/rocksdb/StatisticsCollectorCallback.java b/java/org/rocksdb/StatisticsCollectorCallback.java
index a955ec216..b8d7a24ec 100644
--- a/java/org/rocksdb/StatisticsCollectorCallback.java
+++ b/java/org/rocksdb/StatisticsCollectorCallback.java
@@ -7,13 +7,13 @@ package org.rocksdb;
/**
* Callback interface provided to StatisticsCollector.
- *
+ *
* Thread safety:
- * StatisticsCollector doesn't make any guarantees about thread safety.
+ * StatisticsCollector doesn't make any guarantees about thread safety.
* If the same reference of StatisticsCollectorCallback is passed to multiple
- * StatisticsCollector references, then its the responsibility of the
+ * StatisticsCollector references, then its the responsibility of the
* user to make StatisticsCollectorCallback's implementation thread-safe.
- *
+ *
* @param tickerType
* @param tickerCount
*/
diff --git a/java/org/rocksdb/StatsCollectorInput.java b/java/org/rocksdb/StatsCollectorInput.java
index a1aa928d3..890977cdf 100644
--- a/java/org/rocksdb/StatsCollectorInput.java
+++ b/java/org/rocksdb/StatsCollectorInput.java
@@ -12,10 +12,10 @@ package org.rocksdb;
public class StatsCollectorInput {
private final Statistics _statistics;
private final StatisticsCollectorCallback _statsCallback;
-
+
/**
* Constructor for StatsCollectorInput.
- *
+ *
* @param statistics Reference of DB statistics.
* @param statsCallback Reference of statistics callback interface.
*/
@@ -24,11 +24,11 @@ public class StatsCollectorInput {
_statistics = statistics;
_statsCallback = statsCallback;
}
-
+
public Statistics getStatistics() {
return _statistics;
}
-
+
public StatisticsCollectorCallback getCallback() {
return _statsCallback;
}
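For orientation (this lint patch only touches whitespace), the collector classes wire together roughly as follows. The statistics accessors and the lifecycle method below are assumptions; only the constructors shown in the hunks above are confirmed by this series:

    void startCollector(Options options, StatisticsCollectorCallback callback) {
      options.createStatistics();                       // assumed: enable stats
      Statistics statistics = options.statisticsPtr();  // assumed accessor
      List<StatsCollectorInput> inputs = new ArrayList<StatsCollectorInput>();
      inputs.add(new StatsCollectorInput(statistics, callback));
      // 10000 ms collection interval (constructor shown above).
      StatisticsCollector collector = new StatisticsCollector(inputs, 10000);
      collector.start();                                // assumed lifecycle method
    }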
From 69d4c5123e812a2c88dd94abc2bf1a39b0a04dbd Mon Sep 17 00:00:00 2001
From: fyrz
Date: Sat, 4 Oct 2014 11:17:06 +0200
Subject: [PATCH 11/11] Cross-platform fix version.sh
version.sh now also works properly on Linux: POSIX test only supports "=" (the
"==" operator is a bashism), and quoting "$#" and "$1" avoids errors when no
argument is passed.
---
build_tools/version.sh | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/build_tools/version.sh b/build_tools/version.sh
index afa7ed277..c5a8595fb 100755
--- a/build_tools/version.sh
+++ b/build_tools/version.sh
@@ -1,14 +1,14 @@
#!/bin/sh
-if [ $# == 0 ]; then
+if [ "$#" = "0" ]; then
echo "Usage: $0 major|minor|patch"
exit 1
fi
-if [ $1 = "major" ]; then
+if [ "$1" = "major" ]; then
cat include/rocksdb/version.h | grep MAJOR | head -n1 | awk '{print $3}'
fi
-if [ $1 = "minor" ]; then
+if [ "$1" = "minor" ]; then
cat include/rocksdb/version.h | grep MINOR | head -n1 | awk '{print $3}'
fi
-if [ $1 = "patch" ]; then
+if [ "$1" = "patch" ]; then
cat include/rocksdb/version.h | grep PATCH | head -n1 | awk '{print $3}'
fi