"make format" against last 10 commits

Summary: This helps the Windows port format their changes, as discussed. It may have reformatted some other code as well, because the last 10 commits include more than just the Windows changes.

Test Plan: Build it.
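For context, a pass like this is produced by running clang-format over only the lines touched by recent commits. A minimal sketch of how such a pass can be reproduced, assuming the repository's `make format` target wraps LLVM's clang-format-diff script (the exact wrapper and flags in the tree may differ; this is not necessarily the command used for this commit):

    # Reformat only the lines changed in the last 10 commits (hedged sketch):
    git diff -U0 HEAD~10 | clang-format-diff.py -i -p1

    # Or rely on the repository's wrapper target, assumed to exist:
    make format

    # Then verify the tree still builds, per the test plan:
    make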

Reviewers: anthony, IslamAbdelRahman, kradhakrishnan, yhchiang, igor

Reviewed By: igor

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D41961
Branch: main
Author: sdong (9 years ago)
Parent: 76d3cd3286
Commit: f9728640f3
Changed files (60), with per-file change counts:
  1. db/c.cc (21)
  2. db/compaction.cc (3)
  3. db/compaction.h (4)
  4. db/compaction_job.cc (4)
  5. db/compaction_job_stats_test.cc (4)
  6. db/compaction_job_test.cc (11)
  7. db/compaction_picker.cc (8)
  8. db/db_bench.cc (6)
  9. db/db_impl.cc (10)
  10. db/db_test.cc (84)
  11. db/listener_test.cc (7)
  12. db/transaction_log_impl.h (3)
  13. include/rocksdb/c.h (899)
  14. include/rocksdb/thread_status.h (2)
  15. include/rocksdb/transaction_log.h (36)
  16. include/rocksdb/utilities/spatial_db.h (26)
  17. include/utilities/backupable_db.h (3)
  18. include/utilities/pragma_error.h (15)
  19. port/dirent.h (18)
  20. port/port.h (4)
  21. port/port_posix.h (3)
  22. port/sys_time.h (21)
  23. port/util_logger.h (7)
  24. port/win/env_win.cc (1637)
  25. port/win/port_win.cc (232)
  26. port/win/port_win.h (241)
  27. port/win/win_logger.cc (181)
  28. port/win/win_logger.h (17)
  29. table/block_based_table_builder.cc (6)
  30. table/block_based_table_factory.cc (3)
  31. table/cuckoo_table_builder_test.cc (142)
  32. table/cuckoo_table_factory.h (4)
  33. table/format.h (20)
  34. table/plain_table_index.cc (4)
  35. third-party/fbson/FbsonDocument.h (2)
  36. third-party/fbson/FbsonStream.h (3)
  37. tools/db_repl_stress.cc (6)
  38. tools/db_stress.cc (191)
  39. util/auto_roll_logger_test.cc (3)
  40. util/autovector.h (3)
  41. util/env_posix.cc (3)
  42. util/env_test.cc (7)
  43. util/hash_cuckoo_rep.cc (22)
  44. util/hash_linklist_rep.cc (6)
  45. util/histogram.h (4)
  46. util/ldb_cmd.cc (3)
  47. util/ldb_cmd.h (8)
  48. util/mutable_cf_options.cc (6)
  49. util/options.cc (52)
  50. util/options_helper.cc (9)
  51. util/options_test.cc (28)
  52. util/slice.cc (5)
  53. util/thread_local.cc (41)
  54. util/thread_local.h (1)
  55. utilities/backupable/backupable_db.cc (19)
  56. utilities/checkpoint/checkpoint_test.cc (6)
  57. utilities/geodb/geodb_impl.cc (1)
  58. utilities/merge_operators/uint64add.cc (3)
  59. utilities/spatialdb/spatial_db.cc (36)
  60. utilities/ttl/ttl_test.cc (5)

@ -2256,7 +2256,7 @@ void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n)
}
void rocksdb_env_join_all_threads(rocksdb_env_t* env) {
env->rep->WaitForJoin();
env->rep->WaitForJoin();
}
void rocksdb_env_destroy(rocksdb_env_t* env) {
@ -2449,19 +2449,16 @@ extern void rocksdb_livefiles_destroy(
delete lf;
}
void rocksdb_get_options_from_string(
const rocksdb_options_t* base_options,
const char* opts_str, rocksdb_options_t* new_options,
char** errptr){
SaveError(errptr,
GetOptionsFromString(base_options->rep,
std::string(opts_str), &new_options->rep));
void rocksdb_get_options_from_string(const rocksdb_options_t* base_options,
const char* opts_str,
rocksdb_options_t* new_options,
char** errptr) {
SaveError(errptr,
GetOptionsFromString(base_options->rep, std::string(opts_str),
&new_options->rep));
}
void rocksdb_free(
void* ptr){
free(ptr);
}
void rocksdb_free(void* ptr) { free(ptr); }
} // end extern "C"

@ -270,7 +270,8 @@ const char* Compaction::InputLevelSummary(
is_first = false;
}
len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
"%" ROCKSDB_PRIszt "@%d", input_level.size(), input_level.level);
"%" ROCKSDB_PRIszt "@%d", input_level.size(),
input_level.level);
}
snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
" files to L%d", output_level());

@ -122,9 +122,7 @@ class Compaction {
bool IsTrivialMove() const;
// If true, then the compaction can be done by simply deleting input files.
bool deletion_compaction() const {
return deletion_compaction_;
}
bool deletion_compaction() const { return deletion_compaction_; }
// Add all inputs to this compaction as delete operations to *edit.
void AddInputDeletions(VersionEdit* edit);

@ -242,7 +242,7 @@ void CompactionJob::ReportStartedCompaction(
ThreadStatusUtil::SetThreadOperationProperty(
ThreadStatus::COMPACTION_PROP_FLAGS,
compaction->is_manual_compaction() +
compaction->is_manual_compaction() +
(compaction->deletion_compaction() << 1));
ThreadStatusUtil::SetThreadOperationProperty(
@ -263,7 +263,7 @@ void CompactionJob::ReportStartedCompaction(
if (compaction_job_stats_) {
compaction_job_stats_->is_manual_compaction =
compaction->is_manual_compaction();
compaction->is_manual_compaction();
}
}

@ -777,7 +777,5 @@ int main(int argc, char** argv) {
#else
int main(int argc, char** argv) {
return 0;
}
int main(int argc, char** argv) { return 0; }
#endif // !defined(IOS_CROSS_COMPILE)

@ -165,10 +165,9 @@ void VerifyInitializationOfCompactionJobStats(
#endif // !defined(IOS_CROSS_COMPILE)
}
void VerifyCompactionJobStats(
const CompactionJobStats& compaction_job_stats,
const std::vector<FileMetaData*>& files,
size_t num_output_files) {
void VerifyCompactionJobStats(const CompactionJobStats& compaction_job_stats,
const std::vector<FileMetaData*>& files,
size_t num_output_files) {
ASSERT_GE(compaction_job_stats.elapsed_micros, 0U);
ASSERT_EQ(compaction_job_stats.num_input_files, files.size());
ASSERT_EQ(compaction_job_stats.num_output_files, num_output_files);
@ -219,9 +218,7 @@ TEST_F(CompactionJobTest, Simple) {
ASSERT_OK(s);
mutex_.Unlock();
VerifyCompactionJobStats(
compaction_job_stats,
files, 1);
VerifyCompactionJobStats(compaction_job_stats, files, 1);
mock_table_factory_->AssertLatestFile(expected_results);
ASSERT_EQ(yield_callback_called, 20000);

@ -401,8 +401,9 @@ bool CompactionPicker::SetupOtherInputs(
if (expanded1.size() == output_level_inputs->size() &&
!FilesInCompaction(expanded1)) {
Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
"[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt "(%" PRIu64 "+%" PRIu64
" bytes) to %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt " (%" PRIu64 "+%" PRIu64 "bytes)\n",
"[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt "(%" PRIu64
"+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
" (%" PRIu64 "+%" PRIu64 "bytes)\n",
cf_name.c_str(), input_level, inputs->size(),
output_level_inputs->size(), inputs0_size, inputs1_size,
expanded0.size(), expanded1.size(), expanded0_size, inputs1_size);
@ -1225,7 +1226,8 @@ Compaction* UniversalCompactionPicker::PickCompaction(
return nullptr;
}
VersionStorageInfo::LevelSummaryStorage tmp;
LogToBuffer(log_buffer, 3072, "[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
LogToBuffer(log_buffer, 3072,
"[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
cf_name.c_str(), sorted_runs.size(),
vstorage->LevelSummary(&tmp));

@ -71,7 +71,7 @@ int main() {
#include "utilities/merge_operators.h"
#ifdef OS_WIN
#include <io.h> // open/close
#include <io.h> // open/close
#endif
using GFLAGS::ParseCommandLineFlags;
@ -3544,8 +3544,8 @@ class Benchmark {
char msg[100];
snprintf(msg, sizeof(msg),
"(reads:%" PRIu64 " merges:%" PRIu64 " total:%" PRIu64 " hits:%" \
PRIu64 " maxlength:%" ROCKSDB_PRIszt ")",
"(reads:%" PRIu64 " merges:%" PRIu64 " total:%" PRIu64
" hits:%" PRIu64 " maxlength:%" ROCKSDB_PRIszt ")",
num_gets, num_merges, readwrites_, num_hits, max_length);
thread->stats.AddMessage(msg);
}

@ -1631,9 +1631,9 @@ Status DBImpl::CompactFilesImpl(
CompactionJob compaction_job(
job_context->job_id, c.get(), db_options_, env_options_, versions_.get(),
&shutting_down_, log_buffer, directories_.GetDbDir(),
directories_.GetDataDir(c->output_path_id()), stats_,
snapshots_.GetAll(), table_cache_, std::move(yield_callback),
&event_logger_, c->mutable_cf_options()->paranoid_file_checks, dbname_,
directories_.GetDataDir(c->output_path_id()), stats_, snapshots_.GetAll(),
table_cache_, std::move(yield_callback), &event_logger_,
c->mutable_cf_options()->paranoid_file_checks, dbname_,
nullptr); // Here we pass a nullptr for CompactionJobStats because
// CompactFiles does not trigger OnCompactionCompleted(),
// which is the only place where CompactionJobStats is
@ -2602,8 +2602,8 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress, JobContext* job_context,
versions_.get(), &shutting_down_, log_buffer, directories_.GetDbDir(),
directories_.GetDataDir(c->output_path_id()), stats_,
snapshots_.GetAll(), table_cache_, std::move(yield_callback),
&event_logger_, c->mutable_cf_options()->paranoid_file_checks,
dbname_, &compaction_job_stats);
&event_logger_, c->mutable_cf_options()->paranoid_file_checks, dbname_,
&compaction_job_stats);
compaction_job.Prepare();
mutex_.Unlock();

@ -7,20 +7,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// Introduction of SyncPoint effectively disabled building and running this test in Release build.
// Introduction of SyncPoint effectively disabled building and running this test
// in Release build.
// which is a pity, it is a good test
#if !(defined NDEBUG) || !defined (OS_WIN)
#if !(defined NDEBUG) || !defined(OS_WIN)
#include <algorithm>
#include <iostream>
#include <set>
#ifndef OS_WIN
# include <unistd.h>
#endif
#include <thread>
#include <unordered_set>
#include <utility>
#include <fcntl.h>
#ifndef OS_WIN
#include <unistd.h>
#endif
#include "db/filename.h"
#include "db/dbformat.h"
@ -8590,7 +8591,7 @@ TEST_F(DBTest, TransactionLogIterator) {
} while (ChangeCompactOptions());
}
#ifndef NDEBUG // sync point is not included with DNDEBUG build
#ifndef NDEBUG // sync point is not included with DNDEBUG build
TEST_F(DBTest, TransactionLogIteratorRace) {
static const int LOG_ITERATOR_RACE_TEST_COUNT = 2;
static const char* sync_points[LOG_ITERATOR_RACE_TEST_COUNT][4] = {
@ -8716,7 +8717,6 @@ TEST_F(DBTest, TransactionLogIteratorCorruptedLog) {
//
class RecoveryTestHelper {
public:
// Number of WAL files to generate
static const int kWALFilesCount = 10;
// Starting number for the WAL file name like 00010.log
@ -8727,9 +8727,9 @@ class RecoveryTestHelper {
static const int kValueSize = 10;
// Create WAL files with values filled in
static void FillData(DBTest* test, Options& options,
const size_t wal_count, size_t & count) {
DBOptions & db_options = options;
static void FillData(DBTest* test, Options& options, const size_t wal_count,
size_t& count) {
DBOptions& db_options = options;
count = 0;
@ -8750,7 +8750,7 @@ class RecoveryTestHelper {
std::unique_ptr<log::Writer> current_log_writer;
for (size_t j = kWALFileOffset; j < wal_count + kWALFileOffset; j++) {
uint64_t current_log_number = j;
uint64_t current_log_number = j;
std::string fname = LogFileName(test->dbname_, current_log_number);
unique_ptr<WritableFile> file;
ASSERT_OK(db_options.env->NewWritableFile(fname, &file, env_options));
@ -8760,7 +8760,7 @@ class RecoveryTestHelper {
std::string key = "key" + ToString(count++);
std::string value = test->DummyString(kValueSize);
assert(current_log_writer.get() != nullptr);
uint64_t seq = versions->LastSequence() + 1;
uint64_t seq = versions->LastSequence() + 1;
WriteBatch batch;
batch.Put(key, value);
WriteBatchInternal::SetSequence(&batch, seq);
@ -8793,9 +8793,9 @@ class RecoveryTestHelper {
}
// Manuall corrupt the specified WAL
static void CorruptWAL(DBTest * test, Options& options,
const double off, const double len,
const int wal_file_id, const bool trunc = false) {
static void CorruptWAL(DBTest* test, Options& options, const double off,
const double len, const int wal_file_id,
const bool trunc = false) {
Env* env = options.env;
std::string fname = LogFileName(test->dbname_, wal_file_id);
uint64_t size;
@ -8839,18 +8839,18 @@ class RecoveryTestHelper {
// at the end of any of the logs
// - We do not expect to open the data store for corruption
TEST_F(DBTest, kTolerateCorruptedTailRecords) {
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
for (auto trunc : {true, false}) { /* Corruption style */
for (int i = 0; i < 4; i++) { /* Corruption offset position */
for (auto trunc : {true, false}) { /* Corruption style */
for (int i = 0; i < 4; i++) { /* Corruption offset position */
for (int j = jstart; j < jend; j++) { /* WAL file */
// Fill data for testing
Options options = CurrentOptions();
const size_t row_count = RecoveryTestHelper::FillData(this, options);
// test checksum failure or parsing
RecoveryTestHelper::CorruptWAL(this, options, /*off=*/ i * .3,
/*len%=*/ .1, /*wal=*/ j, trunc);
RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
/*len%=*/.1, /*wal=*/j, trunc);
if (trunc) {
options.wal_recovery_mode =
@ -8874,8 +8874,8 @@ TEST_F(DBTest, kTolerateCorruptedTailRecords) {
// We don't expect the data store to be opened if there is any corruption
// (leading, middle or trailing -- incomplete writes or corruption)
TEST_F(DBTest, kAbsoluteConsistency) {
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
// Verify clean slate behavior
Options options = CurrentOptions();
@ -8886,7 +8886,7 @@ TEST_F(DBTest, kAbsoluteConsistency) {
ASSERT_EQ(RecoveryTestHelper::GetData(this), row_count);
for (auto trunc : {true, false}) { /* Corruption style */
for (int i = 0; i < 4; i++) { /* Corruption offset position */
for (int i = 0; i < 4; i++) { /* Corruption offset position */
if (trunc && i == 0) {
continue;
}
@ -8895,7 +8895,7 @@ TEST_F(DBTest, kAbsoluteConsistency) {
// fill with new date
RecoveryTestHelper::FillData(this, options);
// corrupt the wal
RecoveryTestHelper::CorruptWAL(this, options, /*off=*/ i * .3,
RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
/*len%=*/.1, j, trunc);
// verify
options.wal_recovery_mode = WALRecoveryMode::kAbsoluteConsistency;
@ -8910,20 +8910,20 @@ TEST_F(DBTest, kAbsoluteConsistency) {
// - We expect to open data store under all circumstances
// - We expect only data upto the point where the first error was encountered
TEST_F(DBTest, kPointInTimeRecovery) {
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
const int maxkeys = RecoveryTestHelper::kWALFilesCount *
RecoveryTestHelper::kKeysPerWALFile;
const int maxkeys =
RecoveryTestHelper::kWALFilesCount * RecoveryTestHelper::kKeysPerWALFile;
for (auto trunc : {true, false}) { /* Corruption style */
for (int i = 0; i < 4; i++) { /* Offset of corruption */
for (auto trunc : {true, false}) { /* Corruption style */
for (int i = 0; i < 4; i++) { /* Offset of corruption */
for (int j = jstart; j < jend; j++) { /* WAL file */
// Fill data for testing
Options options = CurrentOptions();
const size_t row_count = RecoveryTestHelper::FillData(this, options);
// Corrupt the wal
RecoveryTestHelper::CorruptWAL(this, options, /*off=*/ i * .3,
RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
/*len%=*/.1, j, trunc);
// Verify
@ -8945,11 +8945,11 @@ TEST_F(DBTest, kPointInTimeRecovery) {
}
const size_t min = RecoveryTestHelper::kKeysPerWALFile *
(j - RecoveryTestHelper::kWALFileOffset);
(j - RecoveryTestHelper::kWALFileOffset);
ASSERT_GE(recovered_row_count, min);
if (!trunc && i != 0) {
const size_t max = RecoveryTestHelper::kKeysPerWALFile *
(j - RecoveryTestHelper::kWALFileOffset + 1);
(j - RecoveryTestHelper::kWALFileOffset + 1);
ASSERT_LE(recovered_row_count, max);
}
}
@ -8961,18 +8961,18 @@ TEST_F(DBTest, kPointInTimeRecovery) {
// - We expect to open the data store under all scenarios
// - We expect to have recovered records past the corruption zone
TEST_F(DBTest, kSkipAnyCorruptedRecords) {
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
for (auto trunc : {true, false}) { /* Corruption style */
for (int i = 0; i < 4; i++) { /* Corruption offset */
for (auto trunc : {true, false}) { /* Corruption style */
for (int i = 0; i < 4; i++) { /* Corruption offset */
for (int j = jstart; j < jend; j++) { /* wal files */
// Fill data for testing
Options options = CurrentOptions();
const size_t row_count = RecoveryTestHelper::FillData(this, options);
// Corrupt the WAL
RecoveryTestHelper::CorruptWAL(this, options, /*off=*/ i * .3,
RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
/*len%=*/.1, j, trunc);
// Verify behavior
@ -11228,8 +11228,8 @@ TEST_F(DBTest, DynamicMemtableOptions) {
count++;
}
ASSERT_GT(sleep_count.load(), 0);
// Windows fails this test. Will tune in the future and figure out
// approp number
// Windows fails this test. Will tune in the future and figure out
// approp number
#ifndef OS_WIN
ASSERT_GT(static_cast<double>(count), 512 * 0.8);
ASSERT_LT(static_cast<double>(count), 512 * 1.2);
@ -11254,8 +11254,8 @@ TEST_F(DBTest, DynamicMemtableOptions) {
count++;
}
ASSERT_GT(sleep_count.load(), 0);
// Windows fails this test. Will tune in the future and figure out
// approp number
// Windows fails this test. Will tune in the future and figure out
// approp number
#ifndef OS_WIN
ASSERT_GT(static_cast<double>(count), 256 * 0.8);
ASSERT_LT(static_cast<double>(count), 266 * 1.2);

@ -216,11 +216,8 @@ TEST_F(EventListenerTest, OnSingleDBCompactionTest) {
// This simple Listener can only handle one flush at a time.
class TestFlushListener : public EventListener {
public:
explicit TestFlushListener(Env* env) :
slowdown_count(0),
stop_count(0),
db_closed(),
env_(env) {
explicit TestFlushListener(Env* env)
: slowdown_count(0), stop_count(0), db_closed(), env_(env) {
db_closed = false;
}
void OnTableFileCreated(

@ -90,7 +90,8 @@ class TransactionLogIteratorImpl : public TransactionLogIterator {
Env* env;
Logger* info_log;
virtual void Corruption(size_t bytes, const Status& s) override {
Log(InfoLogLevel::ERROR_LEVEL, info_log, "dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
Log(InfoLogLevel::ERROR_LEVEL, info_log,
"dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
s.ToString().c_str());
}
virtual void Info(const char* s) {

File diff suppressed because it is too large.

@ -33,7 +33,7 @@ namespace rocksdb {
// TODO(yhchiang): remove this function once c++14 is available
// as std::max will be able to cover this.
// Current MS compiler does not support constexpr
template<int A, int B>
template <int A, int B>
struct constexpr_max {
static const int result = (A > B) ? A : B;
};

@ -56,31 +56,29 @@ class LogFile {
};
struct BatchResult {
SequenceNumber sequence = 0;
std::unique_ptr<WriteBatch> writeBatchPtr;
SequenceNumber sequence = 0;
std::unique_ptr<WriteBatch> writeBatchPtr;
// Add empty __ctor and __dtor for the rule of five
// However, preserve the original semantics and prohibit copying
// as the unique_ptr member does not copy.
BatchResult() {
}
// Add empty __ctor and __dtor for the rule of five
// However, preserve the original semantics and prohibit copying
// as the unique_ptr member does not copy.
BatchResult() {}
~BatchResult() {
}
~BatchResult() {}
BatchResult(const BatchResult&) = delete;
BatchResult(const BatchResult&) = delete;
BatchResult& operator=(const BatchResult&) = delete;
BatchResult& operator=(const BatchResult&) = delete;
BatchResult(BatchResult && bResult) :
sequence(std::move(bResult.sequence)), writeBatchPtr(std::move(bResult.writeBatchPtr)) {
}
BatchResult(BatchResult&& bResult)
: sequence(std::move(bResult.sequence)),
writeBatchPtr(std::move(bResult.writeBatchPtr)) {}
BatchResult& operator=(BatchResult && bResult) {
sequence = std::move(bResult.sequence);
writeBatchPtr = std::move(bResult.writeBatchPtr);
return *this;
}
BatchResult& operator=(BatchResult&& bResult) {
sequence = std::move(bResult.sequence);
writeBatchPtr = std::move(bResult.writeBatchPtr);
return *this;
}
};
// A TransactionLogIterator is used to iterate over the transactions in a db.

@ -54,24 +54,18 @@ struct Variant {
/* implicit */ Variant(uint64_t i) : type_(kInt) { data_.i = i; }
/* implicit */ Variant(double d) : type_(kDouble) { data_.d = d; }
/* implicit */ Variant(const std::string& s) : type_(kString) {
new (&data_.s) std::string(s);
new (&data_.s) std::string(s);
}
Variant(const Variant& v) : type_(v.type_) {
Init(v, data_);
}
Variant(const Variant& v) : type_(v.type_) { Init(v, data_); }
Variant& operator=(const Variant& v);
Variant(Variant&& rhs) : type_(kNull) {
*this = std::move(rhs);
}
Variant(Variant&& rhs) : type_(kNull) { *this = std::move(rhs); }
Variant& operator=(Variant&& v);
~Variant() {
Destroy(type_, data_);
}
~Variant() { Destroy(type_, data_); }
Type type() const { return type_; }
bool get_bool() const { return data_.b; }
@ -83,16 +77,16 @@ struct Variant {
bool operator!=(const Variant& other) const { return !(*this == other); }
private:
Type type_;
union Data {
bool b;
uint64_t i;
double d;
// Current version of MS compiler not C++11 compliant so can not put std::string
bool b;
uint64_t i;
double d;
// Current version of MS compiler not C++11 compliant so can not put
// std::string
// however, even then we still need the rest of the maintenance.
char s[sizeof(std::string)];
char s[sizeof(std::string)];
} data_;
// Avoid type_punned aliasing problem

@ -11,6 +11,7 @@
#include "pragma_error.h"
ROCKSDB_WARNING("Warning: This file was moved to rocksdb/utilities/backupable_db.h")
ROCKSDB_WARNING(
"Warning: This file was moved to rocksdb/utilities/backupable_db.h")
#include "rocksdb/utilities/backupable_db.h"

@ -13,25 +13,24 @@
#define RDB_STR__(x) #x
#define RDB_STR(x) RDB_STR__(x)
#if defined(ROCKSDB_PLATFORM_POSIX)
// Wrap unportable warning macro
# define ROCKSDB_WARNING(x) _Pragma(RDB_STR(GCC warning(x)))
#define ROCKSDB_WARNING(x) _Pragma(RDB_STR(GCC warning(x)))
#elif defined(OS_WIN)
// Wrap unportable warning macro
#if defined(_MSC_VER)
// format it according to visual studio output (to get source lines and warnings in the IDE)
#define ROCKSDB_WARNING(x) __pragma( message(__FILE__ "(" RDB_STR(__LINE__) ") : warning: " x) )
// format it according to visual studio output (to get source lines and warnings
// in the IDE)
#define ROCKSDB_WARNING(x) \
__pragma(message(__FILE__ "(" RDB_STR(__LINE__) ") : warning: " x))
#else
// make #warning into #pragma GCC warning gcc 4.7+ and clang 3.2+ supported
#define ROCKSDB_WARNING(x) _Pragma(RDB_STR(GCC warning(x)))
// make #warning into #pragma GCC warning gcc 4.7+ and clang 3.2+ supported
#define ROCKSDB_WARNING(x) _Pragma(RDB_STR(GCC warning(x)))
#endif
#endif
#endif // STORAGE_LEVELDB_UTILITIES_PRAGMA_ERROR_H_

@ -13,15 +13,15 @@
#define STORAGE_LEVELDB_PORT_DIRENT_H_
#ifdef ROCKSDB_PLATFORM_POSIX
# include <sys/types.h>
# include <dirent.h>
#include <dirent.h>
#include <sys/types.h>
#elif defined(OS_WIN)
namespace rocksdb {
namespace port {
struct dirent {
char d_name[_MAX_PATH]; /* filename */
char d_name[_MAX_PATH]; /* filename */
};
struct DIR;
@ -32,7 +32,7 @@ dirent* readdir(DIR* dirp);
int closedir(DIR* dirp);
} // namespace port
} // namespace port
using port::dirent;
using port::DIR;
@ -40,12 +40,8 @@ using port::opendir;
using port::readdir;
using port::closedir;
} // namespace rocksdb
#endif
#endif // STORAGE_LEVELDB_PORT_DIRENT_H_
} // namespace rocksdb
#endif // OS_WIN
#endif // STORAGE_LEVELDB_PORT_DIRENT_H_

@ -15,8 +15,8 @@
// porting to a new platform, see "port_example.h" for documentation
// of what the new port_<platform>.h file must provide.
#if defined(ROCKSDB_PLATFORM_POSIX)
# include "port/port_posix.h"
#include "port/port_posix.h"
#elif defined(OS_WIN)
# include "port/win/port_win.h"
#include "port/win/port_win.h"
#endif

@ -11,7 +11,8 @@
#pragma once
// size_t printf formatting named in the manner of C99 standard formatting strings such as PRIu64
// size_t printf formatting named in the manner of C99 standard formatting
// strings such as PRIu64
// in fact, we could use that one
#define ROCKSDB_PRIszt "zu"

@ -7,7 +7,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// This file is a portable substitute for sys/time.h which does not exist on Windows
// This file is a portable substitute for sys/time.h which does not exist on
// Windows
#ifndef STORAGE_LEVELDB_PORT_SYS_TIME_H_
#define STORAGE_LEVELDB_PORT_SYS_TIME_H_
@ -22,18 +23,16 @@ namespace port {
// Avoid including winsock2.h for this definition
typedef struct timeval {
long tv_sec;
long tv_usec;
long tv_sec;
long tv_usec;
} timeval;
void gettimeofday(struct timeval* tv, struct timezone* tz);
inline
struct tm* localtime_r(const time_t *timep, struct tm *result) {
errno_t ret = localtime_s(result, timep);
return (ret == 0) ? result : NULL;
inline struct tm* localtime_r(const time_t* timep, struct tm* result) {
errno_t ret = localtime_s(result, timep);
return (ret == 0) ? result : NULL;
}
}
using port::timeval;
@ -42,8 +41,8 @@ using port::localtime_r;
}
#else
# include <time.h>
# include <sys/time.h>
#include <time.h>
#include <sys/time.h>
#endif
#endif // STORAGE_LEVELDB_PORT_SYS_TIME_H_
#endif // STORAGE_LEVELDB_PORT_SYS_TIME_H_

@ -14,11 +14,10 @@
// porting to a new platform, see "port_example.h" for documentation
// of what the new port_<platform>.h file must provide.
#if defined(ROCKSDB_PLATFORM_POSIX)
# include "util/posix_logger.h"
#include "util/posix_logger.h"
#elif defined(OS_WIN)
# include "port/win/win_logger.h"
#include "port/win/win_logger.h"
#endif
#endif // STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
#endif // STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_

File diff suppressed because it is too large.

@ -28,181 +28,162 @@
#include "util/logging.h"
namespace rocksdb
{
namespace port
{
namespace rocksdb {
namespace port {
void gettimeofday(struct timeval* tv, struct timezone* /* tz */) {
using namespace std::chrono;
using namespace std::chrono;
microseconds usNow (duration_cast<microseconds>(system_clock::now().time_since_epoch()));
microseconds usNow(
duration_cast<microseconds>(system_clock::now().time_since_epoch()));
seconds secNow(duration_cast<seconds>(usNow));
seconds secNow(duration_cast<seconds>(usNow));
tv->tv_sec = secNow.count();
tv->tv_usec = usNow.count() - duration_cast<microseconds>(secNow).count();
tv->tv_sec = secNow.count();
tv->tv_usec = usNow.count() - duration_cast<microseconds>(secNow).count();
}
Mutex::Mutex(bool adaptive) : lock(m_mutex, std::defer_lock) {}
Mutex::Mutex(bool adaptive) : lock(m_mutex, std::defer_lock) {
}
Mutex::~Mutex() {
}
Mutex::~Mutex() {}
void Mutex::Lock() {
lock.lock();
lock.lock();
#ifndef NDEBUG
locked_ = true;
locked_ = true;
#endif
}
void Mutex::Unlock() {
#ifndef NDEBUG
locked_ = false;
locked_ = false;
#endif
lock.unlock();
lock.unlock();
}
void Mutex::AssertHeld() {
#ifndef NDEBUG
assert(locked_);
assert(locked_);
#endif
}
CondVar::CondVar(Mutex* mu) : mu_(mu) {
}
CondVar::CondVar(Mutex* mu) : mu_(mu) {}
CondVar::~CondVar() {
}
CondVar::~CondVar() {}
void CondVar::Wait() {
#ifndef NDEBUG
mu_->locked_ = false;
mu_->locked_ = false;
#endif
cv_.wait(mu_->getLock());
cv_.wait(mu_->getLock());
#ifndef NDEBUG
mu_->locked_ = true;
mu_->locked_ = true;
#endif
}
bool CondVar::TimedWait(uint64_t abs_time_us) {
#ifndef NDEBUG
mu_->locked_ = false;
mu_->locked_ = false;
#endif
using namespace std::chrono;
using namespace std::chrono;
microseconds usAbsTime(abs_time_us);
microseconds usNow(duration_cast<microseconds>(system_clock::now().time_since_epoch()));
microseconds relTimeUs = (usAbsTime > usNow) ? (usAbsTime - usNow) : microseconds::zero();
microseconds usAbsTime(abs_time_us);
microseconds usNow(
duration_cast<microseconds>(system_clock::now().time_since_epoch()));
microseconds relTimeUs =
(usAbsTime > usNow) ? (usAbsTime - usNow) : microseconds::zero();
std::_Cv_status cvStatus = cv_.wait_for(mu_->getLock(), relTimeUs);
std::_Cv_status cvStatus = cv_.wait_for(mu_->getLock(), relTimeUs);
#ifndef NDEBUG
mu_->locked_ = true;
mu_->locked_ = true;
#endif
if (cvStatus == std::cv_status::timeout) {
return true;
}
if (cvStatus == std::cv_status::timeout) {
return true;
}
return false;
return false;
}
void CondVar::Signal() {
cv_.notify_one();
}
void CondVar::Signal() { cv_.notify_one(); }
void CondVar::SignalAll() {
cv_.notify_all ();
}
void CondVar::SignalAll() { cv_.notify_all(); }
void InitOnce(OnceType* once, void (*initializer)()) {
std::call_once(*once, initializer);
std::call_once(*once, initializer);
}
// Private structure, exposed only by pointer
struct DIR {
intptr_t handle_;
bool firstread_;
struct __finddata64_t data_;
dirent entry_;
DIR() : handle_(-1), firstread_(true) {}
intptr_t handle_;
bool firstread_;
struct __finddata64_t data_;
dirent entry_;
DIR(const DIR&) = delete;
DIR& operator=(const DIR&) = delete;
DIR() : handle_(-1), firstread_(true) {}
~DIR() {
DIR(const DIR&) = delete;
DIR& operator=(const DIR&) = delete;
if (-1 != handle_) {
_findclose(handle_);
}
~DIR() {
if (-1 != handle_) {
_findclose(handle_);
}
}
};
DIR* opendir(const char* name) {
if (!name || *name == 0) {
errno = ENOENT;
return nullptr;
}
if (!name || *name == 0) {
errno = ENOENT;
return nullptr;
}
std::string pattern(name);
pattern.append("\\").append("*");
std::string pattern(name);
pattern.append("\\").append("*");
std::unique_ptr<DIR> dir(new DIR);
std::unique_ptr<DIR> dir(new DIR);
dir->handle_ = _findfirst64(pattern.c_str(), &dir->data_);
dir->handle_ = _findfirst64(pattern.c_str(), &dir->data_);
if (dir->handle_ == -1) {
return nullptr;
}
if (dir->handle_ == -1) {
return nullptr;
}
strncpy_s(dir->entry_.d_name, dir->data_.name, strlen(dir->data_.name));
strncpy_s(dir->entry_.d_name, dir->data_.name, strlen(dir->data_.name));
return dir.release();
return dir.release();
}
struct dirent* readdir(DIR* dirp) {
if (!dirp || dirp->handle_ == -1) {
errno = EBADF;
return nullptr;
}
if (!dirp || dirp->handle_ == -1) {
errno = EBADF;
return nullptr;
}
if (dirp->firstread_) {
dirp->firstread_ = false;
return &dirp->entry_;
}
if (dirp->firstread_) {
dirp->firstread_ = false;
return &dirp->entry_;
}
auto ret = _findnext64(dirp->handle_, &dirp->data_);
auto ret = _findnext64(dirp->handle_, &dirp->data_);
if (ret != 0) {
return nullptr;
}
if (ret != 0) {
return nullptr;
}
strncpy_s(dirp->entry_.d_name, dirp->data_.name, strlen(dirp->data_.name));
strncpy_s(dirp->entry_.d_name, dirp->data_.name, strlen(dirp->data_.name));
return &dirp->entry_;
return &dirp->entry_;
}
int closedir(DIR* dirp) {
delete dirp;
return 0;
delete dirp;
return 0;
}
int truncate(const char* path, int64_t len) {
if (path == nullptr) {
errno = EFAULT;
return -1;
@ -213,13 +194,12 @@ int truncate(const char* path, int64_t len) {
return -1;
}
HANDLE hFile = CreateFile(path,
GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL, // Security attrs
OPEN_EXISTING, // Truncate existing file only
FILE_ATTRIBUTE_NORMAL,
NULL);
HANDLE hFile =
CreateFile(path, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL, // Security attrs
OPEN_EXISTING, // Truncate existing file only
FILE_ATTRIBUTE_NORMAL, NULL);
if (INVALID_HANDLE_VALUE == hFile) {
auto lastError = GetLastError();
@ -237,10 +217,8 @@ int truncate(const char* path, int64_t len) {
FILE_END_OF_FILE_INFO end_of_file;
end_of_file.EndOfFile.QuadPart = len;
if (!SetFileInformationByHandle(hFile,
FileEndOfFileInfo,
&end_of_file,
sizeof(FILE_END_OF_FILE_INFO))) {
if (!SetFileInformationByHandle(hFile, FileEndOfFileInfo, &end_of_file,
sizeof(FILE_END_OF_FILE_INFO))) {
errno = EIO;
result = -1;
}
@ -260,14 +238,13 @@ namespace rocksdb {
namespace port {
__declspec(noinline)
void WINAPI InitializeJemalloc() {
je_init();
atexit(je_uninit);
__declspec(noinline) void WINAPI InitializeJemalloc() {
je_init();
atexit(je_uninit);
}
} // port
} // rocksdb
} // port
} // rocksdb
extern "C" {
@ -275,37 +252,39 @@ extern "C" {
#pragma comment(linker, "/INCLUDE:p_rocksdb_init_jemalloc")
typedef void (WINAPI *CRT_Startup_Routine)(void);
typedef void(WINAPI* CRT_Startup_Routine)(void);
// .CRT section is merged with .rdata on x64 so it must be constant data.
// must be of external linkage
// We put this into XCT since we want to run this earlier than C++ static constructors
// We put this into XCT since we want to run this earlier than C++ static
// constructors
// which are placed into XCU
#pragma const_seg(".CRT$XCT")
extern const CRT_Startup_Routine p_rocksdb_init_jemalloc;
const CRT_Startup_Routine p_rocksdb_init_jemalloc = rocksdb::port::InitializeJemalloc;
const CRT_Startup_Routine p_rocksdb_init_jemalloc =
rocksdb::port::InitializeJemalloc;
#pragma const_seg()
#else // _WIN64
#else // _WIN64
// x86 untested
#pragma comment(linker, "/INCLUDE:_p_rocksdb_init_jemalloc")
#pragma section(".CRT$XCT", read)
JEMALLOC_SECTION(".CRT$XCT") JEMALLOC_ATTR(used)
static const void (WINAPI *p_rocksdb_init_jemalloc)(void) = rocksdb::port::InitializeJemalloc;
JEMALLOC_SECTION(".CRT$XCT") JEMALLOC_ATTR(used) static const void(
WINAPI* p_rocksdb_init_jemalloc)(void) = rocksdb::port::InitializeJemalloc;
#endif // _WIN64
#endif // _WIN64
} // extern "C"
} // extern "C"
// Global operators to be replaced by a linker
void* operator new(size_t size) {
void* p = je_malloc(size);
if (!p) {
throw std::bad_alloc();
throw std::bad_alloc();
}
return p;
}
@ -318,13 +297,8 @@ void* operator new[](size_t size) {
return p;
}
void operator delete(void* p) {
je_free(p);
}
void operator delete[](void* p) {
je_free(p);
}
void operator delete(void* p) { je_free(p); }
#endif // JEMALLOC
void operator delete[](void* p) { je_free(p); }
#endif // JEMALLOC

@ -14,7 +14,7 @@
// Always want minimum headers
#ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
// Assume that for everywhere
@ -42,7 +42,8 @@
typedef SSIZE_T ssize_t;
// size_t printf formatting named in the manner of C99 standard formatting strings such as PRIu64
// size_t printf formatting named in the manner of C99 standard formatting
// strings such as PRIu64
// in fact, we could use that one
#define ROCKSDB_PRIszt "Iu"
@ -77,8 +78,7 @@ namespace rocksdb {
#define PREFETCH(addr, rw, locality)
namespace port
{
namespace port {
// For use at db/file_indexer.h kLevelMaxIndex
const int kMaxInt32 = INT32_MAX;
@ -88,82 +88,67 @@ const bool kLittleEndian = true;
class CondVar;
class Mutex
{
public:
/* implicit */ Mutex(bool adaptive = false);
~Mutex();
void Lock();
void Unlock();
// this will assert if the mutex is not locked
// it does NOT verify that mutex is held by a calling thread
void AssertHeld();
std::unique_lock<std::mutex>& getLock() {
return lock;
}
class Mutex {
public:
/* implicit */ Mutex(bool adaptive = false);
~Mutex();
void Lock();
void Unlock();
// this will assert if the mutex is not locked
// it does NOT verify that mutex is held by a calling thread
void AssertHeld();
private:
friend class CondVar;
std::mutex m_mutex;
std::unique_lock<std::mutex> lock;
std::unique_lock<std::mutex>& getLock() { return lock; }
private:
friend class CondVar;
std::mutex m_mutex;
std::unique_lock<std::mutex> lock;
#ifndef NDEBUG
bool locked_;
bool locked_;
#endif
// No copying
Mutex(const Mutex&);
void operator=(const Mutex&);
// No copying
Mutex(const Mutex&);
void operator=(const Mutex&);
};
class RWMutex
{
public:
RWMutex() {
InitializeSRWLock(&srwLock_);
}
class RWMutex {
public:
RWMutex() { InitializeSRWLock(&srwLock_); }
void ReadLock() {
AcquireSRWLockShared(&srwLock_);
}
void ReadLock() { AcquireSRWLockShared(&srwLock_); }
void WriteLock() {
AcquireSRWLockExclusive(&srwLock_);
}
void WriteLock() { AcquireSRWLockExclusive(&srwLock_); }
void ReadUnlock() {
ReleaseSRWLockShared(&srwLock_);
}
void ReadUnlock() { ReleaseSRWLockShared(&srwLock_); }
void WriteUnlock() {
ReleaseSRWLockExclusive(&srwLock_);
}
void WriteUnlock() { ReleaseSRWLockExclusive(&srwLock_); }
// Empty as in POSIX
void AssertHeld() { }
private:
// Empty as in POSIX
void AssertHeld() {}
private:
SRWLOCK srwLock_;
// No copying allowed
RWMutex(const RWMutex&);
void operator=(const RWMutex&);
};
class CondVar
{
public:
explicit CondVar(Mutex* mu);
~CondVar();
void Wait();
bool TimedWait(uint64_t expiration_time);
void Signal();
void SignalAll();
private:
std::condition_variable cv_;
Mutex * mu_;
class CondVar {
public:
explicit CondVar(Mutex* mu);
~CondVar();
void Wait();
bool TimedWait(uint64_t expiration_time);
void Signal();
void SignalAll();
private:
std::condition_variable cv_;
Mutex* mu_;
};
typedef std::once_flag OnceType;
@ -171,16 +156,15 @@ typedef std::once_flag OnceType;
extern void InitOnce(OnceType* once, void (*initializer)());
inline bool Snappy_Compress(const CompressionOptions& opts, const char* input,
size_t length, ::std::string* output)
{
size_t length, ::std::string* output) {
#ifdef SNAPPY
output->resize(snappy::MaxCompressedLength(length));
size_t outlen;
snappy::RawCompress(input, length, &(*output)[0], &outlen);
output->resize(outlen);
return true;
output->resize(snappy::MaxCompressedLength(length));
size_t outlen;
snappy::RawCompress(input, length, &(*output)[0], &outlen);
output->resize(outlen);
return true;
#endif
return false;
return false;
}
inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
@ -192,8 +176,7 @@ inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
#endif
}
inline bool Snappy_Uncompress(const char* input, size_t length,
char* output) {
inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
#ifdef SNAPPY
return snappy::RawUncompress(input, length, output);
#else
@ -223,14 +206,14 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
output->resize(length);
// Compress the input, and put compressed data in output.
_stream.next_in = (Bytef *)input;
_stream.next_in = (Bytef*)input;
_stream.avail_in = length;
// Initialize the output size.
_stream.avail_out = length;
_stream.next_out = (Bytef *)&(*output)[0];
_stream.next_out = (Bytef*)&(*output)[0];
int old_sz =0, new_sz =0, new_sz_delta =0;
int old_sz = 0, new_sz = 0, new_sz_delta = 0;
bool done = false;
while (!done) {
int st = deflate(&_stream, Z_FINISH);
@ -246,7 +229,7 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
new_sz = output->size() + (new_sz_delta < 10 ? 10 : new_sz_delta);
output->resize(new_sz);
// Set more output.
_stream.next_out = (Bytef *)&(*output)[old_sz];
_stream.next_out = (Bytef*)&(*output)[old_sz];
_stream.avail_out = new_sz - old_sz;
break;
case Z_BUF_ERROR:
@ -264,7 +247,7 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
}
inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
int* decompress_size, int windowBits = -14) {
int* decompress_size, int windowBits = -14) {
#ifdef ZLIB
z_stream _stream;
memset(&_stream, 0, sizeof(z_stream));
@ -272,13 +255,13 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
// For raw inflate, the windowBits should be -8..-15.
// If windowBits is bigger than zero, it will use either zlib
// header or gzip header. Adding 32 to it will do automatic detection.
int st = inflateInit2(&_stream,
windowBits > 0 ? windowBits + 32 : windowBits);
int st =
inflateInit2(&_stream, windowBits > 0 ? windowBits + 32 : windowBits);
if (st != Z_OK) {
return nullptr;
}
_stream.next_in = (Bytef *)input_data;
_stream.next_in = (Bytef*)input_data;
_stream.avail_in = input_length;
// Assume the decompressed data size will 5x of compressed size.
@ -286,14 +269,14 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
char* output = new char[output_len];
int old_sz = output_len;
_stream.next_out = (Bytef *)output;
_stream.next_out = (Bytef*)output;
_stream.avail_out = output_len;
char* tmp = nullptr;
int output_len_delta;
bool done = false;
//while(_stream.next_in != nullptr && _stream.avail_in != 0) {
// while(_stream.next_in != nullptr && _stream.avail_in != 0) {
while (!done) {
int st = inflate(&_stream, Z_SYNC_FLUSH);
switch (st) {
@ -311,7 +294,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
output = tmp;
// Set more output.
_stream.next_out = (Bytef *)(output + old_sz);
_stream.next_out = (Bytef*)(output + old_sz);
_stream.avail_out = output_len - old_sz;
break;
case Z_BUF_ERROR:
@ -349,15 +332,15 @@ inline bool BZip2_Compress(const CompressionOptions& opts, const char* input,
output->resize(length);
// Compress the input, and put compressed data in output.
_stream.next_in = (char *)input;
_stream.next_in = (char*)input;
_stream.avail_in = length;
// Initialize the output size.
_stream.next_out = (char *)&(*output)[0];
_stream.next_out = (char*)&(*output)[0];
_stream.avail_out = length;
int old_sz =0, new_sz =0;
while(_stream.next_in != nullptr && _stream.avail_in != 0) {
int old_sz = 0, new_sz = 0;
while (_stream.next_in != nullptr && _stream.avail_in != 0) {
int st = BZ2_bzCompress(&_stream, BZ_FINISH);
switch (st) {
case BZ_STREAM_END:
@ -369,7 +352,7 @@ inline bool BZip2_Compress(const CompressionOptions& opts, const char* input,
new_sz = (int)(output->size() * 1.2);
output->resize(new_sz);
// Set more output.
_stream.next_out = (char *)&(*output)[old_sz];
_stream.next_out = (char*)&(*output)[old_sz];
_stream.avail_out = new_sz - old_sz;
break;
case BZ_SEQUENCE_ERROR:
@ -397,7 +380,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
return nullptr;
}
_stream.next_in = (char *)input_data;
_stream.next_in = (char*)input_data;
_stream.avail_in = input_length;
// Assume the decompressed data size will be 5x of compressed size.
@ -405,12 +388,12 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
char* output = new char[output_len];
int old_sz = output_len;
_stream.next_out = (char *)output;
_stream.next_out = (char*)output;
_stream.avail_out = output_len;
char* tmp = nullptr;
while(_stream.next_in != nullptr && _stream.avail_in != 0) {
while (_stream.next_in != nullptr && _stream.avail_in != 0) {
int st = BZ2_bzDecompress(&_stream);
switch (st) {
case BZ_STREAM_END:
@ -425,7 +408,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
output = tmp;
// Set more output.
_stream.next_out = (char *)(output + old_sz);
_stream.next_out = (char*)(output + old_sz);
_stream.avail_out = output_len - old_sz;
break;
default:
@ -442,12 +425,12 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
return nullptr;
}
inline bool LZ4_Compress(const CompressionOptions &opts, const char *input,
inline bool LZ4_Compress(const CompressionOptions& opts, const char* input,
size_t length, ::std::string* output) {
#ifdef LZ4
int compressBound = LZ4_compressBound(length);
output->resize(8 + compressBound);
char *p = const_cast<char *>(output->c_str());
char* p = const_cast<char*>(output->c_str());
memcpy(p, &length, sizeof(length));
size_t outlen;
outlen = LZ4_compress_limitedOutput(input, p + 8, length, compressBound);
@ -468,7 +451,7 @@ inline char* LZ4_Uncompress(const char* input_data, size_t input_length,
}
int output_len;
memcpy(&output_len, input_data, sizeof(output_len));
char *output = new char[output_len];
char* output = new char[output_len];
*decompress_size = LZ4_decompress_safe_partial(
input_data + 8, output, input_length - 8, output_len, output_len);
if (*decompress_size < 0) {
@ -480,12 +463,12 @@ inline char* LZ4_Uncompress(const char* input_data, size_t input_length,
return nullptr;
}
inline bool LZ4HC_Compress(const CompressionOptions &opts, const char* input,
inline bool LZ4HC_Compress(const CompressionOptions& opts, const char* input,
size_t length, ::std::string* output) {
#ifdef LZ4
int compressBound = LZ4_compressBound(length);
output->resize(8 + compressBound);
char *p = const_cast<char *>(output->c_str());
char* p = const_cast<char*>(output->c_str());
memcpy(p, &length, sizeof(length));
size_t outlen;
#ifdef LZ4_VERSION_MAJOR // they only started defining this since r113
@ -515,47 +498,43 @@ inline bool LZ4HC_Compress(const CompressionOptions &opts, const char* input,
// For Thread Local Storage abstraction
typedef DWORD pthread_key_t;
inline
int pthread_key_create(pthread_key_t *key, void(*destructor)(void*)) {
// Not used
(void)destructor;
inline int pthread_key_create(pthread_key_t* key, void (*destructor)(void*)) {
// Not used
(void)destructor;
pthread_key_t k = TlsAlloc();
if (TLS_OUT_OF_INDEXES == k) {
return ENOMEM;
}
pthread_key_t k = TlsAlloc();
if (TLS_OUT_OF_INDEXES == k) {
return ENOMEM;
}
*key = k;
return 0;
*key = k;
return 0;
}
inline
int pthread_key_delete(pthread_key_t key) {
if (!TlsFree(key)) {
return EINVAL;
}
return 0;
inline int pthread_key_delete(pthread_key_t key) {
if (!TlsFree(key)) {
return EINVAL;
}
return 0;
}
inline
int pthread_setspecific(pthread_key_t key, const void *value) {
if (!TlsSetValue(key, const_cast<void*>(value))) {
return ENOMEM;
}
return 0;
inline int pthread_setspecific(pthread_key_t key, const void* value) {
if (!TlsSetValue(key, const_cast<void*>(value))) {
return ENOMEM;
}
return 0;
}
inline
void* pthread_getspecific(pthread_key_t key) {
void* result = TlsGetValue(key);
if (!result) {
if (GetLastError() != ERROR_SUCCESS) {
errno = EINVAL;
} else {
errno = NOERROR;
}
inline void* pthread_getspecific(pthread_key_t key) {
void* result = TlsGetValue(key);
if (!result) {
if (GetLastError() != ERROR_SUCCESS) {
errno = EINVAL;
} else {
errno = NOERROR;
}
return result;
}
return result;
}
// UNIX equiv although errno numbers will be off
@ -563,7 +542,7 @@ void* pthread_getspecific(pthread_key_t key) {
// feel space with zeros in case the file is extended.
int truncate(const char* path, int64_t length);
} // namespace port
} // namespace port
using port::pthread_key_t;
using port::pthread_key_create;
@ -572,6 +551,6 @@ using port::pthread_setspecific;
using port::pthread_getspecific;
using port::truncate;
} // namespace rocksdb
} // namespace rocksdb
#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_

@ -24,42 +24,37 @@
namespace rocksdb {
WinLogger::WinLogger(uint64_t (*gettid)(), Env* env, FILE * file, const InfoLogLevel log_level)
: Logger(log_level),
gettid_(gettid),
log_size_(0),
last_flush_micros_(0),
env_(env),
flush_pending_(false),
file_(file) {
}
WinLogger::WinLogger(uint64_t (*gettid)(), Env* env, FILE* file,
const InfoLogLevel log_level)
: Logger(log_level),
gettid_(gettid),
log_size_(0),
last_flush_micros_(0),
env_(env),
flush_pending_(false),
file_(file) {}
void WinLogger::DebugWriter(const char* str, int len) {
size_t sz = fwrite(str, 1, len, file_);
if (sz == 0) {
perror("fwrite .. [BAD]");
}
size_t sz = fwrite(str, 1, len, file_);
if (sz == 0) {
perror("fwrite .. [BAD]");
}
}
WinLogger::~WinLogger() {
close();
}
WinLogger::~WinLogger() { close(); }
void WinLogger::close() {
fclose(file_);
}
void WinLogger::close() { fclose(file_); }
void WinLogger::Flush() {
if (flush_pending_) {
flush_pending_ = false;
fflush(file_);
}
if (flush_pending_) {
flush_pending_ = false;
fflush(file_);
}
last_flush_micros_ = env_->NowMicros();
last_flush_micros_ = env_->NowMicros();
}
void WinLogger::Logv(const char* format, va_list ap) {
IOSTATS_TIMER_GUARD(logger_nanos);
const uint64_t thread_id = (*gettid_)();
@ -69,88 +64,82 @@ void WinLogger::Logv(const char* format, va_list ap) {
char buffer[500];
std::unique_ptr<char[]> largeBuffer;
for (int iter = 0; iter < 2; ++iter) {
char* base;
int bufsize;
if (iter == 0) {
bufsize = sizeof(buffer);
base = buffer;
} else {
bufsize = 30000;
largeBuffer.reset(new char[bufsize]);
base = largeBuffer.get();
}
char* base;
int bufsize;
if (iter == 0) {
bufsize = sizeof(buffer);
base = buffer;
} else {
bufsize = 30000;
largeBuffer.reset(new char[bufsize]);
base = largeBuffer.get();
}
char* p = base;
char* limit = base + bufsize;
struct timeval now_tv;
gettimeofday(&now_tv, nullptr);
const time_t seconds = now_tv.tv_sec;
struct tm t;
localtime_s(&t, &seconds);
p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
t.tm_hour,
t.tm_min,
t.tm_sec,
static_cast<int>(now_tv.tv_usec),
static_cast<long long unsigned int>(thread_id));
// Print the message
if (p < limit) {
va_list backup_ap;
va_copy(backup_ap, ap);
int done = vsnprintf(p, limit - p, format, backup_ap);
if (done > 0){
p += done;
} else {
continue;
}
va_end(backup_ap);
char* p = base;
char* limit = base + bufsize;
struct timeval now_tv;
gettimeofday(&now_tv, nullptr);
const time_t seconds = now_tv.tv_sec;
struct tm t;
localtime_s(&t, &seconds);
p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec),
static_cast<long long unsigned int>(thread_id));
// Print the message
if (p < limit) {
va_list backup_ap;
va_copy(backup_ap, ap);
int done = vsnprintf(p, limit - p, format, backup_ap);
if (done > 0) {
p += done;
} else {
continue;
}
va_end(backup_ap);
}
// Truncate to available space if necessary
if (p >= limit) {
if (iter == 0)
{
continue; // Try again with larger buffer
} else {
p = limit - 1;
}
// Truncate to available space if necessary
if (p >= limit) {
if (iter == 0) {
continue; // Try again with larger buffer
} else {
p = limit - 1;
}
}
// Add newline if necessary
if (p == base || p[-1] != '\n') {
*p++ = '\n';
}
// Add newline if necessary
if (p == base || p[-1] != '\n') {
*p++ = '\n';
}
assert(p <= limit);
const size_t write_size = p - base;
assert(p <= limit);
const size_t write_size = p - base;
size_t sz = fwrite(base, 1, write_size, file_);
if (sz == 0) {
perror("fwrite .. [BAD]");
}
size_t sz = fwrite(base, 1, write_size, file_);
if (sz == 0) {
perror("fwrite .. [BAD]");
}
flush_pending_ = true;
assert(sz == write_size);
if (sz > 0) {
log_size_ += write_size;
}
flush_pending_ = true;
assert(sz == write_size);
if (sz > 0) {
log_size_ += write_size;
}
uint64_t now_micros = static_cast<uint64_t>(now_tv.tv_sec) * 1000000 +
now_tv.tv_usec;
if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
flush_pending_ = false;
fflush(file_);
last_flush_micros_ = now_micros;
}
break;
uint64_t now_micros =
static_cast<uint64_t>(now_tv.tv_sec) * 1000000 + now_tv.tv_usec;
if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
flush_pending_ = false;
fflush(file_);
last_flush_micros_ = now_micros;
}
break;
}
}
size_t WinLogger::GetLogFileSize() const {
return log_size_;
}
size_t WinLogger::GetLogFileSize() const { return log_size_; }
} // namespace rocksdb

@ -23,8 +23,8 @@ class Env;
const int kDebugLogChunkSize = 128 * 1024;
class WinLogger : public rocksdb::Logger {
public:
WinLogger(uint64_t(*gettid)(), Env* env, FILE * file,
public:
WinLogger(uint64_t (*gettid)(), Env* env, FILE* file,
const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL);
virtual ~WinLogger();
@ -43,14 +43,13 @@ public:
void DebugWriter(const char* str, int len);
private:
FILE* file_;
uint64_t(*gettid_)(); // Return the thread id for the current thread
std::atomic_size_t log_size_;
private:
FILE* file_;
uint64_t (*gettid_)(); // Return the thread id for the current thread
std::atomic_size_t log_size_;
std::atomic_uint_fast64_t last_flush_micros_;
Env* env_;
bool flush_pending_;
Env* env_;
bool flush_pending_;
const static uint64_t flush_every_seconds_ = 5;
};

@ -374,8 +374,10 @@ Slice CompressBlock(const Slice& raw,
// kBlockBasedTableMagicNumber was picked by running
// echo rocksdb.table.block_based | sha1sum
// and taking the leading 64 bits.
// Please note that kBlockBasedTableMagicNumber may also be accessed by other .cc files
// for that reason we declare it extern in the header but to get the space allocated
// Please note that kBlockBasedTableMagicNumber may also be accessed by other
// .cc files
// for that reason we declare it extern in the header but to get the space
// allocated
// it must be not extern in one place.
const uint64_t kBlockBasedTableMagicNumber = 0x88e241b785f4cff7ull;
// We also support reading and writing legacy block based table format (for

@ -123,7 +123,8 @@ std::string BlockBasedTableFactory::GetPrintableTableOptions() const {
table_options_.block_cache_compressed.get());
ret.append(buffer);
if (table_options_.block_cache_compressed) {
snprintf(buffer, kBufferSize, " block_cache_compressed_size: %" ROCKSDB_PRIszt "\n",
snprintf(buffer, kBufferSize,
" block_cache_compressed_size: %" ROCKSDB_PRIszt "\n",
table_options_.block_cache_compressed->GetCapacity());
ret.append(buffer);
}

@ -146,14 +146,13 @@ TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
uint32_t num_hash_fun = 4;
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {1, 2, 3, 4}},
{user_keys[2], {2, 3, 4, 5}},
{user_keys[3], {3, 4, 5, 6}}
};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {1, 2, 3, 4}},
{user_keys[2], {2, 3, 4, 5}},
{user_keys[3], {3, 4, 5, 6}}};
hash_map = std::move(hm);
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
@ -190,13 +189,13 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
uint32_t num_hash_fun = 4;
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {0, 1, 2, 3}},
{user_keys[2], {0, 1, 2, 3}},
{user_keys[3], {0, 1, 2, 3}},
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {0, 1, 2, 3}},
{user_keys[2], {0, 1, 2, 3}},
{user_keys[3], {0, 1, 2, 3}},
};
hash_map = std::move(hm);
@ -234,13 +233,13 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
uint32_t num_hash_fun = 4;
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {0, 1, 2, 3}},
{user_keys[2], {0, 1, 2, 3}},
{user_keys[3], {0, 1, 2, 3}},
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {0, 1, 2, 3}},
{user_keys[2], {0, 1, 2, 3}},
{user_keys[3], {0, 1, 2, 3}},
};
hash_map = std::move(hm);
@ -284,14 +283,14 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 2}},
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 2}},
};
hash_map = std::move(hm);
@ -330,14 +329,14 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {3, 4}},
{user_keys[3], {4, 5}},
{user_keys[4], {0, 3}},
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {3, 4}},
{user_keys[3], {4, 5}},
{user_keys[4], {0, 3}},
};
hash_map = std::move(hm);
@ -375,14 +374,13 @@ TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
uint32_t num_hash_fun = 4;
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {1, 2, 3, 4}},
{user_keys[2], {2, 3, 4, 5}},
{user_keys[3], {3, 4, 5, 6}}
};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {1, 2, 3, 4}},
{user_keys[2], {2, 3, 4, 5}},
{user_keys[3], {3, 4, 5, 6}}};
hash_map = std::move(hm);
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
@ -415,13 +413,13 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
uint32_t num_hash_fun = 4;
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {0, 1, 2, 3}},
{user_keys[2], {0, 1, 2, 3}},
{user_keys[3], {0, 1, 2, 3}},
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1, 2, 3}},
{user_keys[1], {0, 1, 2, 3}},
{user_keys[2], {0, 1, 2, 3}},
{user_keys[3], {0, 1, 2, 3}},
};
hash_map = std::move(hm);
@ -456,14 +454,14 @@ TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 2}},
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 2}},
};
hash_map = std::move(hm);
@ -500,14 +498,14 @@ TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm =
{
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 1}},
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 1}},
};
hash_map = std::move(hm);
@ -527,8 +525,10 @@ TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
}
TEST_F(CuckooBuilderTest, FailWhenSameKeyInserted) {
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = { { "repeatedkey", { 0, 1, 2, 3 } } };
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{"repeatedkey", {0, 1, 2, 3}}};
hash_map = std::move(hm);
uint32_t num_hash_fun = 4;
std::string user_key = "repeatedkey";
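
For context, here is a minimal standalone sketch of the workaround these test comments keep repeating (names are illustrative and not part of the commit): build the map in a named temporary and move-assign it, rather than assigning a braced initializer list directly, which older Visual Studio releases rejected for operator=.

#include <cstdint>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

int main() {
  std::unordered_map<std::string, std::vector<uint64_t>> hash_map;
  // Older MSVC could not compile:  hash_map = {{"key01", {0, 1, 2, 3}}};
  // so the tests construct a named temporary and move-assign it instead.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {"key01", {0, 1, 2, 3}}, {"key02", {1, 2, 3, 4}}};
  hash_map = std::move(hm);
  return hash_map.size() == 2 ? 0 : 1;
}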

@ -18,9 +18,9 @@ static inline uint64_t CuckooHash(
const Slice& user_key, uint32_t hash_cnt, bool use_module_hash,
uint64_t table_size_, bool identity_as_first_hash,
uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t)) {
#if !defined NDEBUG || defined OS_WIN
// This part is used only in unit tests but we have to keep it for Windows build as we run test in both debug and release modes under Windows.
// This part is used only in unit tests but we have to keep it for Windows
// build as we run test in both debug and release modes under Windows.
if (get_slice_hash != nullptr) {
return get_slice_hash(user_key, hash_cnt, table_size_);
}
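
As a rough illustration of the hook this hunk reformats: the table hash takes a nullable function pointer that unit tests fill in and production leaves empty, and the guard stays compiled in on Windows because the test suite there also runs release binaries. Everything below is a hedged stand-in (std::string for Slice, std::hash for the real hash), not RocksDB's code.

#include <cstdint>
#include <functional>
#include <string>

static uint64_t RealHash(const std::string& key, uint32_t hash_cnt,
                         uint64_t table_size) {
  // Stand-in for the production hash.
  return (std::hash<std::string>()(key) + hash_cnt) % table_size;
}

uint64_t HashWithTestHook(
    const std::string& key, uint32_t hash_cnt, uint64_t table_size,
    uint64_t (*get_slice_hash)(const std::string&, uint32_t, uint64_t)) {
  if (get_slice_hash != nullptr) {
    // Unit tests inject a deterministic hash here.
    return get_slice_hash(key, hash_cnt, table_size);
  }
  return RealHash(key, hash_cnt, table_size);
}

int main() {
  auto fake = [](const std::string&, uint32_t, uint64_t) -> uint64_t {
    return 7;
  };
  return HashWithTestHook("key01", 0, 16, +fake) == 7 ? 0 : 1;
}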

@ -182,19 +182,17 @@ struct BlockContents {
BlockContents() : cachable(false), compression_type(kNoCompression) {}
BlockContents(const Slice& _data, bool _cachable,
CompressionType _compression_type)
: data(_data), cachable(_cachable), compression_type(_compression_type) {}
CompressionType _compression_type)
: data(_data), cachable(_cachable), compression_type(_compression_type) {}
BlockContents(std::unique_ptr<char[]>&& _data, size_t _size, bool _cachable,
CompressionType _compression_type)
: data(_data.get(), _size),
cachable(_cachable),
compression_type(_compression_type),
allocation(std::move(_data)) {}
BlockContents(BlockContents&& other) {
*this = std::move(other);
}
CompressionType _compression_type)
: data(_data.get(), _size),
cachable(_cachable),
compression_type(_compression_type),
allocation(std::move(_data)) {}
BlockContents(BlockContents&& other) { *this = std::move(other); }
BlockContents& operator=(BlockContents&& other) {
data = std::move(other.data);

@ -203,8 +203,8 @@ Slice PlainTableIndexBuilder::FillIndexes(
assert(sub_index_offset == sub_index_size_);
Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log,
"hash table size: %d, suffix_map length %" ROCKSDB_PRIszt,
index_size_, sub_index_size_);
"hash table size: %d, suffix_map length %" ROCKSDB_PRIszt, index_size_,
sub_index_size_);
return Slice(allocated, GetTotalSize());
}
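
Many hunks in this commit only re-wrap Log/printf calls around ROCKSDB_PRIszt, the portable printf length specifier RocksDB uses for size_t. A hedged stand-in definition (the real one lives in the port headers; the exact values below are an assumption) and a usage sketch:

#include <cstddef>
#include <cstdio>

// Assumed to mirror the port headers: "%zu" was unavailable on the MSVC
// runtimes targeted by the Windows port, which spell it "%Iu".
#ifdef _MSC_VER
#define ROCKSDB_PRIszt "Iu"
#else
#define ROCKSDB_PRIszt "zu"
#endif

int main() {
  size_t index_size = 1024;
  size_t sub_index_size = 4096;
  printf("hash table size: %" ROCKSDB_PRIszt ", suffix_map length %" ROCKSDB_PRIszt "\n",
         index_size, sub_index_size);
  return 0;
}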

@ -472,7 +472,7 @@ class BlobVal : public FbsonValue {
BlobVal();
private:
private:
// Disable as this class can only be allocated dynamically
BlobVal(const BlobVal&) = delete;
BlobVal& operator=(const BlobVal&) = delete;

@ -31,10 +31,9 @@
#endif
#if defined OS_WIN && !defined snprintf
# define snprintf _snprintf
#define snprintf _snprintf
#endif
#include <inttypes.h>
#include <iostream>
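
For reference, a small self-contained sketch of the snprintf shim being re-indented above: pre-C99 MSVC runtimes only shipped _snprintf, so Windows builds alias one to the other (with the caveat that _snprintf does not guarantee null termination on truncation). OS_WIN is RocksDB's own platform macro.

#include <cstdio>

#if defined(OS_WIN) && !defined(snprintf)
#define snprintf _snprintf
#endif

int main() {
  char buf[64];
  snprintf(buf, sizeof(buf), "Read : %d Written : %d\n", 10, 10);
  fputs(buf, stderr);
  return 0;
}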

@ -137,8 +137,10 @@ int main(int argc, const char** argv) {
replThread.stop.store(true, std::memory_order_release);
if (replThread.no_read < dataPump.no_records) {
// no. read should be => than inserted.
fprintf(stderr, "No. of Record's written and read not same\nRead : %" ROCKSDB_PRIszt
" Written : %" ROCKSDB_PRIszt "\n", replThread.no_read, dataPump.no_records);
fprintf(stderr,
"No. of Record's written and read not same\nRead : %" ROCKSDB_PRIszt
" Written : %" ROCKSDB_PRIszt "\n",
replThread.no_read, dataPump.no_records);
exit(1);
}
fprintf(stderr, "Successful!\n");

@ -770,7 +770,7 @@ class SharedState {
std::vector<std::vector<uint32_t>> values_;
// Has to make it owned by a smart ptr as port::Mutex is not copyable
// and storing it in the container may require copying depending on the impl.
std::vector<std::vector<std::unique_ptr<port::Mutex>>> key_locks_;
std::vector<std::vector<std::unique_ptr<port::Mutex> > > key_locks_;
};
const uint32_t SharedState::SENTINEL = 0xffffffff;
@ -937,115 +937,90 @@ class StressTest {
return true;
}
std::unordered_map<std::string, std::vector<std::string>> options_tbl = {
{"write_buffer_size",
{
ToString(FLAGS_write_buffer_size),
std::unordered_map<std::string, std::vector<std::string> > options_tbl = {
{"write_buffer_size",
{ToString(FLAGS_write_buffer_size),
ToString(FLAGS_write_buffer_size * 2),
ToString(FLAGS_write_buffer_size * 4)
}
},
{"max_write_buffer_number",
{
ToString(FLAGS_max_write_buffer_number),
ToString(FLAGS_write_buffer_size * 4)}},
{"max_write_buffer_number",
{ToString(FLAGS_max_write_buffer_number),
ToString(FLAGS_max_write_buffer_number * 2),
ToString(FLAGS_max_write_buffer_number * 4)
}
},
{"arena_block_size",
{
ToString(Options().arena_block_size),
ToString(FLAGS_write_buffer_size / 4),
ToString(FLAGS_write_buffer_size / 8),
}
},
{"memtable_prefix_bloom_bits", {"0", "8", "10"}},
{"memtable_prefix_bloom_probes", {"4", "5", "6"}},
{"memtable_prefix_bloom_huge_page_tlb_size",
{
"0",
ToString(2 * 1024 * 1024)
}
},
{"max_successive_merges", {"0", "2", "4"}},
{"filter_deletes", {"0", "1"}},
{"inplace_update_num_locks", {"100", "200", "300"}},
// TODO(ljin): enable test for this option
// {"disable_auto_compactions", {"100", "200", "300"}},
{"soft_rate_limit", {"0", "0.5", "0.9"}},
{"hard_rate_limit", {"0", "1.1", "2.0"}},
{"level0_file_num_compaction_trigger",
{
ToString(FLAGS_level0_file_num_compaction_trigger),
ToString(FLAGS_level0_file_num_compaction_trigger + 2),
ToString(FLAGS_level0_file_num_compaction_trigger + 4),
}
},
{"level0_slowdown_writes_trigger",
{
ToString(FLAGS_level0_slowdown_writes_trigger),
ToString(FLAGS_level0_slowdown_writes_trigger + 2),
ToString(FLAGS_level0_slowdown_writes_trigger + 4),
}
},
{"level0_stop_writes_trigger",
{
ToString(FLAGS_level0_stop_writes_trigger),
ToString(FLAGS_level0_stop_writes_trigger + 2),
ToString(FLAGS_level0_stop_writes_trigger + 4),
}
},
{"max_grandparent_overlap_factor",
{
ToString(Options().max_grandparent_overlap_factor - 5),
ToString(Options().max_grandparent_overlap_factor),
ToString(Options().max_grandparent_overlap_factor + 5),
}
},
{"expanded_compaction_factor",
{
ToString(Options().expanded_compaction_factor - 5),
ToString(Options().expanded_compaction_factor),
ToString(Options().expanded_compaction_factor + 5),
}
},
{"source_compaction_factor",
{
ToString(Options().source_compaction_factor),
ToString(Options().source_compaction_factor * 2),
ToString(Options().source_compaction_factor * 4),
}
},
{"target_file_size_base",
{
ToString(FLAGS_target_file_size_base),
ToString(FLAGS_target_file_size_base * 2),
ToString(FLAGS_target_file_size_base * 4),
}
},
{"target_file_size_multiplier",
{
ToString(FLAGS_target_file_size_multiplier),
"1",
"2",
}
},
{"max_bytes_for_level_base",
{
ToString(FLAGS_max_bytes_for_level_base / 2),
ToString(FLAGS_max_bytes_for_level_base),
ToString(FLAGS_max_bytes_for_level_base * 2),
}
},
{"max_bytes_for_level_multiplier",
{
ToString(FLAGS_max_bytes_for_level_multiplier),
"1",
"2",
}
},
{"max_mem_compaction_level", {"0", "1", "2"}},
{"max_sequential_skip_in_iterations", {"4", "8", "12"}},
ToString(FLAGS_max_write_buffer_number * 4)}},
{"arena_block_size",
{
ToString(Options().arena_block_size),
ToString(FLAGS_write_buffer_size / 4),
ToString(FLAGS_write_buffer_size / 8),
}},
{"memtable_prefix_bloom_bits", {"0", "8", "10"}},
{"memtable_prefix_bloom_probes", {"4", "5", "6"}},
{"memtable_prefix_bloom_huge_page_tlb_size",
{"0", ToString(2 * 1024 * 1024)}},
{"max_successive_merges", {"0", "2", "4"}},
{"filter_deletes", {"0", "1"}},
{"inplace_update_num_locks", {"100", "200", "300"}},
// TODO(ljin): enable test for this option
// {"disable_auto_compactions", {"100", "200", "300"}},
{"soft_rate_limit", {"0", "0.5", "0.9"}},
{"hard_rate_limit", {"0", "1.1", "2.0"}},
{"level0_file_num_compaction_trigger",
{
ToString(FLAGS_level0_file_num_compaction_trigger),
ToString(FLAGS_level0_file_num_compaction_trigger + 2),
ToString(FLAGS_level0_file_num_compaction_trigger + 4),
}},
{"level0_slowdown_writes_trigger",
{
ToString(FLAGS_level0_slowdown_writes_trigger),
ToString(FLAGS_level0_slowdown_writes_trigger + 2),
ToString(FLAGS_level0_slowdown_writes_trigger + 4),
}},
{"level0_stop_writes_trigger",
{
ToString(FLAGS_level0_stop_writes_trigger),
ToString(FLAGS_level0_stop_writes_trigger + 2),
ToString(FLAGS_level0_stop_writes_trigger + 4),
}},
{"max_grandparent_overlap_factor",
{
ToString(Options().max_grandparent_overlap_factor - 5),
ToString(Options().max_grandparent_overlap_factor),
ToString(Options().max_grandparent_overlap_factor + 5),
}},
{"expanded_compaction_factor",
{
ToString(Options().expanded_compaction_factor - 5),
ToString(Options().expanded_compaction_factor),
ToString(Options().expanded_compaction_factor + 5),
}},
{"source_compaction_factor",
{
ToString(Options().source_compaction_factor),
ToString(Options().source_compaction_factor * 2),
ToString(Options().source_compaction_factor * 4),
}},
{"target_file_size_base",
{
ToString(FLAGS_target_file_size_base),
ToString(FLAGS_target_file_size_base * 2),
ToString(FLAGS_target_file_size_base * 4),
}},
{"target_file_size_multiplier",
{
ToString(FLAGS_target_file_size_multiplier), "1", "2",
}},
{"max_bytes_for_level_base",
{
ToString(FLAGS_max_bytes_for_level_base / 2),
ToString(FLAGS_max_bytes_for_level_base),
ToString(FLAGS_max_bytes_for_level_base * 2),
}},
{"max_bytes_for_level_multiplier",
{
ToString(FLAGS_max_bytes_for_level_multiplier), "1", "2",
}},
{"max_mem_compaction_level", {"0", "1", "2"}},
{"max_sequential_skip_in_iterations", {"4", "8", "12"}},
};
options_table_ = std::move(options_tbl);
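
The key_locks_ change earlier in this file's hunks only re-spaces the template brackets, but the comment above it explains the shape: port::Mutex is neither copyable nor movable, so the stress test keeps each lock behind a unique_ptr. A minimal sketch with std::mutex standing in for port::Mutex:

#include <memory>
#include <mutex>
#include <vector>

int main() {
  // A vector<std::mutex> cannot be grown, since resizing may need to move or
  // copy elements; unique_ptr elements keep the container resizable.
  std::vector<std::vector<std::unique_ptr<std::mutex>>> key_locks;
  key_locks.resize(4);
  for (auto& per_cf : key_locks) {
    for (int i = 0; i < 8; ++i) {
      per_cf.emplace_back(new std::mutex());
    }
  }
  std::lock_guard<std::mutex> guard(*key_locks[0][0]);
  return 0;
}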

@ -205,7 +205,8 @@ TEST_F(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) {
}
#ifndef OS_WIN
//TODO: does not build for Windows because of PosixLogger use below. Need to port
// TODO: does not build for Windows because of PosixLogger use below. Need to
// port
TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) {
DBOptions options;
shared_ptr<Logger> logger;

@ -242,8 +242,7 @@ class autovector {
void push_back(const T& item) {
if (num_stack_items_ < kSize) {
values_[num_stack_items_++] = item;
}
else {
} else {
vect_.push_back(item);
}
}
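
The branch being re-braced here is the heart of autovector's small-buffer design: the first kSize items live in an in-object array and any overflow spills into a heap-backed std::vector. A stripped-down sketch (the real class also covers iterators, emplace, moves, and more):

#include <cstddef>
#include <vector>

template <class T, size_t kSize = 8>
class TinyAutoVector {
 public:
  void push_back(const T& item) {
    if (num_stack_items_ < kSize) {
      values_[num_stack_items_++] = item;  // fast path: in-object storage
    } else {
      vect_.push_back(item);  // overflow path: heap-backed vector
    }
  }
  size_t size() const { return num_stack_items_ + vect_.size(); }

 private:
  size_t num_stack_items_ = 0;
  T values_[kSize];
  std::vector<T> vect_;
};

int main() {
  TinyAutoVector<int, 2> v;
  for (int i = 0; i < 5; ++i) v.push_back(i);
  return v.size() == 5 ? 0 : 1;
}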

@ -1827,7 +1827,8 @@ class PosixEnv : public Env {
#if defined(_GNU_SOURCE) && defined(__GLIBC_PREREQ)
#if __GLIBC_PREREQ(2, 12)
char name_buf[16];
snprintf(name_buf, sizeof name_buf, "rocksdb:bg%" ROCKSDB_PRIszt, bgthreads_.size());
snprintf(name_buf, sizeof name_buf, "rocksdb:bg%" ROCKSDB_PRIszt,
bgthreads_.size());
name_buf[sizeof name_buf - 1] = '\0';
pthread_setname_np(t, name_buf);
#endif
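
The call being re-wrapped here names background threads so they are identifiable in tools such as top -H or gdb. A hedged standalone sketch (glibc-specific: the name must fit in 16 bytes including the terminator, and plain %zu plays the role ROCKSDB_PRIszt plays in the tree):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <pthread.h>
#include <cstdio>

static void* BGWork(void*) { return nullptr; }

int main() {
  pthread_t t;
  pthread_create(&t, nullptr, &BGWork, nullptr);
  char name_buf[16];
  snprintf(name_buf, sizeof name_buf, "rocksdb:bg%zu", static_cast<size_t>(3));
  name_buf[sizeof name_buf - 1] = '\0';
  pthread_setname_np(t, name_buf);
  pthread_join(t, nullptr);
  return 0;
}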

@ -7,11 +7,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <sys/types.h>
#ifndef OS_WIN
# include <sys/ioctl.h>
#include <sys/ioctl.h>
#endif
#include <sys/types.h>
#include <iostream>
#include <unordered_set>
@ -860,7 +859,7 @@ class TestLogger : public Logger {
int n = vsnprintf(new_format, sizeof(new_format) - 1, format, backup_ap);
// 48 bytes for extra information + bytes allocated
// When we have n == -1 there is not a terminating zero expected
// When we have n == -1 there is not a terminating zero expected
#ifdef OS_WIN
if (n < 0) {
char_0_count++;

@ -40,17 +40,13 @@ struct CuckooStep {
CuckooStep() : bucket_id_(-1), prev_step_id_(kNullStep), depth_(1) {}
// MSVC does not support = default yet
CuckooStep(CuckooStep&& o)
{
*this = std::move(o);
}
CuckooStep& operator=(CuckooStep&& rhs)
{
bucket_id_ = std::move(rhs.bucket_id_);
prev_step_id_ = std::move(rhs.prev_step_id_);
depth_ = std::move(rhs.depth_);
return *this;
CuckooStep(CuckooStep&& o) { *this = std::move(o); }
CuckooStep& operator=(CuckooStep&& rhs) {
bucket_id_ = std::move(rhs.bucket_id_);
prev_step_id_ = std::move(rhs.prev_step_id_);
depth_ = std::move(rhs.depth_);
return *this;
}
CuckooStep(const CuckooStep&) = delete;
@ -411,8 +407,8 @@ bool HashCuckooRep::QuickInsert(const char* internal_key, const Slice& user_key,
}
if (cuckoo_bucket_id != -1) {
cuckoo_array_[cuckoo_bucket_id]
.store(const_cast<char*>(internal_key), std::memory_order_release);
cuckoo_array_[cuckoo_bucket_id].store(const_cast<char*>(internal_key),
std::memory_order_release);
return true;
}
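
The "MSVC does not support = default yet" comment explains why several classes touched by this commit (CuckooStep here, BlockContents and the backup work items elsewhere) hand-write their move members and have the move constructor forward to move assignment. A minimal sketch of that pattern:

#include <utility>
#include <vector>

struct Payload {
  std::vector<int> data;
  int id = 0;

  Payload() = default;
  // Forward to the hand-written move assignment instead of "= default".
  Payload(Payload&& o) { *this = std::move(o); }
  Payload& operator=(Payload&& rhs) {
    data = std::move(rhs.data);
    id = rhs.id;
    return *this;
  }
  Payload(const Payload&) = delete;
  Payload& operator=(const Payload&) = delete;
};

int main() {
  Payload a;
  a.data = {1, 2, 3};
  Payload b(std::move(a));
  return b.data.size() == 3 ? 0 : 1;
}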

@ -594,9 +594,9 @@ void HashLinkListRep::Insert(KeyHandle handle) {
if (bucket_entries_logging_threshold_ > 0 &&
header->GetNumEntries() ==
static_cast<uint32_t>(bucket_entries_logging_threshold_)) {
Info(logger_,
"HashLinkedList bucket %" ROCKSDB_PRIszt " has more than %d "
"entries. Key to insert: %s",
Info(logger_, "HashLinkedList bucket %" ROCKSDB_PRIszt
" has more than %d "
"entries. Key to insert: %s",
GetHash(transformed), header->GetNumEntries(),
GetLengthPrefixedSlice(x->key).ToString(true).c_str());
}

@ -54,9 +54,7 @@ class HistogramBucketMapper {
class HistogramImpl {
public:
HistogramImpl() {
memset(buckets_, 0, sizeof(buckets_));
}
HistogramImpl() { memset(buckets_, 0, sizeof(buckets_)); }
virtual void Clear();
virtual bool Empty();
virtual void Add(uint64_t value);

@ -590,7 +590,8 @@ void ManifestDumpCommand::DoCommand() {
// containing the db for files of the form MANIFEST_[0-9]+
auto CloseDir = [](DIR* p) { closedir(p); };
std::unique_ptr<DIR, decltype(CloseDir)> d(opendir(db_path_.c_str()), CloseDir);
std::unique_ptr<DIR, decltype(CloseDir)> d(opendir(db_path_.c_str()),
CloseDir);
if (d == nullptr) {
exec_state_ =
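
The reformatted line above is an instance of a handy idiom: a unique_ptr with a lambda deleter so closedir() runs on every return path. A small POSIX-only sketch:

#include <dirent.h>
#include <memory>
#include <string>

int CountEntries(const std::string& db_path) {
  auto CloseDir = [](DIR* p) { closedir(p); };
  std::unique_ptr<DIR, decltype(CloseDir)> d(opendir(db_path.c_str()),
                                             CloseDir);
  if (d == nullptr) {
    return -1;  // could not open the directory
  }
  int count = 0;
  while (readdir(d.get()) != nullptr) {
    ++count;  // counts "." and ".." too; filter as needed
  }
  return count;  // unique_ptr invokes closedir() here
}

int main() { return CountEntries(".") >= 0 ? 0 : 1; }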

@ -357,13 +357,9 @@ private:
* Otherwise an exception is thrown.
*/
bool StringToBool(string val) {
std::transform(val.begin(), val.end(), val.begin(),
[](char ch) -> char
{
return ::tolower(ch);
});
[](char ch) -> char { return ::tolower(ch); });
if (val == "true") {
return true;
} else if (val == "false") {

@ -63,10 +63,12 @@ uint64_t MutableCFOptions::ExpandedCompactionByteSizeLimit(int level) const {
void MutableCFOptions::Dump(Logger* log) const {
// Memtable related options
Log(log, " write_buffer_size: %" ROCKSDB_PRIszt, write_buffer_size);
Log(log, " write_buffer_size: %" ROCKSDB_PRIszt,
write_buffer_size);
Log(log, " max_write_buffer_number: %d",
max_write_buffer_number);
Log(log, " arena_block_size: %" ROCKSDB_PRIszt, arena_block_size);
Log(log, " arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
Log(log, " memtable_prefix_bloom_bits: %" PRIu32,
memtable_prefix_bloom_bits);
Log(log, " memtable_prefix_bloom_probes: %" PRIu32,

@ -307,11 +307,14 @@ void DBOptions::Dump(Logger* log) const {
Warn(log, " Options.max_total_wal_size: %" PRIu64, max_total_wal_size);
Warn(log, " Options.disableDataSync: %d", disableDataSync);
Warn(log, " Options.use_fsync: %d", use_fsync);
Warn(log, " Options.max_log_file_size: %" ROCKSDB_PRIszt, max_log_file_size);
Warn(log, "Options.max_manifest_file_size: %lu",
(unsigned long)max_manifest_file_size);
Warn(log, " Options.log_file_time_to_roll: %" ROCKSDB_PRIszt, log_file_time_to_roll);
Warn(log, " Options.keep_log_file_num: %" ROCKSDB_PRIszt, keep_log_file_num);
Warn(log, " Options.max_log_file_size: %" ROCKSDB_PRIszt,
max_log_file_size);
Warn(log, "Options.max_manifest_file_size: %" PRIu64,
max_manifest_file_size);
Warn(log, " Options.log_file_time_to_roll: %" ROCKSDB_PRIszt,
log_file_time_to_roll);
Warn(log, " Options.keep_log_file_num: %" ROCKSDB_PRIszt,
keep_log_file_num);
Warn(log, " Options.allow_os_buffer: %d", allow_os_buffer);
Warn(log, " Options.allow_mmap_reads: %d", allow_mmap_reads);
Warn(log, " Options.allow_mmap_writes: %d", allow_mmap_writes);
@ -333,8 +336,9 @@ void DBOptions::Dump(Logger* log) const {
WAL_ttl_seconds);
Warn(log, " Options.WAL_size_limit_MB: %" PRIu64,
WAL_size_limit_MB);
Warn(log, " Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
manifest_preallocation_size);
Warn(log,
" Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
manifest_preallocation_size);
Warn(log, " Options.allow_os_buffer: %d",
allow_os_buffer);
Warn(log, " Options.allow_mmap_reads: %d",
@ -347,8 +351,10 @@ void DBOptions::Dump(Logger* log) const {
stats_dump_period_sec);
Warn(log, " Options.advise_random_on_open: %d",
advise_random_on_open);
Warn(log, " Options.db_write_buffer_size: %" ROCKSDB_PRIszt "d",
db_write_buffer_size);
Warn(log,
" Options.db_write_buffer_size: %" ROCKSDB_PRIszt
"d",
db_write_buffer_size);
Warn(log, " Options.access_hint_on_compaction_start: %s",
access_hints[access_hint_on_compaction_start]);
Warn(log, " Options.use_adaptive_mutex: %d",
@ -384,7 +390,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
Warn(log, " Options.table_factory: %s", table_factory->Name());
Warn(log, " table_factory options: %s",
table_factory->GetPrintableTableOptions().c_str());
Warn(log, " Options.write_buffer_size: %" ROCKSDB_PRIszt, write_buffer_size);
Warn(log, " Options.write_buffer_size: %" ROCKSDB_PRIszt,
write_buffer_size);
Warn(log, " Options.max_write_buffer_number: %d", max_write_buffer_number);
if (!compression_per_level.empty()) {
for (unsigned int i = 0; i < compression_per_level.size(); i++) {
@ -430,8 +437,9 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
max_bytes_for_level_multiplier);
for (size_t i = 0; i < max_bytes_for_level_multiplier_additional.size();
i++) {
Warn(log, "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt "]: %d", i,
max_bytes_for_level_multiplier_additional[i]);
Warn(log, "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt
"]: %d",
i, max_bytes_for_level_multiplier_additional[i]);
}
Warn(log, " Options.max_sequential_skip_in_iterations: %" PRIu64,
max_sequential_skip_in_iterations);
@ -442,8 +450,9 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
Warn(log, " Options.max_grandparent_overlap_factor: %d",
max_grandparent_overlap_factor);
Warn(log, " Options.arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
Warn(log,
" Options.arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
Warn(log, " Options.soft_rate_limit: %.2f",
soft_rate_limit);
Warn(log, " Options.hard_rate_limit: %.2f",
@ -483,8 +492,9 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
collector_names.c_str());
Warn(log, " Options.inplace_update_support: %d",
inplace_update_support);
Warn(log, " Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
inplace_update_num_locks);
Warn(log,
" Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
inplace_update_num_locks);
Warn(log, " Options.min_partial_merge_operands: %u",
min_partial_merge_operands);
// TODO: easier config for bloom (maybe based on avg key/value size)
@ -493,13 +503,15 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
Warn(log, " Options.memtable_prefix_bloom_probes: %d",
memtable_prefix_bloom_probes);
Warn(log, " Options.memtable_prefix_bloom_huge_page_tlb_size: %" ROCKSDB_PRIszt,
memtable_prefix_bloom_huge_page_tlb_size);
Warn(log,
" Options.memtable_prefix_bloom_huge_page_tlb_size: %" ROCKSDB_PRIszt,
memtable_prefix_bloom_huge_page_tlb_size);
Warn(log, " Options.bloom_locality: %d",
bloom_locality);
Warn(log, " Options.max_successive_merges: %" ROCKSDB_PRIszt,
max_successive_merges);
Warn(log,
" Options.max_successive_merges: %" ROCKSDB_PRIszt,
max_successive_merges);
Warn(log, " Options.optimize_fllters_for_hits: %d",
optimize_filters_for_hits);
} // ColumnFamilyOptions::Dump

@ -277,8 +277,7 @@ Status GetMutableOptionsFromStrings(
namespace {
std::string trim(const std::string& str) {
if (str.empty())
return std::string();
if (str.empty()) return std::string();
size_t start = 0;
size_t end = str.size() - 1;
while (isspace(str[start]) != 0 && start <= end) {
@ -564,8 +563,7 @@ bool ParseDBOption(const std::string& name, const std::string& value,
} else {
return false;
}
}
catch (const std::exception& e) {
} catch (const std::exception& e) {
return false;
}
return true;
@ -680,7 +678,8 @@ Status GetPlainTableOptionsFromMap(
} else if (o.first == "full_scan_mode") {
new_table_options->full_scan_mode = ParseBoolean(o.first, o.second);
} else if (o.first == "store_index_in_file") {
new_table_options->store_index_in_file = ParseBoolean(o.first, o.second);
new_table_options->store_index_in_file =
ParseBoolean(o.first, o.second);
} else {
return Status::InvalidArgument("Unrecognized option: " + o.first);
}

@ -51,12 +51,12 @@ Options PrintAndGetOptions(size_t total_write_buffer_limit,
StderrLogger logger;
if (FLAGS_enable_print) {
printf(
"---- total_write_buffer_limit: %" ROCKSDB_PRIszt " "
"read_amplification_threshold: %d write_amplification_threshold: %d "
"target_db_size %" PRIu64 " ----\n",
total_write_buffer_limit, read_amplification_threshold,
write_amplification_threshold, target_db_size);
printf("---- total_write_buffer_limit: %" ROCKSDB_PRIszt
" "
"read_amplification_threshold: %d write_amplification_threshold: %d "
"target_db_size %" PRIu64 " ----\n",
total_write_buffer_limit, read_amplification_threshold,
write_amplification_threshold, target_db_size);
}
Options options =
@ -337,14 +337,14 @@ TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) {
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
"memtable_prefix_bloom_bits=14k;max_write_buffer_number=-15K",
&new_cf_opt));
ASSERT_EQ(new_cf_opt.memtable_prefix_bloom_bits, 14UL*kilo);
ASSERT_EQ(new_cf_opt.max_write_buffer_number, -15*kilo);
ASSERT_EQ(new_cf_opt.memtable_prefix_bloom_bits, 14UL * kilo);
ASSERT_EQ(new_cf_opt.max_write_buffer_number, -15 * kilo);
// Units (m)
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
"max_write_buffer_number=16m;inplace_update_num_locks=17M",
&new_cf_opt));
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16*mega);
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17*mega);
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16 * mega);
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17 * mega);
// Units (g)
ASSERT_OK(GetColumnFamilyOptionsFromString(
base_cf_opt,
@ -352,8 +352,8 @@ TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) {
"arena_block_size=19G",
&new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 18*giga);
ASSERT_EQ(new_cf_opt.arena_block_size, 19*giga);
ASSERT_EQ(new_cf_opt.write_buffer_size, 18 * giga);
ASSERT_EQ(new_cf_opt.arena_block_size, 19 * giga);
ASSERT_TRUE(new_cf_opt.prefix_extractor.get() != nullptr);
std::string prefix_name(new_cf_opt.prefix_extractor->Name());
ASSERT_EQ(prefix_name, "rocksdb.CappedPrefix.8");
@ -361,8 +361,8 @@ TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) {
// Units (t)
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
"write_buffer_size=20t;arena_block_size=21T", &new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 20*tera);
ASSERT_EQ(new_cf_opt.arena_block_size, 21*tera);
ASSERT_EQ(new_cf_opt.write_buffer_size, 20 * tera);
ASSERT_EQ(new_cf_opt.arena_block_size, 21 * tera);
// Nested block based table options
// Emtpy
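
The assertions above exercise the unit suffixes the string-based options parser accepts (k/K, m/M, g/G, t/T scale the number by the test's kilo/mega/giga/tera multipliers). A hedged usage sketch against the public API; the header path is an assumption, since it has moved between releases:

#include <string>

#include "rocksdb/options.h"
#include "rocksdb/utilities/convenience.h"  // assumed location in this era

int main() {
  rocksdb::ColumnFamilyOptions base_cf_opt;
  rocksdb::ColumnFamilyOptions new_cf_opt;
  rocksdb::Status s = rocksdb::GetColumnFamilyOptionsFromString(
      base_cf_opt, "max_write_buffer_number=16m;write_buffer_size=18g",
      &new_cf_opt);
  // "16m" is expanded before being stored in the int field, exactly as the
  // ASSERT_EQ lines above check against 16 * mega.
  return s.ok() ? 0 : 1;
}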

@ -96,12 +96,12 @@ class NoopTransform : public SliceTransform {
// Do not want to include the whole /port/port.h here for one define
#ifdef OS_WIN
# define snprintf _snprintf
#define snprintf _snprintf
#endif
// Return a string that contains the copy of the referenced data.
std::string Slice::ToString(bool hex) const {
std::string result; // RVO/NRVO/move
std::string result; // RVO/NRVO/move
if (hex) {
char buf[10];
for (size_t i = 0; i < size_; i++) {
@ -115,7 +115,6 @@ std::string Slice::ToString(bool hex) const {
}
}
const SliceTransform* NewFixedPrefixTransform(size_t prefix_len) {
return new FixedPrefixTransform(prefix_len);
}

@ -25,19 +25,23 @@ __thread ThreadLocalPtr::ThreadData* ThreadLocalPtr::StaticMeta::tls_ = nullptr;
// See http://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way
// and http://www.nynaeve.net/?p=183
//
// really we do this to have clear conscience since using TLS with thread-pools is iffy
// although OK within a request. But otherwise, threads have no identity in its modern use.
// really we do this to have clear conscience since using TLS with thread-pools
// is iffy
// although OK within a request. But otherwise, threads have no identity in its
// modern use.
// This runs on windows only called from the System Loader
#ifdef OS_WIN
// Windows cleanup routine is invoked from a System Loader with a different
// signature so we can not directly hookup the original OnThreadExit which is private member
// so we make StaticMeta class share with the us the address of the function so we can invoke it.
// signature so we can not directly hookup the original OnThreadExit which is
// private member
// so we make StaticMeta class share with the us the address of the function so
// we can invoke it.
namespace wintlscleanup {
// This is set to OnThreadExit in StaticMeta singleton constructor
UnrefHandler thread_local_inclass_routine = nullptr;
UnrefHandler thread_local_inclass_routine = nullptr;
pthread_key_t thread_local_key = -1;
// Static callback function to call with each thread termination.
@ -46,26 +50,26 @@ void NTAPI WinOnThreadExit(PVOID module, DWORD reason, PVOID reserved) {
if (DLL_THREAD_DETACH == reason) {
if (thread_local_key != -1 && thread_local_inclass_routine != nullptr) {
void* tls = pthread_getspecific(thread_local_key);
if(tls != nullptr) {
if (tls != nullptr) {
thread_local_inclass_routine(tls);
}
}
}
}
} // wintlscleanup
} // wintlscleanup
# ifdef _WIN64
#ifdef _WIN64
# pragma comment(linker, "/include:_tls_used")
# pragma comment(linker, "/include:p_thread_callback_on_exit")
#pragma comment(linker, "/include:_tls_used")
#pragma comment(linker, "/include:p_thread_callback_on_exit")
#else // _WIN64
# pragma comment(linker, "/INCLUDE:__tls_used")
# pragma comment(linker, "/INCLUDE:_p_thread_callback_on_exit")
#pragma comment(linker, "/INCLUDE:__tls_used")
#pragma comment(linker, "/INCLUDE:_p_thread_callback_on_exit")
# endif // _WIN64
#endif // _WIN64
// extern "C" suppresses C++ name mangling so we know the symbol name for the
// linker /INCLUDE:symbol pragma above.
@ -81,7 +85,8 @@ extern "C" {
// When defining a const variable, it must have external linkage to be sure the
// linker doesn't discard it.
extern const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit;
const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit = wintlscleanup::WinOnThreadExit;
const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit =
wintlscleanup::WinOnThreadExit;
// Reset the default section.
#pragma const_seg()
@ -96,7 +101,7 @@ PIMAGE_TLS_CALLBACK p_thread_callback_on_exit = wintlscleanup::WinOnThreadExit;
} // extern "C"
#endif // OS_WIN
#endif // OS_WIN
ThreadLocalPtr::StaticMeta* ThreadLocalPtr::Instance() {
static ThreadLocalPtr::StaticMeta inst;
@ -136,7 +141,7 @@ ThreadLocalPtr::StaticMeta::StaticMeta() : next_instance_id_(0) {
head_.prev = &head_;
#ifdef OS_WIN
// Share with Windows its cleanup routine and the key
// Share with Windows its cleanup routine and the key
wintlscleanup::thread_local_inclass_routine = OnThreadExit;
wintlscleanup::thread_local_key = pthread_key_;
#endif
@ -160,8 +165,8 @@ void ThreadLocalPtr::StaticMeta::RemoveThreadData(
ThreadLocalPtr::ThreadData* ThreadLocalPtr::StaticMeta::GetThreadLocal() {
#if defined(OS_MACOSX) || defined(OS_WIN)
// Make this local variable name look like a member variable so that we
// can share all the code below
// Make this local variable name look like a member variable so that we
// can share all the code below
ThreadData* tls_ =
static_cast<ThreadData*>(pthread_getspecific(Instance()->pthread_key_));
#endif

@ -17,7 +17,6 @@
#include "util/autovector.h"
#include "port/port.h"
namespace rocksdb {
// Cleanup function that will be called for a stored thread local

@ -333,9 +333,7 @@ class BackupEngineImpl : public BackupEngine {
CopyWorkItem(const CopyWorkItem&) = delete;
CopyWorkItem& operator=(const CopyWorkItem&) = delete;
CopyWorkItem(CopyWorkItem&& o) {
*this = std::move(o);
}
CopyWorkItem(CopyWorkItem&& o) { *this = std::move(o); }
CopyWorkItem& operator=(CopyWorkItem&& o) {
src_path = std::move(o.src_path);
@ -390,12 +388,9 @@ class BackupEngineImpl : public BackupEngine {
return *this;
}
BackupAfterCopyWorkItem(std::future<CopyResult>&& _result,
bool _shared,
bool _needed_to_copy,
Env* _backup_env,
std::string _dst_path_tmp,
std::string _dst_path,
BackupAfterCopyWorkItem(std::future<CopyResult>&& _result, bool _shared,
bool _needed_to_copy, Env* _backup_env,
std::string _dst_path_tmp, std::string _dst_path,
std::string _dst_relative)
: result(std::move(_result)),
shared(_shared),
@ -412,8 +407,7 @@ class BackupEngineImpl : public BackupEngine {
RestoreAfterCopyWorkItem() {}
RestoreAfterCopyWorkItem(std::future<CopyResult>&& _result,
uint32_t _checksum_value)
: result(std::move(_result)),
checksum_value(_checksum_value) {}
: result(std::move(_result)), checksum_value(_checksum_value) {}
RestoreAfterCopyWorkItem(RestoreAfterCopyWorkItem&& o) {
*this = std::move(o);
}
@ -1561,7 +1555,8 @@ Status BackupEngineImpl::BackupMeta::StoreToFile(bool sync) {
len += snprintf(buf.get(), buf_size, "%" PRId64 "\n", timestamp_);
len += snprintf(buf.get() + len, buf_size - len, "%" PRIu64 "\n",
sequence_number_);
len += snprintf(buf.get() + len, buf_size - len, "%" ROCKSDB_PRIszt "\n", files_.size());
len += snprintf(buf.get() + len, buf_size - len, "%" ROCKSDB_PRIszt "\n",
files_.size());
for (const auto& file : files_) {
// use crc32 for now, switch to something else if needed
len += snprintf(buf.get() + len, buf_size - len, "%s crc32 %u\n",

@ -8,10 +8,10 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// Syncpoint prevents us building and running tests in release
#if !defined(NDEBUG) || !defined (OS_WIN)
#if !defined(NDEBUG) || !defined(OS_WIN)
#ifndef OS_WIN
# include <unistd.h>
#include <unistd.h>
#endif
#include <iostream>
#include <thread>
@ -351,7 +351,7 @@ TEST_F(DBTest, CheckpointCF) {
#endif
int main(int argc, char** argv) {
#if !defined(NDEBUG) || !defined (OS_WIN)
#if !defined(NDEBUG) || !defined(OS_WIN)
rocksdb::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();

@ -42,7 +42,6 @@ const double GeoDBImpl::MaxLatitude = 85.05112878;
const double GeoDBImpl::MinLongitude = -180;
const double GeoDBImpl::MaxLongitude = 180;
GeoDBImpl::GeoDBImpl(DB* db, const GeoDBOptions& options) :
GeoDB(db, options), db_(db), options_(options) {
}

@ -51,7 +51,8 @@ class UInt64AddOperator : public AssociativeMergeOperator {
} else if (logger != nullptr) {
// If value is corrupted, treat it as 0
Log(InfoLogLevel::ERROR_LEVEL, logger,
"uint64 value corruption, size: %" ROCKSDB_PRIszt " > %" ROCKSDB_PRIszt,
"uint64 value corruption, size: %" ROCKSDB_PRIszt
" > %" ROCKSDB_PRIszt,
value.size(), sizeof(uint64_t));
}

@ -67,27 +67,26 @@ inline bool GetSpatialIndexName(const std::string& column_family_name,
void Variant::Init(const Variant& v, Data& d) {
switch (v.type_) {
case kNull:
break;
case kBool:
d.b = v.data_.b;
break;
case kInt:
d.i = v.data_.i;
break;
case kDouble:
d.d = v.data_.d;
break;
case kString:
new (d.s) std::string(*GetStringPtr(v.data_));
break;
default:
assert(false);
case kNull:
break;
case kBool:
d.b = v.data_.b;
break;
case kInt:
d.i = v.data_.i;
break;
case kDouble:
d.d = v.data_.d;
break;
case kString:
new (d.s) std::string(*GetStringPtr(v.data_));
break;
default:
assert(false);
}
}
Variant& Variant::operator=(const Variant& v) {
// Construct first a temp so exception from a string ctor
// does not change this object
Data tmp;
@ -104,7 +103,6 @@ Variant& Variant::operator=(const Variant& v) {
}
Variant& Variant::operator=(Variant&& rhs) {
Destroy(type_, data_);
if (rhs.type_ == kString) {
new (data_.s) std::string(std::move(*GetStringPtr(rhs.data_)));
@ -116,9 +114,7 @@ Variant& Variant::operator=(Variant&& rhs) {
return *this;
}
bool Variant::operator==(const Variant& rhs) const {
if (type_ != rhs.type_) {
return false;
}
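
The re-indented switch above belongs to a hand-rolled variant: the string alternative lives in raw bytes inside a union and is built with placement new, which is why Init consults the type tag before touching the storage. A minimal sketch of just that storage trick (illustrative names, not the library's):

#include <cassert>
#include <cstdint>
#include <new>
#include <string>

union Data {
  int64_t i;
  alignas(std::string) char s[sizeof(std::string)];  // raw string storage
};

int main() {
  Data d;
  new (d.s) std::string("hello");  // construct in place, as Variant::Init does
  std::string* str = reinterpret_cast<std::string*>(d.s);
  assert(*str == "hello");
  using String = std::string;
  str->~String();  // the union never destroys it implicitly
  return 0;
}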

@ -18,10 +18,7 @@ namespace {
typedef std::map<std::string, std::string> KVMap;
enum BatchOperation {
OP_PUT = 0,
OP_DELETE = 1
};
enum BatchOperation { OP_PUT = 0, OP_DELETE = 1 };
}
class SpecialTimeEnv : public EnvWrapper {
