"make format" in some recent commits

Summary: Run "make format" for some recent commits.

Test Plan: Build and run tests

Reviewers: IslamAbdelRahman

Reviewed By: IslamAbdelRahman

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D49707
Branch: main
Author: sdong (9 years ago)
Parent: 6388e7f4e2
Commit: 296c3a1f94
Changed files (lines changed):
  1. db/db_impl.cc (27)
  2. db/db_test.cc (81)
  3. include/rocksdb/db.h (6)
  4. include/rocksdb/env.h (3)
  5. include/rocksdb/wal_filter.h (5)
  6. port/win/env_win.cc (58)
  7. util/env.cc (3)
  8. util/options.cc (9)
  9. util/options_helper.h (4)

--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1158,7 +1158,8 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
         bool batch_changed = false;
         WalFilter::WalProcessingOption wal_processing_option =
-            db_options_.wal_filter->LogRecord(batch, &new_batch, &batch_changed);
+            db_options_.wal_filter->LogRecord(batch, &new_batch,
+                                              &batch_changed);
         switch (wal_processing_option) {
           case WalFilter::WalProcessingOption::kContinueProcessing:
@@ -1183,13 +1184,14 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
           }
           default: {
             assert(false);  // unhandled case
-            status = Status::NotSupported("Unknown WalProcessingOption returned"
-                " by Wal Filter ", db_options_.wal_filter->Name());
+            status = Status::NotSupported(
+                "Unknown WalProcessingOption returned"
+                " by Wal Filter ",
+                db_options_.wal_filter->Name());
             MaybeIgnoreError(&status);
             if (!status.ok()) {
               return status;
-            }
-            else {
+            } else {
               // Ignore the error with current record processing.
               continue;
             }
@@ -1203,13 +1205,16 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
             int original_count = WriteBatchInternal::Count(&batch);
             if (new_count > original_count) {
               Log(InfoLogLevel::FATAL_LEVEL, db_options_.info_log,
-                  "Recovering log #%" PRIu64 " mode %d log filter %s returned "
+                  "Recovering log #%" PRIu64
+                  " mode %d log filter %s returned "
                   "more records (%d) than original (%d) which is not allowed. "
                   "Aborting recovery.",
                   log_number, db_options_.wal_recovery_mode,
                   db_options_.wal_filter->Name(), new_count, original_count);
-              status = Status::NotSupported("More than original # of records "
-                  "returned by Wal Filter ", db_options_.wal_filter->Name());
+              status = Status::NotSupported(
+                  "More than original # of records "
+                  "returned by Wal Filter ",
+                  db_options_.wal_filter->Name());
               return status;
             }
             // Set the same sequence number in the new_batch
@@ -4172,8 +4177,7 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
                                 mutable_cf_options.write_buffer_size);
       unique_ptr<WritableFileWriter> file_writer(
           new WritableFileWriter(std::move(lfile), opt_env_opt));
-      new_log = new log::Writer(std::move(file_writer),
-                                new_log_number,
+      new_log = new log::Writer(std::move(file_writer), new_log_number,
                                 db_options_.recycle_log_file_num > 0);
     }
   }
@@ -4815,8 +4819,7 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
           new WritableFileWriter(std::move(lfile), opt_env_options));
       impl->logs_.emplace_back(
           new_log_number,
-          new log::Writer(std::move(file_writer),
-                          new_log_number,
+          new log::Writer(std::move(file_writer), new_log_number,
                           impl->db_options_.recycle_log_file_num > 0));
       // set column family handles

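A side note on the Log() and Status::NotSupported() reflows above: clang-format can split a long format string because C++ concatenates adjacent string literals at compile time, so breaking a literal around a macro such as PRIu64 leaves the resulting string unchanged. A minimal standalone sketch (not RocksDB code; the values are made up):

    #include <cinttypes>
    #include <cstdio>

    int main() {
      uint64_t log_number = 7;  // hypothetical value
      int mode = 2;             // hypothetical value
      // "Recovering log #%" PRIu64 " mode %d\n" and the split form below
      // produce the same format string after literal concatenation.
      std::fprintf(stderr,
                   "Recovering log #%" PRIu64
                   " mode %d\n",
                   log_number, mode);
      return 0;
    }
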
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -5073,8 +5073,8 @@ class RecoveryTestHelper {
     ASSERT_OK(db_options.env->NewWritableFile(fname, &file, env_options));
     unique_ptr<WritableFileWriter> file_writer(
         new WritableFileWriter(std::move(file), env_options));
-    current_log_writer.reset(new log::Writer(
-        std::move(file_writer), current_log_number,
+    current_log_writer.reset(
+        new log::Writer(std::move(file_writer), current_log_number,
                         db_options.recycle_log_file_num > 0));
     for (int i = 0; i < kKeysPerWALFile; i++) {
@@ -9891,15 +9891,13 @@ TEST_F(DBTest, PauseBackgroundWorkTest) {
 #ifndef ROCKSDB_LITE
 namespace {
-void ValidateKeyExistence(DB* db,
-                          const std::vector<Slice>& keys_must_exist,
+void ValidateKeyExistence(DB* db, const std::vector<Slice>& keys_must_exist,
                           const std::vector<Slice>& keys_must_not_exist) {
   // Ensure that expected keys exist
   std::vector<std::string> values;
   if (keys_must_exist.size() > 0) {
-    std::vector<Status> status_list = db->MultiGet(ReadOptions(),
-                                                   keys_must_exist,
-                                                   &values);
+    std::vector<Status> status_list =
+        db->MultiGet(ReadOptions(), keys_must_exist, &values);
     for (size_t i = 0; i < keys_must_exist.size(); i++) {
       ASSERT_OK(status_list[i]);
     }
@@ -9907,9 +9905,8 @@ namespace {
   // Ensure that given keys don't exist
   if (keys_must_not_exist.size() > 0) {
-    std::vector<Status> status_list = db->MultiGet(ReadOptions(),
-                                                   keys_must_not_exist,
-                                                   &values);
+    std::vector<Status> status_list =
+        db->MultiGet(ReadOptions(), keys_must_not_exist, &values);
     for (size_t i = 0; i < keys_must_not_exist.size(); i++) {
       ASSERT_TRUE(status_list[i].IsNotFound());
     }
@@ -9929,21 +9926,22 @@ TEST_F(DBTest, WalFilterTest) {
   size_t apply_option_at_record_index_;
   // Current record index, incremented with each record encountered.
   size_t current_record_index_;
  public:
   TestWalFilter(WalFilter::WalProcessingOption wal_processing_option,
-                size_t apply_option_for_record_index) :
-    wal_processing_option_(wal_processing_option),
+                size_t apply_option_for_record_index)
+      : wal_processing_option_(wal_processing_option),
         apply_option_at_record_index_(apply_option_for_record_index),
         current_record_index_(0) {}
   virtual WalProcessingOption LogRecord(const WriteBatch& batch,
-                                        WriteBatch* new_batch, bool* batch_changed) const override {
+                                        WriteBatch* new_batch,
+                                        bool* batch_changed) const override {
     WalFilter::WalProcessingOption option_to_return;
     if (current_record_index_ == apply_option_at_record_index_) {
       option_to_return = wal_processing_option_;
-    }
-    else {
+    } else {
       option_to_return = WalProcessingOption::kContinueProcessing;
     }
@@ -9955,9 +9953,7 @@ TEST_F(DBTest, WalFilterTest) {
     return option_to_return;
   }
-  virtual const char* Name() const override {
-    return "TestWalFilter";
-  }
+  virtual const char* Name() const override { return "TestWalFilter"; }
 };
 // Create 3 batches with two keys each
@@ -9972,7 +9968,8 @@ TEST_F(DBTest, WalFilterTest) {
   // Test with all WAL processing options
   for (int option = 0;
-       option < static_cast<int>(WalFilter::WalProcessingOption::kWalProcessingOptionMax);
+       option < static_cast<int>(
+           WalFilter::WalProcessingOption::kWalProcessingOptionMax);
        option++) {
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
@@ -9999,8 +9996,8 @@ TEST_F(DBTest, WalFilterTest) {
     // Reopen database with option to use WAL filter
     options = OptionsForLogIterTest();
     options.wal_filter = &test_wal_filter;
-    Status status = TryReopenWithColumnFamilies({ "default", "pikachu" },
-                                                options);
+    Status status =
+        TryReopenWithColumnFamilies({"default", "pikachu"}, options);
     if (wal_processing_option ==
         WalFilter::WalProcessingOption::kCorruptedRecord) {
       assert(!status.ok());
@@ -10029,7 +10026,8 @@ TEST_F(DBTest, WalFilterTest) {
         break;
       }
       case WalFilter::WalProcessingOption::kIgnoreCurrentRecord: {
-        fprintf(stderr, "Testing with ignoring record %" ROCKSDB_PRIszt " only\n",
+        fprintf(stderr,
+                "Testing with ignoring record %" ROCKSDB_PRIszt " only\n",
                 apply_option_for_record_index);
         // We expect the record with apply_option_for_record_index to be not
         // found.
@@ -10037,8 +10035,7 @@ TEST_F(DBTest, WalFilterTest) {
         for (size_t j = 0; j < batch_keys[i].size(); j++) {
           if (i == apply_option_for_record_index) {
             keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
-          }
-          else {
+          } else {
             keys_must_exist.push_back(Slice(batch_keys[i][j]));
           }
         }
@@ -10046,7 +10043,9 @@ TEST_F(DBTest, WalFilterTest) {
         break;
       }
       case WalFilter::WalProcessingOption::kStopReplay: {
-        fprintf(stderr, "Testing with stopping replay from record %" ROCKSDB_PRIszt "\n",
+        fprintf(stderr,
+                "Testing with stopping replay from record %" ROCKSDB_PRIszt
+                "\n",
                 apply_option_for_record_index);
         // We expect records beyond apply_option_for_record_index to be not
         // found.
@@ -10054,8 +10053,7 @@ TEST_F(DBTest, WalFilterTest) {
         for (size_t j = 0; j < batch_keys[i].size(); j++) {
           if (i >= apply_option_for_record_index) {
             keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
-          }
-          else {
+          } else {
             keys_must_exist.push_back(Slice(batch_keys[i][j]));
           }
         }
@@ -10097,10 +10095,11 @@ TEST_F(DBTest, WalFilterTestWithChangeBatch) {
   size_t num_keys_to_add_in_new_batch_;
   // Number of keys added to new batch
   size_t num_keys_added_;
  public:
   ChangeBatchHandler(WriteBatch* new_write_batch,
-                     size_t num_keys_to_add_in_new_batch) :
-    new_write_batch_(new_write_batch),
+                     size_t num_keys_to_add_in_new_batch)
+      : new_write_batch_(new_write_batch),
         num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch),
         num_keys_added_(0) {}
   virtual void Put(const Slice& key, const Slice& value) override {
@@ -10119,17 +10118,17 @@ TEST_F(DBTest, WalFilterTestWithChangeBatch) {
   size_t num_keys_to_add_in_new_batch_;
   // Current record index, incremented with each record encountered.
   size_t current_record_index_;
  public:
-  TestWalFilterWithChangeBatch(
-      size_t change_records_from_index,
-      size_t num_keys_to_add_in_new_batch) :
-    change_records_from_index_(change_records_from_index),
+  TestWalFilterWithChangeBatch(size_t change_records_from_index,
+                               size_t num_keys_to_add_in_new_batch)
+      : change_records_from_index_(change_records_from_index),
         num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch),
         current_record_index_(0) {}
   virtual WalProcessingOption LogRecord(const WriteBatch& batch,
-                                        WriteBatch* new_batch, bool* batch_changed) const override {
+                                        WriteBatch* new_batch,
+                                        bool* batch_changed) const override {
     if (current_record_index_ >= change_records_from_index_) {
       ChangeBatchHandler handler(new_batch, num_keys_to_add_in_new_batch_);
       batch.Iterate(&handler);
@@ -10139,7 +10138,8 @@ TEST_F(DBTest, WalFilterTestWithChangeBatch) {
     // Filter is passed as a const object for RocksDB to not modify the
     // object, however we modify it for our own purpose here and hence
     // cast the constness away.
-    (const_cast<TestWalFilterWithChangeBatch*>(this)->current_record_index_)++;
+    (const_cast<TestWalFilterWithChangeBatch*>(this)
+         ->current_record_index_)++;
     return WalProcessingOption::kContinueProcessing;
   }
@@ -10193,8 +10193,7 @@ TEST_F(DBTest, WalFilterTestWithChangeBatch) {
       for (size_t j = 0; j < batch_keys[i].size(); j++) {
         if (i >= change_records_from_index && j >= num_keys_to_add_in_new_batch) {
           keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
-        }
-        else {
+        } else {
           keys_must_exist.push_back(Slice(batch_keys[i][j]));
         }
       }
@@ -10225,7 +10224,8 @@ TEST_F(DBTest, WalFilterTestWithChangeBatchExtraKeys) {
 class TestWalFilterWithChangeBatchAddExtraKeys : public WalFilter {
  public:
   virtual WalProcessingOption LogRecord(const WriteBatch& batch,
-                                        WriteBatch* new_batch, bool* batch_changed) const override {
+                                        WriteBatch* new_batch,
+                                        bool* batch_changed) const override {
     *new_batch = batch;
     new_batch->Put("key_extra", "value_extra");
     *batch_changed = true;
@@ -10265,8 +10265,7 @@ TEST_F(DBTest, WalFilterTestWithChangeBatchExtraKeys) {
   // Reopen database with option to use WAL filter
   options = OptionsForLogIterTest();
   options.wal_filter = &test_wal_filter_extra_keys;
-  Status status =
-      TryReopenWithColumnFamilies({ "default", "pikachu" }, options);
+  Status status = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
   ASSERT_TRUE(status.IsNotSupported());
   // Reopen without filter, now reopen should succeed - previous

--- a/include/rocksdb/db.h
+++ b/include/rocksdb/db.h
@@ -503,7 +503,8 @@ class DB {
     return CompactRange(options, DefaultColumnFamily(), begin, end);
   }
-  virtual Status SetOptions(ColumnFamilyHandle* /*column_family*/,
+  virtual Status SetOptions(
+      ColumnFamilyHandle* /*column_family*/,
       const std::unordered_map<std::string, std::string>& /*new_options*/) {
     return Status::NotSupported("Not implemented");
   }
@@ -663,8 +664,7 @@ class DB {
   //
   // If cf_name is not specified, then the metadata of the default
   // column family will be returned.
-  virtual void GetColumnFamilyMetaData(
-      ColumnFamilyHandle* /*column_family*/,
+  virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/,
                                        ColumnFamilyMetaData* /*metadata*/) {}
   // Get the metadata of the default column family.

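For context on the SetOptions signature reformatted above: it takes option names and values as strings. A hedged usage sketch (assumes an already opened rocksdb::DB* and that the named option is dynamically changeable in the version at hand; the helper name is made up):

    #include <string>
    #include <unordered_map>
    #include "rocksdb/db.h"

    // Sketch: change a mutable column family option at runtime.
    // The base-class default shown in the hunk returns NotSupported;
    // concrete DB implementations override it.
    rocksdb::Status TuneWriteBuffer(rocksdb::DB* db) {
      std::unordered_map<std::string, std::string> new_options{
          {"write_buffer_size", "67108864"}};  // 64 MB, passed as a string
      return db->SetOptions(db->DefaultColumnFamily(), new_options);
    }
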
--- a/include/rocksdb/env.h
+++ b/include/rocksdb/env.h
@@ -416,8 +416,7 @@ class RandomAccessFile {
   // For cases when read-ahead is implemented in the platform dependent
   // layer
-  virtual void EnableReadAhead() {
-  }
+  virtual void EnableReadAhead() {}
   // Tries to get an unique ID for this file that will be the same each time
   // the file is opened (and will stay the same while the file is open).

--- a/include/rocksdb/wal_filter.h
+++ b/include/rocksdb/wal_filter.h
@@ -28,7 +28,7 @@ public:
     kWalProcessingOptionMax = 4
   };
-  virtual ~WalFilter() { };
+  virtual ~WalFilter() {}
   // LogRecord is invoked for each log record encountered for all the logs
   // during replay on logs on recovery. This method can be used to:
@@ -55,7 +55,8 @@ public:
   // Please see WalProcessingOption enum above for
   // details.
   virtual WalProcessingOption LogRecord(const WriteBatch& batch,
-                                        WriteBatch* new_batch, bool* batch_changed) const = 0;
+                                        WriteBatch* new_batch,
+                                        bool* batch_changed) const = 0;
   // Returns a name that identifies this WAL filter.
   // The name will be printed to LOG file on start up for diagnosis.

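To make the interface above concrete, here is a minimal sketch of a user-defined filter written against the LogRecord/Name signatures shown in this hunk (a hypothetical pass-through filter; the TestWalFilter classes in the db_test.cc hunks above are the real in-tree examples):

    #include "rocksdb/wal_filter.h"
    #include "rocksdb/write_batch.h"

    // Hypothetical filter that lets every WAL record through untouched.
    class PassThroughWalFilter : public rocksdb::WalFilter {
     public:
      virtual WalProcessingOption LogRecord(const rocksdb::WriteBatch& batch,
                                            rocksdb::WriteBatch* new_batch,
                                            bool* batch_changed) const override {
        *batch_changed = false;  // we did not rewrite the batch into new_batch
        return WalProcessingOption::kContinueProcessing;
      }
      virtual const char* Name() const override { return "PassThroughWalFilter"; }
    };

It would be installed for recovery the same way the tests above do it: options.wal_filter = &filter; before reopening the DB.
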
--- a/port/win/env_win.cc
+++ b/port/win/env_win.cc
@@ -697,30 +697,35 @@ class WinRandomAccessFile : public RandomAccessFile {
       buffered_start_;  // file offset set that is currently buffered
   /*
-   * The function reads a requested amount of bytes into the specified aligned buffer
-   * Upon success the function sets the length of the buffer to the amount of bytes actually
-   * read even though it might be less than actually requested.
-   * It then copies the amount of bytes requested by the user (left) to the user supplied
-   * buffer (dest) and reduces left by the amount of bytes copied to the user buffer
+   * The function reads a requested amount of bytes into the specified aligned
+   * buffer Upon success the function sets the length of the buffer to the
+   * amount of bytes actually read even though it might be less than actually
+   * requested. It then copies the amount of bytes requested by the user (left)
+   * to the user supplied buffer (dest) and reduces left by the amount of bytes
+   * copied to the user buffer
    *
    * @user_offset [in] - offset on disk where the read was requested by the user
-   * @first_page_start [in] - actual page aligned disk offset that we want to read from
-   * @bytes_to_read [in] - total amount of bytes that will be read from disk which is generally
-   *          greater or equal to the amount that the user has requested due to the
-   *          either alignment requirements or read_ahead in effect.
-   * @left [in/out] total amount of bytes that needs to be copied to the user buffer. It is reduced
-   *           by the amount of bytes that actually copied
+   * @first_page_start [in] - actual page aligned disk offset that we want to
+   *                          read from
+   * @bytes_to_read [in] - total amount of bytes that will be read from disk
+   *                       which is generally greater or equal to the amount
+   *                       that the user has requested due to the
+   *                       either alignment requirements or read_ahead in
+   *                       effect.
+   * @left [in/out] total amount of bytes that needs to be copied to the user
+   *                buffer. It is reduced by the amount of bytes that actually
+   *                copied
    * @buffer - buffer to use
    * @dest - user supplied buffer
    */
   SSIZE_T ReadIntoBuffer(uint64_t user_offset, uint64_t first_page_start,
-                         size_t bytes_to_read, size_t& left, AlignedBuffer& buffer, char* dest) const {
+                         size_t bytes_to_read, size_t& left,
+                         AlignedBuffer& buffer, char* dest) const {
     assert(buffer.CurrentSize() == 0);
     assert(buffer.Capacity() >= bytes_to_read);
-    SSIZE_T read = pread(hFile_, buffer.Destination(), bytes_to_read,
-                         first_page_start);
+    SSIZE_T read =
+        pread(hFile_, buffer.Destination(), bytes_to_read, first_page_start);
     if (read > 0) {
       buffer.Size(read);
@@ -739,8 +744,8 @@ class WinRandomAccessFile : public RandomAccessFile {
   }
   SSIZE_T ReadIntoOneShotBuffer(uint64_t user_offset, uint64_t first_page_start,
-                                size_t bytes_to_read, size_t& left, char* dest) const {
+                                size_t bytes_to_read, size_t& left,
+                                char* dest) const {
     AlignedBuffer bigBuffer;
     bigBuffer.Alignment(buffer_.Alignment());
     bigBuffer.AllocateNewBuffer(bytes_to_read);
@@ -749,9 +754,10 @@ class WinRandomAccessFile : public RandomAccessFile {
                           bigBuffer, dest);
   }
-  SSIZE_T ReadIntoInstanceBuffer(uint64_t user_offset, uint64_t first_page_start,
-                                 size_t bytes_to_read, size_t& left, char* dest) const {
+  SSIZE_T ReadIntoInstanceBuffer(uint64_t user_offset,
+                                 uint64_t first_page_start,
+                                 size_t bytes_to_read, size_t& left,
+                                 char* dest) const {
     SSIZE_T read = ReadIntoBuffer(user_offset, first_page_start, bytes_to_read,
                                   left, buffer_, dest);
@@ -789,9 +795,7 @@ class WinRandomAccessFile : public RandomAccessFile {
     }
   }
-  virtual void EnableReadAhead() override {
-    this->Hint(SEQUENTIAL);
-  }
+  virtual void EnableReadAhead() override { this->Hint(SEQUENTIAL); }
   virtual Status Read(uint64_t offset, size_t n, Slice* result,
                       char* scratch) const override {
@@ -877,9 +881,7 @@ class WinRandomAccessFile : public RandomAccessFile {
   }
   virtual void Hint(AccessPattern pattern) override {
-    if (pattern == SEQUENTIAL &&
-        !use_os_buffer_ &&
+    if (pattern == SEQUENTIAL && !use_os_buffer_ &&
         compaction_readahead_size_ > 0) {
       std::lock_guard<std::mutex> lg(buffer_mut_);
       if (!read_ahead_) {
@@ -888,12 +890,12 @@ class WinRandomAccessFile : public RandomAccessFile {
         // - one for memory alignment which added implicitly by AlignedBuffer
         // - We add one more alignment because we will read one alignment more
         // from disk
-        buffer_.AllocateNewBuffer(compaction_readahead_size_ + buffer_.Alignment());
+        buffer_.AllocateNewBuffer(compaction_readahead_size_ +
+                                  buffer_.Alignment());
       }
     }
   }
   virtual Status InvalidateCache(size_t offset, size_t length) override {
     return Status::OK();
   }

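The ReadIntoBuffer comment block above centers on two derived quantities: the page-aligned start offset and the (larger) aligned read size. As a hedged standalone illustration of that arithmetic (the helper names are made up; this is not the env_win.cc code):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Round a user offset down to its containing page boundary.
    // `alignment` must be a power of two, e.g. a 4096-byte page.
    inline uint64_t FirstPageStart(uint64_t offset, uint64_t alignment) {
      assert((alignment & (alignment - 1)) == 0);
      return offset & ~(alignment - 1);
    }

    // Aligned byte count covering the user range [offset, offset + n):
    // generally greater or equal to n, as the comment above notes.
    inline size_t BytesToRead(uint64_t offset, size_t n, uint64_t alignment) {
      uint64_t start = FirstPageStart(offset, alignment);
      uint64_t end = FirstPageStart(offset + n + alignment - 1, alignment);
      return static_cast<size_t>(end - start);
    }
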
--- a/util/env.cc
+++ b/util/env.cc
@@ -293,7 +293,8 @@ void AssignEnvOptions(EnvOptions* env_options, const DBOptions& options) {
   env_options->set_fd_cloexec = options.is_fd_close_on_exec;
   env_options->bytes_per_sync = options.bytes_per_sync;
   env_options->compaction_readahead_size = options.compaction_readahead_size;
-  env_options->random_access_max_buffer_size = options.random_access_max_buffer_size;
+  env_options->random_access_max_buffer_size =
+      options.random_access_max_buffer_size;
   env_options->rate_limiter = options.rate_limiter.get();
   env_options->allow_fallocate = options.allow_fallocate;
 }

--- a/util/options.cc
+++ b/util/options.cc
@@ -260,7 +260,8 @@ DBOptions::DBOptions()
       skip_stats_update_on_db_open(false),
       wal_recovery_mode(WALRecoveryMode::kTolerateCorruptedTailRecords)
 #ifndef ROCKSDB_LITE
-      , wal_filter(nullptr)
+      ,
+      wal_filter(nullptr)
 #endif  // ROCKSDB_LITE
 {
 }
@@ -322,7 +323,8 @@ DBOptions::DBOptions(const Options& options)
       wal_recovery_mode(options.wal_recovery_mode),
       row_cache(options.row_cache)
 #ifndef ROCKSDB_LITE
-      , wal_filter(options.wal_filter)
+      ,
+      wal_filter(options.wal_filter)
 #endif  // ROCKSDB_LITE
 {
 }
@@ -405,7 +407,8 @@ void DBOptions::Dump(Logger* log) const {
          " Options.compaction_readahead_size: %" ROCKSDB_PRIszt
          "d",
          compaction_readahead_size);
-  Header(log,
+  Header(
+      log,
       " Options.random_access_max_buffer_size: %" ROCKSDB_PRIszt
       "d",
       random_access_max_buffer_size);

--- a/util/options_helper.h
+++ b/util/options_helper.h
@@ -181,8 +181,8 @@ static std::unordered_map<std::string, OptionTypeInfo> db_options_type_info = {
     {offsetof(struct DBOptions, compaction_readahead_size), OptionType::kSizeT,
      OptionVerificationType::kNormal}},
    {"random_access_max_buffer_size",
-    { offsetof(struct DBOptions, random_access_max_buffer_size), OptionType::kSizeT,
-      OptionVerificationType::kNormal}},
+    {offsetof(struct DBOptions, random_access_max_buffer_size),
+     OptionType::kSizeT, OptionVerificationType::kNormal}},
    {"use_adaptive_mutex",
     {offsetof(struct DBOptions, use_adaptive_mutex), OptionType::kBoolean,
      OptionVerificationType::kNormal}},

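The db_options_type_info entries above (option name, struct offset, value type, verification mode) are what let RocksDB parse DBOptions from strings. A hedged usage sketch via GetDBOptionsFromString (header location and option availability vary by RocksDB version):

    #include <cassert>
    #include <string>
    #include "rocksdb/convenience.h"  // GetDBOptionsFromString in recent layouts
    #include "rocksdb/options.h"

    // Sketch: "random_access_max_buffer_size" is registered as kSizeT above,
    // so its string form should parse straight into the DBOptions field.
    void ParseExample() {
      rocksdb::DBOptions base;
      rocksdb::DBOptions parsed;
      rocksdb::Status s = rocksdb::GetDBOptionsFromString(
          base, "random_access_max_buffer_size=1048576", &parsed);
      assert(s.ok());
      assert(parsed.random_access_max_buffer_size == 1048576u);
    }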