Lint everything

Summary:
```
arc2 lint --everything
```

run the linter on the whole code repo to fix existing lint issues

Test Plan: make check -j64

Reviewers: sdong, rven, anthony, kradhakrishnan, yhchiang

Reviewed By: yhchiang

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D50769
main
Islam AbdelRahman 9 years ago
parent dac5b248b1
commit a163cc2d5a
  1. 6
      db/db_bench.cc
  2. 1
      db/listener_test.cc
  3. 2
      doc/log_format.txt
  4. 4
      include/rocksdb/options.h
  5. 16
      include/rocksdb/table.h
  6. 2
      java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
  7. 2
      java/src/main/java/org/rocksdb/DBOptionsInterface.java
  8. 1
      port/port.h
  9. 8
      port/port_posix.h
  10. 5
      port/win/env_win.cc
  11. 2
      port/win/port_win.cc
  12. 15
      port/win/win_logger.cc
  13. 8
      table/plain_table_factory.cc
  14. 2
      table/plain_table_factory.h
  15. 2
      third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc
  16. 4
      util/env.cc
  17. 8
      util/options_helper.cc
  18. 9
      util/options_test.cc
  19. 3
      utilities/merge_operators/string_append/stringappend.cc
  20. 4
      utilities/merge_operators/string_append/stringappend.h
  21. 1
      utilities/merge_operators/string_append/stringappend2.cc
  22. 7
      utilities/redis/redis_list_iterator.h

@ -376,8 +376,8 @@ DEFINE_int32(compaction_readahead_size, 0, "Compaction readahead size");
DEFINE_int32(random_access_max_buffer_size, 1024 * 1024,
"Maximum windows randomaccess buffer size");
DEFINE_int32(writable_file_max_buffer_size, 1024 * 1024,
"Maximum write buffer for Writeable File");
DEFINE_int32(writable_file_max_buffer_size, 1024 * 1024,
"Maximum write buffer for Writable File");
DEFINE_int32(skip_table_builder_flush, false, "Skip flushing block in "
"table builder ");
@ -2448,7 +2448,7 @@ class Benchmark {
block_based_options.block_size = FLAGS_block_size;
block_based_options.block_restart_interval = FLAGS_block_restart_interval;
block_based_options.filter_policy = filter_policy_;
block_based_options.skip_table_builder_flush =
block_based_options.skip_table_builder_flush =
FLAGS_skip_table_builder_flush;
block_based_options.format_version = 2;
options.table_factory.reset(

@ -533,4 +533,3 @@ int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@ -11,7 +11,7 @@ Each block consists of a sequence of records:
A record never starts within the last six bytes of a block (since it
won't fit). Any leftover bytes here form the trailer, which must
consist entirely of zero bytes and must be skipped by readers.
consist entirely of zero bytes and must be skipped by readers.
Aside: if exactly seven bytes are left in the current block, and a new
non-zero length record is added, the writer must emit a FIRST record

@ -1090,8 +1090,8 @@ struct DBOptions {
size_t random_access_max_buffer_size;
// This is the maximum buffer size that is used by WritableFileWriter.
// On Windows, we need to maintain an aligned buffer for writes.
// We allow the buffer to grow until it's size hits the limit.
// On Windows, we need to maintain an aligned buffer for writes.
// We allow the buffer to grow until it's size hits the limit.
//
// Default: 1024 * 1024 (1 MB)
size_t writable_file_max_buffer_size;

@ -129,19 +129,19 @@ struct BlockBasedTableOptions {
bool whole_key_filtering = true;
// If true, block will not be explictly flushed to disk during building
// a SstTable. Instead, buffer in WritableFileWriter will take
// care of the flushing when it is full.
// a SstTable. Instead, buffer in WritableFileWriter will take
// care of the flushing when it is full.
//
// On Windows, this option helps a lot when unbuffered I/O
// (allow_os_buffer = false) is used, since it avoids small
// unbuffered disk write.
// On Windows, this option helps a lot when unbuffered I/O
// (allow_os_buffer = false) is used, since it avoids small
// unbuffered disk write.
//
// User may also adjust writable_file_max_buffer_size to optimize disk I/O
// size.
// User may also adjust writable_file_max_buffer_size to optimize disk I/O
// size.
//
// Default: false
bool skip_table_builder_flush = false;
// We currently have three versions:
// 0 -- This version is currently written out by all RocksDB's versions by
// default. Can be read by really old RocksDB's. Doesn't support changing

@ -231,7 +231,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Same as fixed length prefix extractor, except that when slice is
* Same as fixed length prefix extractor, except that when slice is
* shorter than the fixed length, it will use the full key.
*
* @param n use the first n bytes of a key as its prefix.

@ -565,7 +565,7 @@ public interface DBOptionsInterface {
* are older than WAL_ttl_seconds will be deleted.</li>
* <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.</li>
* </ol>
* </ol>
*
* @param sizeLimitMB size limit in mega-bytes.
* @return the instance of the current Object.

@ -19,4 +19,3 @@
#elif defined(OS_WIN)
#include "port/win/port_win.h"
#endif

@ -32,14 +32,11 @@
#else
#define PLATFORM_IS_LITTLE_ENDIAN false
#endif
#elif defined(OS_FREEBSD)
#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || \
defined(OS_DRAGONFLYBSD) || defined(OS_ANDROID)
#include <sys/endian.h>
#include <sys/types.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
#elif defined(OS_OPENBSD) || defined(OS_NETBSD) ||\
defined(OS_DRAGONFLYBSD) || defined(OS_ANDROID)
#include <sys/types.h>
#include <sys/endian.h>
#else
#include <endian.h>
#endif
@ -159,4 +156,3 @@ extern int GetMaxOpenFiles();
} // namespace port
} // namespace rocksdb

@ -610,7 +610,7 @@ class WinSequentialFile : public SequentialFile {
HANDLE file_;
// There is no equivalent of advising away buffered pages as in posix.
// To implement this flag we would need to do unbuffered reads which
// To implement this flag we would need to do unbuffered reads which
// will need to be aligned (not sure there is a guarantee that the buffer
// passed in is aligned).
// Hence we currently ignore this flag. It is used only in a few cases
@ -970,8 +970,7 @@ class WinWritableFile : public WritableFile {
virtual Status PositionedAppend(const Slice& data, uint64_t offset) override {
Status s;
SSIZE_T ret = pwrite(hFile_, data.data(),
data.size(), offset);
SSIZE_T ret = pwrite(hFile_, data.data(), data.size(), offset);
// Error break
if (ret < 0) {

@ -67,7 +67,7 @@ bool CondVar::TimedWait(uint64_t abs_time_us) {
using namespace std::chrono;
// MSVC++ library implements wait_until in terms of wait_for so
// we need to convert absoulte wait into relative wait.
// we need to convert absolute wait into relative wait.
microseconds usAbsTime(abs_time_us);
microseconds usNow(

@ -10,6 +10,8 @@
// Logger implementation that can be shared by all environments
// where enough posix functionality is available.
#include "port/win/win_logger.h"
#include <stdint.h>
#include <algorithm>
#include <stdio.h>
@ -21,7 +23,6 @@
#include <Windows.h>
#include "port/win/win_logger.h"
#include "port/sys_time.h"
#include "util/iostats_context_imp.h"
@ -53,8 +54,9 @@ void WinLogger::close() { CloseHandle(file_); }
void WinLogger::Flush() {
if (flush_pending_) {
flush_pending_ = false;
// With Windows API writes go to OS buffers directly so no fflush needed unlike
// with C runtime API. We don't flush all the way to disk for perf reasons.
// With Windows API writes go to OS buffers directly so no fflush needed
// unlike with C runtime API. We don't flush all the way to disk
// for perf reasons.
}
last_flush_micros_ = env_->NowMicros();
@ -124,7 +126,7 @@ void WinLogger::Logv(const char* format, va_list ap) {
assert(p <= limit);
const size_t write_size = p - base;
DWORD bytesWritten = 0;
DWORD bytesWritten = 0;
BOOL ret = WriteFile(file_, base, write_size, &bytesWritten, NULL);
if (ret == FALSE) {
std::string errSz = GetWindowsErrSz(GetLastError());
@ -141,8 +143,9 @@ void WinLogger::Logv(const char* format, va_list ap) {
static_cast<uint64_t>(now_tv.tv_sec) * 1000000 + now_tv.tv_usec;
if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
flush_pending_ = false;
// With Windows API writes go to OS buffers directly so no fflush needed unlike
// with C runtime API. We don't flush all the way to disk for perf reasons.
// With Windows API writes go to OS buffers directly so no fflush needed
// unlike with C runtime API. We don't flush all the way to disk
// for perf reasons.
last_flush_micros_ = now_micros;
}
break;

@ -21,8 +21,8 @@ Status PlainTableFactory::NewTableReader(
return PlainTableReader::Open(
table_reader_options.ioptions, table_reader_options.env_options,
table_reader_options.internal_comparator, std::move(file), file_size,
table, table_options_.bloom_bits_per_key, table_options_.hash_table_ratio,
table_options_.index_sparseness, table_options_.huge_page_tlb_size,
table, table_options_.bloom_bits_per_key, table_options_.hash_table_ratio,
table_options_.index_sparseness, table_options_.huge_page_tlb_size,
table_options_.full_scan_mode);
}
@ -36,8 +36,8 @@ TableBuilder* PlainTableFactory::NewTableBuilder(
return new PlainTableBuilder(
table_builder_options.ioptions,
table_builder_options.int_tbl_prop_collector_factories, column_family_id,
file, table_options_.user_key_len, table_options_.encoding_type,
table_options_.index_sparseness, table_options_.bloom_bits_per_key, 6,
file, table_options_.user_key_len, table_options_.encoding_type,
table_options_.index_sparseness, table_options_.bloom_bits_per_key, 6,
table_options_.huge_page_tlb_size, table_options_.hash_table_ratio,
table_options_.store_index_in_file);
}

@ -151,7 +151,7 @@ class PlainTableFactory : public TableFactory {
unique_ptr<RandomAccessFileReader>&& file,
uint64_t file_size,
unique_ptr<TableReader>* table) const override;
TableBuilder* NewTableBuilder(
const TableBuilderOptions& table_builder_options,
uint32_t column_family_id, WritableFileWriter* file) const override;

@ -2592,7 +2592,7 @@ class Hunk {
// Print a unified diff header for one hunk.
// The format is
// "@@ -<left_start>,<left_length> +<right_start>,<right_length> @@"
// where the left/right parts are ommitted if unnecessary.
// where the left/right parts are omitted if unnecessary.
void PrintHeader(std::ostream* ss) const {
*ss << "@@ ";
if (removes_) {

@ -296,8 +296,8 @@ void AssignEnvOptions(EnvOptions* env_options, const DBOptions& options) {
env_options->random_access_max_buffer_size =
options.random_access_max_buffer_size;
env_options->rate_limiter = options.rate_limiter.get();
env_options->writable_file_max_buffer_size =
options.writable_file_max_buffer_size;
env_options->writable_file_max_buffer_size =
options.writable_file_max_buffer_size;
env_options->allow_fallocate = options.allow_fallocate;
}

@ -1153,7 +1153,7 @@ std::string ParsePlainTableOptions(const std::string& name,
const std::string& org_value,
PlainTableOptions* new_option,
bool input_strings_escaped = false) {
const std::string& value =
const std::string& value =
input_strings_escaped ? UnescapeOptionString(org_value) : org_value;
const auto iter = plain_table_type_info.find(name);
if (iter == plain_table_type_info.end()) {
@ -1239,7 +1239,7 @@ Status GetPlainTableOptionsFromString(
if (!s.ok()) {
return s;
}
return GetPlainTableOptionsFromMap(table_options, opts_map,
return GetPlainTableOptionsFromMap(table_options, opts_map,
new_table_options);
}
@ -1384,8 +1384,8 @@ Status GetTableFactoryFromMap(
return Status::OK();
} else if (factory_name == PlainTableFactory().Name()) {
PlainTableOptions pt_opt;
s = GetPlainTableOptionsFromMap(PlainTableOptions(), opt_map,
&pt_opt, true);
s = GetPlainTableOptionsFromMap(PlainTableOptions(), opt_map, &pt_opt,
true);
if (!s.ok()) {
return s;
}

@ -535,10 +535,11 @@ TEST_F(OptionsTest, GetPlainTableOptionsFromString) {
&new_opt));
// unrecognized EncodingType
ASSERT_NOK(GetPlainTableOptionsFromString(table_opt,
"user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
"encoding_type=kPrefixXX",
&new_opt));
ASSERT_NOK(GetPlainTableOptionsFromString(
table_opt,
"user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
"encoding_type=kPrefixXX",
&new_opt));
}
#endif // !ROCKSDB_LITE

@ -55,6 +55,3 @@ std::shared_ptr<MergeOperator> MergeOperators::CreateStringAppendOperator() {
}
} // namespace rocksdb

@ -12,7 +12,8 @@ namespace rocksdb {
class StringAppendOperator : public AssociativeMergeOperator {
public:
StringAppendOperator(char delim_char); /// Constructor: specify delimiter
// Constructor: specify delimiter
explicit StringAppendOperator(char delim_char);
virtual bool Merge(const Slice& key,
const Slice* existing_value,
@ -28,4 +29,3 @@ class StringAppendOperator : public AssociativeMergeOperator {
};
} // namespace rocksdb

@ -110,4 +110,3 @@ MergeOperators::CreateStringAppendTESTOperator() {
}
} // namespace rocksdb

@ -37,8 +37,8 @@
* @author Deon Nicholas (dnicholas@fb.com)
*/
#ifndef ROCKSDB_LITE
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
@ -65,7 +65,7 @@ class RedisListIterator {
/// e) result_ will always contain data_[0..cur_byte_) and a header
/// f) Whenever corrupt data is encountered or an invalid operation is
/// attempted, a RedisListException will immediately be thrown.
RedisListIterator(const std::string& list_data)
explicit RedisListIterator(const std::string& list_data)
: data_(list_data.data()),
num_bytes_(static_cast<uint32_t>(list_data.size())),
cur_byte_(0),
@ -73,7 +73,6 @@ class RedisListIterator {
cur_elem_length_(0),
length_(0),
result_() {
// Initialize the result_ (reserve enough space for header)
InitializeResult();
@ -269,7 +268,7 @@ class RedisListIterator {
data_+cur_byte_+ sizeof(uint32_t) + cur_elem_length_);
}
/// Will ThrowError() if neccessary.
/// Will ThrowError() if necessary.
/// Checks for common/ubiquitous errors that can arise after most operations.
/// This method should be called before any reading operation.
/// If this function succeeds, then we are guaranteed to be in a valid state.

Loading…
Cancel
Save