Run automatic formatter against public header files (#5115)

Summary:
Automatically format public headers so they look more consistent.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5115

Differential Revision: D14632854

Pulled By: siying

fbshipit-source-id: ce9929ea62f9dcd65c69660b23eed1931cb0ae84
Authored by Siying Dong, committed by Facebook Github Bot
parent 5f6adf3f6a
commit 1f7f5a5a79
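The commit does not record the exact formatter command, but the reformatting pattern in the hunks below (80-column wrapping, argument alignment, brace placement) matches what clang-format produces with the repository's .clang-format style. As a hedged sketch only, not the verbatim command used for this change, a pass over the listed headers could look like:

    clang-format -i --style=file include/rocksdb/*.h include/rocksdb/utilities/*.h

Here -i edits the files in place and --style=file makes clang-format pick up the nearest .clang-format file; the two globs simply cover the directories in the file list below.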
Files changed (37), with changed-line counts in parentheses:

  1. include/rocksdb/comparator.h (3)
  2. include/rocksdb/concurrent_task_limiter.h (5)
  3. include/rocksdb/convenience.h (12)
  4. include/rocksdb/db.h (39)
  5. include/rocksdb/env.h (43)
  6. include/rocksdb/env_encryption.h (58)
  7. include/rocksdb/filter_policy.h (12)
  8. include/rocksdb/flush_block_policy.h (5)
  9. include/rocksdb/listener.h (14)
  10. include/rocksdb/memtablerep.h (13)
  11. include/rocksdb/merge_operator.h (7)
  12. include/rocksdb/metadata.h (9)
  13. include/rocksdb/options.h (18)
  14. include/rocksdb/perf_context.h (5)
  15. include/rocksdb/slice.h (28)
  16. include/rocksdb/slice_transform.h (2)
  17. include/rocksdb/sst_file_writer.h (5)
  18. include/rocksdb/statistics.h (4)
  19. include/rocksdb/status.h (3)
  20. include/rocksdb/table.h (18)
  21. include/rocksdb/thread_status.h (26)
  22. include/rocksdb/threadpool.h (1)
  23. include/rocksdb/transaction_log.h (5)
  24. include/rocksdb/types.h (4)
  25. include/rocksdb/universal_compaction.h (1)
  26. include/rocksdb/utilities/backupable_db.h (7)
  27. include/rocksdb/utilities/db_ttl.h (2)
  28. include/rocksdb/utilities/env_librados.h (2)
  29. include/rocksdb/utilities/env_mirror.h (14)
  30. include/rocksdb/utilities/ldb_cmd_execute_result.h (22)
  31. include/rocksdb/utilities/sim_cache.h (3)
  32. include/rocksdb/utilities/stackable_db.h (50)
  33. include/rocksdb/utilities/table_properties_collectors.h (13)
  34. include/rocksdb/utilities/transaction_db.h (1)
  35. include/rocksdb/utilities/utility_db.h (14)
  36. include/rocksdb/wal_filter.h (2)
  37. include/rocksdb/write_batch.h (2)

@ -55,8 +55,7 @@ class Comparator {
// If *start < limit, changes *start to a short string in [start,limit).
// Simple comparator implementations may return with *start unchanged,
// i.e., an implementation of this method that does nothing is correct.
virtual void FindShortestSeparator(
std::string* start,
virtual void FindShortestSeparator(std::string* start,
const Slice& limit) const = 0;
// Changes *key to a short string >= *key.

@ -16,7 +16,6 @@ namespace rocksdb {
class ConcurrentTaskLimiter {
public:
virtual ~ConcurrentTaskLimiter() {}
// Returns a name that identifies this concurrent task limiter.
@ -41,7 +40,7 @@ class ConcurrentTaskLimiter {
// @param limit: max concurrent tasks.
// limit = 0 means no new task allowed.
// limit < 0 means no limitation.
extern ConcurrentTaskLimiter* NewConcurrentTaskLimiter(
const std::string& name, int32_t limit);
extern ConcurrentTaskLimiter* NewConcurrentTaskLimiter(const std::string& name,
int32_t limit);
} // namespace rocksdb

@ -277,13 +277,11 @@ Status GetPlainTableOptionsFromMap(
// BlockBasedTableOptions as part of the string for block-based table factory:
// "write_buffer_size=1024;block_based_table_factory={block_size=4k};"
// "max_write_buffer_num=2"
Status GetColumnFamilyOptionsFromString(
const ColumnFamilyOptions& base_options,
Status GetColumnFamilyOptionsFromString(const ColumnFamilyOptions& base_options,
const std::string& opts_str,
ColumnFamilyOptions* new_options);
Status GetDBOptionsFromString(
const DBOptions& base_options,
Status GetDBOptionsFromString(const DBOptions& base_options,
const std::string& opts_str,
DBOptions* new_options);
@ -301,12 +299,10 @@ Status GetStringFromCompressionType(std::string* compression_str,
std::vector<CompressionType> GetSupportedCompressions();
Status GetBlockBasedTableOptionsFromString(
const BlockBasedTableOptions& table_options,
const std::string& opts_str,
const BlockBasedTableOptions& table_options, const std::string& opts_str,
BlockBasedTableOptions* new_table_options);
Status GetPlainTableOptionsFromString(
const PlainTableOptions& table_options,
Status GetPlainTableOptionsFromString(const PlainTableOptions& table_options,
const std::string& opts_str,
PlainTableOptions* new_table_options);

@ -131,8 +131,7 @@ class DB {
// OK on success.
// Stores nullptr in *dbptr and returns a non-OK status on error.
// Caller should delete *dbptr when it is no longer needed.
static Status Open(const Options& options,
const std::string& name,
static Status Open(const Options& options, const std::string& name,
DB** dbptr);
// Open the database for read only. All DB interfaces
@ -142,8 +141,8 @@ class DB {
//
// Not supported in ROCKSDB_LITE, in which case the function will
// return Status::NotSupported.
static Status OpenForReadOnly(const Options& options,
const std::string& name, DB** dbptr,
static Status OpenForReadOnly(const Options& options, const std::string& name,
DB** dbptr,
bool error_if_log_file_exist = false);
// Open the database for read only with column families. When opening DB with
@ -394,7 +393,8 @@ class DB {
virtual Status Get(const ReadOptions& options,
ColumnFamilyHandle* column_family, const Slice& key,
PinnableSlice* value) = 0;
virtual Status Get(const ReadOptions& options, const Slice& key, std::string* value) {
virtual Status Get(const ReadOptions& options, const Slice& key,
std::string* value) {
return Get(options, DefaultColumnFamily(), key, value);
}
@ -415,8 +415,9 @@ class DB {
virtual std::vector<Status> MultiGet(const ReadOptions& options,
const std::vector<Slice>& keys,
std::vector<std::string>* values) {
return MultiGet(options, std::vector<ColumnFamilyHandle*>(
keys.size(), DefaultColumnFamily()),
return MultiGet(
options,
std::vector<ColumnFamilyHandle*>(keys.size(), DefaultColumnFamily()),
keys, values);
}
@ -779,13 +780,10 @@ class DB {
// include_flags should be of type DB::SizeApproximationFlags
virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
const Range* range, int n, uint64_t* sizes,
uint8_t include_flags
= INCLUDE_FILES) = 0;
uint8_t include_flags = INCLUDE_FILES) = 0;
virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes,
uint8_t include_flags
= INCLUDE_FILES) {
GetApproximateSizes(DefaultColumnFamily(), range, n, sizes,
include_flags);
uint8_t include_flags = INCLUDE_FILES) {
GetApproximateSizes(DefaultColumnFamily(), range, n, sizes, include_flags);
}
// The method is similar to GetApproximateSizes, except it
@ -802,8 +800,7 @@ class DB {
// Deprecated versions of GetApproximateSizes
ROCKSDB_DEPRECATED_FUNC virtual void GetApproximateSizes(
const Range* range, int n, uint64_t* sizes,
bool include_memtable) {
const Range* range, int n, uint64_t* sizes, bool include_memtable) {
uint8_t include_flags = SizeApproximationFlags::INCLUDE_FILES;
if (include_memtable) {
include_flags |= SizeApproximationFlags::INCLUDE_MEMTABLES;
@ -811,9 +808,8 @@ class DB {
GetApproximateSizes(DefaultColumnFamily(), range, n, sizes, include_flags);
}
ROCKSDB_DEPRECATED_FUNC virtual void GetApproximateSizes(
ColumnFamilyHandle* column_family,
const Range* range, int n, uint64_t* sizes,
bool include_memtable) {
ColumnFamilyHandle* column_family, const Range* range, int n,
uint64_t* sizes, bool include_memtable) {
uint8_t include_flags = SizeApproximationFlags::INCLUDE_FILES;
if (include_memtable) {
include_flags |= SizeApproximationFlags::INCLUDE_MEMTABLES;
@ -1073,8 +1069,7 @@ class DB {
ColumnFamilyMetaData* /*metadata*/) {}
// Get the metadata of the default column family.
void GetColumnFamilyMetaData(
ColumnFamilyMetaData* metadata) {
void GetColumnFamilyMetaData(ColumnFamilyMetaData* metadata) {
GetColumnFamilyMetaData(DefaultColumnFamily(), metadata);
}
@ -1275,8 +1270,8 @@ class DB {
// Given a time window, return an iterator for accessing stats history
// User is responsible for deleting StatsHistoryIterator after use
virtual Status GetStatsHistory(uint64_t /*start_time*/,
uint64_t /*end_time*/,
virtual Status GetStatsHistory(
uint64_t /*start_time*/, uint64_t /*end_time*/,
std::unique_ptr<StatsHistoryIterator>* /*stats_iterator*/) {
return Status::NotSupported("GetStatsHistory() is not implemented.");
}

@ -54,7 +54,6 @@ const size_t kDefaultPageSize = 4 * 1024;
// Options while opening a file to read/write
struct EnvOptions {
// Construct with default Options
EnvOptions();
@ -321,11 +320,7 @@ class Env {
static std::string PriorityToString(Priority priority);
// Priority for requesting bytes in rate limiter scheduler
enum IOPriority {
IO_LOW = 0,
IO_HIGH = 1,
IO_TOTAL = 2
};
enum IOPriority { IO_LOW = 0, IO_HIGH = 1, IO_TOTAL = 2 };
// Arrange to run "(*function)(arg)" once in a background thread, in
// the thread pool specified by pri. By default, jobs go to the 'LOW'
@ -377,9 +372,7 @@ class Env {
// Default implementation simply relies on NowMicros.
// In platform-specific implementations, NowNanos() should return time points
// that are MONOTONIC.
virtual uint64_t NowNanos() {
return NowMicros() * 1000;
}
virtual uint64_t NowNanos() { return NowMicros() * 1000; }
// 0 indicates not supported.
virtual uint64_t NowCPUNanos() { return 0; }
@ -551,7 +544,6 @@ class SequentialFile {
// A file abstraction for randomly reading the contents of a file.
class RandomAccessFile {
public:
RandomAccessFile() {}
virtual ~RandomAccessFile();
@ -622,8 +614,7 @@ class WritableFile {
: last_preallocated_block_(0),
preallocation_block_size_(0),
io_priority_(Env::IO_TOTAL),
write_hint_(Env::WLTH_NOT_SET) {
}
write_hint_(Env::WLTH_NOT_SET) {}
virtual ~WritableFile();
// Append data to the end of the file
@ -651,7 +642,8 @@ class WritableFile {
//
// PositionedAppend() requires aligned buffer to be passed in. The alignment
// required is queried via GetRequiredBufferAlignment()
virtual Status PositionedAppend(const Slice& /* data */, uint64_t /* offset */) {
virtual Status PositionedAppend(const Slice& /* data */,
uint64_t /* offset */) {
return Status::NotSupported();
}
@ -670,15 +662,11 @@ class WritableFile {
* Override this method for environments where we need to sync
* metadata as well.
*/
virtual Status Fsync() {
return Sync();
}
virtual Status Fsync() { return Sync(); }
// true if Sync() and Fsync() are safe to call concurrently with Append()
// and Flush().
virtual bool IsSyncThreadSafe() const {
return false;
}
virtual bool IsSyncThreadSafe() const { return false; }
// Indicates the upper layers if the current WritableFile implementation
// uses direct IO.
@ -691,9 +679,7 @@ class WritableFile {
* Change the priority in rate limiter if rate limiting is enabled.
* If rate limiting is not enabled, this call has no effect.
*/
virtual void SetIOPriority(Env::IOPriority pri) {
io_priority_ = pri;
}
virtual void SetIOPriority(Env::IOPriority pri) { io_priority_ = pri; }
virtual Env::IOPriority GetIOPriority() { return io_priority_; }
@ -705,9 +691,7 @@ class WritableFile {
/*
* Get the size of valid data in the file.
*/
virtual uint64_t GetFileSize() {
return 0;
}
virtual uint64_t GetFileSize() { return 0; }
/*
* Get and set the default pre-allocation block size for writes to
@ -907,7 +891,8 @@ class Logger {
// and format. Any log with level under the internal log level
// of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
// printed.
virtual void Logv(const InfoLogLevel log_level, const char* format, va_list ap);
virtual void Logv(const InfoLogLevel log_level, const char* format,
va_list ap);
virtual size_t GetLogFileSize() const { return kDoNotSupportGetLogFileSize; }
// Flush to the OS buffers
@ -928,12 +913,12 @@ class Logger {
InfoLogLevel log_level_;
};
// Identifies a locked file.
class FileLock {
public:
FileLock() {}
virtual ~FileLock();
private:
// No copying allowed
FileLock(const FileLock&);
@ -1174,9 +1159,7 @@ class EnvWrapper : public Env {
return target_->GetThreadStatusUpdater();
}
uint64_t GetThreadID() const override {
return target_->GetThreadID();
}
uint64_t GetThreadID() const override { return target_->GetThreadID(); }
std::string GenerateUniqueId() override {
return target_->GenerateUniqueId();

@ -20,8 +20,8 @@ class EncryptionProvider;
Env* NewEncryptedEnv(Env* base_env, EncryptionProvider* provider);
// BlockAccessCipherStream is the base class for any cipher stream that
// supports random access at block level (without requiring data from other blocks).
// E.g. CTR (Counter operation mode) supports this requirement.
// supports random access at block level (without requiring data from other
// blocks). E.g. CTR (Counter operation mode) supports this requirement.
class BlockAccessCipherStream {
public:
virtual ~BlockAccessCipherStream(){};
@ -43,11 +43,13 @@ class BlockAccessCipherStream {
// Encrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status EncryptBlock(uint64_t blockIndex, char *data, char* scratch) = 0;
virtual Status EncryptBlock(uint64_t blockIndex, char* data,
char* scratch) = 0;
// Decrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status DecryptBlock(uint64_t blockIndex, char *data, char* scratch) = 0;
virtual Status DecryptBlock(uint64_t blockIndex, char* data,
char* scratch) = 0;
};
// BlockCipher
@ -74,9 +76,9 @@ class BlockCipher {
class ROT13BlockCipher : public BlockCipher {
private:
size_t blockSize_;
public:
ROT13BlockCipher(size_t blockSize)
: blockSize_(blockSize) {}
ROT13BlockCipher(size_t blockSize) : blockSize_(blockSize) {}
virtual ~ROT13BlockCipher(){};
// BlockSize returns the size of each block supported by this cipher stream.
@ -102,6 +104,7 @@ class CTRCipherStream final : public BlockAccessCipherStream {
BlockCipher& cipher_;
std::string iv_;
uint64_t initialCounter_;
public:
CTRCipherStream(BlockCipher& c, const char* iv, uint64_t initialCounter)
: cipher_(c), iv_(iv, c.BlockSize()), initialCounter_(initialCounter){};
@ -116,29 +119,31 @@ class CTRCipherStream final : public BlockAccessCipherStream {
// Encrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status EncryptBlock(uint64_t blockIndex, char *data, char *scratch) override;
virtual Status EncryptBlock(uint64_t blockIndex, char* data,
char* scratch) override;
// Decrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status DecryptBlock(uint64_t blockIndex, char *data, char *scratch) override;
virtual Status DecryptBlock(uint64_t blockIndex, char* data,
char* scratch) override;
};
// The encryption provider is used to create a cipher stream for a specific file.
// The returned cipher stream will be used for actual encryption/decryption
// actions.
// The encryption provider is used to create a cipher stream for a specific
// file. The returned cipher stream will be used for actual
// encryption/decryption actions.
class EncryptionProvider {
public:
virtual ~EncryptionProvider(){};
// GetPrefixLength returns the length of the prefix that is added to every file
// and used for storing encryption options.
// For optimal performance, the prefix length should be a multiple of
// the page size.
// GetPrefixLength returns the length of the prefix that is added to every
// file and used for storing encryption options. For optimal performance, the
// prefix length should be a multiple of the page size.
virtual size_t GetPrefixLength() = 0;
// CreateNewPrefix initialized an allocated block of prefix memory
// for a new file.
virtual Status CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) = 0;
virtual Status CreateNewPrefix(const std::string& fname, char* prefix,
size_t prefixLength) = 0;
// CreateCipherStream creates a block access cipher stream for a file given
// given name and options.
@ -155,23 +160,23 @@ class EncryptionProvider {
class CTREncryptionProvider : public EncryptionProvider {
private:
BlockCipher& cipher_;
protected:
const static size_t defaultPrefixLength = 4096;
public:
CTREncryptionProvider(BlockCipher& c)
: cipher_(c) {};
CTREncryptionProvider(BlockCipher& c) : cipher_(c){};
virtual ~CTREncryptionProvider() {}
// GetPrefixLength returns the length of the prefix that is added to every file
// and used for storing encryption options.
// For optimal performance, the prefix length should be a multiple of
// the page size.
// GetPrefixLength returns the length of the prefix that is added to every
// file and used for storing encryption options. For optimal performance, the
// prefix length should be a multiple of the page size.
virtual size_t GetPrefixLength() override;
// CreateNewPrefix initialized an allocated block of prefix memory
// for a new file.
virtual Status CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) override;
virtual Status CreateNewPrefix(const std::string& fname, char* prefix,
size_t prefixLength) override;
// CreateCipherStream creates a block access cipher stream for a file given
// given name and options.
@ -185,10 +190,11 @@ class CTREncryptionProvider : public EncryptionProvider {
// It will be encrypted later (before written to disk).
// Returns the amount of space (starting from the start of the prefix)
// that has been initialized.
virtual size_t PopulateSecretPrefixPart(char *prefix, size_t prefixLength, size_t blockSize);
virtual size_t PopulateSecretPrefixPart(char* prefix, size_t prefixLength,
size_t blockSize);
// CreateCipherStreamFromPrefix creates a block access cipher stream for a file given
// given name and options. The given prefix is already decrypted.
// CreateCipherStreamFromPrefix creates a block access cipher stream for a
// file given given name and options. The given prefix is already decrypted.
virtual Status CreateCipherStreamFromPrefix(
const std::string& fname, const EnvOptions& options,
uint64_t initialCounter, const Slice& iv, const Slice& prefix,

@ -19,9 +19,9 @@
#pragma once
#include <stdlib.h>
#include <memory>
#include <stdexcept>
#include <stdlib.h>
#include <string>
#include <vector>
@ -102,8 +102,8 @@ class FilterPolicy {
//
// Warning: do not change the initial contents of *dst. Instead,
// append the newly constructed filter to *dst.
virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
const = 0;
virtual void CreateFilter(const Slice* keys, int n,
std::string* dst) const = 0;
// "filter" contains the data appended by a preceding call to
// CreateFilter() on this class. This method must return true if
@ -114,9 +114,7 @@ class FilterPolicy {
// Get the FilterBitsBuilder, which is ONLY used for full filter block
// It contains interface to take individual key, then generate filter
virtual FilterBitsBuilder* GetFilterBitsBuilder() const {
return nullptr;
}
virtual FilterBitsBuilder* GetFilterBitsBuilder() const { return nullptr; }
// Get the FilterBitsReader, which is ONLY used for full filter block
// It contains interface to tell if key can be in filter
@ -147,4 +145,4 @@ class FilterPolicy {
// trailing spaces in keys.
extern const FilterPolicy* NewBloomFilterPolicy(
int bits_per_key, bool use_block_based_builder = false);
}
} // namespace rocksdb

@ -20,8 +20,7 @@ class FlushBlockPolicy {
public:
// Keep track of the key/value sequences and return the boolean value to
// determine if table builder should flush current data block.
virtual bool Update(const Slice& key,
const Slice& value) = 0;
virtual bool Update(const Slice& key, const Slice& value) = 0;
virtual ~FlushBlockPolicy() {}
};
@ -59,4 +58,4 @@ class FlushBlockBySizePolicyFactory : public FlushBlockPolicyFactory {
const BlockBuilder& data_block_builder);
};
} // rocksdb
} // namespace rocksdb

@ -192,8 +192,8 @@ struct FlushJobInfo {
struct CompactionJobInfo {
CompactionJobInfo() = default;
explicit CompactionJobInfo(const CompactionJobStats& _stats) :
stats(_stats) {}
explicit CompactionJobInfo(const CompactionJobStats& _stats)
: stats(_stats) {}
// the id of the column family where the compaction happened.
uint32_t cf_id;
@ -244,7 +244,6 @@ struct MemTableInfo {
uint64_t num_entries;
// Total number of deletes in memtable
uint64_t num_deletes;
};
struct ExternalFileIngestionInfo {
@ -324,8 +323,7 @@ class EventListener {
// Note that the this function must be implemented in a way such that
// it should not run for an extended period of time before the function
// returns. Otherwise, RocksDB may be blocked.
virtual void OnCompactionBegin(DB* /*db*/,
const CompactionJobInfo& /*ci*/) {}
virtual void OnCompactionBegin(DB* /*db*/, const CompactionJobInfo& /*ci*/) {}
// A callback function for RocksDB which will be called whenever
// a registered RocksDB compacts a file. The default implementation
@ -380,8 +378,7 @@ class EventListener {
// Note that if applications would like to use the passed reference
// outside this function call, they should make copies from these
// returned value.
virtual void OnMemTableSealed(
const MemTableInfo& /*info*/) {}
virtual void OnMemTableSealed(const MemTableInfo& /*info*/) {}
// A callback function for RocksDB which will be called before
// a column family handle is deleted.
@ -457,8 +454,7 @@ class EventListener {
#else
class EventListener {
};
class EventListener {};
#endif // ROCKSDB_LITE

@ -35,11 +35,11 @@
#pragma once
#include <memory>
#include <stdexcept>
#include <rocksdb/slice.h>
#include <stdint.h>
#include <stdlib.h>
#include <rocksdb/slice.h>
#include <memory>
#include <stdexcept>
namespace rocksdb {
@ -324,9 +324,7 @@ class VectorRepFactory : public MemTableRepFactory {
Allocator*, const SliceTransform*,
Logger* logger) override;
virtual const char* Name() const override {
return "VectorRepFactory";
}
virtual const char* Name() const override { return "VectorRepFactory"; }
};
// This class contains a fixed array of buckets, each
@ -337,8 +335,7 @@ class VectorRepFactory : public MemTableRepFactory {
// link lists in the skiplist
extern MemTableRepFactory* NewHashSkipListRepFactory(
size_t bucket_count = 1000000, int32_t skiplist_height = 4,
int32_t skiplist_branching_factor = 4
);
int32_t skiplist_branching_factor = 4);
// The factory is to create memtables based on a hash table:
// it contains a fixed array of buckets, each pointing to either a linked list

@ -239,13 +239,10 @@ class AssociativeMergeOperator : public MergeOperator {
// returns false, it is because client specified bad data or there was
// internal corruption. The client should assume that this will be treated
// as an error by the library.
virtual bool Merge(const Slice& key,
const Slice* existing_value,
const Slice& value,
std::string* new_value,
virtual bool Merge(const Slice& key, const Slice* existing_value,
const Slice& value, std::string* new_value,
Logger* logger) const = 0;
private:
// Default implementations of the MergeOperator functions
bool FullMergeV2(const MergeOperationInput& merge_in,

@ -22,8 +22,8 @@ struct SstFileMetaData;
struct ColumnFamilyMetaData {
ColumnFamilyMetaData() : size(0), file_count(0), name("") {}
ColumnFamilyMetaData(const std::string& _name, uint64_t _size,
const std::vector<LevelMetaData>&& _levels) :
size(_size), name(_name), levels(_levels) {}
const std::vector<LevelMetaData>&& _levels)
: size(_size), name(_name), levels(_levels) {}
// The size of this column family in bytes, which is equal to the sum of
// the file size of its "levels".
@ -39,9 +39,8 @@ struct ColumnFamilyMetaData {
// The metadata that describes a level.
struct LevelMetaData {
LevelMetaData(int _level, uint64_t _size,
const std::vector<SstFileMetaData>&& _files) :
level(_level), size(_size),
files(_files) {}
const std::vector<SstFileMetaData>&& _files)
: level(_level), size(_size), files(_files) {}
// The level which this meta data describes.
const int level;

@ -10,11 +10,11 @@
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <memory>
#include <vector>
#include <limits>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "rocksdb/advanced_options.h"
#include "rocksdb/comparator.h"
@ -94,8 +94,7 @@ struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions {
// an iterator, only Put() and Get() API calls
//
// Not supported in ROCKSDB_LITE
ColumnFamilyOptions* OptimizeForPointLookup(
uint64_t block_cache_size_mb);
ColumnFamilyOptions* OptimizeForPointLookup(uint64_t block_cache_size_mb);
// Default values for some parameters in ColumnFamilyOptions are not
// optimized for heavy workloads and big datasets, which means you might
@ -341,7 +340,6 @@ struct DbPath {
DbPath(const std::string& p, uint64_t t) : path(p), target_size(t) {}
};
struct DBOptions {
// The function recovers options to the option as in version 4.6.
DBOptions* OldDefaults(int rocksdb_major_version = 4,
@ -722,12 +720,7 @@ struct DBOptions {
// Specify the file access pattern once a compaction is started.
// It will be applied to all input files of a compaction.
// Default: NORMAL
enum AccessHint {
NONE,
NORMAL,
SEQUENTIAL,
WILLNEED
};
enum AccessHint { NONE, NORMAL, SEQUENTIAL, WILLNEED };
AccessHint access_hint_on_compaction_start = NORMAL;
// If true, always create a new file descriptor and new table reader
@ -782,7 +775,6 @@ struct DBOptions {
// Dynamically changeable through SetDBOptions() API.
size_t writable_file_max_buffer_size = 1024 * 1024;
// Use adaptive mutex, which spins in the user space before resorting
// to kernel. This could reduce context switch when the mutex is not
// heavily contended. However, if the mutex is hot, we could end up

@ -5,8 +5,8 @@
#pragma once
#include <map>
#include <stdint.h>
#include <map>
#include <string>
#include "rocksdb/perf_level.h"
@ -42,7 +42,6 @@ struct PerfContextByLevel {
};
struct PerfContext {
~PerfContext();
PerfContext() {}
@ -230,4 +229,4 @@ struct PerfContext {
// if defined(NPERF_CONTEXT), then the pointer is not thread-local
PerfContext* get_perf_context();
}
} // namespace rocksdb

@ -19,9 +19,9 @@
#pragma once
#include <assert.h>
#include <cstdio>
#include <stddef.h>
#include <string.h>
#include <cstdio>
#include <string>
#ifdef __cpp_lib_string_view
@ -52,9 +52,7 @@ class Slice {
// Create a slice that refers to s[0,strlen(s)-1]
/* implicit */
Slice(const char* s) : data_(s) {
size_ = (s == nullptr) ? 0 : strlen(s);
}
Slice(const char* s) : data_(s) { size_ = (s == nullptr) ? 0 : strlen(s); }
// Create a single slice from SliceParts using buf as storage.
// buf must exist as long as the returned Slice exists.
@ -77,7 +75,10 @@ class Slice {
}
// Change this slice to refer to an empty array
void clear() { data_ = ""; size_ = 0; }
void clear() {
data_ = "";
size_ = 0;
}
// Drop the first "n" bytes from this slice.
void remove_prefix(size_t n) {
@ -117,8 +118,7 @@ class Slice {
// Return true iff "x" is a prefix of "*this"
bool starts_with(const Slice& x) const {
return ((size_ >= x.size_) &&
(memcmp(data_, x.data_, x.size_) == 0));
return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
}
bool ends_with(const Slice& x) const {
@ -219,8 +219,8 @@ class PinnableSlice : public Slice, public Cleanable {
// A set of Slices that are virtually concatenated together. 'parts' points
// to an array of Slices. The number of elements in the array is 'num_parts'.
struct SliceParts {
SliceParts(const Slice* _parts, int _num_parts) :
parts(_parts), num_parts(_num_parts) { }
SliceParts(const Slice* _parts, int _num_parts)
: parts(_parts), num_parts(_num_parts) {}
SliceParts() : parts(nullptr), num_parts(0) {}
const Slice* parts;
@ -232,17 +232,17 @@ inline bool operator==(const Slice& x, const Slice& y) {
(memcmp(x.data(), y.data(), x.size()) == 0));
}
inline bool operator!=(const Slice& x, const Slice& y) {
return !(x == y);
}
inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
inline int Slice::compare(const Slice& b) const {
assert(data_ != nullptr && b.data_ != nullptr);
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
int r = memcmp(data_, b.data_, min_len);
if (r == 0) {
if (size_ < b.size_) r = -1;
else if (size_ > b.size_) r = +1;
if (size_ < b.size_)
r = -1;
else if (size_ > b.size_)
r = +1;
}
return r;
}

@ -98,4 +98,4 @@ extern const SliceTransform* NewCappedPrefixTransform(size_t cap_len);
extern const SliceTransform* NewNoopTransform();
}
} // namespace rocksdb

@ -77,8 +77,9 @@ class SstFileWriter {
// be ingested into this column_family, note that passing nullptr means that
// the column_family is unknown.
// If invalidate_page_cache is set to true, SstFileWriter will give the OS a
// hint that this file pages is not needed every time we write 1MB to the file.
// To use the rate limiter an io_priority smaller than IO_TOTAL can be passed.
// hint that this file pages is not needed every time we write 1MB to the
// file. To use the rate limiter an io_priority smaller than IO_TOTAL can be
// passed.
SstFileWriter(const EnvOptions& env_options, const Options& options,
ColumnFamilyHandle* column_family = nullptr,
bool invalidate_page_cache = true,

@ -495,9 +495,7 @@ class Statistics {
}
// Resets all ticker and histogram stats
virtual Status Reset() {
return Status::NotSupported("Not implemented");
}
virtual Status Reset() { return Status::NotSupported("Not implemented"); }
// String representation of the statistic object.
virtual std::string ToString() const {

@ -305,7 +305,8 @@ class Status {
static const char* CopyState(const char* s);
};
inline Status::Status(const Status& s) : code_(s.code_), subcode_(s.subcode_), sev_(s.sev_) {
inline Status::Status(const Status& s)
: code_(s.code_), subcode_(s.subcode_), sev_(s.sev_) {
state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
}
inline Status::Status(const Status& s, Severity sev)

@ -356,13 +356,13 @@ struct PlainTableOptions {
};
// -- Plain Table with prefix-only seek
// For this factory, you need to set Options.prefix_extractor properly to make it
// work. Look-up will starts with prefix hash lookup for key prefix. Inside the
// hash bucket found, a binary search is executed for hash conflicts. Finally,
// a linear search is used.
// For this factory, you need to set Options.prefix_extractor properly to make
// it work. Look-up will starts with prefix hash lookup for key prefix. Inside
// the hash bucket found, a binary search is executed for hash conflicts.
// Finally, a linear search is used.
extern TableFactory* NewPlainTableFactory(const PlainTableOptions& options =
PlainTableOptions());
extern TableFactory* NewPlainTableFactory(
const PlainTableOptions& options = PlainTableOptions());
struct CuckooTablePropertyNames {
// The key that is used to fill empty buckets.
@ -496,8 +496,7 @@ class TableFactory {
//
// If the function cannot find a way to sanitize the input DB Options,
// a non-ok Status will be returned.
virtual Status SanitizeOptions(
const DBOptions& db_opts,
virtual Status SanitizeOptions(const DBOptions& db_opts,
const ColumnFamilyOptions& cf_opts) const = 0;
// Return a string that contains printable format of table configurations.
@ -538,7 +537,8 @@ class TableFactory {
// @block_based_table_factory: block based table factory to use. If NULL, use
// a default one.
// @plain_table_factory: plain table factory to use. If NULL, use a default one.
// @cuckoo_table_factory: cuckoo table factory to use. If NULL, use a default one.
// @cuckoo_table_factory: cuckoo table factory to use. If NULL, use a default
// one.
extern TableFactory* NewAdaptiveTableFactory(
std::shared_ptr<TableFactory> table_factory_to_write = nullptr,
std::shared_ptr<TableFactory> block_based_table_factory = nullptr,

@ -20,8 +20,7 @@
#include <utility>
#include <vector>
#if !defined(ROCKSDB_LITE) && \
!defined(NROCKSDB_THREAD_STATUS) && \
#if !defined(ROCKSDB_LITE) && !defined(NROCKSDB_THREAD_STATUS) && \
defined(ROCKSDB_SUPPORT_THREAD_LOCAL)
#define ROCKSDB_USING_THREAD_STATUS
#endif
@ -105,16 +104,14 @@ struct ThreadStatus {
NUM_STATE_TYPES
};
ThreadStatus(const uint64_t _id,
const ThreadType _thread_type,
const std::string& _db_name,
const std::string& _cf_name,
ThreadStatus(const uint64_t _id, const ThreadType _thread_type,
const std::string& _db_name, const std::string& _cf_name,
const OperationType _operation_type,
const uint64_t _op_elapsed_micros,
const OperationStage _operation_stage,
const uint64_t _op_props[],
const StateType _state_type) :
thread_id(_id), thread_type(_thread_type),
const uint64_t _op_props[], const StateType _state_type)
: thread_id(_id),
thread_type(_thread_type),
db_name(_db_name),
cf_name(_cf_name),
operation_type(_operation_type),
@ -172,23 +169,20 @@ struct ThreadStatus {
static const std::string MicrosToString(uint64_t op_elapsed_time);
// Obtain a human-readable string describing the specified operation stage.
static const std::string& GetOperationStageName(
OperationStage stage);
static const std::string& GetOperationStageName(OperationStage stage);
// Obtain the name of the "i"th operation property of the
// specified operation.
static const std::string& GetOperationPropertyName(
OperationType op_type, int i);
static const std::string& GetOperationPropertyName(OperationType op_type,
int i);
// Translate the "i"th property of the specified operation given
// a property value.
static std::map<std::string, uint64_t>
InterpretOperationProperties(
static std::map<std::string, uint64_t> InterpretOperationProperties(
OperationType op_type, const uint64_t* op_properties);
// Obtain the name of a state given its type.
static const std::string& GetStateName(StateType state_type);
};
} // namespace rocksdb

@ -47,7 +47,6 @@ class ThreadPool {
virtual void SubmitJob(const std::function<void()>&) = 0;
// This moves the function in for efficiency
virtual void SubmitJob(std::function<void()>&&) = 0;
};
// NewThreadPool() is a function that could be used to create a ThreadPool

@ -5,11 +5,11 @@
#pragma once
#include <memory>
#include <vector>
#include "rocksdb/status.h"
#include "rocksdb/types.h"
#include "rocksdb/write_batch.h"
#include <memory>
#include <vector>
namespace rocksdb {
@ -39,7 +39,6 @@ class LogFile {
// For an archived-log-file = /archive/000003.log
virtual std::string PathName() const = 0;
// Primary identifier for log file.
// This is directly proportional to creation time of the log file
virtual uint64_t LogNumber() const = 0;

@ -32,9 +32,7 @@ struct FullKey {
SequenceNumber sequence;
EntryType type;
FullKey()
: sequence(0)
{} // Intentionally left uninitialized (for speed)
FullKey() : sequence(0) {} // Intentionally left uninitialized (for speed)
FullKey(const Slice& u, const SequenceNumber& seq, EntryType t)
: user_key(u), sequence(seq), type(t) {}
std::string DebugString(bool hex = false) const;

@ -22,7 +22,6 @@ enum CompactionStopStyle {
class CompactionOptionsUniversal {
public:
// Percentage flexibility while comparing file size. If the candidate file(s)
// size is 1% smaller than the next file's size, then include next file into
// this candidate set. // Default: 1

@ -15,10 +15,10 @@
#endif
#include <inttypes.h>
#include <string>
#include <functional>
#include <map>
#include <string>
#include <vector>
#include <functional>
#include "rocksdb/utilities/stackable_db.h"
@ -257,8 +257,7 @@ class BackupEngine {
// BackupableDBOptions have to be the same as the ones used in previous
// BackupEngines for the same backup directory.
static Status Open(Env* db_env,
const BackupableDBOptions& options,
static Status Open(Env* db_env, const BackupableDBOptions& options,
BackupEngine** backup_engine_ptr);
// same as CreateNewBackup, but stores extra application metadata

@ -9,8 +9,8 @@
#include <string>
#include <vector>
#include "rocksdb/utilities/stackable_db.h"
#include "rocksdb/db.h"
#include "rocksdb/utilities/stackable_db.h"
namespace rocksdb {

@ -172,4 +172,4 @@ class EnvLibrados : public EnvWrapper {
librados::IoCtx* _GetIoctx(const std::string& prefix);
friend class LibradosWritableFile;
};
}
} // namespace rocksdb

@ -19,8 +19,8 @@
#ifndef ROCKSDB_LITE
#include <iostream>
#include <algorithm>
#include <iostream>
#include <vector>
#include "rocksdb/env.h"
@ -36,16 +36,10 @@ class EnvMirror : public EnvWrapper {
public:
EnvMirror(Env* a, Env* b, bool free_a = false, bool free_b = false)
: EnvWrapper(a),
a_(a),
b_(b),
free_a_(free_a),
free_b_(free_b) {}
: EnvWrapper(a), a_(a), b_(b), free_a_(free_a), free_b_(free_b) {}
~EnvMirror() {
if (free_a_)
delete a_;
if (free_b_)
delete b_;
if (free_a_) delete a_;
if (free_b_) delete b_;
}
Status NewSequentialFile(const std::string& f,

@ -14,13 +14,15 @@ namespace rocksdb {
class LDBCommandExecuteResult {
public:
enum State {
EXEC_NOT_STARTED = 0, EXEC_SUCCEED = 1, EXEC_FAILED = 2,
EXEC_NOT_STARTED = 0,
EXEC_SUCCEED = 1,
EXEC_FAILED = 2,
};
LDBCommandExecuteResult() : state_(EXEC_NOT_STARTED), message_("") {}
LDBCommandExecuteResult(State state, std::string& msg) :
state_(state), message_(msg) {}
LDBCommandExecuteResult(State state, std::string& msg)
: state_(state), message_(msg) {}
std::string ToString() {
std::string ret;
@ -44,17 +46,11 @@ public:
message_ = "";
}
bool IsSucceed() {
return state_ == EXEC_SUCCEED;
}
bool IsSucceed() { return state_ == EXEC_SUCCEED; }
bool IsNotStarted() {
return state_ == EXEC_NOT_STARTED;
}
bool IsNotStarted() { return state_ == EXEC_NOT_STARTED; }
bool IsFailed() {
return state_ == EXEC_FAILED;
}
bool IsFailed() { return state_ == EXEC_FAILED; }
static LDBCommandExecuteResult Succeed(std::string msg) {
return LDBCommandExecuteResult(EXEC_SUCCEED, msg);
@ -72,4 +68,4 @@ private:
bool operator!=(const LDBCommandExecuteResult&);
};
}
} // namespace rocksdb

@ -73,7 +73,8 @@ class SimCache : public Cache {
// stop logging to the file automatically after reaching a specific size in
// bytes, a values of 0 disable this feature
virtual Status StartActivityLogging(const std::string& activity_log_file,
Env* env, uint64_t max_logging_size = 0) = 0;
Env* env,
uint64_t max_logging_size = 0) = 0;
// Stop cache activity logging if any
virtual void StopActivityLogging() = 0;

@ -13,7 +13,6 @@
#undef DeleteFile
#endif
namespace rocksdb {
// This class contains APIs to stack rocksdb wrappers.Eg. Stack TTL over base d
@ -37,9 +36,7 @@ class StackableDB : public DB {
virtual Status Close() override { return db_->Close(); }
virtual DB* GetBaseDB() {
return db_;
}
virtual DB* GetBaseDB() { return db_; }
virtual DB* GetRootDB() override { return db_->GetRootDB(); }
@ -144,9 +141,7 @@ class StackableDB : public DB {
return db_->Merge(options, column_family, key, value);
}
virtual Status Write(const WriteOptions& opts, WriteBatch* updates)
override {
virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override {
return db_->Write(opts, updates);
}
@ -163,10 +158,7 @@ class StackableDB : public DB {
return db_->NewIterators(options, column_families, iterators);
}
virtual const Snapshot* GetSnapshot() override {
return db_->GetSnapshot();
}
virtual const Snapshot* GetSnapshot() override { return db_->GetSnapshot(); }
virtual void ReleaseSnapshot(const Snapshot* snapshot) override {
return db_->ReleaseSnapshot(snapshot);
@ -197,12 +189,10 @@ class StackableDB : public DB {
}
using DB::GetApproximateSizes;
virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
const Range* r, int n, uint64_t* sizes,
uint8_t include_flags
= INCLUDE_FILES) override {
return db_->GetApproximateSizes(column_family, r, n, sizes,
include_flags);
virtual void GetApproximateSizes(
ColumnFamilyHandle* column_family, const Range* r, int n, uint64_t* sizes,
uint8_t include_flags = INCLUDE_FILES) override {
return db_->GetApproximateSizes(column_family, r, n, sizes, include_flags);
}
using DB::GetApproximateMemTableStats;
@ -251,24 +241,20 @@ class StackableDB : public DB {
}
using DB::MaxMemCompactionLevel;
virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family)
override {
virtual int MaxMemCompactionLevel(
ColumnFamilyHandle* column_family) override {
return db_->MaxMemCompactionLevel(column_family);
}
using DB::Level0StopWriteTrigger;
virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family)
override {
virtual int Level0StopWriteTrigger(
ColumnFamilyHandle* column_family) override {
return db_->Level0StopWriteTrigger(column_family);
}
virtual const std::string& GetName() const override {
return db_->GetName();
}
virtual const std::string& GetName() const override { return db_->GetName(); }
virtual Env* GetEnv() const override {
return db_->GetEnv();
}
virtual Env* GetEnv() const override { return db_->GetEnv(); }
using DB::GetOptions;
virtual Options GetOptions(ColumnFamilyHandle* column_family) const override {
@ -291,9 +277,7 @@ class StackableDB : public DB {
return db_->Flush(fopts, column_families);
}
virtual Status SyncWAL() override {
return db_->SyncWAL();
}
virtual Status SyncWAL() override { return db_->SyncWAL(); }
virtual Status FlushWAL(bool sync) override { return db_->FlushWAL(sync); }
@ -312,8 +296,7 @@ class StackableDB : public DB {
db_->GetLiveFilesMetaData(metadata);
}
virtual void GetColumnFamilyMetaData(
ColumnFamilyHandle *column_family,
virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* column_family,
ColumnFamilyMetaData* cf_meta) override {
db_->GetColumnFamilyMetaData(column_family, cf_meta);
}
@ -329,7 +312,8 @@ class StackableDB : public DB {
return db_->GetLatestSequenceNumber();
}
virtual bool SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) override {
virtual bool SetPreserveDeletesSequenceNumber(
SequenceNumber seqnum) override {
return db_->SetPreserveDeletesSequenceNumber(seqnum);
}

@ -40,8 +40,7 @@ class CompactOnDeletionCollectorFactory
private:
friend std::shared_ptr<CompactOnDeletionCollectorFactory>
NewCompactOnDeletionCollectorFactory(
size_t sliding_window_size,
NewCompactOnDeletionCollectorFactory(size_t sliding_window_size,
size_t deletion_trigger);
// A factory of a table property collector that marks a SST
// file as need-compaction when it observe at least "D" deletion
@ -49,10 +48,9 @@ class CompactOnDeletionCollectorFactory
//
// @param sliding_window_size "N"
// @param deletion_trigger "D"
CompactOnDeletionCollectorFactory(
size_t sliding_window_size,
size_t deletion_trigger) :
sliding_window_size_(sliding_window_size),
CompactOnDeletionCollectorFactory(size_t sliding_window_size,
size_t deletion_trigger)
: sliding_window_size_(sliding_window_size),
deletion_trigger_(deletion_trigger) {}
std::atomic<size_t> sliding_window_size_;
@ -69,8 +67,7 @@ class CompactOnDeletionCollectorFactory
// @param deletion_trigger "D". Note that even when "N" is changed,
// the specified number for "D" will not be changed.
extern std::shared_ptr<CompactOnDeletionCollectorFactory>
NewCompactOnDeletionCollectorFactory(
size_t sliding_window_size,
NewCompactOnDeletionCollectorFactory(size_t sliding_window_size,
size_t deletion_trigger);
} // namespace rocksdb

@ -127,7 +127,6 @@ struct TransactionOptions {
// return 0 if
// a.compare(b) returns 0.
// If positive, specifies the wait timeout in milliseconds when
// a transaction attempts to lock a key.
//

@ -4,12 +4,12 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <vector>
#include <string>
#include <vector>
#include "rocksdb/utilities/stackable_db.h"
#include "rocksdb/utilities/db_ttl.h"
#include "rocksdb/db.h"
#include "rocksdb/utilities/db_ttl.h"
#include "rocksdb/utilities/stackable_db.h"
namespace rocksdb {
@ -24,11 +24,9 @@ class UtilityDB {
#elif _WIN32
__declspec(deprecated)
#endif
static Status OpenTtlDB(const Options& options,
const std::string& name,
StackableDB** dbptr,
int32_t ttl = 0,
bool read_only = false);
static Status
OpenTtlDB(const Options& options, const std::string& name,
StackableDB** dbptr, int32_t ttl = 0, bool read_only = false);
};
} // namespace rocksdb

@ -5,8 +5,8 @@
#pragma once
#include <string>
#include <map>
#include <string>
namespace rocksdb {

@ -24,10 +24,10 @@
#pragma once
#include <stdint.h>
#include <atomic>
#include <stack>
#include <string>
#include <stdint.h>
#include "rocksdb/status.h"
#include "rocksdb/write_batch_base.h"
