Fix all the lint errors.

Summary:
Scripted the removal of all trailing spaces and converted all tabs to
spaces.

Also fixed other lint errors.
Any lint errors introduced from this point on should be taken seriously.
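
For reference, this kind of cleanup can be scripted along these lines; the
actual script is not part of the commit, so the following is only an
illustrative sketch:

    # Illustrative sketch only; the script used for this commit is not included.
    # Strip trailing whitespace, then expand tabs to two spaces, in tracked sources.
    for f in $(git ls-files '*.cc' '*.h'); do
      sed -i 's/[ \t]*$//' "$f"                        # drop trailing spaces/tabs
      expand -t 2 "$f" > "$f.tmp" && mv "$f.tmp" "$f"  # tabs -> spaces
    done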

Test Plan: make all check

Reviewers: dhruba

Reviewed By: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D7059
main
Abhishek Kona 12 years ago
parent 9b838535d1
commit d29f181923
  1. db/c.cc (28 changed lines)
  2. db/db_bench.cc (14 changed lines)
  3. db/db_filesnapshot.cc (14 changed lines)
  4. db/db_impl.cc (26 changed lines)
  5. db/db_impl.h (2 changed lines)
  6. db/db_test.cc (8 changed lines)
  7. db/memtable.cc (2 changed lines)
  8. db/memtable.h (2 changed lines)
  9. db/memtablelist.cc (16 changed lines)
  10. db/memtablelist.h (10 changed lines)
  11. db/version_edit.h (6 changed lines)
  12. db/version_set.cc (50 changed lines)
  13. db/version_set.h (10 changed lines)
  14. include/leveldb/options.h (16 changed lines)
  15. include/leveldb/table_builder.h (2 changed lines)
  16. util/bloom.cc (2 changed lines)
  17. util/crc32c.cc (2 changed lines)
  18. util/env_hdfs.cc (20 changed lines)
  19. util/env_posix.cc (6 changed lines)
  20. util/options.cc (2 changed lines)

@@ -445,7 +445,7 @@ void leveldb_options_set_block_restart_interval(leveldb_options_t* opt, int n) {
}
void leveldb_options_set_target_file_size_base(
-leveldb_options_t* opt, uint64_t n) {
+leveldb_options_t* opt, uint64_t n) {
opt->rep.target_file_size_base = n;
}
@@ -455,47 +455,47 @@ void leveldb_options_set_target_file_size_multiplier(
}
void leveldb_options_set_max_bytes_for_level_base(
-leveldb_options_t* opt, uint64_t n) {
+leveldb_options_t* opt, uint64_t n) {
opt->rep.max_bytes_for_level_base = n;
}
void leveldb_options_set_max_bytes_for_level_multiplier(
-leveldb_options_t* opt, int n) {
+leveldb_options_t* opt, int n) {
opt->rep.max_bytes_for_level_multiplier = n;
}
void leveldb_options_set_expanded_compaction_factor(
-leveldb_options_t* opt, int n) {
+leveldb_options_t* opt, int n) {
opt->rep.expanded_compaction_factor = n;
}
void leveldb_options_set_max_grandparent_overlap_factor(
-leveldb_options_t* opt, int n) {
+leveldb_options_t* opt, int n) {
opt->rep.max_grandparent_overlap_factor = n;
}
void leveldb_options_set_num_levels(leveldb_options_t* opt, int n) {
-opt->rep.num_levels = n;
+opt->rep.num_levels = n;
}
void leveldb_options_set_level0_file_num_compaction_trigger(
-leveldb_options_t* opt, int n) {
-opt->rep.level0_file_num_compaction_trigger = n;
+leveldb_options_t* opt, int n) {
+opt->rep.level0_file_num_compaction_trigger = n;
}
void leveldb_options_set_level0_slowdown_writes_trigger(
-leveldb_options_t* opt, int n) {
-opt->rep.level0_slowdown_writes_trigger = n;
+leveldb_options_t* opt, int n) {
+opt->rep.level0_slowdown_writes_trigger = n;
}
void leveldb_options_set_level0_stop_writes_trigger(
-leveldb_options_t* opt, int n) {
-opt->rep.level0_stop_writes_trigger = n;
+leveldb_options_t* opt, int n) {
+opt->rep.level0_stop_writes_trigger = n;
}
void leveldb_options_set_max_mem_compaction_level(
-leveldb_options_t* opt, int n) {
-opt->rep.max_mem_compaction_level = n;
+leveldb_options_t* opt, int n) {
+opt->rep.max_mem_compaction_level = n;
}
void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
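
For context, the C-binding setters touched above are plain field assignments on
the wrapped options struct; a minimal sketch of how a client drives them (only
the setters shown in this hunk, plus leveldb_options_create/destroy from the
same C API, are assumed):

    #include "leveldb/c.h"

    int main() {
      leveldb_options_t* opt = leveldb_options_create();
      leveldb_options_set_num_levels(opt, 7);
      leveldb_options_set_target_file_size_base(opt, 2 * 1048576);
      leveldb_options_set_level0_file_num_compaction_trigger(opt, 4);
      leveldb_options_set_level0_slowdown_writes_trigger(opt, 8);
      leveldb_options_set_level0_stop_writes_trigger(opt, 12);
      /* ... open the DB with these options, then clean up ... */
      leveldb_options_destroy(opt);
      return 0;
    }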

@@ -87,7 +87,7 @@ static bool FLAGS_histogram = false;
// (initialized to default value by "main")
static int FLAGS_write_buffer_size = 0;
-// The number of in-memory memtables.
+// The number of in-memory memtables.
// Each memtable is of size FLAGS_write_buffer_size.
// This is initialized to default value of 2 in "main" function.
static int FLAGS_max_write_buffer_number = 0;
@@ -452,7 +452,7 @@ struct ThreadState {
Stats stats;
SharedState* shared;
-ThreadState(int index)
+/* implicit */ ThreadState(int index)
: tid(index),
rand(1000 + index) {
}
@@ -979,7 +979,7 @@ class Benchmark {
FLAGS_delete_obsolete_files_period_micros;
options.rate_limit = FLAGS_rate_limit;
options.table_cache_numshardbits = FLAGS_table_cache_numshardbits;
-options.max_grandparent_overlap_factor =
+options.max_grandparent_overlap_factor =
FLAGS_max_grandparent_overlap_factor;
options.disable_auto_compactions = FLAGS_disable_auto_compactions;
options.source_compaction_factor = FLAGS_source_compaction_factor;
@@ -1272,7 +1272,7 @@ int main(int argc, char** argv) {
FLAGS_write_buffer_size = leveldb::Options().write_buffer_size;
FLAGS_max_write_buffer_number = leveldb::Options().max_write_buffer_number;
FLAGS_open_files = leveldb::Options().max_open_files;
-FLAGS_max_background_compactions =
+FLAGS_max_background_compactions =
leveldb::Options().max_background_compactions;
// Compression test code above refers to FLAGS_block_size
FLAGS_block_size = leveldb::Options().block_size;
@@ -1320,7 +1320,7 @@ int main(int argc, char** argv) {
exit(1);
}
} else if (sscanf(argv[i], "--table_cache_numshardbits=%d%c",
-&n, &junk) == 1) {
+&n, &junk) == 1) {
if (n <= 0 || n > 20) {
fprintf(stderr, "The cache cannot be sharded into 2**%d pieces\n", n);
exit(1);
@@ -1433,10 +1433,10 @@ int main(int argc, char** argv) {
} else if (sscanf(argv[i], "--max_grandparent_overlap_factor=%d%c",
&n, &junk) == 1) {
FLAGS_max_grandparent_overlap_factor = n;
-} else if (sscanf(argv[i], "--disable_auto_compactions=%d%c",
+} else if (sscanf(argv[i], "--disable_auto_compactions=%d%c",
&n, &junk) == 1 && (n == 0 || n ==1)) {
FLAGS_disable_auto_compactions = n;
-} else if (sscanf(argv[i], "--source_compaction_factor=%d%c",
+} else if (sscanf(argv[i], "--source_compaction_factor=%d%c",
&n, &junk) == 1 && n > 0) {
FLAGS_source_compaction_factor = n;
} else {
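
The sscanf pattern above uses a trailing %c to reject trailing garbage: the
call converts exactly one item only when the number is the last thing in the
argument. A standalone sketch of the same idiom (the flag name here is
hypothetical):

    #include <cstdio>

    // Returns true and stores the parsed value iff arg is exactly
    // "--my_flag=<int>". The trailing %c only matches when extra characters
    // follow the number, so a clean parse converts exactly one item.
    static bool ParseIntFlag(const char* arg, int* value) {
      int n;
      char junk;
      if (std::sscanf(arg, "--my_flag=%d%c", &n, &junk) == 1) {
        *value = n;
        return true;
      }
      return false;
    }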

@@ -1,6 +1,6 @@
// Copyright (c) 2012 Facebook.
// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+// found in the LICENSE file.
#include "db/db_impl.h"
#include "db/filename.h"
@@ -16,17 +16,17 @@ namespace leveldb {
Status DBImpl::DisableFileDeletions() {
MutexLock l(&mutex_);
-disable_delete_obsolete_files_ = true;
+disable_delete_obsolete_files_ = true;
return Status::OK();
}
Status DBImpl::EnableFileDeletions() {
MutexLock l(&mutex_);
-disable_delete_obsolete_files_ = false;
+disable_delete_obsolete_files_ = false;
return Status::OK();
}
-Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
+Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
uint64_t* manifest_file_size) {
*manifest_file_size = 0;
@@ -34,7 +34,7 @@ Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
// flush all dirty data to disk.
Status status = Flush(FlushOptions());
if (!status.ok()) {
-Log(options_.info_log, "Cannot Flush data %s\n",
+Log(options_.info_log, "Cannot Flush data %s\n",
status.ToString().c_str());
return status;
}
@@ -42,7 +42,7 @@ Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
MutexLock l(&mutex_);
// Make a set of all of the live *.sst files
-std::set<uint64_t> live;
+std::set<uint64_t> live;
versions_->AddLiveFilesCurrentVersion(&live);
ret.resize(live.size() + 2); //*.sst + CURRENT + MANIFEST
@@ -55,7 +55,7 @@ Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
}
ret[live.size()] = CurrentFileName("");
-ret[live.size()+1] = DescriptorFileName("",
+ret[live.size()+1] = DescriptorFileName("",
versions_->ManifestFileNumber());
// find length of manifest file while holding the mutex lock
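
GetLiveFiles is the backup hook touched here: it flushes, then returns every
*.sst file plus CURRENT and the MANIFEST, along with the manifest's current
length. A rough sketch of the intended backup sequence (assuming a db handle
that exposes the three methods in this file):

    // Hypothetical backup loop around the API above.
    std::vector<std::string> files;
    uint64_t manifest_size = 0;
    db->DisableFileDeletions();                    // keep live files on disk
    leveldb::Status s = db->GetLiveFiles(files, &manifest_size);
    if (s.ok()) {
      for (size_t i = 0; i < files.size(); i++) {
        // Copy files[i]; truncate the MANIFEST copy to manifest_size, since
        // the manifest can keep growing while the copy is in progress.
      }
    }
    db->EnableFileDeletions();                     // resume file garbage collection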

@@ -265,7 +265,7 @@ DBImpl::~DBImpl() {
delete logger_;
}
-// Do not flush and close database elegantly. Simulate a crash.
+// Do not flush and close database elegantly. Simulate a crash.
void DBImpl::TEST_Destroy_DBImpl() {
// ensure that no new memtable flushes can occur
flush_on_destroy_ = false;
@@ -718,7 +718,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
// created file might not be considered as a live-file by another
// compaction thread that is concurrently deleting obselete files.
// The pending_outputs can be cleared only after the new version is
-// committed so that other threads can recognize this file as a
+// committed so that other threads can recognize this file as a
// valid one.
// pending_outputs_.erase(meta.number);
@@ -778,7 +778,7 @@ Status DBImpl::CompactMemTable(bool* madeProgress) {
}
// Replace immutable memtable with the generated Table
-s = imm_.InstallMemtableFlushResults(m, versions_, s, &mutex_,
+s = imm_.InstallMemtableFlushResults(m, versions_, s, &mutex_,
options_.info_log, file_number, pending_outputs_);
if (s.ok()) {
@@ -851,10 +851,10 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
MutexLock l(&mutex_);
-// When a manual compaction arrives, temporarily throttle down
-// the number of background compaction threads to 1. This is
-// needed to ensure that this manual compaction can compact
-// any range of keys/files. We artificialy increase
+// When a manual compaction arrives, temporarily throttle down
+// the number of background compaction threads to 1. This is
+// needed to ensure that this manual compaction can compact
+// any range of keys/files. We artificialy increase
// bg_compaction_scheduled_ by a large number, this causes
// the system to have a single background thread. Now,
// this manual compaction can progress without stomping
@@ -987,7 +987,7 @@ void DBImpl::BackgroundCall() {
MaybeScheduleLogDBDeployStats();
// Previous compaction may have produced too many files in a level,
-// So reschedule another compaction if we made progress in the
+// So reschedule another compaction if we made progress in the
// last compaction.
if (madeProgress) {
MaybeScheduleCompaction();
@@ -995,13 +995,13 @@ void DBImpl::BackgroundCall() {
bg_cv_.SignalAll();
}
-Status DBImpl::BackgroundCompaction(bool* madeProgress,
+Status DBImpl::BackgroundCompaction(bool* madeProgress,
DeletionState& deletion_state) {
*madeProgress = false;
mutex_.AssertHeld();
while (imm_.IsFlushPending()) {
-Log(options_.info_log,
+Log(options_.info_log,
"BackgroundCompaction doing CompactMemTable, compaction slots available %d",
options_.max_background_compactions - bg_compaction_scheduled_);
Status stat = CompactMemTable(madeProgress);
@@ -1129,7 +1129,7 @@ void DBImpl::AllocateCompactionOutputFileNumbers(CompactionState* compact) {
// Frees up unused file number.
void DBImpl::ReleaseCompactionUnusedFileNumbers(CompactionState* compact) {
mutex_.AssertHeld();
-for (std::list<uint64_t>::iterator it =
+for (std::list<uint64_t>::iterator it =
compact->allocated_file_numbers.begin();
it != compact->allocated_file_numbers.end(); ++it) {
uint64_t file_number = *it;
@@ -1291,7 +1291,7 @@ inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
Status DBImpl::DoCompactionWork(CompactionState* compact) {
int64_t imm_micros = 0; // Micros spent doing imm_ compactions
-Log(options_.info_log,
+Log(options_.info_log,
"Compacting %d@%d + %d@%d files, compaction slots available %d",
compact->compaction->num_input_files(0),
compact->compaction->level(),
@@ -1877,7 +1877,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
if (!s.ok()) {
// Avoid chewing through file number space in a tight loop.
-versions_->ReuseFileNumber(new_log_number);
+versions_->ReuseFileNumber(new_log_number);
break;
}
delete log_;

@@ -158,7 +158,7 @@ protected:
Status InstallCompactionResults(CompactionState* compact);
void AllocateCompactionOutputFileNumbers(CompactionState* compact);
void ReleaseCompactionUnusedFileNumbers(CompactionState* compact);
// Returns the list of live files in 'live' and the list
// of all files in the filesystem in 'allfiles'.

@@ -1703,7 +1703,7 @@ TEST(DBTest, DeletionMarkers2) {
TEST(DBTest, OverlapInLevel0) {
do {
-int tmp = dbfull()->MaxMemCompactionLevel();
+int tmp = dbfull()->MaxMemCompactionLevel();
ASSERT_EQ(tmp, 2) << "Fix test to match config";
// Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
@@ -2429,17 +2429,17 @@ class ModelDB: public DB {
virtual int NumberLevels()
{
-return 1;
+return 1;
}
virtual int MaxMemCompactionLevel()
{
-return 1;
+return 1;
}
virtual int Level0StopWriteTrigger()
{
-return -1;
+return -1;
}
virtual Status Flush(const leveldb::FlushOptions& options) {

@@ -24,7 +24,7 @@ MemTable::MemTable(const InternalKeyComparator& cmp, int numlevel)
table_(comparator_, &arena_),
flush_in_progress_(false),
flush_completed_(false),
-file_number_(0),
+file_number_(0),
edit_(numlevel) {
}

@@ -88,7 +88,7 @@ class MemTable {
Table table_;
// These are used to manage memtable flushes to storage
-bool flush_in_progress_; // started the flush
+bool flush_in_progress_; // started the flush
bool flush_completed_; // finished the flush
uint64_t file_number_; // filled up after flush is complete

@@ -1,9 +1,11 @@
// Copyright (c) 2012 Facebook.
+#include "db/memtablelist.h"
#include <string>
#include "leveldb/db.h"
#include "db/memtable.h"
-#include "db/memtablelist.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "util/coding.h"
@@ -57,7 +59,7 @@ MemTable* MemTableList::PickMemtableToFlush() {
MemTable* m = *it;
if (!m->flush_in_progress_) {
assert(!m->flush_completed_);
-num_flush_not_started_--;
+num_flush_not_started_--;
if (num_flush_not_started_ == 0) {
imm_flush_needed.Release_Store(NULL);
}
@@ -110,11 +112,11 @@ Status MemTableList::InstallMemtableFlushResults(MemTable* m,
if (!m->flush_completed_) {
break;
}
-Log(info_log,
+Log(info_log,
"Level-0 commit table #%llu: started",
(unsigned long long)m->file_number_);
-// this can release and reacquire the mutex.
+// this can release and reacquire the mutex.
s = vset->LogAndApply(&m->edit_, mu);
if (s.ok()) { // commit new state
@@ -149,11 +151,11 @@ Status MemTableList::InstallMemtableFlushResults(MemTable* m,
return s;
}
-// New memtables are inserted at the front of the list.
+// New memtables are inserted at the front of the list.
void MemTableList::Add(MemTable* m) {
assert(size_ >= num_flush_not_started_);
-size_++;
-memlist_.push_front(m);
+size_++;
+memlist_.push_front(m);
num_flush_not_started_++;
if (num_flush_not_started_ == 1) {
imm_flush_needed.Release_Store((void *)1);

@@ -19,15 +19,15 @@ class MemTableListIterator;
//
// This class stores refeernces to all the immutable memtables.
-// The memtables are flushed to L0 as soon as possible and in
+// The memtables are flushed to L0 as soon as possible and in
// any order. If there are more than one immutable memtable, their
-// flushes can occur concurrently. However, they are 'committed'
+// flushes can occur concurrently. However, they are 'committed'
// to the manifest in FIFO order to maintain correctness and
// recoverability from a crash.
//
class MemTableList {
public:
-// A list of memtables.
+// A list of memtables.
MemTableList() : size_(0), num_flush_not_started_(0),
commit_in_progress_(false) {
imm_flush_needed.Release_Store(NULL);
@@ -62,7 +62,7 @@ class MemTableList {
uint64_t file_number,
std::set<uint64_t>& pending_outputs);
-// New memtables are inserted at the front of the list.
+// New memtables are inserted at the front of the list.
void Add(MemTable* m);
// Returns an estimate of the number of bytes of data in use.
@@ -72,7 +72,7 @@ class MemTableList {
// Return the most recent value found, if any.
bool Get(const LookupKey& key, std::string* value, Status* s);
-// Returns the list of underlying memtables.
+// Returns the list of underlying memtables.
void GetMemTables(std::vector<MemTable*>* list);
// Copying allowed
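
The FIFO-commit contract in the comments above can be shown with a toy version
of the commit step (a sketch, not the class's actual code): flushes may finish
in any order, but results are installed only from the oldest end of the list,
stopping at the first memtable whose flush is still running.

    #include <list>

    struct MemTableStub {
      bool flush_completed;  // set by a concurrent flush thread
    };

    // Commit finished flushes strictly in FIFO order. New memtables are pushed
    // at the front, so the oldest live at the back; stop at the first (oldest)
    // unfinished flush even if newer memtables are already done.
    void CommitCompletedFlushes(std::list<MemTableStub*>& memlist) {
      while (!memlist.empty() && memlist.back()->flush_completed) {
        // e.g. LogAndApply the memtable's edit to the manifest here.
        memlist.pop_back();
      }
    }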

@@ -29,9 +29,9 @@ struct FileMetaData {
class VersionEdit {
public:
-VersionEdit(int number_levels) :
-number_levels_(number_levels) {
-Clear();
+/* implicit */ VersionEdit(int number_levels) :
+number_levels_(number_levels) {
+Clear();
}
~VersionEdit() { }

@@ -456,7 +456,7 @@ int Version::PickLevelForMemTableOutput(
}
// Store in "*inputs" all files in "level" that overlap [begin,end]
-// If hint_index is specified, then it points to a file in the
+// If hint_index is specified, then it points to a file in the
// overlapping range.
// The file_index returns a pointer to any file in an overlapping range.
void Version::GetOverlappingInputs(
@@ -551,7 +551,7 @@ void Version::GetOverlappingInputsBinarySearch(
break;
}
}
// If there were no overlapping files, return immediately.
if (!foundOverlap) {
return;
@@ -562,7 +562,7 @@ void Version::GetOverlappingInputsBinarySearch(
}
ExtendOverlappingInputs(level, user_begin, user_end, inputs, mid);
}
// Store in "*inputs" all files in "level" that overlap [begin,end]
// The midIndex specifies the index of at least one file that
// overlaps the specified range. From that file, iterate backward
@@ -646,8 +646,8 @@ struct VersionSet::ManifestWriter {
bool done;
port::CondVar cv;
VersionEdit* edit;
-explicit ManifestWriter(port::Mutex* mu, VersionEdit* e) :
+explicit ManifestWriter(port::Mutex* mu, VersionEdit* e) :
done(false), cv(mu), edit(e) {}
};
@@ -966,7 +966,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
if (w.done) {
return w.status;
}
std::vector<VersionEdit*> batch_edits;
Version* v = new Version(this, current_version_number_++);
Builder builder(this, current_);
@@ -978,7 +978,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
std::deque<ManifestWriter*>::iterator iter = manifest_writers_.begin();
for (; iter != manifest_writers_.end(); ++iter) {
last_writer = *iter;
-LogAndApplyHelper(&builder, v, last_writer->edit, mu);
+LogAndApplyHelper(&builder, v, last_writer->edit, mu);
batch_edits.push_back(last_writer->edit);
}
builder.SaveTo(v);
@@ -1038,7 +1038,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
// find offset in manifest file where this version is stored.
new_manifest_file_size = descriptor_file_->GetFileSize();
mu->Lock();
}
@@ -1050,7 +1050,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
prev_log_number_ = edit->prev_log_number_;
} else {
-Log(options_->info_log, "Error in committing version %ld",
+Log(options_->info_log, "Error in committing version %ld",
v->GetVersionNumber());
delete v;
if (!new_manifest_file.empty()) {
@@ -1279,7 +1279,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
// Write out each individual edit
if (verbose) {
-printf("*************************Edit[%d] = %s\n",
+printf("*************************Edit[%d] = %s\n",
count, edit.DebugString().c_str());
}
count++;
@@ -1433,7 +1433,7 @@ void VersionSet::Finalize(Version* v) {
}
// a static compator used to sort files based on their size
-static bool compareSize(const VersionSet::Fsize& first,
+static bool compareSize(const VersionSet::Fsize& first,
const VersionSet::Fsize& second) {
return (first.file->file_size > second.file->file_size);
}
@@ -1690,11 +1690,11 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
}
double VersionSet::MaxBytesForLevel(int level) {
-// Note: the result for level zero is not really used since we set
-// the level-0 compaction threshold based on number of files.
+// Note: the result for level zero is not really used since we set
+// the level-0 compaction threshold based on number of files.
assert(level >= 0);
assert(level < NumberLevels());
-return level_max_bytes_[level];
+return level_max_bytes_[level];
}
uint64_t VersionSet::MaxFileSizeForLevel(int level) {
@@ -1715,7 +1715,7 @@ int64_t VersionSet::MaxGrandParentOverlapBytes(int level) {
return result;
}
-// verify that the files listed in this compaction are present
+// verify that the files listed in this compaction are present
// in the current version
bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
if (c->input_version_ != current_) {
@@ -1774,11 +1774,11 @@ void VersionSet::ReleaseCompactionFiles(Compaction* c, Status status) {
// The total size of files that are currently being compacted
uint64_t VersionSet::SizeBeingCompacted(int level) {
uint64_t total = 0;
-for (std::set<Compaction*>::iterator it =
+for (std::set<Compaction*>::iterator it =
compactions_in_progress_[level].begin();
it != compactions_in_progress_[level].end();
++it) {
-Compaction* c = (*it);
+Compaction* c = (*it);
assert(c->level() == level);
for (int i = 0; i < c->num_input_files(0); i++) {
total += c->input(0,i)->file_size;
@@ -1838,7 +1838,7 @@ Compaction* VersionSet::PickCompactionBySize(int level) {
// Do not pick this file if its parents at level+1 are being compacted.
// Maybe we can avoid redoing this work in SetupOtherInputs
int parent_index = -1;
-if (ParentRangeInCompaction(&f->smallest, &f->largest, level,
+if (ParentRangeInCompaction(&f->smallest, &f->largest, level,
&parent_index)) {
continue;
}
@@ -1887,7 +1887,7 @@ Compaction* VersionSet::PickCompaction() {
if (c == NULL && (current_->file_to_compact_ != NULL)) {
level = current_->file_to_compact_level_;
c = new Compaction(level, MaxFileSizeForLevel(level),
-MaxGrandParentOverlapBytes(level), NumberLevels(), true);
+MaxGrandParentOverlapBytes(level), NumberLevels(), true);
c->inputs_[0].push_back(current_->file_to_compact_);
}
@@ -1937,7 +1937,7 @@ Compaction* VersionSet::PickCompaction() {
bool VersionSet::ParentRangeInCompaction(const InternalKey* smallest,
const InternalKey* largest, int level, int* parent_index) {
std::vector<FileMetaData*> inputs;
current_->GetOverlappingInputs(level+1, smallest, largest,
&inputs, *parent_index, parent_index);
return FilesInCompaction(inputs);
@@ -1948,7 +1948,7 @@ bool VersionSet::FilesInCompaction(std::vector<FileMetaData*>& files) {
for (unsigned int i = 0; i < files.size(); i++) {
if (files[i]->being_compacted) {
return true;
}
-}
+}
return false;
}
@@ -1984,7 +1984,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
&expanded1, c->parent_index_,
&c->parent_index_);
-if (expanded1.size() == c->inputs_[1].size() &&
+if (expanded1.size() == c->inputs_[1].size() &&
!FilesInCompaction(expanded1)) {
Log(options_->info_log,
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
@@ -2085,8 +2085,8 @@ Compaction::Compaction(int level, uint64_t target_file_size,
}
Compaction::~Compaction() {
-delete[] level_ptrs_;
-delete edit_;
+delete[] level_ptrs_;
+delete edit_;
if (input_version_ != NULL) {
input_version_->Unref();
}
@@ -2176,7 +2176,7 @@ void Compaction::ReleaseInputs() {
}
void Compaction::ResetNextCompactionIndex() {
-input_version_->ResetNextCompactionIndex(level_);
+input_version_->ResetNextCompactionIndex(level_);
}
static void InputSummary(std::vector<FileMetaData*>& files,

@@ -146,8 +146,8 @@ class Version {
// in increasing order of keys
std::vector<FileMetaData*>* files_;
-// A list for the same set of files that are stored in files_,
-// but files in each level are now sorted based on file
+// A list for the same set of files that are stored in files_,
+// but files in each level are now sorted based on file
// size. The file with the largest size is at the front.
// This vector stores the index of the file from files_.
std::vector< std::vector<int> > files_by_size_;
@@ -191,7 +191,7 @@ class Version {
// to find the next compaction candidate file.
void ResetNextCompactionIndex(int level) {
next_file_to_compact_by_size_[level] = 0;
-}
+}
// No copying allowed
Version(const Version&);
@@ -448,7 +448,7 @@ class VersionSet {
uint64_t SizeBeingCompacted(int level);
// Returns true if any one of the parent files are being compacted
-bool ParentRangeInCompaction(const InternalKey* smallest,
+bool ParentRangeInCompaction(const InternalKey* smallest,
const InternalKey* largest, int level, int* index);
// Returns true if any one of the specified files are being compacted
@@ -542,7 +542,7 @@ class Compaction {
// mark (or clear) all files that are being compacted
void MarkFilesBeingCompacted(bool);
// In case of compaction error, reset the nextIndex that is used
// to pick up the next file to be compacted from files_by_size_
void ResetNextCompactionIndex();

@@ -93,7 +93,7 @@ struct Options {
// on disk) before converting to a sorted on-disk file.
//
// Larger values increase performance, especially during bulk loads.
-// Up to max_write_buffer_number write buffers may be held in memory
+// Up to max_write_buffer_number write buffers may be held in memory
// at the same time,
// so you may wish to adjust this parameter to control memory usage.
// Also, a larger write buffer will result in a longer recovery time
@@ -103,7 +103,7 @@ struct Options {
size_t write_buffer_size;
// The maximum number of write buffers that are built up in memory.
-// The default is 2, so that when 1 write buffer is being flushed to
+// The default is 2, so that when 1 write buffer is being flushed to
// storage, new writes can continue to the other write buffer.
// Default: 2
int max_write_buffer_number;
@@ -239,7 +239,7 @@ struct Options {
int expanded_compaction_factor;
// Maximum number of bytes in all source files to be compacted in a
-// single compaction run. We avoid picking too many files in the
+// single compaction run. We avoid picking too many files in the
// source level so that we do not exceed the total source bytes
// for compaction to exceed
// (source_compaction_factor * targetFileSizeLevel()) many bytes.
@@ -292,7 +292,7 @@ struct Options {
// value is 0 which means that obsolete files get removed after
// every compaction run.
uint64_t delete_obsolete_files_period_micros;
// Maximum number of concurrent background compactions.
// Default: 1
int max_background_compactions;
@@ -321,12 +321,12 @@ struct Options {
void Dump(Logger * log) const;

// This method allows an application to modify/delete a key-value at
// the time of compaction. The compaction process invokes this
// method for every kv that is being compacted. A return value
// of false indicates that the kv should be preserved in the
// output of this compaction run and a return value of true
-// indicates that this key-value should be removed from the
+// indicates that this key-value should be removed from the
// output of the compaction. The application can inspect
// the existing value of the key, modify it if needed and
// return back the new value for this key. The application
@@ -336,8 +336,8 @@ struct Options {
// The compaction_filter_args, if specified here, are passed
// back to the invocation of the CompactionFilter.
void* compaction_filter_args;
-bool (*CompactionFilter)(void* compaction_filter_args,
-int level, const Slice& key,
+bool (*CompactionFilter)(void* compaction_filter_args,
+int level, const Slice& key,
const Slice& existing_value, Slice** new_value);
// Disable automatic compactions. Manual compactions can still
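
The function-pointer field above is the whole filter API at this point in the
code's history; a sketch of a conforming callback follows (the filter's name
and its drop-empty-values rule are hypothetical, but the signature and
return-value convention come from the comment in the hunk):

    // Return true to drop the key-value from the compaction output, false to
    // keep it; *new_value may be filled in to rewrite the value instead.
    static bool DropEmptyValues(void* compaction_filter_args, int level,
                                const leveldb::Slice& key,
                                const leveldb::Slice& existing_value,
                                leveldb::Slice** new_value) {
      (void)compaction_filter_args; (void)level; (void)key; (void)new_value;
      return existing_value.size() == 0;  // drop entries with empty values
    }

    // Wiring it up:
    //   options.CompactionFilter = DropEmptyValues;
    //   options.compaction_filter_args = NULL;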

@@ -27,7 +27,7 @@ class TableBuilder {
public:
// Create a builder that will store the contents of the table it is
// building in *file. Does not close the file. It is up to the
-// caller to close the file after calling Finish(). The output file
+// caller to close the file after calling Finish(). The output file
// will be part of level specified by 'level'. A value of -1 means
// that the caller does not know which level the output file will reside.
TableBuilder(const Options& options, WritableFile* file, int level=-1);
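
Per the contract above, TableBuilder never closes the file; the caller must
close it after Finish(). A minimal usage sketch (assuming the standard
Add/Finish builder methods and an Env-created file; the path is made up):

    // Build one table file, then close the file ourselves.
    leveldb::WritableFile* file;
    leveldb::Status s = env->NewWritableFile("/tmp/000123.sst", &file);
    if (s.ok()) {
      leveldb::TableBuilder builder(options, file, 1 /* target level */);
      builder.Add(leveldb::Slice("key1"), leveldb::Slice("value1"));
      s = builder.Finish();           // writes index and footer, does NOT close
      if (s.ok()) s = file->Close();  // caller closes the file
      delete file;
    }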

@@ -28,7 +28,7 @@ class BloomFilterPolicy : public FilterPolicy {
}
public:
-explicit BloomFilterPolicy(int bits_per_key,
+explicit BloomFilterPolicy(int bits_per_key,
uint32_t (*hash_func)(const Slice& key))
: bits_per_key_(bits_per_key), hash_func_(hash_func) {
initialize();

@@ -296,7 +296,7 @@ uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
uint64_t l = crc ^ 0xffffffffu;
// Align n to (1 << m) byte boundary
-#define ALIGN(n, m) ((n + ((1 << m) - 1)) & ~((1 << m) - 1))
+#define ALIGN(n, m) ((n + ((1 << m) - 1)) & ~((1 << m) - 1))
#define STEP1 do { \
int c = (l & 0xff) ^ *p++; \

@@ -35,7 +35,7 @@
//
// This file defines an HDFS environment for leveldb. It uses the libhdfs
// api to access HDFS. All HDFS files created by one instance of leveldb
-// will reside on the same HDFS cluster.
+// will reside on the same HDFS cluster.
//
namespace leveldb {
@@ -65,15 +65,15 @@ class HdfsReadableFile: virtual public SequentialFile, virtual public RandomAcce
Log(mylog, "[hdfs] HdfsReadableFile opening file %s\n",
filename_.c_str());
hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_RDONLY, 0, 0, 0);
-Log(mylog, "[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
+Log(mylog, "[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
filename_.c_str(), hfile_);
}
virtual ~HdfsReadableFile() {
-Log(mylog, "[hdfs] HdfsReadableFile closing file %s\n",
+Log(mylog, "[hdfs] HdfsReadableFile closing file %s\n",
filename_.c_str());
hdfsCloseFile(fileSys_, hfile_);
-Log(mylog, "[hdfs] HdfsReadableFile closed file %s\n",
+Log(mylog, "[hdfs] HdfsReadableFile closed file %s\n",
filename_.c_str());
hfile_ = NULL;
}
@@ -85,7 +85,7 @@ class HdfsReadableFile: virtual public SequentialFile, virtual public RandomAcce
// sequential access, read data at current offset in file
virtual Status Read(size_t n, Slice* result, char* scratch) {
Status s;
-Log(mylog, "[hdfs] HdfsReadableFile reading %s %ld\n",
+Log(mylog, "[hdfs] HdfsReadableFile reading %s %ld\n",
filename_.c_str(), n);
size_t bytes_read = hdfsRead(fileSys_, hfile_, scratch, (tSize)n);
Log(mylog, "[hdfs] HdfsReadableFile read %s\n", filename_.c_str());
@@ -106,7 +106,7 @@ class HdfsReadableFile: virtual public SequentialFile, virtual public RandomAcce
char* scratch) const {
Status s;
Log(mylog, "[hdfs] HdfsReadableFile preading %s\n", filename_.c_str());
-ssize_t bytes_read = hdfsPread(fileSys_, hfile_, offset,
+ssize_t bytes_read = hdfsPread(fileSys_, hfile_, offset,
(void*)scratch, (tSize)n);
Log(mylog, "[hdfs] HdfsReadableFile pread %s\n", filename_.c_str());
*result = Slice(scratch, (bytes_read < 0) ? 0 : bytes_read);
@@ -153,7 +153,7 @@ class HdfsReadableFile: virtual public SequentialFile, virtual public RandomAcce
size = pFileInfo->mSize;
hdfsFreeFileInfo(pFileInfo, 1);
} else {
-throw new leveldb::HdfsFatalException("fileSize on unknown file " +
+throw leveldb::HdfsFatalException("fileSize on unknown file " +
filename_);
}
return size;
@@ -250,14 +250,14 @@ class HdfsLogger : public Logger {
uint64_t (*gettid_)(); // Return the thread id for the current thread
public:
-HdfsLogger(HdfsWritableFile* f, uint64_t (*gettid)())
+HdfsLogger(HdfsWritableFile* f, uint64_t (*gettid)())
: file_(f), gettid_(gettid) {
-Log(mylog, "[hdfs] HdfsLogger opened %s\n",
+Log(mylog, "[hdfs] HdfsLogger opened %s\n",
file_->getName().c_str());
}
virtual ~HdfsLogger() {
-Log(mylog, "[hdfs] HdfsLogger closed %s\n",
+Log(mylog, "[hdfs] HdfsLogger closed %s\n",
file_->getName().c_str());
delete file_;
if (mylog != NULL && mylog == this) {

@@ -26,7 +26,7 @@
#include "util/logging.h"
#include "util/posix_logger.h"
-bool useOsBuffer = 1; // cache data in OS buffers
+bool useOsBuffer = 1; // cache data in OS buffers
bool useFsReadAhead = 1; // allow filesystem to do readaheads
bool useMmapRead = 0; // do not use mmaps for reading files
bool useMmapWrite = 1; // use mmaps for appending to files
@@ -84,7 +84,7 @@ class PosixRandomAccessFile: public RandomAccessFile {
public:
PosixRandomAccessFile(const std::string& fname, int fd)
-: filename_(fname), fd_(fd) {
+: filename_(fname), fd_(fd) {
if (!useFsReadAhead) {
// disable read-aheads
posix_fadvise(fd, 0, 0, POSIX_FADV_RANDOM);
@@ -741,7 +741,7 @@ class PosixEnv : public Env {
return Status::OK();
}
-// Allow increasing the number of worker threads.
+// Allow increasing the number of worker threads.
virtual void SetBackgroundThreads(int num) {
if (num > num_threads_) {
num_threads_ = num;

@@ -127,7 +127,7 @@ Options::Dump(
Log(log," Options.no_block_cache: %d",
no_block_cache);
Log(log," Options.table_cache_numshardbits: %d",
-table_cache_numshardbits);
+table_cache_numshardbits);
Log(log," Options.delete_obsolete_files_period_micros: %ld",
delete_obsolete_files_period_micros);
Log(log," Options.max_background_compactions: %d",
