From 08a63ad10b0176acaf161b26c74beb3066818a02 Mon Sep 17 00:00:00 2001
From: Hui Xiao
Date: Fri, 28 Oct 2022 13:16:50 -0700
Subject: [PATCH] Run clang format against files under example/, memory/ and
 memtable/ folders (#10893)

Summary:
**Context/Summary:**
Run the following to format
```
find ./examples -iname *.h -o -iname *.cc | xargs clang-format -i
find ./memory -iname *.h -o -iname *.cc | xargs clang-format -i
find ./memtable -iname *.h -o -iname *.cc | xargs clang-format -i
```

**Test**
- Manual inspection to ensure changes are cosmetic only
- CI

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10893

Reviewed By: jay-zhuang

Differential Revision: D40779187

Pulled By: hx235

fbshipit-source-id: 529cbb0f0fbd698d95817e8c42fe3ce32254d9b0
---
 examples/column_families_example.cc        |   6 +-
 examples/compact_files_example.cc          |  60 ++++-----
 examples/optimistic_transaction_example.cc |   2 +-
 examples/simple_example.cc                 |   2 +-
 memory/allocator.h                         |   1 +
 memory/arena_test.cc                       |   2 +-
 memory/concurrent_arena.cc                 |   2 +
 memory/concurrent_arena.h                  |   1 +
 memtable/alloc_tracker.cc                  |   1 +
 memtable/hash_linklist_rep.cc              |  37 +++---
 memtable/hash_skiplist_rep.cc              |  28 ++--
 memtable/inlineskiplist.h                  |  21 +--
 memtable/inlineskiplist_test.cc            |   6 +-
 memtable/memtablerep_bench.cc              |   4 +-
 memtable/skiplist.h                        |  58 +++++----
 memtable/skiplist_test.cc                  |  10 +-
 memtable/skiplistrep.cc                    | 143 +++++++++++----------
 memtable/stl_wrappers.h                    |   2 +-
 memtable/vectorrep.cc                      |  44 +++----
 memtable/write_buffer_manager_test.cc      |   1 +
 20 files changed, 213 insertions(+), 218 deletions(-)

diff --git a/examples/column_families_example.cc b/examples/column_families_example.cc
index d28b8e776..3828d3fb3 100644
--- a/examples/column_families_example.cc
+++ b/examples/column_families_example.cc
@@ -7,8 +7,8 @@
 #include <string>
 
 #include "rocksdb/db.h"
-#include "rocksdb/slice.h"
 #include "rocksdb/options.h"
+#include "rocksdb/slice.h"
 
 #if defined(OS_WIN)
 std::string kDBPath = "C:\\Windows\\TEMP\\rocksdb_column_families_example";
@@ -52,8 +52,8 @@ int main() {
   column_families.push_back(ColumnFamilyDescriptor(
       ROCKSDB_NAMESPACE::kDefaultColumnFamilyName, ColumnFamilyOptions()));
   // open the new one, too
-  column_families.push_back(ColumnFamilyDescriptor(
-      "new_cf", ColumnFamilyOptions()));
+  column_families.push_back(
+      ColumnFamilyDescriptor("new_cf", ColumnFamilyOptions()));
   std::vector<ColumnFamilyHandle*> handles;
   s = DB::Open(DBOptions(), kDBPath, column_families, &handles, &db);
   assert(s.ok());
diff --git a/examples/compact_files_example.cc b/examples/compact_files_example.cc
index e56b30d59..1ecf8c794 100644
--- a/examples/compact_files_example.cc
+++ b/examples/compact_files_example.cc
@@ -8,6 +8,7 @@
 #include <mutex>
 #include <string>
+
 #include "rocksdb/db.h"
 #include "rocksdb/env.h"
 #include "rocksdb/options.h"
@@ -39,29 +40,27 @@ class Compactor : public EventListener {
   // and column family. It is the caller's responsibility to
   // destroy the returned CompactionTask. Returns "nullptr"
   // if it cannot find a proper compaction task.
-  virtual CompactionTask* PickCompaction(
-      DB* db, const std::string& cf_name) = 0;
+  virtual CompactionTask* PickCompaction(DB* db,
+                                         const std::string& cf_name) = 0;
 
   // Schedule and run the specified compaction task in background.
-  virtual void ScheduleCompaction(CompactionTask *task) = 0;
+  virtual void ScheduleCompaction(CompactionTask* task) = 0;
 };
 
 // Example structure that describes a compaction task.
 struct CompactionTask {
-  CompactionTask(
-      DB* _db, Compactor* _compactor,
-      const std::string& _column_family_name,
-      const std::vector<std::string>& _input_file_names,
-      const int _output_level,
-      const CompactionOptions& _compact_options,
-      bool _retry_on_fail)
-      : db(_db),
-        compactor(_compactor),
-        column_family_name(_column_family_name),
-        input_file_names(_input_file_names),
-        output_level(_output_level),
-        compact_options(_compact_options),
-        retry_on_fail(_retry_on_fail) {}
+  CompactionTask(DB* _db, Compactor* _compactor,
+                 const std::string& _column_family_name,
+                 const std::vector<std::string>& _input_file_names,
+                 const int _output_level,
+                 const CompactionOptions& _compact_options, bool _retry_on_fail)
+      : db(_db),
+        compactor(_compactor),
+        column_family_name(_column_family_name),
+        input_file_names(_input_file_names),
+        output_level(_output_level),
+        compact_options(_compact_options),
+        retry_on_fail(_retry_on_fail) {}
 
   DB* db;
   Compactor* compactor;
   const std::string& column_family_name;
@@ -77,15 +76,13 @@ class FullCompactor : public Compactor {
  public:
   explicit FullCompactor(const Options options) : options_(options) {
     compact_options_.compression = options_.compression;
-    compact_options_.output_file_size_limit =
-        options_.target_file_size_base;
+    compact_options_.output_file_size_limit = options_.target_file_size_base;
   }
 
   // When flush happens, it determines whether to trigger compaction. If
   // triggered_writes_stop is true, it will also set the retry flag of
   // compaction-task to true.
-  void OnFlushCompleted(
-      DB* db, const FlushJobInfo& info) override {
+  void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
     CompactionTask* task = PickCompaction(db, info.cf_name);
     if (task != nullptr) {
       if (info.triggered_writes_stop) {
@@ -97,8 +94,7 @@ class FullCompactor : public Compactor {
   }
 
   // Always pick a compaction which includes all files whenever possible.
-  CompactionTask* PickCompaction(
-      DB* db, const std::string& cf_name) override {
+  CompactionTask* PickCompaction(DB* db, const std::string& cf_name) override {
     ColumnFamilyMetaData cf_meta;
     db->GetColumnFamilyMetaData(&cf_meta);
 
@@ -111,9 +107,8 @@ class FullCompactor : public Compactor {
         input_file_names.push_back(file.name);
       }
     }
-    return new CompactionTask(
-        db, this, cf_name, input_file_names,
-        options_.num_levels - 1, compact_options_, false);
+    return new CompactionTask(db, this, cf_name, input_file_names,
+                              options_.num_levels - 1, compact_options_, false);
   }
 
   // Schedule the specified compaction task in background.
@@ -127,16 +122,14 @@ class FullCompactor : public Compactor {
     assert(task);
     assert(task->db);
     Status s = task->db->CompactFiles(
-        task->compact_options,
-        task->input_file_names,
-        task->output_level);
+        task->compact_options, task->input_file_names, task->output_level);
     printf("CompactFiles() finished with status %s\n", s.ToString().c_str());
     if (!s.ok() && !s.IsIOError() && task->retry_on_fail) {
       // If a compaction task with its retry_on_fail=true failed,
       // try to schedule another compaction in case the reason
       // is not an IO error.
-      CompactionTask* new_task = task->compactor->PickCompaction(
-          task->db, task->column_family_name);
+      CompactionTask* new_task =
+          task->compactor->PickCompaction(task->db, task->column_family_name);
       task->compactor->ScheduleCompaction(new_task);
     }
   }
@@ -167,14 +160,13 @@ int main() {
   // because of options.level0_stop_writes_trigger
   for (int i = 1000; i < 99999; ++i) {
     db->Put(WriteOptions(), std::to_string(i),
-        std::string(500, 'a' + (i % 26)));
+            std::string(500, 'a' + (i % 26)));
   }
 
   // verify the values are still there
   std::string value;
   for (int i = 1000; i < 99999; ++i) {
-    db->Get(ReadOptions(), std::to_string(i),
-            &value);
+    db->Get(ReadOptions(), std::to_string(i), &value);
     assert(value == std::string(500, 'a' + (i % 26)));
   }
 
diff --git a/examples/optimistic_transaction_example.cc b/examples/optimistic_transaction_example.cc
index e0398f66e..fb0514a69 100644
--- a/examples/optimistic_transaction_example.cc
+++ b/examples/optimistic_transaction_example.cc
@@ -8,8 +8,8 @@
 #include "rocksdb/db.h"
 #include "rocksdb/options.h"
 #include "rocksdb/slice.h"
-#include "rocksdb/utilities/transaction.h"
 #include "rocksdb/utilities/optimistic_transaction_db.h"
+#include "rocksdb/utilities/transaction.h"
 
 using ROCKSDB_NAMESPACE::DB;
 using ROCKSDB_NAMESPACE::OptimisticTransactionDB;
diff --git a/examples/simple_example.cc b/examples/simple_example.cc
index 24e97506e..2d49c4d14 100644
--- a/examples/simple_example.cc
+++ b/examples/simple_example.cc
@@ -7,8 +7,8 @@
 #include <string>
 
 #include "rocksdb/db.h"
-#include "rocksdb/slice.h"
 #include "rocksdb/options.h"
+#include "rocksdb/slice.h"
 
 using ROCKSDB_NAMESPACE::DB;
 using ROCKSDB_NAMESPACE::Options;
diff --git a/memory/allocator.h b/memory/allocator.h
index 002ad5f1d..0d7cd60a9 100644
--- a/memory/allocator.h
+++ b/memory/allocator.h
@@ -13,6 +13,7 @@
 #pragma once
 #include <cerrno>
 #include <cstddef>
+
 #include "rocksdb/write_buffer_manager.h"
 
 namespace ROCKSDB_NAMESPACE {
diff --git a/memory/arena_test.cc b/memory/arena_test.cc
index 0aaf39826..30887c23a 100644
--- a/memory/arena_test.cc
+++ b/memory/arena_test.cc
@@ -36,7 +36,7 @@ bool CheckMemoryAllocated(size_t allocated, size_t expected) {
 
 void MemoryAllocatedBytesTest(size_t huge_page_size) {
   const int N = 17;
-  size_t req_sz;  // requested size
+  size_t req_sz;           // requested size
   size_t bsz = 32 * 1024;  // block size
   size_t expected_memory_allocated;
 
diff --git a/memory/concurrent_arena.cc b/memory/concurrent_arena.cc
index 3d45ca949..1619bd93b 100644
--- a/memory/concurrent_arena.cc
+++ b/memory/concurrent_arena.cc
@@ -8,7 +8,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "memory/concurrent_arena.h"
+
 #include <thread>
+
 #include "port/port.h"
 #include "util/random.h"
 
diff --git a/memory/concurrent_arena.h b/memory/concurrent_arena.h
index d2fbc2c93..f14507d30 100644
--- a/memory/concurrent_arena.h
+++ b/memory/concurrent_arena.h
@@ -11,6 +11,7 @@
 #include <atomic>
 #include <memory>
 #include <utility>
+
 #include "memory/allocator.h"
 #include "memory/arena.h"
 #include "port/lang.h"
diff --git a/memtable/alloc_tracker.cc b/memtable/alloc_tracker.cc
index fe2134347..4c6d35431 100644
--- a/memtable/alloc_tracker.cc
+++ b/memtable/alloc_tracker.cc
@@ -8,6 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include <cassert>
+
 #include "memory/allocator.h"
 #include "memory/arena.h"
 #include "rocksdb/write_buffer_manager.h"
diff --git a/memtable/hash_linklist_rep.cc b/memtable/hash_linklist_rep.cc
index f990e89f7..a71768304 100644
--- a/memtable/hash_linklist_rep.cc
+++ b/memtable/hash_linklist_rep.cc
@@ -77,9 +77,7 @@ struct Node {
     next_.store(x, std::memory_order_release);
   }
   // No-barrier variants that can be safely used in a few locations.
-  Node* NoBarrier_Next() {
-    return next_.load(std::memory_order_relaxed);
-  }
+  Node* NoBarrier_Next() { return next_.load(std::memory_order_relaxed); }
   void NoBarrier_SetNext(Node* x) {
     next_.store(x, std::memory_order_relaxed);
   }
@@ -296,9 +294,9 @@ class HashLinkListRep : public MemTableRep {
 
     // Advance to the first entry with a key >= target
     void Seek(const Slice& internal_key, const char* memtable_key) override {
-      const char* encoded_key =
-          (memtable_key != nullptr) ?
-              memtable_key : EncodeKey(&tmp_, internal_key);
+      const char* encoded_key = (memtable_key != nullptr)
+                                    ? memtable_key
+                                    : EncodeKey(&tmp_, internal_key);
       iter_.Seek(encoded_key);
     }
 
@@ -324,7 +322,7 @@ class HashLinkListRep : public MemTableRep {
     // To destruct with the iterator.
     std::unique_ptr<FullList> full_list_;
     std::unique_ptr<Allocator> allocator_;
-    std::string tmp_; // For passing to EncodeKey
+    std::string tmp_;  // For passing to EncodeKey
   };
 
   class LinkListIterator : public MemTableRep::Iterator {
@@ -365,8 +363,8 @@ class HashLinkListRep : public MemTableRep {
 
     // Advance to the first entry with a key >= target
     void Seek(const Slice& internal_key, const char* /*memtable_key*/) override {
-      node_ = hash_link_list_rep_->FindGreaterOrEqualInBucket(head_,
-                                                              internal_key);
+      node_ =
+          hash_link_list_rep_->FindGreaterOrEqualInBucket(head_, internal_key);
     }
 
     // Retreat to the last entry with a key <= target
@@ -398,15 +396,14 @@ class HashLinkListRep : public MemTableRep {
       head_ = head;
       node_ = nullptr;
     }
+
    private:
     friend class HashLinkListRep;
     const HashLinkListRep* const hash_link_list_rep_;
     Node* head_;
     Node* node_;
 
-    virtual void SeekToHead() {
-      node_ = head_;
-    }
+    virtual void SeekToHead() { node_ = head_; }
   };
 
   class DynamicIterator : public HashLinkListRep::LinkListIterator {
@@ -486,7 +483,7 @@ class HashLinkListRep : public MemTableRep {
     // This is used when there wasn't a bucket. It is cheaper than
     // instantiating an empty bucket over which to iterate.
    public:
-    EmptyIterator() { }
+    EmptyIterator() {}
     bool Valid() const override { return false; }
     const char* key() const override {
      assert(false);
@@ -521,7 +518,7 @@ HashLinkListRep::HashLinkListRep(
       bucket_entries_logging_threshold_(bucket_entries_logging_threshold),
       if_log_bucket_dist_when_flash_(if_log_bucket_dist_when_flash) {
   char* mem = allocator_->AllocateAligned(sizeof(Pointer) * bucket_size,
-                                           huge_page_tlb_size, logger);
+                                          huge_page_tlb_size, logger);
 
   buckets_ = new (mem) Pointer[bucket_size];
 
@@ -530,8 +527,7 @@ HashLinkListRep::HashLinkListRep(
   }
 }
 
-HashLinkListRep::~HashLinkListRep() {
-}
+HashLinkListRep::~HashLinkListRep() {}
 
 KeyHandle HashLinkListRep::Allocate(const size_t len, char** buf) {
   char* mem = allocator_->AllocateAligned(sizeof(Node) + len);
@@ -633,9 +629,10 @@ void HashLinkListRep::Insert(KeyHandle handle) {
   if (bucket_entries_logging_threshold_ > 0 &&
       header->GetNumEntries() ==
           static_cast<uint32_t>(bucket_entries_logging_threshold_)) {
-    Info(logger_, "HashLinkedList bucket %" ROCKSDB_PRIszt
-                  " has more than %d "
Key to insert: %s", + Info(logger_, + "HashLinkedList bucket %" ROCKSDB_PRIszt + " has more than %d " + "entries. Key to insert: %s", GetHash(transformed), header->GetNumEntries(), GetLengthPrefixedSlice(x->key).ToString(true).c_str()); } @@ -786,7 +783,7 @@ MemTableRep::Iterator* HashLinkListRep::GetIterator(Arena* alloc_arena) { for (itr.SeekToFirst(); itr.Valid(); itr.Next()) { list->Insert(itr.key()); count++; - } + } } } if (if_log_bucket_dist_when_flash_) { diff --git a/memtable/hash_skiplist_rep.cc b/memtable/hash_skiplist_rep.cc index dc58046a4..9d093829b 100644 --- a/memtable/hash_skiplist_rep.cc +++ b/memtable/hash_skiplist_rep.cc @@ -118,9 +118,9 @@ class HashSkipListRep : public MemTableRep { // Advance to the first entry with a key >= target void Seek(const Slice& internal_key, const char* memtable_key) override { if (list_ != nullptr) { - const char* encoded_key = - (memtable_key != nullptr) ? - memtable_key : EncodeKey(&tmp_, internal_key); + const char* encoded_key = (memtable_key != nullptr) + ? memtable_key + : EncodeKey(&tmp_, internal_key); iter_.Seek(encoded_key); } } @@ -158,6 +158,7 @@ class HashSkipListRep : public MemTableRep { iter_.SetList(list); own_list_ = false; } + private: // if list_ is nullptr, we should NEVER call any methods on iter_ // if list_ is nullptr, this Iterator is not Valid() @@ -167,14 +168,14 @@ class HashSkipListRep : public MemTableRep { // responsible for it's cleaning. This is a poor man's std::shared_ptr bool own_list_; std::unique_ptr arena_; - std::string tmp_; // For passing to EncodeKey + std::string tmp_; // For passing to EncodeKey }; class DynamicIterator : public HashSkipListRep::Iterator { public: explicit DynamicIterator(const HashSkipListRep& memtable_rep) - : HashSkipListRep::Iterator(nullptr, false), - memtable_rep_(memtable_rep) {} + : HashSkipListRep::Iterator(nullptr, false), + memtable_rep_(memtable_rep) {} // Advance to the first entry with a key >= target void Seek(const Slice& k, const char* memtable_key) override { @@ -208,7 +209,7 @@ class HashSkipListRep : public MemTableRep { // This is used when there wasn't a bucket. It is cheaper than // instantiating an empty bucket over which to iterate. 
    public:
-    EmptyIterator() { }
+    EmptyIterator() {}
     bool Valid() const override { return false; }
     const char* key() const override {
       assert(false);
@@ -239,8 +240,8 @@ HashSkipListRep::HashSkipListRep(const MemTableRep::KeyComparator& compare,
       transform_(transform),
       compare_(compare),
       allocator_(allocator) {
-  auto mem = allocator->AllocateAligned(
-      sizeof(std::atomic<void*>) * bucket_size);
+  auto mem =
+      allocator->AllocateAligned(sizeof(std::atomic<void*>) * bucket_size);
   buckets_ = new (mem) std::atomic<Bucket*>[bucket_size];
 
   for (size_t i = 0; i < bucket_size_; ++i) {
@@ -248,8 +249,7 @@ HashSkipListRep::HashSkipListRep(const MemTableRep::KeyComparator& compare,
   }
 }
 
-HashSkipListRep::~HashSkipListRep() {
-}
+HashSkipListRep::~HashSkipListRep() {}
 
 HashSkipListRep::Bucket* HashSkipListRep::GetInitializedBucket(
     const Slice& transformed) {
@@ -281,9 +281,7 @@ bool HashSkipListRep::Contains(const char* key) const {
   return bucket->Contains(key);
 }
 
-size_t HashSkipListRep::ApproximateMemoryUsage() {
-  return 0;
-}
+size_t HashSkipListRep::ApproximateMemoryUsage() { return 0; }
 
 void HashSkipListRep::Get(const LookupKey& k, void* callback_args,
                           bool (*callback_func)(void* arg, const char* entry)) {
@@ -388,7 +386,7 @@ MemTableRepFactory* NewHashSkipListRepFactory(
     size_t bucket_count, int32_t skiplist_height,
     int32_t skiplist_branching_factor) {
   return new HashSkipListRepFactory(bucket_count, skiplist_height,
-      skiplist_branching_factor);
+                                    skiplist_branching_factor);
 }
 
 }  // namespace ROCKSDB_NAMESPACE
diff --git a/memtable/inlineskiplist.h b/memtable/inlineskiplist.h
index 4a4e63df0..abb3c3ddb 100644
--- a/memtable/inlineskiplist.h
+++ b/memtable/inlineskiplist.h
@@ -43,9 +43,11 @@
 #pragma once
 #include <assert.h>
 #include <stdlib.h>
+
 #include <algorithm>
 #include <atomic>
 #include <type_traits>
+
 #include "memory/allocator.h"
 #include "port/likely.h"
 #include "port/port.h"
@@ -62,8 +64,8 @@ class InlineSkipList {
   struct Splice;
 
  public:
-  using DecodedKey = \
-typename std::remove_reference<Comparator>::type::DecodedType;
+  using DecodedKey =
+      typename std::remove_reference<Comparator>::type::DecodedType;
 
   static const uint16_t kMaxPossibleHeight = 32;
 
@@ -264,9 +266,9 @@ class InlineSkipList {
   // point to a node that is before the key, and after should point to
   // a node that is after the key. after should be nullptr if a good after
   // node isn't conveniently available.
-  template<bool prefetch_before>
-  void FindSpliceForLevel(const DecodedKey& key, Node* before, Node* after, int level,
-                          Node** out_prev, Node** out_next);
+  template <bool prefetch_before>
+  void FindSpliceForLevel(const DecodedKey& key, Node* before, Node* after,
+                          int level, Node** out_prev, Node** out_next);
 
   // Recomputes Splice levels from highest_level (inclusive) down to
   // lowest_level (inclusive).
@@ -766,8 +768,8 @@ void InlineSkipList<Comparator>::FindSpliceForLevel(const DecodedKey& key,
       PREFETCH(next->Next(level), 0, 1);
     }
     if (prefetch_before == true) {
-      if (next != nullptr && level>0) {
-        PREFETCH(next->Next(level-1), 0, 1);
+      if (next != nullptr && level > 0) {
+        PREFETCH(next->Next(level - 1), 0, 1);
       }
     }
     assert(before == head_ || next == nullptr ||
@@ -791,7 +793,7 @@ void InlineSkipList<Comparator>::RecomputeSpliceLevels(const DecodedKey& key,
   assert(recompute_level <= splice->height_);
   for (int i = recompute_level - 1; i >= 0; --i) {
     FindSpliceForLevel<true>(key, splice->prev_[i + 1], splice->next_[i + 1], i,
-        &splice->prev_[i], &splice->next_[i]);
+                             &splice->prev_[i], &splice->next_[i]);
   }
 }
 
@@ -881,8 +883,7 @@ bool InlineSkipList<Comparator>::Insert(const char* key, Splice* splice,
         // we're pessimistic, recompute everything
         recompute_height = max_height;
       }
-    } else if (KeyIsAfterNode(key_decoded,
-                              splice->next_[recompute_height])) {
+    } else if (KeyIsAfterNode(key_decoded, splice->next_[recompute_height])) {
       // key is from after splice
       if (allow_partial_splice_fix) {
         Node* bad = splice->next_[recompute_height];
diff --git a/memtable/inlineskiplist_test.cc b/memtable/inlineskiplist_test.cc
index 1f3c6a691..f85644064 100644
--- a/memtable/inlineskiplist_test.cc
+++ b/memtable/inlineskiplist_test.cc
@@ -8,8 +8,10 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "memtable/inlineskiplist.h"
+
 #include <set>
 #include <unordered_set>
+
 #include "memory/concurrent_arena.h"
 #include "rocksdb/env.h"
 #include "test_util/testharness.h"
@@ -34,9 +36,7 @@ static Key Decode(const char* key) {
 struct TestComparator {
   using DecodedType = Key;
 
-  static DecodedType decode_key(const char* b) {
-    return Decode(b);
-  }
+  static DecodedType decode_key(const char* b) { return Decode(b); }
 
   int operator()(const char* a, const char* b) const {
     if (Decode(a) < Decode(b)) {
diff --git a/memtable/memtablerep_bench.cc b/memtable/memtablerep_bench.cc
index 1eaa7658f..a915abed7 100644
--- a/memtable/memtablerep_bench.cc
+++ b/memtable/memtablerep_bench.cc
@@ -467,8 +467,8 @@ class FillBenchmark : public Benchmark {
     num_write_ops_per_thread_ = FLAGS_num_operations;
   }
 
-  void RunThreads(std::vector<port::Thread>* /*threads*/, uint64_t* bytes_written,
-                  uint64_t* bytes_read, bool /*write*/,
+  void RunThreads(std::vector<port::Thread>* /*threads*/,
+                  uint64_t* bytes_written, uint64_t* bytes_read, bool /*write*/,
                   uint64_t* read_hits) override {
     FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
                         num_write_ops_per_thread_, read_hits)();
diff --git a/memtable/skiplist.h b/memtable/skiplist.h
index 52818e302..e3cecd30c 100644
--- a/memtable/skiplist.h
+++ b/memtable/skiplist.h
@@ -33,14 +33,16 @@
 #pragma once
 #include <assert.h>
 #include <stdlib.h>
+
 #include <atomic>
+
 #include "memory/allocator.h"
 #include "port/port.h"
 #include "util/random.h"
 
 namespace ROCKSDB_NAMESPACE {
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 class SkipList {
  private:
  struct Node;
@@ -119,7 +121,7 @@ class SkipList {
 
   // Immutable after construction
   Comparator const compare_;
-  Allocator* const allocator_;    // Allocator used for allocations of nodes
+  Allocator* const allocator_;  // Allocator used for allocations of nodes
 
   Node* const head_;
 
@@ -164,9 +166,9 @@ class SkipList {
 };
 
 // Implementation details follow
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 struct SkipList<Key, Comparator>::Node {
-  explicit Node(const Key& k) : key(k) { }
+  explicit Node(const Key& k) : key(k) {}
 
   Key const key;
 
@@ -200,43 +202,43 @@ struct SkipList<Key, Comparator>::Node {
   std::atomic<Node*> next_[1];
 };
 
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node*
-SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
+    const Key& key, int height) {
   char* mem = allocator_->AllocateAligned(
       sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
   return new (mem) Node(key);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
   SetList(list);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::SetList(const SkipList* list) {
   list_ = list;
   node_ = nullptr;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
   return node_ != nullptr;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
   assert(Valid());
   return node_->key;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Next() {
   assert(Valid());
   node_ = node_->Next(0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Prev() {
   // Instead of using explicit "prev" links, we just search for the
   // last node that falls before key.
@@ -247,7 +249,7 @@ inline void SkipList<Key, Comparator>::Iterator::Prev() {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
   node_ = list_->FindGreaterOrEqual(target);
 }
@@ -269,7 +271,7 @@ inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
   node_ = list_->head_->Next(0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   node_ = list_->FindLast();
   if (node_ == list_->head_) {
@@ -277,7 +279,7 @@ inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 int SkipList<Key, Comparator>::RandomHeight() {
   auto rnd = Random::GetTLSInstance();
 
@@ -291,15 +293,15 @@ int SkipList<Key, Comparator>::RandomHeight() {
   return height;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
   // nullptr n is considered infinite
   return (n != nullptr) && (compare_(n->key, key) < 0);
 }
 
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::
-    FindGreaterOrEqual(const Key& key) const {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key) const {
   // Note: It looks like we could reduce duplication by implementing
   // this function as FindLessThan(key)->Next(0), but we wouldn't be able
   // to exit early on equality and the result wouldn't even be correct.
@@ -315,8 +317,8 @@ typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::
     assert(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x));
     // Make sure we haven't overshot during our search
     assert(x == head_ || KeyIsAfterNode(key, x));
-    int cmp = (next == nullptr || next == last_bigger)
-        ? 1 : compare_(next->key, key);
+    int cmp =
+        (next == nullptr || next == last_bigger) ? 1 : compare_(next->key, key);
     if (cmp == 0 || (cmp > 0 && level == 0)) {
       return next;
     } else if (cmp < 0) {
@@ -330,7 +332,7 @@ typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node*
 SkipList<Key, Comparator>::FindLessThan(const Key& key, Node** prev) const {
   Node* x = head_;
@@ -360,7 +362,7 @@ SkipList<Key, Comparator>::FindLessThan(const Key& key, Node** prev) const {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
     const {
   Node* x = head_;
@@ -424,14 +426,14 @@ SkipList<Key, Comparator>::SkipList(const Comparator cmp, Allocator* allocator,
   // prev_ does not need to be freed, as its life cycle is tied up with
   // the allocator as a whole.
   prev_ = reinterpret_cast<Node**>(
-        allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_));
+      allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_));
   for (int i = 0; i < kMaxHeight_; i++) {
     head_->SetNext(i, nullptr);
     prev_[i] = head_;
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 void SkipList<Key, Comparator>::Insert(const Key& key) {
   // fast path for sequential insertion
   if (!KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0)) &&
@@ -460,7 +462,7 @@ void SkipList<Key, Comparator>::Insert(const Key& key) {
     for (int i = GetMaxHeight(); i < height; i++) {
       prev_[i] = head_;
     }
-    //fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
+    // fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
 
     // It is ok to mutate max_height_ without any synchronization
     // with concurrent readers. A concurrent reader that observes
@@ -483,7 +485,7 @@ void SkipList<Key, Comparator>::Insert(const Key& key) {
   prev_height_ = height;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 bool SkipList<Key, Comparator>::Contains(const Key& key) const {
   Node* x = FindGreaterOrEqual(key);
   if (x != nullptr && Equal(key, x->key)) {
diff --git a/memtable/skiplist_test.cc b/memtable/skiplist_test.cc
index 1d43d734b..a07088511 100644
--- a/memtable/skiplist_test.cc
+++ b/memtable/skiplist_test.cc
@@ -8,7 +8,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "memtable/skiplist.h"
+
 #include <set>
+
 #include "memory/arena.h"
 #include "rocksdb/env.h"
 #include "test_util/testharness.h"
@@ -169,7 +171,7 @@ class ConcurrentTest {
   static uint64_t hash(Key key) { return key & 0xff; }
 
   static uint64_t HashNumbers(uint64_t k, uint64_t g) {
-    uint64_t data[2] = { k, g };
+    uint64_t data[2] = {k, g};
     return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
   }
 
@@ -311,11 +313,7 @@ class TestState {
   int seed_;
   std::atomic<bool> quit_flag_;
 
-  enum ReaderState {
-    STARTING,
-    RUNNING,
-    DONE
-  };
+  enum ReaderState { STARTING, RUNNING, DONE };
 
   explicit TestState(int s)
       : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
diff --git a/memtable/skiplistrep.cc b/memtable/skiplistrep.cc
index 5b8577e87..40f13a2c1 100644
--- a/memtable/skiplistrep.cc
+++ b/memtable/skiplistrep.cc
@@ -21,74 +21,76 @@ class SkipListRep : public MemTableRep {
   const size_t lookahead_;
 
   friend class LookaheadIterator;
-public:
- explicit SkipListRep(const MemTableRep::KeyComparator& compare,
-                      Allocator* allocator, const SliceTransform* transform,
-                      const size_t lookahead)
-     : MemTableRep(allocator),
-       skip_list_(compare, allocator),
-       cmp_(compare),
-       transform_(transform),
-       lookahead_(lookahead) {}
-
- KeyHandle Allocate(const size_t len, char** buf) override {
-   *buf = skip_list_.AllocateKey(len);
-   return static_cast<KeyHandle>(*buf);
- }
+
+ public:
+  explicit SkipListRep(const MemTableRep::KeyComparator& compare,
+                       Allocator* allocator, const SliceTransform* transform,
+                       const size_t lookahead)
+      : MemTableRep(allocator),
+        skip_list_(compare, allocator),
+        cmp_(compare),
+        transform_(transform),
+        lookahead_(lookahead) {}
+
+  KeyHandle Allocate(const size_t len, char** buf) override {
+    *buf = skip_list_.AllocateKey(len);
+    return static_cast<KeyHandle>(*buf);
+  }
 
   // Insert key into the list.
   // REQUIRES: nothing that compares equal to key is currently in the list.
- void Insert(KeyHandle handle) override {
-   skip_list_.Insert(static_cast<char*>(handle));
- }
+  void Insert(KeyHandle handle) override {
+    skip_list_.Insert(static_cast<char*>(handle));
+  }
 
- bool InsertKey(KeyHandle handle) override {
-   return skip_list_.Insert(static_cast<char*>(handle));
- }
+  bool InsertKey(KeyHandle handle) override {
+    return skip_list_.Insert(static_cast<char*>(handle));
+  }
 
- void InsertWithHint(KeyHandle handle, void** hint) override {
-   skip_list_.InsertWithHint(static_cast<char*>(handle), hint);
- }
+  void InsertWithHint(KeyHandle handle, void** hint) override {
+    skip_list_.InsertWithHint(static_cast<char*>(handle), hint);
+  }
 
- bool InsertKeyWithHint(KeyHandle handle, void** hint) override {
-   return skip_list_.InsertWithHint(static_cast<char*>(handle), hint);
- }
+  bool InsertKeyWithHint(KeyHandle handle, void** hint) override {
+    return skip_list_.InsertWithHint(static_cast<char*>(handle), hint);
+  }
 
- void InsertWithHintConcurrently(KeyHandle handle, void** hint) override {
-   skip_list_.InsertWithHintConcurrently(static_cast<char*>(handle), hint);
- }
+  void InsertWithHintConcurrently(KeyHandle handle, void** hint) override {
+    skip_list_.InsertWithHintConcurrently(static_cast<char*>(handle), hint);
+  }
 
- bool InsertKeyWithHintConcurrently(KeyHandle handle, void** hint) override {
-   return skip_list_.InsertWithHintConcurrently(static_cast<char*>(handle),
-                                                hint);
- }
+  bool InsertKeyWithHintConcurrently(KeyHandle handle, void** hint) override {
+    return skip_list_.InsertWithHintConcurrently(static_cast<char*>(handle),
+                                                 hint);
+  }
 
- void InsertConcurrently(KeyHandle handle) override {
-   skip_list_.InsertConcurrently(static_cast<char*>(handle));
- }
+  void InsertConcurrently(KeyHandle handle) override {
+    skip_list_.InsertConcurrently(static_cast<char*>(handle));
+  }
 
- bool InsertKeyConcurrently(KeyHandle handle) override {
-   return skip_list_.InsertConcurrently(static_cast<char*>(handle));
- }
+  bool InsertKeyConcurrently(KeyHandle handle) override {
+    return skip_list_.InsertConcurrently(static_cast<char*>(handle));
+  }
 
   // Returns true iff an entry that compares equal to key is in the list.
- bool Contains(const char* key) const override {
-   return skip_list_.Contains(key);
- }
-
- size_t ApproximateMemoryUsage() override {
-   // All memory is allocated through allocator; nothing to report here
-   return 0;
- }
-
- void Get(const LookupKey& k, void* callback_args,
-          bool (*callback_func)(void* arg, const char* entry)) override {
-   SkipListRep::Iterator iter(&skip_list_);
-   Slice dummy_slice;
-   for (iter.Seek(dummy_slice, k.memtable_key().data());
-        iter.Valid() && callback_func(callback_args, iter.key()); iter.Next()) {
-   }
- }
+  bool Contains(const char* key) const override {
+    return skip_list_.Contains(key);
+  }
+
+  size_t ApproximateMemoryUsage() override {
+    // All memory is allocated through allocator; nothing to report here
+    return 0;
+  }
+
+  void Get(const LookupKey& k, void* callback_args,
+           bool (*callback_func)(void* arg, const char* entry)) override {
+    SkipListRep::Iterator iter(&skip_list_);
+    Slice dummy_slice;
+    for (iter.Seek(dummy_slice, k.memtable_key().data());
+         iter.Valid() && callback_func(callback_args, iter.key());
+         iter.Next()) {
+    }
+  }
 
   uint64_t ApproximateNumEntries(const Slice& start_ikey,
                                  const Slice& end_ikey) override {
@@ -218,7 +220,7 @@ public:
     void SeekToLast() override { iter_.SeekToLast(); }
 
    protected:
-    std::string tmp_; // For passing to EncodeKey
+    std::string tmp_;  // For passing to EncodeKey
   };
 
   // Iterator over the contents of a skip list which also keeps track of the
   // previously visited node. In Seek(), if the target key hasn't been found.
   class LookaheadIterator : public MemTableRep::Iterator {
    public:
-    explicit LookaheadIterator(const SkipListRep& rep) :
-        rep_(rep), iter_(&rep_.skip_list_), prev_(iter_) {}
+    explicit LookaheadIterator(const SkipListRep& rep)
+        : rep_(rep), iter_(&rep_.skip_list_), prev_(iter_) {}
 
     ~LookaheadIterator() override {}
 
@@ -271,9 +273,9 @@ public:
     }
 
     void Seek(const Slice& internal_key, const char* memtable_key) override {
-      const char *encoded_key =
-        (memtable_key != nullptr) ?
-        memtable_key : EncodeKey(&tmp_, internal_key);
+      const char* encoded_key = (memtable_key != nullptr)
+                                    ? memtable_key
+                                    : EncodeKey(&tmp_, internal_key);
 
       if (prev_.Valid() && rep_.cmp_(encoded_key, prev_.key()) >= 0) {
         // prev_.key() is smaller or equal to our target key; do a quick
@@ -313,7 +315,7 @@ public:
     }
 
    protected:
-    std::string tmp_; // For passing to EncodeKey
+    std::string tmp_;  // For passing to EncodeKey
 
    private:
     const SkipListRep& rep_;
 
   MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
     if (lookahead_ > 0) {
-      void *mem =
-        arena ? arena->AllocateAligned(sizeof(SkipListRep::LookaheadIterator))
-              : operator new(sizeof(SkipListRep::LookaheadIterator));
+      void* mem =
+          arena ? arena->AllocateAligned(sizeof(SkipListRep::LookaheadIterator))
+                :
+                operator new(sizeof(SkipListRep::LookaheadIterator));
       return new (mem) SkipListRep::LookaheadIterator(*this);
     } else {
-      void *mem =
-        arena ? arena->AllocateAligned(sizeof(SkipListRep::Iterator))
-              : operator new(sizeof(SkipListRep::Iterator));
+      void* mem = arena ? arena->AllocateAligned(sizeof(SkipListRep::Iterator))
+                        :
+                        operator new(sizeof(SkipListRep::Iterator));
       return new (mem) SkipListRep::Iterator(&skip_list_);
     }
   }
 };
-}
+}  // namespace
 
 static std::unordered_map<std::string, OptionTypeInfo> skiplist_factory_info = {
 #ifndef ROCKSDB_LITE
diff --git a/memtable/stl_wrappers.h b/memtable/stl_wrappers.h
index e9f8f214c..783a8088d 100644
--- a/memtable/stl_wrappers.h
+++ b/memtable/stl_wrappers.h
@@ -29,5 +29,5 @@ struct Compare : private Base {
   }
 };
 
-}
+}  // namespace stl_wrappers
 }  // namespace ROCKSDB_NAMESPACE
diff --git a/memtable/vectorrep.cc b/memtable/vectorrep.cc
index 26c699ca6..293163349 100644
--- a/memtable/vectorrep.cc
+++ b/memtable/vectorrep.cc
@@ -48,13 +48,14 @@ class VectorRep : public MemTableRep {
     std::shared_ptr<std::vector<const char*>> bucket_;
     std::vector<const char*>::const_iterator mutable cit_;
     const KeyComparator& compare_;
-    std::string tmp_; // For passing to EncodeKey
+    std::string tmp_;  // For passing to EncodeKey
     bool mutable sorted_;
     void DoSort() const;
+
    public:
     explicit Iterator(class VectorRep* vrep,
-        std::shared_ptr<std::vector<const char*>> bucket,
-        const KeyComparator& compare);
+                      std::shared_ptr<std::vector<const char*>> bucket,
+                      const KeyComparator& compare);
 
     // Initialize an iterator over the specified collection.
     // The returned iterator is not valid.
@@ -123,12 +124,10 @@ void VectorRep::MarkReadOnly() {
 }
 
 size_t VectorRep::ApproximateMemoryUsage() {
-  return
-    sizeof(bucket_) + sizeof(*bucket_) +
-    bucket_->size() *
-    sizeof(
-      std::remove_reference<decltype(*bucket_)>::type::value_type
-    );
+  return sizeof(bucket_) + sizeof(*bucket_) +
+         bucket_->size() *
+             sizeof(
+                 std::remove_reference<decltype(*bucket_)>::type::value_type);
 }
 
 VectorRep::VectorRep(const KeyComparator& compare, Allocator* allocator,
@@ -142,13 +141,13 @@ VectorRep::VectorRep(const KeyComparator& compare, Allocator* allocator,
 }
 
 VectorRep::Iterator::Iterator(class VectorRep* vrep,
-                   std::shared_ptr<std::vector<const char*>> bucket,
-                   const KeyComparator& compare)
-: vrep_(vrep),
-  bucket_(bucket),
-  cit_(bucket_->end()),
-  compare_(compare),
-  sorted_(false) { }
+                              std::shared_ptr<std::vector<const char*>> bucket,
+                              const KeyComparator& compare)
+    : vrep_(vrep),
+      bucket_(bucket),
+      cit_(bucket_->end()),
+      compare_(compare),
+      sorted_(false) {}
 
 void VectorRep::Iterator::DoSort() const {
   // vrep is non-null means that we are working on an immutable memtable
@@ -216,12 +215,11 @@ void VectorRep::Iterator::Seek(const Slice& user_key,
   // Do binary search to find first value not less than the target
   const char* encoded_key =
       (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
-  cit_ = std::equal_range(bucket_->begin(),
-                          bucket_->end(),
-                          encoded_key,
-                          [this] (const char* a, const char* b) {
+  cit_ = std::equal_range(bucket_->begin(), bucket_->end(), encoded_key,
+                          [this](const char* a, const char* b) {
                             return compare_(a, b) < 0;
-                          }).first;
+                          })
+             .first;
 }
 
 // Advance to the first entry with a key <= target
@@ -282,7 +280,7 @@ MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
     }
   } else {
     std::shared_ptr<Bucket> tmp;
-    tmp.reset(new Bucket(*bucket_)); // make a copy
+    tmp.reset(new Bucket(*bucket_));  // make a copy
     if (arena == nullptr) {
       return new Iterator(nullptr, tmp, compare_);
     } else {
@@ -290,7 +288,7 @@ MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
     }
   }
 }
-} // anon namespace
+}  // namespace
 
 static std::unordered_map<std::string, OptionTypeInfo> vector_rep_table_info = {
     {"count",
diff --git a/memtable/write_buffer_manager_test.cc b/memtable/write_buffer_manager_test.cc
index 546df894a..1cc4c2cc5 100644
--- a/memtable/write_buffer_manager_test.cc
+++ b/memtable/write_buffer_manager_test.cc
@@ -8,6 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "rocksdb/write_buffer_manager.h"
+
 #include "test_util/testharness.h"
 
 namespace ROCKSDB_NAMESPACE {