From 711881bc257d761fd44924686069cb5a0cea580b Mon Sep 17 00:00:00 2001
From: storagezhang
Date: Thu, 25 Mar 2021 21:17:17 -0700
Subject: [PATCH] Fix some typos in comments (#8066)

Summary:
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8066

Reviewed By: jay-zhuang

Differential Revision: D27280799

Pulled By: mrambacher

fbshipit-source-id: 68f91f5af4ffe0a84be581961bf9366887f47702
---
 cache/lru_cache.h                            |  4 +-
 cache/lru_cache_test.cc                      |  2 +-
 db/blob/db_blob_basic_test.cc                |  2 +-
 db/column_family.h                           |  4 +-
 db/compaction/compaction.cc                  |  2 +-
 db/compaction/compaction.h                   |  2 +-
 db/compaction/compaction_iterator.cc         |  2 +-
 db/compaction/compaction_iterator_test.cc    | 54 +++++++++-----------
 db/compaction/compaction_job.cc              |  4 +-
 db/compaction/compaction_picker_test.cc      |  8 +--
 db/compaction/compaction_picker_universal.cc |  2 +-
 db/db_iter.cc                                |  4 +-
 db/db_iter.h                                 |  2 +-
 db/dbformat.h                                |  2 +-
 db/error_handler.h                           |  2 +-
 db/forward_iterator.cc                       |  2 +-
 db/memtable.h                                |  6 +--
 db/memtable_list.cc                          |  2 +-
 db/range_del_aggregator.h                    |  4 +-
 db/snapshot_impl.h                           |  2 +-
 db/table_cache.h                             |  2 +-
 db/version_edit.h                            |  4 +-
 db/version_set.cc                            | 12 ++---
 23 files changed, 63 insertions(+), 67 deletions(-)

diff --git a/cache/lru_cache.h b/cache/lru_cache.h
index 32edd9ec9..cee3f148b 100644
--- a/cache/lru_cache.h
+++ b/cache/lru_cache.h
@@ -239,7 +239,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   // not threadsafe
   size_t TEST_GetLRUSize();
 
-  // Retrives high pri pool ratio
+  // Retrieves high pri pool ratio
   double GetHighPriPoolRatio();
 
  private:
@@ -328,7 +328,7 @@ class LRUCache
   // Retrieves number of elements in LRU, for unit test purpose only
   size_t TEST_GetLRUSize();
 
-  // Retrives high pri pool ratio
+  // Retrieves high pri pool ratio
   double GetHighPriPoolRatio();
 
  private:
diff --git a/cache/lru_cache_test.cc b/cache/lru_cache_test.cc
index 08c05024a..b30227e7b 100644
--- a/cache/lru_cache_test.cc
+++ b/cache/lru_cache_test.cc
@@ -30,7 +30,7 @@ class LRUCacheTest : public testing::Test {
     DeleteCache();
     cache_ = reinterpret_cast<LRUCacheShard*>(
         port::cacheline_aligned_alloc(sizeof(LRUCacheShard)));
-    new (cache_) LRUCacheShard(capacity, false /*strict_capcity_limit*/,
+    new (cache_) LRUCacheShard(capacity, false /*strict_capacity_limit*/,
                                high_pri_pool_ratio, use_adaptive_mutex,
                                kDontChargeCacheMetadata);
   }
diff --git a/db/blob/db_blob_basic_test.cc b/db/blob/db_blob_basic_test.cc
index 2f20b9500..a47dd541b 100644
--- a/db/blob/db_blob_basic_test.cc
+++ b/db/blob/db_blob_basic_test.cc
@@ -236,7 +236,7 @@ TEST_F(DBBlobBasicTest, GenerateIOTracing) {
     ASSERT_OK(env_->FileExists(trace_file));
   }
   {
-    // Parse trace file to check file opertions related to blob files are
+    // Parse trace file to check file operations related to blob files are
     // recorded.
     std::unique_ptr<TraceReader> trace_reader;
    ASSERT_OK(
diff --git a/db/column_family.h b/db/column_family.h
index 74ed74bc6..7dafe2a31 100644
--- a/db/column_family.h
+++ b/db/column_family.h
@@ -253,7 +253,7 @@ extern Status CheckCFPathsSupported(const DBOptions& db_options,
 extern ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
                                            const ColumnFamilyOptions& src);
 
-// Wrap user defined table proproties collector factories `from cf_options`
+// Wrap user defined table properties collector factories `from cf_options`
 // into internal ones in int_tbl_prop_collector_factories. Add a system internal
 // one too.
 extern void GetIntTblPropCollectorFactory(
@@ -441,7 +441,7 @@ class ColumnFamilyData {
   // Get SuperVersion stored in thread local storage. If it does not exist,
   // get a reference from a current SuperVersion.
   SuperVersion* GetThreadLocalSuperVersion(DBImpl* db);
-  // Try to return SuperVersion back to thread local storage. Retrun true on
+  // Try to return SuperVersion back to thread local storage. Return true on
   // success and false on failure. It fails when the thread local storage
   // contains anything other than SuperVersion::kSVInUse flag.
   bool ReturnThreadLocalSuperVersion(SuperVersion* sv);
diff --git a/db/compaction/compaction.cc b/db/compaction/compaction.cc
index 2550e0c47..82bf73d0c 100644
--- a/db/compaction/compaction.cc
+++ b/db/compaction/compaction.cc
@@ -519,7 +519,7 @@ uint64_t Compaction::OutputFilePreallocationSize() const {
 
   // Over-estimate slightly so we don't end up just barely crossing
   // the threshold
-  // No point to prellocate more than 1GB.
+  // No point to preallocate more than 1GB.
   return std::min(uint64_t{1073741824},
                   preallocation_size + (preallocation_size / 10));
 }
diff --git a/db/compaction/compaction.h b/db/compaction/compaction.h
index d25ffd603..049ab5e1c 100644
--- a/db/compaction/compaction.h
+++ b/db/compaction/compaction.h
@@ -341,7 +341,7 @@ class Compaction {
   const uint32_t output_path_id_;
   CompressionType output_compression_;
   CompressionOptions output_compression_opts_;
-  // If true, then the comaction can be done by simply deleting input files.
+  // If true, then the compaction can be done by simply deleting input files.
   const bool deletion_compaction_;
 
   // Compaction input files organized by level. Constant after construction
diff --git a/db/compaction/compaction_iterator.cc b/db/compaction/compaction_iterator.cc
index 1e8ffd1f6..557b81bc9 100644
--- a/db/compaction/compaction_iterator.cc
+++ b/db/compaction/compaction_iterator.cc
@@ -135,7 +135,7 @@ CompactionIterator::CompactionIterator(
 }
 
 CompactionIterator::~CompactionIterator() {
-  // input_ Iteartor lifetime is longer than pinned_iters_mgr_ lifetime
+  // input_ Iterator lifetime is longer than pinned_iters_mgr_ lifetime
   input_->SetPinnedItersMgr(nullptr);
 }
 
diff --git a/db/compaction/compaction_iterator_test.cc b/db/compaction/compaction_iterator_test.cc
index 12a427075..e14e5ec12 100644
--- a/db/compaction/compaction_iterator_test.cc
+++ b/db/compaction/compaction_iterator_test.cc
@@ -38,7 +38,7 @@ class NoMergingMergeOp : public MergeOperator {
 
 // Compaction filter that gets stuck when it sees a particular key,
 // then gets unstuck when told to.
-// Always returns Decition::kRemove.
+// Always returns Decision::kRemove.
 class StallingFilter : public CompactionFilter {
  public:
   Decision FilterV2(int /*level*/, const Slice& key, ValueType /*type*/,
@@ -189,7 +189,7 @@ class FakeCompaction : public CompactionIterator::CompactionProxy {
   bool is_allow_ingest_behind = false;
 };
 
-// A simplifed snapshot checker which assumes each snapshot has a global
+// A simplified snapshot checker which assumes each snapshot has a global
 // last visible sequence.
 class TestSnapshotChecker : public SnapshotChecker {
  public:
@@ -711,7 +711,7 @@ TEST_P(CompactionIteratorTest, ZeroOutSequenceAtBottomLevel) {
   RunTest({test::KeyStr("a", 1, kTypeValue), test::KeyStr("b", 2, kTypeValue)},
           {"v1", "v2"},
           {test::KeyStr("a", 0, kTypeValue), test::KeyStr("b", 2, kTypeValue)},
-          {"v1", "v2"}, kMaxSequenceNumber /*last_commited_seq*/,
+          {"v1", "v2"}, kMaxSequenceNumber /*last_committed_seq*/,
           nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
           true /*bottommost_level*/);
 }
@@ -720,15 +720,14 @@ TEST_P(CompactionIteratorTest, ZeroOutSequenceAtBottomLevel) {
 // permanently.
 TEST_P(CompactionIteratorTest, RemoveDeletionAtBottomLevel) {
   AddSnapshot(1);
-  RunTest({test::KeyStr("a", 1, kTypeDeletion),
-           test::KeyStr("b", 3, kTypeDeletion),
-           test::KeyStr("b", 1, kTypeValue)},
-          {"", "", ""},
-          {test::KeyStr("b", 3, kTypeDeletion),
-           test::KeyStr("b", 0, kTypeValue)},
-          {"", ""},
-          kMaxSequenceNumber /*last_commited_seq*/, nullptr /*merge_operator*/,
-          nullptr /*compaction_filter*/, true /*bottommost_level*/);
+  RunTest(
+      {test::KeyStr("a", 1, kTypeDeletion), test::KeyStr("b", 3, kTypeDeletion),
+       test::KeyStr("b", 1, kTypeValue)},
+      {"", "", ""},
+      {test::KeyStr("b", 3, kTypeDeletion), test::KeyStr("b", 0, kTypeValue)},
+      {"", ""}, kMaxSequenceNumber /*last_committed_seq*/,
+      nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
+      true /*bottommost_level*/);
 }
 
 // In bottommost level, single deletions earlier than earliest snapshot can be
@@ -738,7 +737,7 @@ TEST_P(CompactionIteratorTest, RemoveSingleDeletionAtBottomLevel) {
   AddSnapshot(1);
   RunTest({test::KeyStr("a", 1, kTypeSingleDeletion),
            test::KeyStr("b", 2, kTypeSingleDeletion)},
           {"", ""}, {test::KeyStr("b", 2, kTypeSingleDeletion)}, {""},
-          kMaxSequenceNumber /*last_commited_seq*/, nullptr /*merge_operator*/,
+          kMaxSequenceNumber /*last_committed_seq*/, nullptr /*merge_operator*/,
           nullptr /*compaction_filter*/, true /*bottommost_level*/);
 }
@@ -895,7 +894,7 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
           {"v1", "v2", "v3"},
           {test::KeyStr("a", 0, kTypeValue), test::KeyStr("b", 2, kTypeValue),
            test::KeyStr("c", 3, kTypeValue)},
-          {"v1", "v2", "v3"}, kMaxSequenceNumber /*last_commited_seq*/,
+          {"v1", "v2", "v3"}, kMaxSequenceNumber /*last_committed_seq*/,
           nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
           true /*bottommost_level*/);
 }
@@ -906,9 +905,7 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
   RunTest(
       {test::KeyStr("a", 1, kTypeDeletion), test::KeyStr("b", 2, kTypeDeletion),
       test::KeyStr("c", 3, kTypeDeletion)},
-      {"", "", ""},
-      {},
-      {"", ""}, kMaxSequenceNumber /*last_commited_seq*/,
+      {"", "", ""}, {}, {"", ""}, kMaxSequenceNumber /*last_committed_seq*/,
       nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
       true /*bottommost_level*/);
 }
@@ -916,15 +913,14 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
 TEST_F(CompactionIteratorWithSnapshotCheckerTest,
        NotRemoveDeletionIfValuePresentToEarlierSnapshot) {
   AddSnapshot(2,1);
-  RunTest(
-      {test::KeyStr("a", 4, kTypeDeletion), test::KeyStr("a", 1, kTypeValue),
-       test::KeyStr("b", 3, kTypeValue)},
-      {"", "", ""},
-      {test::KeyStr("a", 4, kTypeDeletion), test::KeyStr("a", 0, kTypeValue),
-       test::KeyStr("b", 3, kTypeValue)},
-      {"", "", ""}, kMaxSequenceNumber /*last_commited_seq*/,
-      nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
-      true /*bottommost_level*/);
+  RunTest({test::KeyStr("a", 4, kTypeDeletion),
+           test::KeyStr("a", 1, kTypeValue), test::KeyStr("b", 3, kTypeValue)},
+          {"", "", ""},
+          {test::KeyStr("a", 4, kTypeDeletion),
+ test::KeyStr("a", 0, kTypeValue), test::KeyStr("b", 3, kTypeValue)}, + {"", "", ""}, kMaxSequenceNumber /*last_committed_seq*/, + nullptr /*merge_operator*/, nullptr /*compaction_filter*/, + true /*bottommost_level*/); } TEST_F(CompactionIteratorWithSnapshotCheckerTest, @@ -936,7 +932,7 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest, {"", "", ""}, {test::KeyStr("b", 2, kTypeSingleDeletion), test::KeyStr("c", 3, kTypeSingleDeletion)}, - {"", ""}, kMaxSequenceNumber /*last_commited_seq*/, + {"", ""}, kMaxSequenceNumber /*last_committed_seq*/, nullptr /*merge_operator*/, nullptr /*compaction_filter*/, true /*bottommost_level*/); } @@ -986,8 +982,8 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest, } // Compaction filter should keep uncommitted key as-is, and -// * Convert the latest velue to deletion, and/or -// * if latest value is a merge, apply filter to all suequent merges. +// * Convert the latest value to deletion, and/or +// * if latest value is a merge, apply filter to all subsequent merges. TEST_F(CompactionIteratorWithSnapshotCheckerTest, CompactionFilter_Value) { std::unique_ptr compaction_filter( diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc index 6db50531e..4813fd313 100644 --- a/db/compaction/compaction_job.cc +++ b/db/compaction/compaction_job.cc @@ -150,7 +150,7 @@ struct CompactionJob::SubcompactionState { // This subcompaction's output could be empty if compaction was aborted // before this subcompaction had a chance to generate any output files. // When subcompactions are executed sequentially this is more likely and - // will be particulalry likely for the later subcompactions to be empty. + // will be particularly likely for the later subcompactions to be empty. // Once they are run in parallel however it should be much rarer. return nullptr; } else { @@ -410,7 +410,7 @@ void CompactionJob::Prepare() { AutoThreadOperationStageUpdater stage_updater( ThreadStatus::STAGE_COMPACTION_PREPARE); - // Generate file_levels_ for compaction berfore making Iterator + // Generate file_levels_ for compaction before making Iterator auto* c = compact_->compaction; assert(c->column_family_data() != nullptr); assert(c->column_family_data()->current()->storage_info()->NumLevelFiles( diff --git a/db/compaction/compaction_picker_test.cc b/db/compaction/compaction_picker_test.cc index bcd977667..008dfe11c 100644 --- a/db/compaction/compaction_picker_test.cc +++ b/db/compaction/compaction_picker_test.cc @@ -650,7 +650,7 @@ TEST_F(CompactionPickerTest, UniversalPeriodicCompaction3) { TEST_F(CompactionPickerTest, UniversalPeriodicCompaction4) { // The case where universal periodic compaction couldn't form - // a compaction that inlcudes any file marked for periodic compaction. + // a compaction that includes any file marked for periodic compaction. // Right now we form the compaction anyway if it is more than one // sorted run. Just put the case here to validate that it doesn't // crash. 
@@ -800,7 +800,7 @@ TEST_F(CompactionPickerTest, CompactionPriMinOverlapping2) {
   Add(2, 6U, "150", "175",
       60000000U);  // Overlaps with file 26, 27, total size 521M
   Add(2, 7U, "176", "200", 60000000U);  // Overlaps with file 27, 28, total size
-                                        // 520M, the smalelst overlapping
+                                        // 520M, the smallest overlapping
   Add(2, 8U, "201", "300",
       60000000U);  // Overlaps with file 28, 29, total size 521M
 
@@ -1228,7 +1228,7 @@ TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri1) {
   Add(0, 32U, "001", "400", 1000000000U, 0, 0);
   Add(0, 33U, "001", "400", 1000000000U, 0, 0);
 
-  // L1 total size 2GB, score 2.2. If one file being comapcted, score 1.1.
+  // L1 total size 2GB, score 2.2. If one file being compacted, score 1.1.
   Add(1, 4U, "050", "300", 1000000000U, 0, 0);
   file_map_[4u].first->being_compacted = true;
   Add(1, 5U, "301", "350", 1000000000U, 0, 0);
@@ -1261,7 +1261,7 @@ TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri2) {
   Add(0, 32U, "001", "400", 1000000000U, 0, 0);
   Add(0, 33U, "001", "400", 1000000000U, 0, 0);
 
-  // L1 total size 2GB, score 2.2. If one file being comapcted, score 1.1.
+  // L1 total size 2GB, score 2.2. If one file being compacted, score 1.1.
   Add(1, 4U, "050", "300", 1000000000U, 0, 0);
   Add(1, 5U, "301", "350", 1000000000U, 0, 0);
 
diff --git a/db/compaction/compaction_picker_universal.cc b/db/compaction/compaction_picker_universal.cc
index 1e95191d6..e04252514 100644
--- a/db/compaction/compaction_picker_universal.cc
+++ b/db/compaction/compaction_picker_universal.cc
@@ -733,7 +733,7 @@ Compaction* UniversalCompactionBuilder::PickCompactionToReduceSortedRuns(
 }
 
 // Look at overall size amplification. If size amplification
-// exceeeds the configured value, then do a compaction
+// exceeds the configured value, then do a compaction
 // of the candidate files all the way upto the earliest
 // base file (overrides configured values of file-size ratios,
 // min_merge_width and max_merge_width).
diff --git a/db/db_iter.cc b/db/db_iter.cc
index 71ae91bb0..9daca967a 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -1343,7 +1343,7 @@ void DBIter::Seek(const Slice& target) {
   // we need to find out the next key that is visible to the user.
   ClearSavedValue();
   if (prefix_same_as_start_) {
-    // The case where the iterator needs to be invalidated if it has exausted
+    // The case where the iterator needs to be invalidated if it has exhausted
     // keys within the same prefix of the seek key.
     assert(prefix_extractor_ != nullptr);
     Slice target_prefix = prefix_extractor_->Transform(target);
@@ -1418,7 +1418,7 @@ void DBIter::SeekForPrev(const Slice& target) {
   // backward direction.
   ClearSavedValue();
   if (prefix_same_as_start_) {
-    // The case where the iterator needs to be invalidated if it has exausted
+    // The case where the iterator needs to be invalidated if it has exhausted
     // keys within the same prefix of the seek key.
     assert(prefix_extractor_ != nullptr);
     Slice target_prefix = prefix_extractor_->Transform(target);
diff --git a/db/db_iter.h b/db/db_iter.h
index 621d3280e..83780c685 100644
--- a/db/db_iter.h
+++ b/db/db_iter.h
@@ -235,7 +235,7 @@ class DBIter final : public Iterator {
   // If `skipping_saved_key` is true, the function will keep iterating until it
   // finds a user key that is larger than `saved_key_`.
   // If `prefix` is not null, the iterator needs to stop when all keys for the
-  // prefix are exhausted and the interator is set to invalid.
+  // prefix are exhausted and the iterator is set to invalid.
   bool FindNextUserEntry(bool skipping_saved_key, const Slice* prefix);
   // Internal implementation of FindNextUserEntry().
   bool FindNextUserEntryInternal(bool skipping_saved_key, const Slice* prefix);
diff --git a/db/dbformat.h b/db/dbformat.h
index 76cc40aa8..c3f5c5437 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -616,7 +616,7 @@ class IterKey {
   void EnlargeBuffer(size_t key_size);
 };
 
-// Convert from a SliceTranform of user keys, to a SliceTransform of
+// Convert from a SliceTransform of user keys, to a SliceTransform of
 // user keys.
 class InternalKeySliceTransform : public SliceTransform {
  public:
diff --git a/db/error_handler.h b/db/error_handler.h
index 6c7373b90..ab1169bc9 100644
--- a/db/error_handler.h
+++ b/db/error_handler.h
@@ -103,7 +103,7 @@ class ErrorHandler {
   bool auto_recovery_;
   bool recovery_in_prog_;
   // A flag to indicate that for the soft error, we should not allow any
-  // backrgound work execpt the work is from recovery.
+  // background work except the work is from recovery.
   bool soft_error_no_bg_work_;
 
   // Used to store the context for recover, such as flush reason.
diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc
index a15a825a4..80dd1bb9e 100644
--- a/db/forward_iterator.cc
+++ b/db/forward_iterator.cc
@@ -426,7 +426,7 @@ void ForwardIterator::SeekInternal(const Slice& internal_key,
       if (seek_to_first) {
         l0_iters_[i]->SeekToFirst();
       } else {
-        // If the target key passes over the larget key, we are sure Next()
+        // If the target key passes over the largest key, we are sure Next()
         // won't go over this file.
         if (user_comparator_->Compare(target_user_key,
                                       l0[i]->largest.user_key()) > 0) {
diff --git a/db/memtable.h b/db/memtable.h
index 81d40f077..486aa6302 100644
--- a/db/memtable.h
+++ b/db/memtable.h
@@ -72,7 +72,7 @@ using MultiGetRange = MultiGetContext::Range;
 // Note: Many of the methods in this class have comments indicating that
 // external synchronization is required as these methods are not thread-safe.
 // It is up to higher layers of code to decide how to prevent concurrent
-// invokation of these methods. This is usually done by acquiring either
+// invocation of these methods. This is usually done by acquiring either
 // the db mutex or the single writer thread.
 //
 // Some of these methods are documented to only require external
@@ -139,7 +139,7 @@ class MemTable {
   // operations on the same MemTable (unless this Memtable is immutable).
   size_t ApproximateMemoryUsage();
 
-  // As a cheap version of `ApproximateMemoryUsage()`, this function doens't
+  // As a cheap version of `ApproximateMemoryUsage()`, this function doesn't
   // require external synchronization. The value may be less accurate though
   size_t ApproximateMemoryUsageFast() const {
     return approximate_memory_usage_.load(std::memory_order_relaxed);
   }
@@ -533,7 +533,7 @@ class MemTable {
   SequenceNumber atomic_flush_seqno_;
 
   // keep track of memory usage in table_, arena_, and range_del_table_.
-  // Gets refrshed inside `ApproximateMemoryUsage()` or `ShouldFlushNow`
+  // Gets refreshed inside `ApproximateMemoryUsage()` or `ShouldFlushNow`
   std::atomic<uint64_t> approximate_memory_usage_;
 
 #ifndef ROCKSDB_LITE
diff --git a/db/memtable_list.cc b/db/memtable_list.cc
index 438549c11..97d076b03 100644
--- a/db/memtable_list.cc
+++ b/db/memtable_list.cc
@@ -521,7 +521,7 @@ void MemTableList::Add(MemTable* m, autovector<MemTable*>* to_delete) {
   InstallNewVersion();
   // this method is used to move mutable memtable into an immutable list.
   // since mutable memtable is already refcounted by the DBImpl,
-  // and when moving to the imutable list we don't unref it,
+  // and when moving to the immutable list we don't unref it,
   // we don't have to ref the memtable here. we just take over the
   // reference from the DBImpl.
   current_->Add(m, to_delete);
diff --git a/db/range_del_aggregator.h b/db/range_del_aggregator.h
index 5e0f336d3..8bbee50fb 100644
--- a/db/range_del_aggregator.h
+++ b/db/range_del_aggregator.h
@@ -43,12 +43,12 @@ class TruncatedRangeDelIterator {
 
   void InternalNext();
 
-  // Seeks to the tombstone with the highest viisble sequence number that covers
+  // Seeks to the tombstone with the highest visible sequence number that covers
   // target (a user key). If no such tombstone exists, the position will be at
   // the earliest tombstone that ends after target.
   void Seek(const Slice& target);
 
-  // Seeks to the tombstone with the highest viisble sequence number that covers
+  // Seeks to the tombstone with the highest visible sequence number that covers
   // target (a user key). If no such tombstone exists, the position will be at
   // the latest tombstone that starts before target.
   void SeekForPrev(const Slice& target);
diff --git a/db/snapshot_impl.h b/db/snapshot_impl.h
index 785f814f8..bfa44e3f5 100644
--- a/db/snapshot_impl.h
+++ b/db/snapshot_impl.h
@@ -23,7 +23,7 @@ class SnapshotImpl : public Snapshot {
   SequenceNumber number_;  // const after creation
   // It indicates the smallest uncommitted data at the time the snapshot was
   // taken. This is currently used by WritePrepared transactions to limit the
-  // scope of queries to IsInSnpashot.
+  // scope of queries to IsInSnapshot.
   SequenceNumber min_uncommitted_ = kMinUnCommittedSeq;
 
   virtual SequenceNumber GetSequenceNumber() const override { return number_; }
diff --git a/db/table_cache.h b/db/table_cache.h
index 1c163cdeb..df459ecbe 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -183,7 +183,7 @@ class TableCache {
 
   Cache* get_cache() const { return cache_; }
 
-  // Capacity of the backing Cache that indicates inifinite TableCache capacity.
+  // Capacity of the backing Cache that indicates infinite TableCache capacity.
   // For example when max_open_files is -1 we set the backing Cache to this.
   static const int kInfiniteCapacity = 0x400000;
 
diff --git a/db/version_edit.h b/db/version_edit.h
index 9054f88ff..7b6884793 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -74,7 +74,7 @@ enum NewFileCustomTag : uint32_t {
   kNeedCompaction = 2,
   // Since Manifest is not entirely forward-compatible, we currently encode
   // kMinLogNumberToKeep as part of NewFile as a hack. This should be removed
-  // when manifest becomes forward-comptabile.
+  // when manifest becomes forward-compatible.
   kMinLogNumberToKeepHack = 3,
   kOldestBlobFileNumber = 4,
   kOldestAncesterTime = 5,
@@ -195,7 +195,7 @@ struct FileMetaData {
 
   // The file could be the compaction output from other SST files, which could
   // in turn be outputs for compact older SST files. We track the memtable
-  // flush timestamp for the oldest SST file that eventaully contribute data
+  // flush timestamp for the oldest SST file that eventually contribute data
   // to this file. 0 means the information is not available.
   uint64_t oldest_ancester_time = kUnknownOldestAncesterTime;
 
diff --git a/db/version_set.cc b/db/version_set.cc
index ebf297ff5..7aad3557f 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -408,7 +408,7 @@ class FilePickerMultiGet {
   int GetCurrentLevel() const { return curr_level_; }
 
   // Iterates through files in the current level until it finds a file that
-  // contains atleast one key from the MultiGet batch
+  // contains at least one key from the MultiGet batch
   bool GetNextFileInLevelWithKeys(MultiGetRange* next_file_range,
                                   size_t* file_index, FdWithKeyRange** fd,
                                   bool* is_last_key_in_file) {
@@ -2786,7 +2786,7 @@ struct Fsize {
   FileMetaData* file;
 };
 
-// Compator that is used to sort files based on their size
+// Comparator that is used to sort files based on their size
 // In normal mode: descending size
 bool CompareCompensatedSizeDescending(const Fsize& first, const Fsize& second) {
   return (first.file->compensated_file_size >
@@ -3206,7 +3206,7 @@ void VersionStorageInfo::GetCleanInputsWithinInterval(
 // specified range. From that file, iterate backwards and
 // forwards to find all overlapping files.
 // if within_range is set, then only store the maximum clean inputs
-// within range [begin, end]. "clean" means there is a boudnary
+// within range [begin, end]. "clean" means there is a boundary
 // between the files in "*inputs" and the surrounding files
 void VersionStorageInfo::GetOverlappingInputsRangeBinarySearch(
     int level, const InternalKey* begin, const InternalKey* end,
@@ -3517,7 +3517,7 @@ void VersionStorageInfo::CalculateBaseBytes(const ImmutableCFOptions& ioptions,
       // 1. the L0 size is larger than level size base, or
       // 2. number of L0 files reaches twice the L0->L1 compaction trigger
       // We don't do this otherwise to keep the LSM-tree structure stable
-      // unless the L0 compation is backlogged.
+      // unless the L0 compaction is backlogged.
       base_level_size = l0_size;
       if (base_level_ == num_levels_ - 1) {
         level_multiplier_ = 1.0;
@@ -4354,7 +4354,7 @@ Status VersionSet::ProcessManifestWrites(
   return s;
 }
 
-// 'datas' is gramatically incorrect. We still use this notation to indicate
+// 'datas' is grammatically incorrect. We still use this notation to indicate
 // that this variable represents a collection of column_family_data.
 Status VersionSet::LogAndApply(
     const autovector<ColumnFamilyData*>& column_family_datas,
@@ -4796,7 +4796,7 @@ Status VersionSet::TryRecoverFromOneManifest(
 Status VersionSet::ListColumnFamilies(std::vector<std::string>* column_families,
                                       const std::string& dbname,
                                       FileSystem* fs) {
-  // these are just for performance reasons, not correcntes,
+  // these are just for performance reasons, not correctness,
   // so we're fine using the defaults
   FileOptions soptions;
   // Read "CURRENT" file, which contains a pointer to the current manifest file