diff --git a/db/column_family.cc b/db/column_family.cc index fe56db721..591bea672 100644 --- a/db/column_family.cc +++ b/db/column_family.cc @@ -614,7 +614,7 @@ bool ColumnFamilyData::ReturnThreadLocalSuperVersion(SuperVersion* sv) { void* expected = SuperVersion::kSVInUse; if (local_sv_->CompareAndSwap(static_cast(sv), expected)) { // When we see kSVInUse in the ThreadLocal, we are sure ThreadLocal - // storage has not been altered and no Scrape has happend. The + // storage has not been altered and no Scrape has happened. The // SuperVersion is still current. return true; } else { diff --git a/db/column_family_test.cc b/db/column_family_test.cc index a87294b8f..76c1bbbef 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -268,7 +268,7 @@ class ColumnFamilyTest : public testing::Test { VectorLogPtr wal_files; Status s; // GetSortedWalFiles is a flakey function -- it gets all the wal_dir - // children files and then later checks for their existance. if some of the + // children files and then later checks for their existence. if some of the // log files doesn't exist anymore, it reports an error. it does all of this // without DB mutex held, so if a background process deletes the log file // while the function is being executed, it returns an error. We retry the diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc index 90293004c..dfa0081e9 100644 --- a/db/compaction_picker.cc +++ b/db/compaction_picker.cc @@ -1248,12 +1248,12 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp( cf_name.c_str(), file_num_buf, loop); } - // Check if the suceeding files need compaction. + // Check if the succeeding files need compaction. 
for (unsigned int i = loop + 1; candidate_count < max_files_to_compact && i < sorted_runs.size(); i++) { - const SortedRun* suceeding_sr = &sorted_runs[i]; - if (suceeding_sr->being_compacted) { + const SortedRun* succeeding_sr = &sorted_runs[i]; + if (succeeding_sr->being_compacted) { break; } // Pick files if the total/last candidate file size (increased by the @@ -1263,14 +1263,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp( // kCompactionStopStyleSimilarSize, it's simply the size of the last // picked file. double sz = candidate_size * (100.0 + ratio) / 100.0; - if (sz < static_cast(suceeding_sr->size)) { + if (sz < static_cast(succeeding_sr->size)) { break; } if (ioptions_.compaction_options_universal.stop_style == kCompactionStopStyleSimilarSize) { // Similar-size stopping rule: also check the last picked file isn't // far larger than the next candidate file. - sz = (suceeding_sr->size * (100.0 + ratio)) / 100.0; + sz = (succeeding_sr->size * (100.0 + ratio)) / 100.0; if (sz < static_cast(candidate_size)) { // If the small file we've encountered begins a run of similar-size // files, we'll pick them up on a future iteration of the outer @@ -1278,9 +1278,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp( // by the last-resort read amp strategy which disregards size ratios. break; } - candidate_size = suceeding_sr->compensated_file_size; + candidate_size = succeeding_sr->compensated_file_size; } else { // default kCompactionStopStyleTotalSize - candidate_size += suceeding_sr->compensated_file_size; + candidate_size += succeeding_sr->compensated_file_size; } candidate_count++; } diff --git a/db/corruption_test.cc b/db/corruption_test.cc index ce31cf61c..b9a246138 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -103,7 +103,7 @@ class CorruptionTest : public testing::Test { // db itself will raise errors because data is corrupted. 
// Instead, we want the reads to be successful and this test // will detect whether the appropriate corruptions have - // occured. + // occurred. Iterator* iter = db_->NewIterator(ReadOptions(false, true)); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { uint64_t key; diff --git a/db/db_iter.cc b/db/db_iter.cc index 3ad90ffd4..ce75f4386 100644 --- a/db/db_iter.cc +++ b/db/db_iter.cc @@ -254,7 +254,7 @@ void DBIter::FindNextUserEntryInternal(bool skipping) { } // If we have sequentially iterated via numerous keys and still not // found the next user-key, then it is better to seek so that we can - // avoid too many key comparisons. We seek to the last occurence of + // avoid too many key comparisons. We seek to the last occurrence of // our current key by looking for sequence number 0. if (skipping && num_skipped > max_skip_) { num_skipped = 0; diff --git a/db/db_test.cc b/db/db_test.cc index 96973aeb2..5dc607e24 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -1295,7 +1295,7 @@ static long TestGetTickerCount(const Options& options, Tickers ticker_type) { // A helper function that ensures the table properties returned in // `GetPropertiesOfAllTablesTest` is correct. -// This test assumes entries size is differnt for each of the tables. +// This test assumes entries size is different for each of the tables. namespace { void VerifyTableProperties(DB* db, uint64_t expected_entries_size) { TablePropertiesCollection props; @@ -1955,7 +1955,7 @@ TEST_F(DBTest, GetEncountersEmptyLevel) { // * sstable B in level 2 // Then do enough Get() calls to arrange for an automatic compaction // of sstable A. A bug would cause the compaction to be marked as - // occuring at level 1 (instead of the correct level 0). + // occurring at level 1 (instead of the correct level 0). // Step 1: First place sstables in levels 0 and 2 int compaction_count = 0; @@ -11648,7 +11648,7 @@ TEST_F(DBTest, DynamicCompactionOptions) { // Test max_mem_compaction_level. 
- // Destory DB and start from scratch + // Destroy DB and start from scratch options.max_background_compactions = 1; options.max_background_flushes = 0; options.max_mem_compaction_level = 2; diff --git a/db/memtable_list.cc b/db/memtable_list.cc index 8adbc65e6..54473dc0d 100644 --- a/db/memtable_list.cc +++ b/db/memtable_list.cc @@ -161,7 +161,7 @@ void MemTableList::RollbackMemtableFlush(const autovector& mems, assert(!mems.empty()); // If the flush was not successful, then just reset state. - // Maybe a suceeding attempt to flush will be successful. + // Maybe a succeeding attempt to flush will be successful. for (MemTable* m : mems) { assert(m->flush_in_progress_); assert(m->file_number_ == 0); @@ -184,7 +184,7 @@ Status MemTableList::InstallMemtableFlushResults( ThreadStatus::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS); mu->AssertHeld(); - // flush was sucessful + // flush was successful for (size_t i = 0; i < mems.size(); ++i) { // All the edits are associated with the first memtable of this batch. assert(i == 0 || mems[i]->GetEdits()->NumEntries() == 0); @@ -193,7 +193,7 @@ Status MemTableList::InstallMemtableFlushResults( mems[i]->file_number_ = file_number; } - // if some other thread is already commiting, then return + // if some other thread is already committing, then return Status s; if (commit_in_progress_) { return s; diff --git a/db/version_set.cc b/db/version_set.cc index a623e51ae..8fb8e300e 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -275,7 +275,7 @@ class FilePicker { static_cast(search_right_bound_)); } else { // search_left_bound > search_right_bound, key does not exist in - // this level. Since no comparision is done in this level, it will + // this level. Since no comparison is done in this level, it will // need to search all files in the next level. 
search_left_bound_ = 0; search_right_bound_ = FileIndexer::kLevelMaxIndex; diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc index 727c77413..7b19574fe 100644 --- a/table/block_based_table_builder.cc +++ b/table/block_based_table_builder.cc @@ -208,7 +208,7 @@ class HashIndexBuilder : public IndexBuilder { pending_entry_index_ = static_cast(current_restart_index_); } else { // entry number increments when keys share the prefix reside in - // differnt data blocks. + // different data blocks. auto last_restart_index = pending_entry_index_ + pending_block_num_ - 1; assert(last_restart_index <= current_restart_index_); if (last_restart_index != current_restart_index_) { @@ -383,7 +383,7 @@ extern const uint64_t kLegacyBlockBasedTableMagicNumber = 0xdb4775248b80fb57ull; // A collector that collects properties of interest to block-based table. // For now this class looks heavy-weight since we only write one additional // property. -// But in the forseeable future, we will add more and more properties that are +// But in the foreseeable future, we will add more and more properties that are // specific to block-based table. class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector : public IntTblPropCollector { diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 96e26b1c5..ed7fb0ba5 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -1347,7 +1347,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader, Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log, "BlockBasedTableOptions::kHashSearch requires " "options.prefix_extractor to be set." - " Fall back to binary seach index."); + " Fall back to binary search index."); index_type_on_file = BlockBasedTableOptions::kBinarySearch; } @@ -1367,7 +1367,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader, // problem with prefix hash index loading. 
Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log, "Unable to read the metaindex block." - " Fall back to binary seach index."); + " Fall back to binary search index."); return BinarySearchIndexReader::Create( file, footer, footer.index_handle(), env, comparator, index_reader); } diff --git a/table/block_hash_index.cc b/table/block_hash_index.cc index a8c965864..02ebcbc9e 100644 --- a/table/block_hash_index.cc +++ b/table/block_hash_index.cc @@ -98,7 +98,7 @@ BlockHashIndex* CreateBlockHashIndexOnTheFly( pending_entry_index = current_restart_index; } else { // entry number increments when keys share the prefix reside in - // differnt data blocks. + // different data blocks. auto last_restart_index = pending_entry_index + pending_block_num - 1; assert(last_restart_index <= current_restart_index); if (last_restart_index != current_restart_index) { diff --git a/tools/db_stress.cc b/tools/db_stress.cc index 986344596..83d55a62d 100644 --- a/tools/db_stress.cc +++ b/tools/db_stress.cc @@ -174,7 +174,7 @@ DEFINE_int32(compaction_thread_pool_adjust_interval, 0, "The interval (in milliseconds) to adjust compaction thread pool " "size. 
Don't change it periodically if the value is 0."); -DEFINE_int32(compaction_thread_pool_varations, 2, +DEFINE_int32(compaction_thread_pool_variations, 2, "Range of bakground thread pool size variations when adjusted " "periodically."); diff --git a/util/bloom.cc b/util/bloom.cc index 007d4f273..d3f3abd61 100644 --- a/util/bloom.cc +++ b/util/bloom.cc @@ -43,7 +43,7 @@ class FullFilterBitsBuilder : public FilterBitsBuilder { // When creating filter, it is ensured that // total_bits = num_lines * CACHE_LINE_SIZE * 8 // dst len is >= 5, 1 for num_probes, 4 for num_lines - // Then total_bits = (len - 5) * 8, and cache_line_size could be calulated + // Then total_bits = (len - 5) * 8, and cache_line_size could be calculated // +----------------------------------------------------------------+ // | filter data with length total_bits/8 | // +----------------------------------------------------------------+ diff --git a/util/env_hdfs.cc b/util/env_hdfs.cc index b0a3b6751..298eb48fa 100644 --- a/util/env_hdfs.cc +++ b/util/env_hdfs.cc @@ -562,7 +562,7 @@ Status HdfsEnv::GetFileModificationTime(const std::string& fname, } // The rename is not atomic. HDFS does not allow a renaming if the -// target already exists. So, we delete the target before attemting the +// target already exists. So, we delete the target before attempting the // rename. Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) { hdfsDelete(fileSys_, target.c_str(), 1); diff --git a/util/histogram.cc b/util/histogram.cc index 67621a5fc..5a875e54d 100644 --- a/util/histogram.cc +++ b/util/histogram.cc @@ -19,7 +19,7 @@ namespace rocksdb { HistogramBucketMapper::HistogramBucketMapper() : // Add newer bucket index here. - // Should be alwyas added in sorted order. + // Should be always added in sorted order. 
// If you change this, you also need to change // size of array buckets_ in HistogramImpl bucketValues_( diff --git a/util/histogram_test.cc b/util/histogram_test.cc index edceffaaa..22ddb4b42 100644 --- a/util/histogram_test.cc +++ b/util/histogram_test.cc @@ -33,7 +33,7 @@ TEST_F(HistogramTest, BasicOperation) { ASSERT_TRUE(percentile99 >= percentile85); } - ASSERT_EQ(histogram.Average(), 50.5); // avg is acurately caluclated. + ASSERT_EQ(histogram.Average(), 50.5); // avg is accurately calculated. } TEST_F(HistogramTest, EmptyHistogram) { diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc index 1f86aeda0..6a2f11cfb 100644 --- a/utilities/backupable/backupable_db.cc +++ b/utilities/backupable/backupable_db.cc @@ -419,7 +419,7 @@ BackupEngineImpl::BackupEngineImpl(Env* db_env, &backuped_file_infos_, backup_env_))))); } - if (options_.destroy_old_data) { // Destory old data + if (options_.destroy_old_data) { // Destroy old data assert(!read_only_); Log(options_.info_log, "Backup Engine started with destroy_old_data == true, deleting all "