diff --git a/Makefile b/Makefile
index d016614dc..c3de356b4 100644
--- a/Makefile
+++ b/Makefile
@@ -216,7 +216,7 @@ missing_make_config_paths := $(shell \
   done | sort | uniq)
 $(foreach path, $(missing_make_config_paths), \
-	$(warning Warning: $(path) dont exist))
+	$(warning Warning: $(path) does not exist))
 
 ifeq ($(PLATFORM), OS_AIX)
 # no debug info
diff --git a/build_tools/gnu_parallel b/build_tools/gnu_parallel
index 1cf164fff..464c7792f 100755
--- a/build_tools/gnu_parallel
+++ b/build_tools/gnu_parallel
@@ -5801,7 +5801,7 @@ sub workdir {
 		. "-" . $self->seq();
 	} else {
 	    $workdir = $opt::workdir;
-	    # Rsync treats /./ special. We dont want that
+	    # Rsync treats /./ specially. We don't want that
 	    $workdir =~ s:/\./:/:g; # Remove /./
 	    $workdir =~ s:/+$::; # Remove ending / if any
 	    $workdir =~ s:^\./::g; # Remove starting ./ if any
diff --git a/db/column_family_test.cc b/db/column_family_test.cc
index 24ff4e08b..feee3dcf4 100644
--- a/db/column_family_test.cc
+++ b/db/column_family_test.cc
@@ -821,7 +821,7 @@ TEST_P(ColumnFamilyTest, BulkAddDrop) {
 }
 
 TEST_P(ColumnFamilyTest, DropTest) {
-  // first iteration - dont reopen DB before dropping
+  // first iteration - don't reopen DB before dropping
   // second iteration - reopen DB before dropping
   for (int iter = 0; iter < 2; ++iter) {
     Open({"default"});
diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h
index 119555cb4..3ffb6a10e 100644
--- a/db/db_impl/db_impl.h
+++ b/db/db_impl/db_impl.h
@@ -2021,7 +2021,7 @@ class DBImpl : public DB {
   // REQUIRES: mutex locked
   std::unique_ptr thread_persist_stats_;
 
-  // When set, we use a separate queue for writes that dont write to memtable.
+  // When set, we use a separate queue for writes that don't write to memtable.
   // In 2PC these are the writes at Prepare phase.
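+  // (For example, with write-committed 2PC a Prepare writes only to the WAL,
+  // so it goes through this second queue; the memtable write happens later,
+  // at Commit.)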
   const bool two_write_queues_;
   const bool manual_wal_flush_;
diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc
index 99ffb5ce4..f8bf9fd1d 100644
--- a/db/db_iterator_test.cc
+++ b/db/db_iterator_test.cc
@@ -1873,7 +1873,7 @@ TEST_P(DBIteratorTest, IterPrevKeyCrossingBlocksRandomized) {
   DestroyAndReopen(options);
 
   const int kNumKeys = 500;
-  // Small number of merge operands to make sure that DBIter::Prev() dont
+  // Small number of merge operands to make sure that DBIter::Prev() doesn't
   // fall back to Seek()
   const int kNumMergeOperands = 3;
   // Use value size that will make sure that every block contain 1 key
@@ -1908,7 +1908,7 @@ TEST_P(DBIteratorTest, IterPrevKeyCrossingBlocksRandomized) {
   ASSERT_OK(Flush());
 
   // Separate values and merge operands in different file so that we
-  // make sure that we dont merge them while flushing but actually
+  // make sure that we don't merge them while flushing but actually
   // merge them in the read path
   for (int i = 0; i < kNumKeys; i++) {
     if (rnd.PercentTrue(kNoMergeOpPercentage)) {
diff --git a/db/external_sst_file_ingestion_job.cc b/db/external_sst_file_ingestion_job.cc
index 4cec5d376..890dbc73f 100644
--- a/db/external_sst_file_ingestion_job.cc
+++ b/db/external_sst_file_ingestion_job.cc
@@ -46,7 +46,7 @@ Status ExternalSstFileIngestionJob::Prepare(
             TablePropertiesCollectorFactory::Context::kUnknownColumnFamily &&
         f.cf_id != cfd_->GetID()) {
       return Status::InvalidArgument(
-          "External file column family id dont match");
+          "External file column family id doesn't match");
     }
   }
 
@@ -55,7 +55,7 @@ Status ExternalSstFileIngestionJob::Prepare(
   if (num_files == 0) {
     return Status::InvalidArgument("The list of files is empty");
   } else if (num_files > 1) {
-    // Verify that passed files dont have overlapping ranges
+    // Verify that passed files don't have overlapping ranges
     autovector sorted_files;
     for (size_t i = 0; i < num_files; i++) {
       sorted_files.push_back(&files_to_ingest_[i]);
@@ -212,7 +212,7 @@ Status ExternalSstFileIngestionJob::Run() {
   if (ingestion_options_.snapshot_consistency && !db_snapshots_->empty()) {
     // We need to assign a global sequence number to all the files even
-    // if the dont overlap with any ranges since we have snapshots
+    // if they don't overlap with any ranges since we have snapshots
     force_global_seqno = true;
   }
   // It is safe to use this instead of LastAllocatedSequence since we are
@@ -588,7 +588,7 @@ Status ExternalSstFileIngestionJob::AssignLevelAndSeqnoForIngestedFile(
       continue;
     }
 
-    // We dont overlap with any keys in this level, but we still need to check
+    // We don't overlap with any keys in this level, but we still need to check
    // if our file can fit in it
     if (IngestedFileFitInLevel(file_to_ingest, lvl)) {
       target_level = lvl;
@@ -646,7 +646,7 @@ Status ExternalSstFileIngestionJob::AssignGlobalSeqnoForIngestedFile(
     return Status::InvalidArgument("Global seqno is required, but disabled");
   } else if (file_to_ingest->global_seqno_offset == 0) {
     return Status::InvalidArgument(
-        "Trying to set global seqno for a file that dont have a global seqno "
+        "Trying to set global seqno for a file that doesn't have a global seqno "
         "field");
   }
diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc
index 0b91910a1..259d8ba93 100644
--- a/db/external_sst_file_test.cc
+++ b/db/external_sst_file_test.cc
@@ -476,17 +476,17 @@ TEST_F(ExternalSSTFileTest, Basic) {
   }
   ASSERT_NE(db_->GetLatestSequenceNumber(), 0U);
 
-  // Key range of file5 (400 => 499) dont overlap with any keys in DB
+  // Key range of file5 (400 => 499) doesn't overlap with any keys in DB
   ASSERT_OK(DeprecatedAddFile({file5}));
 
   // This file has overlapping values with the existing data
   s = DeprecatedAddFile({file6});
   ASSERT_FALSE(s.ok()) << s.ToString();
 
-  // Key range of file7 (500 => 598) dont overlap with any keys in DB
+  // Key range of file7 (500 => 598) doesn't overlap with any keys in DB
   ASSERT_OK(DeprecatedAddFile({file7}));
 
-  // Key range of file7 (600 => 700) dont overlap with any keys in DB
+  // Key range of file8 (600 => 700) doesn't overlap with any keys in DB
   ASSERT_OK(DeprecatedAddFile({file8}));
 
   // Make sure values are correct before and after flush/compaction
@@ -1609,15 +1609,15 @@ TEST_F(ExternalSSTFileTest, AddExternalSstFileWithCustomCompartor) {
               generated_files[7]};
   ASSERT_NOK(DeprecatedAddFile(in_files));
 
-  // These 2 files dont overlap with each other
+  // These 2 files don't overlap with each other
   in_files = {generated_files[0], generated_files[2]};
   ASSERT_OK(DeprecatedAddFile(in_files));
 
-  // These 2 files dont overlap with each other but overlap with keys in DB
+  // These 2 files don't overlap with each other but overlap with keys in DB
   in_files = {generated_files[3], generated_files[7]};
   ASSERT_NOK(DeprecatedAddFile(in_files));
 
-  // Files dont overlap and dont overlap with DB key range
+  // Files don't overlap with each other or with the DB key range
   in_files = {generated_files[4], generated_files[6], generated_files[8]};
   ASSERT_OK(DeprecatedAddFile(in_files));
@@ -1797,7 +1797,7 @@ TEST_P(ExternalSSTFileTest, IngestFileWithGlobalSeqnoAssignedLevel) {
       options, file_data, -1, true, write_global_seqno,
       verify_checksums_before_ingest, false, false, &true_data));
 
-  // This file dont overlap with anything in the DB, will go to L4
+  // This file doesn't overlap with anything in the DB and will go to L4
   ASSERT_EQ("0,0,0,0,1", FilesPerLevel());
 
   // Insert 80 -> 130 using AddFile
@@ -1822,7 +1822,7 @@ TEST_P(ExternalSSTFileTest, IngestFileWithGlobalSeqnoAssignedLevel) {
       options, file_data, -1, true, write_global_seqno,
       verify_checksums_before_ingest, false, false, &true_data));
 
-  // This file dont overlap with anything in the DB and fit in L4 as well
+  // This file doesn't overlap with anything in the DB and fits in L4 as well
   ASSERT_EQ("2,0,0,0,2", FilesPerLevel());
 
   // Insert 10 -> 40 using AddFile
@@ -2059,16 +2059,16 @@ TEST_F(ExternalSSTFileTest, FileWithCFInfo) {
   IngestExternalFileOptions ifo;
 
-  // SST CF dont match
+  // SST CF doesn't match
   ASSERT_NOK(db_->IngestExternalFile(handles_[0], {cf1_sst}, ifo));
-  // SST CF dont match
+  // SST CF doesn't match
   ASSERT_NOK(db_->IngestExternalFile(handles_[2], {cf1_sst}, ifo));
   // SST CF match
   ASSERT_OK(db_->IngestExternalFile(handles_[1], {cf1_sst}, ifo));
 
-  // SST CF dont match
+  // SST CF doesn't match
   ASSERT_NOK(db_->IngestExternalFile(handles_[1], {cf_default_sst}, ifo));
-  // SST CF dont match
+  // SST CF doesn't match
   ASSERT_NOK(db_->IngestExternalFile(handles_[2], {cf_default_sst}, ifo));
   // SST CF match
   ASSERT_OK(db_->IngestExternalFile(handles_[0], {cf_default_sst}, ifo));
diff --git a/db/write_thread.cc b/db/write_thread.cc
index 5f50bba63..36bda43fb 100644
--- a/db/write_thread.cc
+++ b/db/write_thread.cc
@@ -464,7 +464,7 @@ size_t WriteThread::EnterAsBatchGroupLeader(Writer* leader,
     }
 
     if (w->callback != nullptr && !w->callback->AllowWriteBatching()) {
-      // dont batch writes that don't want to be batched
+      // don't batch writes that don't want to be batched
       break;
     }
diff --git a/env/io_posix.h b/env/io_posix.h
index 535bd99cd..ddc3244e4 100644
--- a/env/io_posix.h
+++ b/env/io_posix.h
@@ -27,7 +27,7 @@
 #define POSIX_FADV_RANDOM 1 /* [MC1] expect random page refs */
 #define POSIX_FADV_SEQUENTIAL 2 /* [MC1] expect sequential page refs */
 #define POSIX_FADV_WILLNEED 3 /* [MC1] will need these pages */
-#define POSIX_FADV_DONTNEED 4 /* [MC1] dont need these pages */
+#define POSIX_FADV_DONTNEED 4 /* [MC1] don't need these pages */
 #endif
 
 namespace ROCKSDB_NAMESPACE {
diff --git a/file/delete_scheduler.cc b/file/delete_scheduler.cc
index bb318e595..8ebdd1560 100644
--- a/file/delete_scheduler.cc
+++ b/file/delete_scheduler.cc
@@ -216,7 +216,7 @@ void DeleteScheduler::BackgroundEmptyTrash() {
       const FileAndDir& fad = queue_.front();
       std::string path_in_trash = fad.fname;
 
-      // We dont need to hold the lock while deleting the file
+      // We don't need to hold the lock while deleting the file
       mu_.Unlock();
       uint64_t deleted_bytes = 0;
       bool is_complete = true;
diff --git a/file/delete_scheduler_test.cc b/file/delete_scheduler_test.cc
index cff645de5..687eae512 100644
--- a/file/delete_scheduler_test.cc
+++ b/file/delete_scheduler_test.cc
@@ -90,7 +90,7 @@ class DeleteSchedulerTest : public testing::Test {
   }
 
   void NewDeleteScheduler() {
-    // Tests in this file are for DeleteScheduler component and dont create any
+    // Tests in this file are for the DeleteScheduler component and don't create any
     // DBs, so we need to set max_trash_db_ratio to 100% (instead of default
     // 25%)
     std::shared_ptr
@@ -306,7 +306,7 @@ TEST_F(DeleteSchedulerTest, RateLimitingMultiThreaded) {
 }
 
 // Disable rate limiting by setting rate_bytes_per_sec_ to 0 and make sure
-// that when DeleteScheduler delete a file it delete it immediately and dont
+// that when DeleteScheduler deletes a file it deletes it immediately and doesn't
 // move it to trash
 TEST_F(DeleteSchedulerTest, DisableRateLimiting) {
   int bg_delete_file = 0;
diff --git a/table/block_based/block.cc b/table/block_based/block.cc
index ab783fe2a..04829faf8 100644
--- a/table/block_based/block.cc
+++ b/table/block_based/block.cc
@@ -520,8 +520,8 @@ bool DataBlockIter::ParseNextDataKey(const char* limit) {
     return false;
   } else {
     if (shared == 0) {
-      // If this key dont share any bytes with prev key then we dont need
-      // to decode it and can use it's address in the block directly.
+      // If this key doesn't share any bytes with prev key then we don't need
+      // to decode it and can use its address in the block directly.
       key_.SetKey(Slice(p, non_shared), false /* copy */);
       key_pinned_ = true;
     } else {
@@ -592,8 +592,8 @@ bool IndexBlockIter::ParseNextIndexKey() {
     return false;
   }
   if (shared == 0) {
-    // If this key dont share any bytes with prev key then we dont need
-    // to decode it and can use it's address in the block directly.
+    // If this key doesn't share any bytes with prev key then we don't need
+    // to decode it and can use its address in the block directly.
     key_.SetKey(Slice(p, non_shared), false /* copy */);
     key_pinned_ = true;
   } else {
diff --git a/table/internal_iterator.h b/table/internal_iterator.h
index 780db64b3..1d36e5019 100644
--- a/table/internal_iterator.h
+++ b/table/internal_iterator.h
@@ -122,7 +122,7 @@ class InternalIteratorBase : public Cleanable {
   // iterate_upper_bound.
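+  // (Returning true is the conservative default: it only tells callers that
+  // they must check the upper bound themselves before trusting a key.)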
   virtual bool MayBeOutOfUpperBound() { return true; }
 
-  // Pass the PinnedIteratorsManager to the Iterator, most Iterators dont
+  // Pass the PinnedIteratorsManager to the Iterator; most Iterators don't
   // communicate with PinnedIteratorsManager so default implementation is no-op
   // but for Iterators that need to communicate with PinnedIteratorsManager
   // they will implement this function and use the passed pointer to communicate
diff --git a/table/sst_file_writer.cc b/table/sst_file_writer.cc
index a5d08ea77..9e8a18583 100644
--- a/table/sst_file_writer.cc
+++ b/table/sst_file_writer.cc
@@ -150,7 +150,7 @@ struct SstFileWriter::Rep {
     if (bytes_since_last_fadvise > kFadviseTrigger || closing) {
       TEST_SYNC_POINT_CALLBACK("SstFileWriter::Rep::InvalidatePageCache",
                                &(bytes_since_last_fadvise));
-      // Tell the OS that we dont need this file in page cache
+      // Tell the OS that we don't need this file in page cache
       file_writer->InvalidateCache(0, 0);
       last_fadvise_size = builder->FileSize();
     }
diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
index f90cb794b..a5137a0e2 100644
--- a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
+++ b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
@@ -67,7 +67,7 @@ DEFINE_string(
     "Group the number of accesses per block per second using these labels. "
     "Possible labels are a combination of the following: cf (column family), "
     "sst, level, bt (block type), caller, block. For example, label \"cf_bt\" "
-    "means the number of acccess per second is grouped by unique pairs of "
+    "means the number of accesses per second is grouped by unique pairs of "
     "\"cf_bt\". A label \"all\" contains the aggregated number of accesses per "
     "second across all possible labels.");
 DEFINE_string(reuse_distance_labels, "",
diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index 5c2ca01e6..c63f741df 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -1061,7 +1061,7 @@ DEFINE_double(keyrange_dist_d, 0.0,
               "f(x)=a*exp(b*x)+c*exp(d*x)");
 DEFINE_int64(keyrange_num, 1,
              "The number of key ranges that are in the same prefix "
-             "group, each prefix range will have its key acccess "
+             "group, each prefix range will have its key access "
              "distribution");
 DEFINE_double(key_dist_a, 0.0,
               "The parameter 'a' of key access distribution model "
diff --git a/utilities/blob_db/blob_db_impl_filesnapshot.cc b/utilities/blob_db/blob_db_impl_filesnapshot.cc
index 168c7ce9d..82bff2ea6 100644
--- a/utilities/blob_db/blob_db_impl_filesnapshot.cc
+++ b/utilities/blob_db/blob_db_impl_filesnapshot.cc
@@ -32,7 +32,7 @@ Status BlobDBImpl::DisableFileDeletions() {
   }
 
   ROCKS_LOG_INFO(db_options_.info_log,
-                 "Disalbed blob file deletions. count: %d", count);
+                 "Disabled blob file deletions. count: %d", count);
   return Status::OK();
 }
diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc
index bdc2609f5..06a88c30c 100644
--- a/utilities/transactions/transaction_test.cc
+++ b/utilities/transactions/transaction_test.cc
@@ -5575,7 +5575,7 @@ class ThreeBytewiseComparator : public Comparator {
     Slice nb = Slice(b.data(), b.size() < 3 ? b.size() : 3);
     return na == nb;
   }
-  // This methods below dont seem relevant to this test. Implement them if
+  // These methods below don't seem relevant to this test. Implement them if
   // proven othersize.
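+  // (FindShortestSeparator and FindShortSuccessor are only space-saving
+  // optimizations for index keys; leaving them as no-ops is always correct
+  // for a Comparator.)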
   void FindShortestSeparator(std::string* start,
                              const Slice& limit) const override {
diff --git a/utilities/transactions/write_prepared_txn_db.h b/utilities/transactions/write_prepared_txn_db.h
index 964b72689..812c50b9a 100644
--- a/utilities/transactions/write_prepared_txn_db.h
+++ b/utilities/transactions/write_prepared_txn_db.h
@@ -194,7 +194,7 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
     // happen after recovery, or it could be committed and evicted by another
     // commit, or never committed.
 
-    // At this point we dont know if it was committed or it is still prepared
+    // At this point we don't know whether it was committed or is still prepared
     max_evicted_seq_ub = max_evicted_seq_.load(std::memory_order_acquire);
     if (UNLIKELY(max_evicted_seq_lb != max_evicted_seq_ub)) {
       continue;
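The last hunk sits in WritePreparedTxnDB's commit-status check, which loads max_evicted_seq_ before and after probing and retries when the two loads disagree. A minimal sketch of that read-validate-retry pattern, with hypothetical names (ProbeCommitCache, LookupWithValidation) standing in for the real members:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint64_t> max_evicted_seq{0};

    // Hypothetical stand-in for the commit-cache probe, which can race with
    // concurrent evictions that advance max_evicted_seq.
    bool ProbeCommitCache(uint64_t seq) { return seq % 2 == 0; }

    bool LookupWithValidation(uint64_t seq) {
      while (true) {
        const uint64_t lb = max_evicted_seq.load(std::memory_order_acquire);
        const bool hit = ProbeCommitCache(seq);
        const uint64_t ub = max_evicted_seq.load(std::memory_order_acquire);
        if (lb != ub) {
          // Eviction advanced during the probe, so a miss is ambiguous: the
          // entry may have been committed and evicted, or may still be
          // prepared. Retry with the fresh bound.
          continue;
        }
        return hit;  // the bound was stable, so the probe result can be trusted
      }
    }

    int main() { return LookupWithValidation(42) ? 0 : 1; }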