diff --git a/HISTORY.md b/HISTORY.md
index f2e89191c..d5a6cd9b4 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -256,7 +256,7 @@
 * Added a new way to report QPS from db_bench (check out --report_file and --report_interval_seconds)
 * Added a cache for individual rows. See DBOptions::row_cache for more info.
 * Several new features on EventListener (see include/rocksdb/listener.h):
- - OnCompationCompleted() now returns per-compaciton job statistics, defined in include/rocksdb/compaction_job_stats.h.
+ - OnCompactionCompleted() now returns per-compaction job statistics, defined in include/rocksdb/compaction_job_stats.h.
 - Added OnTableFileCreated() and OnTableFileDeleted().
 * Add compaction_options_universal.enable_trivial_move to true, to allow trivial move while performing universal compaction. Trivial move will happen only when all the input files are non overlapping.
diff --git a/db/db_impl.h b/db/db_impl.h
index e17039633..37aaf1ca5 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -391,7 +391,7 @@ class DBImpl : public DB {
   // Return the lastest MutableCFOptions of a column family
   Status TEST_GetLatestMutableCFOptions(ColumnFamilyHandle* column_family,
-                                        MutableCFOptions* mutable_cf_opitons);
+                                        MutableCFOptions* mutable_cf_options);
 
   Cache* TEST_table_cache() { return table_cache_.get(); }
@@ -1098,7 +1098,7 @@ class DBImpl : public DB {
   // Indicate DB was opened successfully
   bool opened_successfully_;
 
-  // minmum log number still containing prepared data.
+  // minimum log number still containing prepared data.
   // this is used by FindObsoleteFiles to determine which
   // flushed logs we must keep around because they still
   // contain prepared data which has not been flushed or rolled back
@@ -1111,7 +1111,7 @@ class DBImpl : public DB {
   // to prepared_section_completed_ which maps LOG -> instance_count
   // since a log could contain multiple prepared sections
   //
-  // when trying to determine the minmum log still active we first
+  // when trying to determine the minimum log still active we first
   // consult min_log_with_prep_. while that root value maps to
   // a value > 0 in prepared_section_completed_ we decrement the
   // instance_count for that log and pop the root value in
diff --git a/db/db_impl_compaction_flush.cc b/db/db_impl_compaction_flush.cc
index 6f160026e..08b7e9fcb 100644
--- a/db/db_impl_compaction_flush.cc
+++ b/db/db_impl_compaction_flush.cc
@@ -1403,7 +1403,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
       // Can't compact right now, but try again later
       TEST_SYNC_POINT("DBImpl::BackgroundCompaction()::Conflict");
 
-      // Stay in the compaciton queue.
+      // Stay in the compaction queue.
       unscheduled_compactions_++;
 
       return Status::OK();
diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc
index e75e52376..6b01b67e5 100644
--- a/db/db_universal_compaction_test.cc
+++ b/db/db_universal_compaction_test.cc
@@ -118,7 +118,7 @@ class DelayFilterFactory : public CompactionFilterFactory {
 };
 }  // namespace
 
-// Make sure we don't trigger a problem if the trigger conditon is given
+// Make sure we don't trigger a problem if the trigger condition is given
 // to be 0, which is invalid.
 TEST_P(DBTestUniversalCompaction, UniversalCompactionSingleSortedRun) {
   Options options = CurrentOptions();
diff --git a/include/rocksdb/advanced_options.h b/include/rocksdb/advanced_options.h
index 6d95f7b28..c7ca9270b 100644
--- a/include/rocksdb/advanced_options.h
+++ b/include/rocksdb/advanced_options.h
@@ -37,11 +37,11 @@ enum CompactionStyle : char {
   kCompactionStyleNone = 0x3,
 };
 
-// In Level-based comapction, it Determines which file from a level to be
+// In Level-based compaction, it determines which file from a level to be
 // picked to merge to the next level. We suggest people try
 // kMinOverlappingRatio first when you tune your database.
 enum CompactionPri : char {
-  // Slightly Priotize larger files by size compensated by #deletes
+  // Slightly prioritize larger files by size compensated by #deletes
   kByCompensatedSize = 0x0,
   // First compact files whose data's latest update time is oldest.
   // Try this if you only update some hot keys in small ranges.
diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h
index 6ab58280e..5ce24eda1 100644
--- a/include/rocksdb/cache.h
+++ b/include/rocksdb/cache.h
@@ -182,7 +182,7 @@ class Cache {
                                       bool thread_safe) = 0;
 
   // Remove all entries.
-  // Prerequisit: no entry is referenced.
+  // Prerequisite: no entry is referenced.
   virtual void EraseUnRefEntries() = 0;
 
   virtual std::string GetPrintableOptions() const { return ""; }
diff --git a/include/rocksdb/compaction_filter.h b/include/rocksdb/compaction_filter.h
index 8a4e84d90..209d59d89 100644
--- a/include/rocksdb/compaction_filter.h
+++ b/include/rocksdb/compaction_filter.h
@@ -182,7 +182,7 @@ class CompactionFilter {
   // will be called even if the keys were written before the last snapshot.
   // This behavior is to be used only when we want to delete a set of keys
   // irrespective of snapshots. In particular, care should be taken
-  // to understand that the values of thesekeys will change even if we are
+  // to understand that the values of these keys will change even if we are
   // using a snapshot.
   virtual bool IgnoreSnapshots() const { return false; }
diff --git a/include/rocksdb/compaction_job_stats.h b/include/rocksdb/compaction_job_stats.h
index 1c9c727cf..876809d2c 100644
--- a/include/rocksdb/compaction_job_stats.h
+++ b/include/rocksdb/compaction_job_stats.h
@@ -15,7 +15,7 @@ struct CompactionJobStats {
   // Aggregate the CompactionJobStats from another instance with this one
   void Add(const CompactionJobStats& stats);
 
-  // the elapsed time in micro of this compaction.
+  // the elapsed time of this compaction in microseconds.
   uint64_t elapsed_micros;
 
   // the number of compaction input records.
diff --git a/include/rocksdb/convenience.h b/include/rocksdb/convenience.h
index 19fb21c30..4456f96f7 100644
--- a/include/rocksdb/convenience.h
+++ b/include/rocksdb/convenience.h
@@ -34,7 +34,7 @@ namespace rocksdb {
 //   - "optimize_filters_for_hits=true" in GetColumnFamilyOptionsFromString.
 //
 // * Integers:
-// Integers are converted directly from string, in addtion to the following
+// Integers are converted directly from string, in addition to the following
 // units that we support:
 //   - 'k' or 'K' => 2^10
 //   - 'm' or 'M' => 2^20
diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h
index d741a8907..c25bcf771 100644
--- a/include/rocksdb/db.h
+++ b/include/rocksdb/db.h
@@ -439,7 +439,7 @@ class DB {
   //  It could also be used to return the stats in the format of the map.
   //  In this case there will a pair of string to array of double for
   //  each level as well as for "Sum". "Int" stats will not be affected
-  //  when this form of stats are retrived.
+  //  when this form of stats are retrieved.
   static const std::string kCFStatsNoFileHistogram;
 
   // "rocksdb.cf-file-histogram" - print out how many file reads to every
@@ -538,7 +538,7 @@ class DB {
   //      by iterators or unfinished compactions.
   static const std::string kNumLiveVersions;
 
-  // "rocksdb.current-super-version-number" - returns number of curent LSM
+  // "rocksdb.current-super-version-number" - returns number of current LSM
   //  version. It is a uint64_t integer number, incremented after there is
   //  any change to the LSM tree. The number is not preserved after restarting
   //  the DB. After DB restart, it will start from 0 again.
@@ -548,7 +548,7 @@ class DB {
   //  live data in bytes.
   static const std::string kEstimateLiveDataSize;
 
-  // "rocksdb.min-log-number-to-keep" - return the minmum log number of the
+  // "rocksdb.min-log-number-to-keep" - return the minimum log number of the
   //  log files that should be kept.
   static const std::string kMinLogNumberToKeep;
@@ -956,7 +956,7 @@ class DB {
   //
   // (1) External SST files can be created using SstFileWriter
   // (2) We will try to ingest the files to the lowest possible level
-  //     even if the file compression dont match the level compression
+  //     even if the file compression doesn't match the level compression
   // (3) If IngestExternalFileOptions->ingest_behind is set to true,
   //     we always ingest at the bottommost level, which should be reserved
   //     for this purpose (see DBOPtions::allow_ingest_behind flag).
diff --git a/include/rocksdb/env.h b/include/rocksdb/env.h
index ad59dd1a0..053765911 100644
--- a/include/rocksdb/env.h
+++ b/include/rocksdb/env.h
@@ -183,7 +183,7 @@ class Env {
                                  unique_ptr* result,
                                  const EnvOptions& options);
 
-  // Open `fname` for random read and write, if file dont exist the file
+  // Open `fname` for random read and write, if file doesn't exist the file
   // will be created. On success, stores a pointer to the new file in
   // *result and returns OK. On failure returns non-OK.
   //
@@ -318,7 +318,7 @@ class Env {
   // Wait for all threads started by StartThread to terminate.
   virtual void WaitForJoin() {}
 
-  // Get thread pool queue length for specific thrad pool.
+  // Get thread pool queue length for specific thread pool.
   virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const {
     return 0;
   }
@@ -516,7 +516,7 @@ class RandomAccessFile {
   // may not have been modified.
   //
   // This function guarantees, for IDs from a given environment, two unique ids
-  // cannot be made equal to eachother by adding arbitrary bytes to one of
+  // cannot be made equal to each other by adding arbitrary bytes to one of
   // them. That is, no unique ID is the prefix of another.
   //
   // This function guarantees that the returned ID will not be interpretable as
@@ -687,7 +687,7 @@ class WritableFile {
       return;
     }
     // If this write would cross one or more preallocation blocks,
-    // determine what the last preallocation block necesessary to
+    // determine what the last preallocation block necessary to
    // cover this write would be and Allocate to that point.
     const auto block_size = preallocation_block_size_;
     size_t new_last_preallocated_block =
diff --git a/include/rocksdb/filter_policy.h b/include/rocksdb/filter_policy.h
index 2c1588a23..662cf4ef2 100644
--- a/include/rocksdb/filter_policy.h
+++ b/include/rocksdb/filter_policy.h
@@ -112,7 +112,7 @@ class FilterPolicy {
 //
 // bits_per_key: bits per key in bloom filter. A good value for bits_per_key
 // is 10, which yields a filter with ~ 1% false positive rate.
-// use_block_based_builder: use block based filter rather than full fiter.
+// use_block_based_builder: use block based filter rather than full filter.
 // If you want to builder full filter, it needs to be set to false.
 //
 // Callers must delete the result after any database that is using the
diff --git a/include/rocksdb/options.h b/include/rocksdb/options.h
index eb56b6ae3..40b917c6d 100644
--- a/include/rocksdb/options.h
+++ b/include/rocksdb/options.h
@@ -136,7 +136,7 @@ struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions {
   // the same DB. The only exception is reserved for upgrade, where a DB
   // previously without a merge operator is introduced to Merge operation
   // for the first time. It's necessary to specify a merge operator when
-  // openning the DB in this case.
+  // opening the DB in this case.
   // Default: nullptr
   std::shared_ptr<MergeOperator> merge_operator = nullptr;
@@ -578,7 +578,7 @@ struct DBOptions {
   //
   // Files will be opened in "direct I/O" mode
   // which means that data r/w from the disk will not be cached or
-  // bufferized. The hardware buffer of the devices may however still
+  // buffered. The hardware buffer of the devices may however still
   // be used. Memory mapped files are not impacted by these parameters.
 
   // Use O_DIRECT for user reads
@@ -946,7 +946,7 @@ struct ReadOptions {
   // and iterator_upper_bound need to have the same prefix.
   // This is because ordering is not guaranteed outside of prefix domain.
   // There is no lower bound on the iterator. If needed, that can be easily
-  // implemented
+  // implemented.
   //
   // Default: nullptr
   const Slice* iterate_upper_bound;
diff --git a/include/rocksdb/rate_limiter.h b/include/rocksdb/rate_limiter.h
index 4b02263eb..0d34388e6 100644
--- a/include/rocksdb/rate_limiter.h
+++ b/include/rocksdb/rate_limiter.h
@@ -68,9 +68,9 @@ class RateLimiter {
 //   The default should work for most cases.
 // @fairness: RateLimiter accepts high-pri requests and low-pri requests.
 //   A low-pri request is usually blocked in favor of hi-pri request. Currently,
-//   RocksDB assigns low-pri to request from compaciton and high-pri to request
+//   RocksDB assigns low-pri to request from compaction and high-pri to request
 //   from flush. Low-pri requests can get blocked if flush requests come in
-//   continuouly. This fairness parameter grants low-pri requests permission by
+//   continuously. This fairness parameter grants low-pri requests permission by
 //   1/fairness chance even though high-pri requests exist to avoid starvation.
 //   You should be good by leaving it at default 10.
 extern RateLimiter* NewGenericRateLimiter(
diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h
index 2c858a506..33f77a0e5 100644
--- a/include/rocksdb/slice.h
+++ b/include/rocksdb/slice.h
@@ -122,7 +122,7 @@ class Slice {
 * A Slice that can be pinned with some cleanup tasks, which will be run upon
 * ::Reset() or object destruction, whichever is invoked first. This can be used
 * to avoid memcpy by having the PinnsableSlice object referring to the data
- * that is locked in the memory and release them after the data is consuned.
+ * that is locked in the memory and release them after the data is consumed.
 */
 class PinnableSlice : public Slice, public Cleanable {
  public:
diff --git a/include/rocksdb/slice_transform.h b/include/rocksdb/slice_transform.h
index abaaf2a70..ac9cb84a9 100644
--- a/include/rocksdb/slice_transform.h
+++ b/include/rocksdb/slice_transform.h
@@ -65,15 +65,15 @@ class SliceTransform {
   // This function is not used by RocksDB, but for users. If users pass
   // Options by string to RocksDB, they might not know what prefix extractor
   // they are using. This function is to help users can determine:
-  // if they want to iterate all keys prefixing `prefix`, whetherit is
+  // if they want to iterate all keys prefixing `prefix`, whether it is
   // safe to use prefix bloom filter and seek to key `prefix`.
   // If this function returns true, this means a user can Seek() to a prefix
   // using the bloom filter. Otherwise, user needs to skip the bloom filter
   // by setting ReadOptions.total_order_seek = true.
   //
   // Here is an example: Suppose we implement a slice transform that returns
-  // the first part of the string after spliting it using deimiter ",":
-  // 1. SameResultWhenAppended("abc,") should return true. If aplying prefix
+  // the first part of the string after splitting it using delimiter ",":
+  // 1. SameResultWhenAppended("abc,") should return true. If applying prefix
   // bloom filter using it, all slices matching "abc:.*" will be extracted
   // to "abc,", so any SST file or memtable containing any of those key
   // will not be filtered out.
diff --git a/include/rocksdb/sst_file_manager.h b/include/rocksdb/sst_file_manager.h
index a4f38bf25..665fb0e26 100644
--- a/include/rocksdb/sst_file_manager.h
+++ b/include/rocksdb/sst_file_manager.h
@@ -68,7 +68,7 @@ class SstFileManager {
 // == Deletion rate limiting specific arguments ==
 // @param trash_dir: Path to the directory where deleted files will be moved
 //    to be deleted in a background thread while applying rate limiting. If this
-//    directory dont exist, it will be created. This directory should not be
+//    directory doesn't exist, it will be created. This directory should not be
 //    used by any other process or any other SstFileManager, Set to "" to
 //    disable deletion rate limiting.
 // @param rate_bytes_per_sec: How many bytes should be deleted per second, If
diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h
index c81794332..041e7f368 100644
--- a/include/rocksdb/statistics.h
+++ b/include/rocksdb/statistics.h
@@ -174,7 +174,7 @@ enum Tickers : uint32_t {
   GET_UPDATES_SINCE_CALLS,
   BLOCK_CACHE_COMPRESSED_MISS,  // miss in the compressed block cache
   BLOCK_CACHE_COMPRESSED_HIT,   // hit in the compressed block cache
-  // Number of blocks added to comopressed block cache
+  // Number of blocks added to compressed block cache
   BLOCK_CACHE_COMPRESSED_ADD,
   // Number of failures when adding blocks to compressed block cache
   BLOCK_CACHE_COMPRESSED_ADD_FAILURES,
@@ -328,7 +328,7 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
 
 /**
  * Keep adding histogram's here.
- * Any histogram whould have value less than HISTOGRAM_ENUM_MAX
+ * Any histogram should have value less than HISTOGRAM_ENUM_MAX
  * Add a new Histogram by assigning it the current value of HISTOGRAM_ENUM_MAX
  * Add a string representation in HistogramsNameMap below
  * And increment HISTOGRAM_ENUM_MAX
diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index ea0c48a61..2590005f4 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -67,7 +67,7 @@ struct BlockBasedTableOptions {
 
   // If cache_index_and_filter_blocks is enabled, cache index and filter
   // blocks with high priority. If set to true, depending on implementation of
-  // block cache, index and filter blocks may be less likely to be eviected
+  // block cache, index and filter blocks may be less likely to be evicted
   // than data blocks.
   bool cache_index_and_filter_blocks_with_high_priority = false;
@@ -147,7 +147,7 @@ struct BlockBasedTableOptions {
   // Block size for partitioned metadata. Currently applied to indexes when
   // kTwoLevelIndexSearch is used and to filters when partition_filters is used.
   // Note: Since in the current implementation the filters and index partitions
-  // are aligned, an index/filter block is created when eitehr index or filter
+  // are aligned, an index/filter block is created when either index or filter
   // block size reaches the specified limit.
   // Note: this limit is currently applied to only index blocks; a filter
   // partition is cut right after an index block is cut
@@ -418,7 +418,7 @@ class TableFactory {
   //   (1) TableCache::FindTable() calls the function when table cache miss
   //       and cache the table object returned.
   //   (2) SstFileReader (for SST Dump) opens the table and dump the table
-  //       contents using the interator of the table.
+  //       contents using the iterator of the table.
   //   (3) DBImpl::AddFile() calls this function to read the contents of
   //       the sst file it's attempting to add
   //
@@ -446,7 +446,7 @@ class TableFactory {
   //   (4) When running Repairer, it creates a table builder to convert logs to
   //       SST files (In Repairer::ConvertLogToTable() by calling BuildTable())
   //
-  // Multiple configured can be acceseed from there, including and not limited
+  // Multiple configured can be accessed from there, including and not limited
   // to compression options. file is a handle of a writable file.
   // It is the caller's responsibility to keep the file open and close the file
   // after closing the table builder. compression_type is the compression type
@@ -472,7 +472,7 @@ class TableFactory {
   // Since the return value is a raw pointer, the TableFactory owns the
   // pointer and the caller should not delete the pointer.
   //
-  // In certan case, it is desirable to alter the underlying options when the
+  // In certain cases, it is desirable to alter the underlying options when the
   // TableFactory is not used by any open DB by casting the returned pointer
   // to the right class. For instance, if BlockBasedTableFactory is used,
   // then the pointer can be casted to BlockBasedTableOptions.
diff --git a/include/rocksdb/thread_status.h b/include/rocksdb/thread_status.h
index 7681a98e0..294c6cb3b 100644
--- a/include/rocksdb/thread_status.h
+++ b/include/rocksdb/thread_status.h
@@ -145,7 +145,7 @@ struct ThreadStatus {
   // The operation (high-level action) that the current thread is involved.
   const OperationType operation_type;
 
-  // The elapsed time in micros of the current thread operation.
+  // The elapsed time of the current thread operation in microseconds.
   const uint64_t op_elapsed_micros;
 
   // An integer showing the current stage where the thread is involved
diff --git a/include/rocksdb/universal_compaction.h b/include/rocksdb/universal_compaction.h
index 11490e413..d22b7c1d1 100644
--- a/include/rocksdb/universal_compaction.h
+++ b/include/rocksdb/universal_compaction.h
@@ -24,7 +24,7 @@ enum CompactionStopStyle {
 class CompactionOptionsUniversal {
  public:
-  // Percentage flexibilty while comparing file size. If the candidate file(s)
+  // Percentage flexibility while comparing file size. If the candidate file(s)
   // size is 1% smaller than the next file's size, then include next file into
   // this candidate set.
   // Default: 1
   unsigned int size_ratio;
diff --git a/include/rocksdb/write_batch.h b/include/rocksdb/write_batch.h
index d60fa8cc8..d98c56261 100644
--- a/include/rocksdb/write_batch.h
+++ b/include/rocksdb/write_batch.h
@@ -72,7 +72,7 @@ class WriteBatch : public WriteBatchBase {
   }
 
   // Variant of Put() that gathers output like writev(2). The key and value
-  // that will be written to the database are concatentations of arrays of
+  // that will be written to the database are concatenations of arrays of
   // slices.
   Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
              const SliceParts& value) override;
@@ -144,7 +144,7 @@ class WriteBatch : public WriteBatchBase {
   // it will not be persisted to the SST files. When iterating over this
   // WriteBatch, WriteBatch::Handler::LogData will be called with the contents
   // of the blob as it is encountered. Blobs, puts, deletes, and merges will be
-  // encountered in the same order in thich they were inserted. The blob will
+  // encountered in the same order in which they were inserted. The blob will
   // NOT consume sequence number(s) and will NOT increase the count of the batch
   //
   // Example application: add timestamps to the transaction log for use in
diff --git a/include/rocksdb/write_batch_base.h b/include/rocksdb/write_batch_base.h
index 8b6279292..f1fe754b1 100644
--- a/include/rocksdb/write_batch_base.h
+++ b/include/rocksdb/write_batch_base.h
@@ -31,7 +31,7 @@ class WriteBatchBase {
   virtual Status Put(const Slice& key, const Slice& value) = 0;
 
   // Variant of Put() that gathers output like writev(2). The key and value
-  // that will be written to the database are concatentations of arrays of
+  // that will be written to the database are concatenations of arrays of
   // slices.
   virtual Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
                      const SliceParts& value);
@@ -87,7 +87,7 @@ class WriteBatchBase {
   // it will not be persisted to the SST files. When iterating over this
   // WriteBatch, WriteBatch::Handler::LogData will be called with the contents
   // of the blob as it is encountered. Blobs, puts, deletes, and merges will be
-  // encountered in the same order in thich they were inserted. The blob will
+  // encountered in the same order in which they were inserted. The blob will
   // NOT consume sequence number(s) and will NOT increase the count of the batch
   //
   // Example application: add timestamps to the transaction log for use in
diff --git a/table/cuckoo_table_builder_test.cc b/table/cuckoo_table_builder_test.cc
index b50a40b21..275005593 100644
--- a/table/cuckoo_table_builder_test.cc
+++ b/table/cuckoo_table_builder_test.cc
@@ -108,7 +108,7 @@ class CuckooBuilderTest : public testing::Test {
           std::find(expected_locations.begin(), expected_locations.end(), i) -
           expected_locations.begin();
       if (key_idx == keys.size()) {
-        // i is not one of the expected locaitons. Empty bucket.
+        // i is not one of the expected locations. Empty bucket.
         ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
       } else {
         keys_found[key_idx] = true;
diff --git a/table/cuckoo_table_reader_test.cc b/table/cuckoo_table_reader_test.cc
index e7d958d49..32848131d 100644
--- a/table/cuckoo_table_reader_test.cc
+++ b/table/cuckoo_table_reader_test.cc
@@ -517,7 +517,7 @@ TEST_F(CuckooReaderTest, TestReadPerformance) {
     return;
   }
   double hash_ratio = 0.95;
-  // These numbers are chosen to have a hash utilizaiton % close to
+  // These numbers are chosen to have a hash utilization % close to
   // 0.9, 0.75, 0.6 and 0.5 respectively.
   // They all create 128 M buckets.
   std::vector<uint64_t> nums = {120*1024*1024, 100*1024*1024, 80*1024*1024,
diff --git a/utilities/env_mirror.cc b/utilities/env_mirror.cc
index 304a3c60b..39cac3e02 100644
--- a/utilities/env_mirror.cc
+++ b/utilities/env_mirror.cc
@@ -12,7 +12,7 @@
 
 namespace rocksdb {
 
-// An implementaiton of Env that mirrors all work over two backend
+// An implementation of Env that mirrors all work over two backend
 // Env's. This is useful for debugging purposes.
 class SequentialFileMirror : public SequentialFile {
  public:
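
For reference on the EventListener entry touched in the HISTORY.md hunk above: a minimal sketch (illustrative only, not part of this patch) of a listener that consumes the per-compaction CompactionJobStats through OnCompactionCompleted(), assuming the usual registration via DBOptions::listeners; the class name, log format, and database path are hypothetical.

// Illustrative sketch only -- not part of the patch above. Assumes the
// standard EventListener registration path via DBOptions::listeners; the
// class name, log output, and DB path below are hypothetical.
#include <cstdio>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/listener.h"
#include "rocksdb/options.h"

class CompactionStatsLogger : public rocksdb::EventListener {
 public:
  void OnCompactionCompleted(rocksdb::DB* /*db*/,
                             const rocksdb::CompactionJobInfo& info) override {
    // CompactionJobInfo::stats carries the CompactionJobStats struct from
    // include/rocksdb/compaction_job_stats.h; elapsed_micros is the field
    // whose comment is reworded in this patch.
    std::fprintf(stderr, "compaction of column family %s took %llu us\n",
                 info.cf_name.c_str(),
                 static_cast<unsigned long long>(info.stats.elapsed_micros));
  }
};

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Register the listener before opening the DB; it fires after each
  // compaction job finishes.
  options.listeners.emplace_back(std::make_shared<CompactionStatsLogger>());

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/listener_demo", &db);
  if (s.ok()) {
    // ... normal reads and writes here ...
    delete db;
  }
  return s.ok() ? 0 : 1;
}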