Fix some typos in comments (#8066)

Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/8066

Reviewed By: jay-zhuang

Differential Revision: D27280799

Pulled By: mrambacher

fbshipit-source-id: 68f91f5af4ffe0a84be581961bf9366887f47702
Author: storagezhang (committed by Facebook GitHub Bot)
parent c20a7cd6c7
commit 711881bc25
23 changed files (changed lines in parentheses):

 1. cache/lru_cache.h (4)
 2. cache/lru_cache_test.cc (2)
 3. db/blob/db_blob_basic_test.cc (2)
 4. db/column_family.h (4)
 5. db/compaction/compaction.cc (2)
 6. db/compaction/compaction.h (2)
 7. db/compaction/compaction_iterator.cc (2)
 8. db/compaction/compaction_iterator_test.cc (54)
 9. db/compaction/compaction_job.cc (4)
10. db/compaction/compaction_picker_test.cc (8)
11. db/compaction/compaction_picker_universal.cc (2)
12. db/db_iter.cc (4)
13. db/db_iter.h (2)
14. db/dbformat.h (2)
15. db/error_handler.h (2)
16. db/forward_iterator.cc (2)
17. db/memtable.h (6)
18. db/memtable_list.cc (2)
19. db/range_del_aggregator.h (4)
20. db/snapshot_impl.h (2)
21. db/table_cache.h (2)
22. db/version_edit.h (4)
23. db/version_set.cc (12)

cache/lru_cache.h

@@ -239,7 +239,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   // not threadsafe
   size_t TEST_GetLRUSize();
 
-  // Retrives high pri pool ratio
+  // Retrieves high pri pool ratio
   double GetHighPriPoolRatio();
 
  private:
@@ -328,7 +328,7 @@ class LRUCache
   // Retrieves number of elements in LRU, for unit test purpose only
   size_t TEST_GetLRUSize();
 
-  // Retrives high pri pool ratio
+  // Retrieves high pri pool ratio
   double GetHighPriPoolRatio();
 
  private:
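
For context (not part of the patch): the high-pri pool ratio these comments
describe is also exposed through RocksDB's public cache API. A minimal sketch,
with illustrative values only:

    #include <memory>
    #include "rocksdb/cache.h"

    std::shared_ptr<rocksdb::Cache> MakeBlockCache() {
      // Reserve half of a 64 MB LRU cache for high-priority entries.
      return rocksdb::NewLRUCache(/*capacity=*/64 << 20,
                                  /*num_shard_bits=*/6,
                                  /*strict_capacity_limit=*/false,
                                  /*high_pri_pool_ratio=*/0.5);
    }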

cache/lru_cache_test.cc

@@ -30,7 +30,7 @@ class LRUCacheTest : public testing::Test {
     DeleteCache();
     cache_ = reinterpret_cast<LRUCacheShard*>(
        port::cacheline_aligned_alloc(sizeof(LRUCacheShard)));
-    new (cache_) LRUCacheShard(capacity, false /*strict_capcity_limit*/,
+    new (cache_) LRUCacheShard(capacity, false /*strict_capacity_limit*/,
                               high_pri_pool_ratio, use_adaptive_mutex,
                               kDontChargeCacheMetadata);
   }

db/blob/db_blob_basic_test.cc

@@ -236,7 +236,7 @@ TEST_F(DBBlobBasicTest, GenerateIOTracing) {
     ASSERT_OK(env_->FileExists(trace_file));
   }
   {
-    // Parse trace file to check file opertions related to blob files are
+    // Parse trace file to check file operations related to blob files are
     // recorded.
     std::unique_ptr<TraceReader> trace_reader;
     ASSERT_OK(

db/column_family.h

@@ -253,7 +253,7 @@ extern Status CheckCFPathsSupported(const DBOptions& db_options,
 extern ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
                                            const ColumnFamilyOptions& src);
 
-// Wrap user defined table proproties collector factories `from cf_options`
+// Wrap user defined table properties collector factories `from cf_options`
 // into internal ones in int_tbl_prop_collector_factories. Add a system internal
 // one too.
 extern void GetIntTblPropCollectorFactory(
@@ -441,7 +441,7 @@ class ColumnFamilyData {
   // Get SuperVersion stored in thread local storage. If it does not exist,
   // get a reference from a current SuperVersion.
   SuperVersion* GetThreadLocalSuperVersion(DBImpl* db);
-  // Try to return SuperVersion back to thread local storage. Retrun true on
+  // Try to return SuperVersion back to thread local storage. Return true on
   // success and false on failure. It fails when the thread local storage
   // contains anything other than SuperVersion::kSVInUse flag.
   bool ReturnThreadLocalSuperVersion(SuperVersion* sv);
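
A hedged sketch (not from the patch) of the borrow/return protocol these two
comments describe, written against the internal types declared above; the
failure path is simplified relative to the real cleanup code:

    // Assumes "db/column_family.h" is available; cfd and db are valid.
    void ReadWithSuperVersion(ColumnFamilyData* cfd, DBImpl* db) {
      // Borrow a SuperVersion, falling back to referencing the current one.
      SuperVersion* sv = cfd->GetThreadLocalSuperVersion(db);
      // ... read through sv (memtables, immutable memtables, current) ...
      if (!cfd->ReturnThreadLocalSuperVersion(sv)) {
        // The thread-local slot no longer holds kSVInUse, so it refused the
        // SuperVersion; drop this thread's reference instead (simplified).
        sv->Unref();
      }
    }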

db/compaction/compaction.cc

@@ -519,7 +519,7 @@ uint64_t Compaction::OutputFilePreallocationSize() const {
 
   // Over-estimate slightly so we don't end up just barely crossing
   // the threshold
-  // No point to prellocate more than 1GB.
+  // No point to preallocate more than 1GB.
   return std::min(uint64_t{1073741824},
                   preallocation_size + (preallocation_size / 10));
 }

db/compaction/compaction.h

@@ -341,7 +341,7 @@ class Compaction {
   const uint32_t output_path_id_;
   CompressionType output_compression_;
   CompressionOptions output_compression_opts_;
-  // If true, then the comaction can be done by simply deleting input files.
+  // If true, then the compaction can be done by simply deleting input files.
   const bool deletion_compaction_;
 
   // Compaction input files organized by level. Constant after construction

db/compaction/compaction_iterator.cc

@@ -135,7 +135,7 @@ CompactionIterator::CompactionIterator(
 }
 
 CompactionIterator::~CompactionIterator() {
-  // input_ Iteartor lifetime is longer than pinned_iters_mgr_ lifetime
+  // input_ Iterator lifetime is longer than pinned_iters_mgr_ lifetime
   input_->SetPinnedItersMgr(nullptr);
 }

db/compaction/compaction_iterator_test.cc

@@ -38,7 +38,7 @@ class NoMergingMergeOp : public MergeOperator {
 
 // Compaction filter that gets stuck when it sees a particular key,
 // then gets unstuck when told to.
-// Always returns Decition::kRemove.
+// Always returns Decision::kRemove.
 class StallingFilter : public CompactionFilter {
  public:
   Decision FilterV2(int /*level*/, const Slice& key, ValueType /*type*/,
@@ -189,7 +189,7 @@ class FakeCompaction : public CompactionIterator::CompactionProxy {
   bool is_allow_ingest_behind = false;
 };
 
-// A simplifed snapshot checker which assumes each snapshot has a global
+// A simplified snapshot checker which assumes each snapshot has a global
 // last visible sequence.
 class TestSnapshotChecker : public SnapshotChecker {
  public:
@@ -711,7 +711,7 @@ TEST_P(CompactionIteratorTest, ZeroOutSequenceAtBottomLevel) {
   RunTest({test::KeyStr("a", 1, kTypeValue), test::KeyStr("b", 2, kTypeValue)},
           {"v1", "v2"},
           {test::KeyStr("a", 0, kTypeValue), test::KeyStr("b", 2, kTypeValue)},
-          {"v1", "v2"}, kMaxSequenceNumber /*last_commited_seq*/,
+          {"v1", "v2"}, kMaxSequenceNumber /*last_committed_seq*/,
           nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
           true /*bottommost_level*/);
 }
@@ -720,15 +720,14 @@ TEST_P(CompactionIteratorTest, ZeroOutSequenceAtBottomLevel) {
 // permanently.
 TEST_P(CompactionIteratorTest, RemoveDeletionAtBottomLevel) {
   AddSnapshot(1);
-  RunTest({test::KeyStr("a", 1, kTypeDeletion),
-           test::KeyStr("b", 3, kTypeDeletion),
-           test::KeyStr("b", 1, kTypeValue)},
-          {"", "", ""},
-          {test::KeyStr("b", 3, kTypeDeletion),
-           test::KeyStr("b", 0, kTypeValue)},
-          {"", ""},
-          kMaxSequenceNumber /*last_commited_seq*/, nullptr /*merge_operator*/,
-          nullptr /*compaction_filter*/, true /*bottommost_level*/);
+  RunTest(
+      {test::KeyStr("a", 1, kTypeDeletion), test::KeyStr("b", 3, kTypeDeletion),
+       test::KeyStr("b", 1, kTypeValue)},
+      {"", "", ""},
+      {test::KeyStr("b", 3, kTypeDeletion), test::KeyStr("b", 0, kTypeValue)},
+      {"", ""}, kMaxSequenceNumber /*last_committed_seq*/,
+      nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
+      true /*bottommost_level*/);
 }
 
 // In bottommost level, single deletions earlier than earliest snapshot can be
@@ -738,7 +737,7 @@ TEST_P(CompactionIteratorTest, RemoveSingleDeletionAtBottomLevel) {
   RunTest({test::KeyStr("a", 1, kTypeSingleDeletion),
            test::KeyStr("b", 2, kTypeSingleDeletion)},
           {"", ""}, {test::KeyStr("b", 2, kTypeSingleDeletion)}, {""},
-          kMaxSequenceNumber /*last_commited_seq*/, nullptr /*merge_operator*/,
+          kMaxSequenceNumber /*last_committed_seq*/, nullptr /*merge_operator*/,
           nullptr /*compaction_filter*/, true /*bottommost_level*/);
 }
@@ -895,7 +894,7 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
       {"v1", "v2", "v3"},
       {test::KeyStr("a", 0, kTypeValue), test::KeyStr("b", 2, kTypeValue),
        test::KeyStr("c", 3, kTypeValue)},
-      {"v1", "v2", "v3"}, kMaxSequenceNumber /*last_commited_seq*/,
+      {"v1", "v2", "v3"}, kMaxSequenceNumber /*last_committed_seq*/,
       nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
       true /*bottommost_level*/);
 }
@@ -906,9 +905,7 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
   RunTest(
       {test::KeyStr("a", 1, kTypeDeletion), test::KeyStr("b", 2, kTypeDeletion),
        test::KeyStr("c", 3, kTypeDeletion)},
-      {"", "", ""},
-      {},
-      {"", ""}, kMaxSequenceNumber /*last_commited_seq*/,
+      {"", "", ""}, {}, {"", ""}, kMaxSequenceNumber /*last_committed_seq*/,
       nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
       true /*bottommost_level*/);
 }
@@ -916,15 +913,14 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
 TEST_F(CompactionIteratorWithSnapshotCheckerTest,
        NotRemoveDeletionIfValuePresentToEarlierSnapshot) {
   AddSnapshot(2,1);
-  RunTest(
-      {test::KeyStr("a", 4, kTypeDeletion), test::KeyStr("a", 1, kTypeValue),
-       test::KeyStr("b", 3, kTypeValue)},
-      {"", "", ""},
-      {test::KeyStr("a", 4, kTypeDeletion), test::KeyStr("a", 0, kTypeValue),
-       test::KeyStr("b", 3, kTypeValue)},
-      {"", "", ""}, kMaxSequenceNumber /*last_commited_seq*/,
-      nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
-      true /*bottommost_level*/);
+  RunTest({test::KeyStr("a", 4, kTypeDeletion),
+           test::KeyStr("a", 1, kTypeValue), test::KeyStr("b", 3, kTypeValue)},
+          {"", "", ""},
+          {test::KeyStr("a", 4, kTypeDeletion),
+           test::KeyStr("a", 0, kTypeValue), test::KeyStr("b", 3, kTypeValue)},
+          {"", "", ""}, kMaxSequenceNumber /*last_committed_seq*/,
+          nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
+          true /*bottommost_level*/);
 }
@@ -936,7 +932,7 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
       {"", "", ""},
       {test::KeyStr("b", 2, kTypeSingleDeletion),
        test::KeyStr("c", 3, kTypeSingleDeletion)},
-      {"", ""}, kMaxSequenceNumber /*last_commited_seq*/,
+      {"", ""}, kMaxSequenceNumber /*last_committed_seq*/,
       nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
       true /*bottommost_level*/);
 }
@@ -986,8 +982,8 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
 }
 
 // Compaction filter should keep uncommitted key as-is, and
-// * Convert the latest velue to deletion, and/or
-// * if latest value is a merge, apply filter to all suequent merges.
+// * Convert the latest value to deletion, and/or
+// * if latest value is a merge, apply filter to all subsequent merges.
 TEST_F(CompactionIteratorWithSnapshotCheckerTest, CompactionFilter_Value) {
   std::unique_ptr<CompactionFilter> compaction_filter(
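
For reference (not part of the patch): the CompactionFilter interface these
tests exercise is public. A minimal sketch of a filter that keeps every key,
assuming only rocksdb/compaction_filter.h:

    #include <string>
    #include "rocksdb/compaction_filter.h"

    // Trivial filter: never removes or rewrites anything.
    class KeepAllFilter : public rocksdb::CompactionFilter {
     public:
      Decision FilterV2(int /*level*/, const rocksdb::Slice& /*key*/,
                        ValueType /*value_type*/,
                        const rocksdb::Slice& /*existing_value*/,
                        std::string* /*new_value*/,
                        std::string* /*skip_until*/) const override {
        return Decision::kKeep;
      }
      const char* Name() const override { return "KeepAllFilter"; }
    };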

db/compaction/compaction_job.cc

@@ -150,7 +150,7 @@ struct CompactionJob::SubcompactionState {
       // This subcompaction's output could be empty if compaction was aborted
       // before this subcompaction had a chance to generate any output files.
       // When subcompactions are executed sequentially this is more likely and
-      // will be particulalry likely for the later subcompactions to be empty.
+      // will be particularly likely for the later subcompactions to be empty.
       // Once they are run in parallel however it should be much rarer.
       return nullptr;
     } else {
@@ -410,7 +410,7 @@ void CompactionJob::Prepare() {
   AutoThreadOperationStageUpdater stage_updater(
       ThreadStatus::STAGE_COMPACTION_PREPARE);
 
-  // Generate file_levels_ for compaction berfore making Iterator
+  // Generate file_levels_ for compaction before making Iterator
   auto* c = compact_->compaction;
   assert(c->column_family_data() != nullptr);
   assert(c->column_family_data()->current()->storage_info()->NumLevelFiles(

db/compaction/compaction_picker_test.cc

@@ -650,7 +650,7 @@ TEST_F(CompactionPickerTest, UniversalPeriodicCompaction3) {
 
 TEST_F(CompactionPickerTest, UniversalPeriodicCompaction4) {
   // The case where universal periodic compaction couldn't form
-  // a compaction that inlcudes any file marked for periodic compaction.
+  // a compaction that includes any file marked for periodic compaction.
   // Right now we form the compaction anyway if it is more than one
   // sorted run. Just put the case here to validate that it doesn't
   // crash.
@@ -800,7 +800,7 @@ TEST_F(CompactionPickerTest, CompactionPriMinOverlapping2) {
   Add(2, 6U, "150", "175",
       60000000U);  // Overlaps with file 26, 27, total size 521M
   Add(2, 7U, "176", "200", 60000000U);  // Overlaps with file 27, 28, total size
-                                        // 520M, the smalelst overlapping
+                                        // 520M, the smallest overlapping
   Add(2, 8U, "201", "300",
       60000000U);  // Overlaps with file 28, 29, total size 521M
@@ -1228,7 +1228,7 @@ TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri1) {
   Add(0, 32U, "001", "400", 1000000000U, 0, 0);
   Add(0, 33U, "001", "400", 1000000000U, 0, 0);
 
-  // L1 total size 2GB, score 2.2. If one file being comapcted, score 1.1.
+  // L1 total size 2GB, score 2.2. If one file being compacted, score 1.1.
   Add(1, 4U, "050", "300", 1000000000U, 0, 0);
   file_map_[4u].first->being_compacted = true;
   Add(1, 5U, "301", "350", 1000000000U, 0, 0);
@@ -1261,7 +1261,7 @@ TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri2) {
   Add(0, 32U, "001", "400", 1000000000U, 0, 0);
   Add(0, 33U, "001", "400", 1000000000U, 0, 0);
 
-  // L1 total size 2GB, score 2.2. If one file being comapcted, score 1.1.
+  // L1 total size 2GB, score 2.2. If one file being compacted, score 1.1.
   Add(1, 4U, "050", "300", 1000000000U, 0, 0);
   Add(1, 5U, "301", "350", 1000000000U, 0, 0);

db/compaction/compaction_picker_universal.cc

@@ -733,7 +733,7 @@ Compaction* UniversalCompactionBuilder::PickCompactionToReduceSortedRuns(
   }
 
   // Look at overall size amplification. If size amplification
-  // exceeeds the configured value, then do a compaction
+  // exceeds the configured value, then do a compaction
   // of the candidate files all the way upto the earliest
   // base file (overrides configured values of file-size ratios,
   // min_merge_width and max_merge_width).

db/db_iter.cc

@@ -1343,7 +1343,7 @@ void DBIter::Seek(const Slice& target) {
   // we need to find out the next key that is visible to the user.
   ClearSavedValue();
   if (prefix_same_as_start_) {
-    // The case where the iterator needs to be invalidated if it has exausted
+    // The case where the iterator needs to be invalidated if it has exhausted
     // keys within the same prefix of the seek key.
     assert(prefix_extractor_ != nullptr);
     Slice target_prefix = prefix_extractor_->Transform(target);
@@ -1418,7 +1418,7 @@ void DBIter::SeekForPrev(const Slice& target) {
   // backward direction.
   ClearSavedValue();
   if (prefix_same_as_start_) {
-    // The case where the iterator needs to be invalidated if it has exausted
+    // The case where the iterator needs to be invalidated if it has exhausted
     // keys within the same prefix of the seek key.
     assert(prefix_extractor_ != nullptr);
     Slice target_prefix = prefix_extractor_->Transform(target);

db/db_iter.h

@@ -235,7 +235,7 @@ class DBIter final : public Iterator {
   // If `skipping_saved_key` is true, the function will keep iterating until it
   // finds a user key that is larger than `saved_key_`.
   // If `prefix` is not null, the iterator needs to stop when all keys for the
-  // prefix are exhausted and the interator is set to invalid.
+  // prefix are exhausted and the iterator is set to invalid.
   bool FindNextUserEntry(bool skipping_saved_key, const Slice* prefix);
   // Internal implementation of FindNextUserEntry().
   bool FindNextUserEntryInternal(bool skipping_saved_key, const Slice* prefix);

db/dbformat.h

@@ -616,7 +616,7 @@ class IterKey {
   void EnlargeBuffer(size_t key_size);
 };
 
-// Convert from a SliceTranform of user keys, to a SliceTransform of
+// Convert from a SliceTransform of user keys, to a SliceTransform of
 // user keys.
 class InternalKeySliceTransform : public SliceTransform {
  public:

db/error_handler.h

@@ -103,7 +103,7 @@ class ErrorHandler {
   bool auto_recovery_;
   bool recovery_in_prog_;
   // A flag to indicate that for the soft error, we should not allow any
-  // backrgound work execpt the work is from recovery.
+  // background work except the work is from recovery.
   bool soft_error_no_bg_work_;
 
   // Used to store the context for recover, such as flush reason.

db/forward_iterator.cc

@@ -426,7 +426,7 @@ void ForwardIterator::SeekInternal(const Slice& internal_key,
       if (seek_to_first) {
         l0_iters_[i]->SeekToFirst();
       } else {
-        // If the target key passes over the larget key, we are sure Next()
+        // If the target key passes over the largest key, we are sure Next()
        // won't go over this file.
        if (user_comparator_->Compare(target_user_key,
                                      l0[i]->largest.user_key()) > 0) {

db/memtable.h

@@ -72,7 +72,7 @@ using MultiGetRange = MultiGetContext::Range;
 // Note: Many of the methods in this class have comments indicating that
 // external synchronization is required as these methods are not thread-safe.
 // It is up to higher layers of code to decide how to prevent concurrent
-// invokation of these methods. This is usually done by acquiring either
+// invocation of these methods. This is usually done by acquiring either
 // the db mutex or the single writer thread.
 //
 // Some of these methods are documented to only require external
@@ -139,7 +139,7 @@ class MemTable {
   // operations on the same MemTable (unless this Memtable is immutable).
   size_t ApproximateMemoryUsage();
 
-  // As a cheap version of `ApproximateMemoryUsage()`, this function doens't
+  // As a cheap version of `ApproximateMemoryUsage()`, this function doesn't
   // require external synchronization. The value may be less accurate though
   size_t ApproximateMemoryUsageFast() const {
     return approximate_memory_usage_.load(std::memory_order_relaxed);
@@ -533,7 +533,7 @@ class MemTable {
   SequenceNumber atomic_flush_seqno_;
 
   // keep track of memory usage in table_, arena_, and range_del_table_.
-  // Gets refrshed inside `ApproximateMemoryUsage()` or `ShouldFlushNow`
+  // Gets refreshed inside `ApproximateMemoryUsage()` or `ShouldFlushNow`
   std::atomic<uint64_t> approximate_memory_usage_;
 
 #ifndef ROCKSDB_LITE
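
A generic sketch (not RocksDB code) of the pattern the fixed comments
describe: writers refresh a cached total under their own synchronization,
while readers take a relaxed atomic load that needs no external locking:

    #include <atomic>
    #include <cstdint>

    class UsageTracker {
     public:
      // Called by writers while they hold whatever lock guards the data.
      void Refresh(uint64_t bytes) {
        approximate_usage_.store(bytes, std::memory_order_relaxed);
      }
      // Cheap, lock-free read; the value may be slightly stale.
      uint64_t ApproximateUsageFast() const {
        return approximate_usage_.load(std::memory_order_relaxed);
      }

     private:
      std::atomic<uint64_t> approximate_usage_{0};
    };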

db/memtable_list.cc

@@ -521,7 +521,7 @@ void MemTableList::Add(MemTable* m, autovector<MemTable*>* to_delete) {
   InstallNewVersion();
   // this method is used to move mutable memtable into an immutable list.
   // since mutable memtable is already refcounted by the DBImpl,
-  // and when moving to the imutable list we don't unref it,
+  // and when moving to the immutable list we don't unref it,
   // we don't have to ref the memtable here. we just take over the
   // reference from the DBImpl.
   current_->Add(m, to_delete);

db/range_del_aggregator.h

@@ -43,12 +43,12 @@ class TruncatedRangeDelIterator {
   void InternalNext();
 
-  // Seeks to the tombstone with the highest viisble sequence number that covers
+  // Seeks to the tombstone with the highest visible sequence number that covers
   // target (a user key). If no such tombstone exists, the position will be at
   // the earliest tombstone that ends after target.
   void Seek(const Slice& target);
 
-  // Seeks to the tombstone with the highest viisble sequence number that covers
+  // Seeks to the tombstone with the highest visible sequence number that covers
   // target (a user key). If no such tombstone exists, the position will be at
   // the latest tombstone that starts before target.
   void SeekForPrev(const Slice& target);

db/snapshot_impl.h

@@ -23,7 +23,7 @@ class SnapshotImpl : public Snapshot {
   SequenceNumber number_;  // const after creation
   // It indicates the smallest uncommitted data at the time the snapshot was
   // taken. This is currently used by WritePrepared transactions to limit the
-  // scope of queries to IsInSnpashot.
+  // scope of queries to IsInSnapshot.
   SequenceNumber min_uncommitted_ = kMinUnCommittedSeq;
 
   virtual SequenceNumber GetSequenceNumber() const override { return number_; }

db/table_cache.h

@@ -183,7 +183,7 @@ class TableCache {
   Cache* get_cache() const { return cache_; }
 
-  // Capacity of the backing Cache that indicates inifinite TableCache capacity.
+  // Capacity of the backing Cache that indicates infinite TableCache capacity.
   // For example when max_open_files is -1 we set the backing Cache to this.
   static const int kInfiniteCapacity = 0x400000;
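
For context (not part of the patch): max_open_files is the public option the
comment refers to. A minimal sketch of the configuration that triggers the
effectively infinite table cache:

    #include "rocksdb/options.h"

    rocksdb::Options MakeOptions() {
      rocksdb::Options options;
      // -1 keeps all table file handles open, so the backing Cache is sized
      // with the effectively infinite capacity described above.
      options.max_open_files = -1;
      return options;
    }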

db/version_edit.h

@@ -74,7 +74,7 @@ enum NewFileCustomTag : uint32_t {
   kNeedCompaction = 2,
   // Since Manifest is not entirely forward-compatible, we currently encode
   // kMinLogNumberToKeep as part of NewFile as a hack. This should be removed
-  // when manifest becomes forward-comptabile.
+  // when manifest becomes forward-compatible.
   kMinLogNumberToKeepHack = 3,
   kOldestBlobFileNumber = 4,
   kOldestAncesterTime = 5,
@@ -195,7 +195,7 @@ struct FileMetaData {
   // The file could be the compaction output from other SST files, which could
   // in turn be outputs for compact older SST files. We track the memtable
-  // flush timestamp for the oldest SST file that eventaully contribute data
+  // flush timestamp for the oldest SST file that eventually contribute data
   // to this file. 0 means the information is not available.
   uint64_t oldest_ancester_time = kUnknownOldestAncesterTime;

db/version_set.cc

@@ -408,7 +408,7 @@ class FilePickerMultiGet {
   int GetCurrentLevel() const { return curr_level_; }
 
   // Iterates through files in the current level until it finds a file that
-  // contains atleast one key from the MultiGet batch
+  // contains at least one key from the MultiGet batch
   bool GetNextFileInLevelWithKeys(MultiGetRange* next_file_range,
                                   size_t* file_index, FdWithKeyRange** fd,
                                   bool* is_last_key_in_file) {
@@ -2786,7 +2786,7 @@ struct Fsize {
   FileMetaData* file;
 };
 
-// Compator that is used to sort files based on their size
+// Comparator that is used to sort files based on their size
 // In normal mode: descending size
 bool CompareCompensatedSizeDescending(const Fsize& first, const Fsize& second) {
   return (first.file->compensated_file_size >
@@ -3206,7 +3206,7 @@ void VersionStorageInfo::GetCleanInputsWithinInterval(
 // specified range. From that file, iterate backwards and
 // forwards to find all overlapping files.
 // if within_range is set, then only store the maximum clean inputs
-// within range [begin, end]. "clean" means there is a boudnary
+// within range [begin, end]. "clean" means there is a boundary
 // between the files in "*inputs" and the surrounding files
 void VersionStorageInfo::GetOverlappingInputsRangeBinarySearch(
     int level, const InternalKey* begin, const InternalKey* end,
@@ -3517,7 +3517,7 @@ void VersionStorageInfo::CalculateBaseBytes(const ImmutableCFOptions& ioptions,
       // 1. the L0 size is larger than level size base, or
       // 2. number of L0 files reaches twice the L0->L1 compaction trigger
       // We don't do this otherwise to keep the LSM-tree structure stable
-      // unless the L0 compation is backlogged.
+      // unless the L0 compaction is backlogged.
       base_level_size = l0_size;
       if (base_level_ == num_levels_ - 1) {
         level_multiplier_ = 1.0;
@@ -4354,7 +4354,7 @@ Status VersionSet::ProcessManifestWrites(
   return s;
 }
 
-// 'datas' is gramatically incorrect. We still use this notation to indicate
+// 'datas' is grammatically incorrect. We still use this notation to indicate
 // that this variable represents a collection of column_family_data.
 Status VersionSet::LogAndApply(
     const autovector<ColumnFamilyData*>& column_family_datas,
@@ -4796,7 +4796,7 @@ Status VersionSet::TryRecoverFromOneManifest(
 Status VersionSet::ListColumnFamilies(std::vector<std::string>* column_families,
                                       const std::string& dbname,
                                       FileSystem* fs) {
-  // these are just for performance reasons, not correcntes,
+  // these are just for performance reasons, not correctness,
   // so we're fine using the defaults
   FileOptions soptions;
 
   // Read "CURRENT" file, which contains a pointer to the current manifest file
