Merge pull request #593 from charsyam/feature/type-1

fix typos
Branch: main
Igor Canadi · 10 years ago
commit 93ab1473dc
17 files changed (changed lines in parentheses):

  db/column_family.cc (2)
  db/column_family_test.cc (2)
  db/compaction_picker.cc (14)
  db/corruption_test.cc (2)
  db/db_iter.cc (2)
  db/db_test.cc (6)
  db/memtable_list.cc (6)
  db/version_set.cc (2)
  table/block_based_table_builder.cc (4)
  table/block_based_table_reader.cc (4)
  table/block_hash_index.cc (2)
  tools/db_stress.cc (2)
  util/bloom.cc (2)
  util/env_hdfs.cc (2)
  util/histogram.cc (2)
  util/histogram_test.cc (2)
  utilities/backupable/backupable_db.cc (2)

db/column_family.cc

@@ -614,7 +614,7 @@ bool ColumnFamilyData::ReturnThreadLocalSuperVersion(SuperVersion* sv) {
   void* expected = SuperVersion::kSVInUse;
   if (local_sv_->CompareAndSwap(static_cast<void*>(sv), expected)) {
     // When we see kSVInUse in the ThreadLocal, we are sure ThreadLocal
-    // storage has not been altered and no Scrape has happend. The
+    // storage has not been altered and no Scrape has happened. The
     // SuperVersion is still current.
     return true;
   } else {
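The comment fixed here describes RocksDB's thread-local SuperVersion caching: a sentinel value (kSVInUse) marks a checked-out entry, and a compare-and-swap on return proves nothing scraped the slot in the meantime. A minimal sketch of that sentinel-and-CAS pattern, using a plain std::atomic slot rather than the real ThreadLocalPtr API (names here are illustrative):

```cpp
#include <atomic>

// Illustrative sentinel marking a cached pointer as "checked out".
static void* const kInUse = reinterpret_cast<void*>(1);

struct LocalSlot {
  std::atomic<void*> ptr{nullptr};
};

// Return a borrowed object to the slot. If the slot still holds kInUse,
// no scrape invalidated the cache while we held the object, so the same
// pointer can be put back and reused on the next lookup.
bool ReturnToSlot(LocalSlot* slot, void* sv) {
  void* expected = kInUse;
  // Plays the role of ThreadLocalPtr::CompareAndSwap in the hunk above.
  return slot->ptr.compare_exchange_strong(expected, sv);
}
```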

db/column_family_test.cc

@@ -268,7 +268,7 @@ class ColumnFamilyTest : public testing::Test {
     VectorLogPtr wal_files;
     Status s;
     // GetSortedWalFiles is a flakey function -- it gets all the wal_dir
-    // children files and then later checks for their existance. if some of the
+    // children files and then later checks for their existence. if some of the
     // log files doesn't exist anymore, it reports an error. it does all of this
     // without DB mutex held, so if a background process deletes the log file
     // while the function is being executed, it returns an error. We retry the
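Since GetSortedWalFiles can fail transiently for exactly the reason the comment gives, the test wraps it in a retry. A hedged sketch of such a loop (the attempt cap is an assumption, not the test's exact constant):

```cpp
VectorLogPtr wal_files;
Status s;
// Retry: a background deletion between the directory listing and the
// existence check makes a single call fail spuriously.
for (int attempt = 0; attempt < 10; ++attempt) {
  wal_files.clear();
  s = db_->GetSortedWalFiles(wal_files);
  if (s.ok()) {
    break;
  }
}
```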

db/compaction_picker.cc

@@ -1248,12 +1248,12 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
           cf_name.c_str(), file_num_buf, loop);
     }

-    // Check if the suceeding files need compaction.
+    // Check if the succeeding files need compaction.
     for (unsigned int i = loop + 1;
          candidate_count < max_files_to_compact && i < sorted_runs.size();
          i++) {
-      const SortedRun* suceeding_sr = &sorted_runs[i];
-      if (suceeding_sr->being_compacted) {
+      const SortedRun* succeeding_sr = &sorted_runs[i];
+      if (succeeding_sr->being_compacted) {
        break;
      }
      // Pick files if the total/last candidate file size (increased by the
@@ -1263,14 +1263,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
      // kCompactionStopStyleSimilarSize, it's simply the size of the last
      // picked file.
      double sz = candidate_size * (100.0 + ratio) / 100.0;
-      if (sz < static_cast<double>(suceeding_sr->size)) {
+      if (sz < static_cast<double>(succeeding_sr->size)) {
        break;
      }
      if (ioptions_.compaction_options_universal.stop_style ==
          kCompactionStopStyleSimilarSize) {
        // Similar-size stopping rule: also check the last picked file isn't
        // far larger than the next candidate file.
-        sz = (suceeding_sr->size * (100.0 + ratio)) / 100.0;
+        sz = (succeeding_sr->size * (100.0 + ratio)) / 100.0;
        if (sz < static_cast<double>(candidate_size)) {
          // If the small file we've encountered begins a run of similar-size
          // files, we'll pick them up on a future iteration of the outer
@@ -1278,9 +1278,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
          // by the last-resort read amp strategy which disregards size ratios.
          break;
        }
-        candidate_size = suceeding_sr->compensated_file_size;
+        candidate_size = succeeding_sr->compensated_file_size;
      } else {  // default kCompactionStopStyleTotalSize
-        candidate_size += suceeding_sr->compensated_file_size;
+        candidate_size += succeeding_sr->compensated_file_size;
      }
      candidate_count++;
    }
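The ratio test being renamed here is simple percentage arithmetic: with ratio = 1, a 100 MB candidate gives sz = 100 * (100 + 1) / 100 = 101 MB, so the succeeding run joins the candidate set only if it is no larger than 101 MB. A self-contained sketch of the total-size variant of the check (simplified from the hunk; not the full picker logic):

```cpp
#include <cstdint>

// True if the next sorted run is close enough in size to be absorbed
// into the current candidate set (kCompactionStopStyleTotalSize rule).
bool AbsorbNextRun(uint64_t candidate_size, uint64_t next_run_size,
                   unsigned int ratio /* percent */) {
  double sz = candidate_size * (100.0 + ratio) / 100.0;
  return static_cast<double>(next_run_size) <= sz;
}

// Example: a 100 MB candidate with ratio = 1 absorbs a 100 MB successor
// (the bound is 101 MB) but stops at a 200 MB one.
```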

db/corruption_test.cc

@@ -103,7 +103,7 @@ class CorruptionTest : public testing::Test {
     // db itself will raise errors because data is corrupted.
     // Instead, we want the reads to be successful and this test
     // will detect whether the appropriate corruptions have
-    // occured.
+    // occurred.
     Iterator* iter = db_->NewIterator(ReadOptions(false, true));
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       uint64_t key;

db/db_iter.cc

@@ -254,7 +254,7 @@ void DBIter::FindNextUserEntryInternal(bool skipping) {
       }
       // If we have sequentially iterated via numerous keys and still not
       // found the next user-key, then it is better to seek so that we can
-      // avoid too many key comparisons. We seek to the last occurence of
+      // avoid too many key comparisons. We seek to the last occurrence of
       // our current key by looking for sequence number 0.
       if (skipping && num_skipped > max_skip_) {
         num_skipped = 0;
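The optimization this comment documents trades many Next() calls for a single Seek(): internal keys sort by user key ascending, then sequence number descending, so sequence number 0 is the last possible entry for a given user key. A rough sketch of the idea — ParsedInternalKey, AppendInternalKey, and kValueTypeForSeek are real dbformat.h helpers, but current_user_key and iter_ here stand in for DBIter's members:

```cpp
// Skip all remaining versions of the current user key in one jump:
// build an internal key with sequence number 0 (the smallest, which
// sorts last for that user key) and seek directly to it.
std::string last_key;
AppendInternalKey(&last_key,
                  ParsedInternalKey(current_user_key, 0 /* sequence */,
                                    kValueTypeForSeek));
iter_->Seek(last_key);
```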

db/db_test.cc

@@ -1295,7 +1295,7 @@ static long TestGetTickerCount(const Options& options, Tickers ticker_type) {

 // A helper function that ensures the table properties returned in
 // `GetPropertiesOfAllTablesTest` is correct.
-// This test assumes entries size is differnt for each of the tables.
+// This test assumes entries size is different for each of the tables.
 namespace {
 void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
   TablePropertiesCollection props;
@@ -1955,7 +1955,7 @@ TEST_F(DBTest, GetEncountersEmptyLevel) {
   //   * sstable B in level 2
   // Then do enough Get() calls to arrange for an automatic compaction
   // of sstable A. A bug would cause the compaction to be marked as
-  // occuring at level 1 (instead of the correct level 0).
+  // occurring at level 1 (instead of the correct level 0).

   // Step 1: First place sstables in levels 0 and 2
   int compaction_count = 0;
@@ -11648,7 +11648,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
   // Test max_mem_compaction_level.
-  // Destory DB and start from scratch
+  // Destroy DB and start from scratch
   options.max_background_compactions = 1;
   options.max_background_flushes = 0;
   options.max_mem_compaction_level = 2;

db/memtable_list.cc

@@ -161,7 +161,7 @@ void MemTableList::RollbackMemtableFlush(const autovector<MemTable*>& mems,
   assert(!mems.empty());

   // If the flush was not successful, then just reset state.
-  // Maybe a suceeding attempt to flush will be successful.
+  // Maybe a succeeding attempt to flush will be successful.
   for (MemTable* m : mems) {
     assert(m->flush_in_progress_);
     assert(m->file_number_ == 0);
@@ -184,7 +184,7 @@ Status MemTableList::InstallMemtableFlushResults(
       ThreadStatus::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS);
   mu->AssertHeld();

-  // flush was sucessful
+  // flush was successful
   for (size_t i = 0; i < mems.size(); ++i) {
     // All the edits are associated with the first memtable of this batch.
     assert(i == 0 || mems[i]->GetEdits()->NumEntries() == 0);
@@ -193,7 +193,7 @@ Status MemTableList::InstallMemtableFlushResults(
     mems[i]->file_number_ = file_number;
   }

-  // if some other thread is already commiting, then return
+  // if some other thread is already committing, then return
   Status s;
   if (commit_in_progress_) {
     return s;
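The commit_in_progress_ flag the last hunk touches implements a single-committer handoff under the DB mutex: a thread that finds a commit already running returns early, and the running committer installs the latecomer's memtables as well. A minimal sketch of the pattern (illustrative, not MemTableList's actual code):

```cpp
#include <mutex>

std::mutex mu;                    // stands in for the DB mutex
bool commit_in_progress = false;  // guarded by mu

// Called with mu held. Only one thread commits at a time; latecomers
// return and rely on the committer to pick up their pending work.
void MaybeCommit() {
  if (commit_in_progress) {
    return;  // another thread owns the commit
  }
  commit_in_progress = true;
  // ... release mu, persist the version edit, re-acquire mu ...
  commit_in_progress = false;
}
```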

db/version_set.cc

@@ -275,7 +275,7 @@ class FilePicker {
               static_cast<uint32_t>(search_right_bound_));
         } else {
           // search_left_bound > search_right_bound, key does not exist in
-          // this level. Since no comparision is done in this level, it will
+          // this level. Since no comparison is done in this level, it will
           // need to search all files in the next level.
           search_left_bound_ = 0;
           search_right_bound_ = FileIndexer::kLevelMaxIndex;
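The bounds here come from RocksDB's FileIndexer, which narrows the binary-search window in the next level using where the key fell in the current one; an inverted window (left > right) proves the key cannot exist in this level. A toy sketch of searching within such a window (an assumed standalone helper, not the real FilePicker):

```cpp
#include <cstdint>
#include <vector>

// Binary-search files[left..right] for the first file whose largest key
// is >= target. An empty window (left > right) means the key is absent
// from this level and the next level must be searched in full.
int FindFileInRange(const std::vector<uint64_t>& largest_keys,
                    uint64_t target, int left, int right) {
  if (left > right) {
    return -1;  // key cannot be in this level
  }
  while (left < right) {
    int mid = left + (right - left) / 2;
    if (largest_keys[mid] < target) {
      left = mid + 1;
    } else {
      right = mid;
    }
  }
  return largest_keys[left] >= target ? left : -1;
}
```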

table/block_based_table_builder.cc

@@ -208,7 +208,7 @@ class HashIndexBuilder : public IndexBuilder {
       pending_entry_index_ = static_cast<uint32_t>(current_restart_index_);
     } else {
       // entry number increments when keys share the prefix reside in
-      // differnt data blocks.
+      // different data blocks.
       auto last_restart_index = pending_entry_index_ + pending_block_num_ - 1;
       assert(last_restart_index <= current_restart_index_);
       if (last_restart_index != current_restart_index_) {
@@ -383,7 +383,7 @@ extern const uint64_t kLegacyBlockBasedTableMagicNumber = 0xdb4775248b80fb57ull;
 // A collector that collects properties of interest to block-based table.
 // For now this class looks heavy-weight since we only write one additional
 // property.
-// But in the forseeable future, we will add more and more properties that are
+// But in the foreseeable future, we will add more and more properties that are
 // specific to block-based table.
 class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector
     : public IntTblPropCollector {

table/block_based_table_reader.cc

@@ -1347,7 +1347,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader,
       Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
           "BlockBasedTableOptions::kHashSearch requires "
           "options.prefix_extractor to be set."
-          " Fall back to binary seach index.");
+          " Fall back to binary search index.");
       index_type_on_file = BlockBasedTableOptions::kBinarySearch;
     }
@@ -1367,7 +1367,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader,
         // problem with prefix hash index loading.
         Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
             "Unable to read the metaindex block."
-            " Fall back to binary seach index.");
+            " Fall back to binary search index.");
         return BinarySearchIndexReader::Create(
             file, footer, footer.index_handle(), env, comparator, index_reader);
       }

table/block_hash_index.cc

@@ -98,7 +98,7 @@ BlockHashIndex* CreateBlockHashIndexOnTheFly(
       pending_entry_index = current_restart_index;
     } else {
       // entry number increments when keys share the prefix reside in
-      // differnt data blocks.
+      // different data blocks.
       auto last_restart_index = pending_entry_index + pending_block_num - 1;
       assert(last_restart_index <= current_restart_index);
       if (last_restart_index != current_restart_index) {

tools/db_stress.cc

@@ -174,7 +174,7 @@ DEFINE_int32(compaction_thread_pool_adjust_interval, 0,
              "The interval (in milliseconds) to adjust compaction thread pool "
              "size. Don't change it periodically if the value is 0.");

-DEFINE_int32(compaction_thread_pool_varations, 2,
+DEFINE_int32(compaction_thread_pool_variations, 2,
              "Range of bakground thread pool size variations when adjusted "
              "periodically.");

util/bloom.cc

@@ -43,7 +43,7 @@ class FullFilterBitsBuilder : public FilterBitsBuilder {
   // When creating filter, it is ensured that
   // total_bits = num_lines * CACHE_LINE_SIZE * 8
   // dst len is >= 5, 1 for num_probes, 4 for num_lines
-  // Then total_bits = (len - 5) * 8, and cache_line_size could be calulated
+  // Then total_bits = (len - 5) * 8, and cache_line_size could be calculated
   // +----------------------------------------------------------------+
   // |              filter data with length total_bits/8              |
   // +----------------------------------------------------------------+
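The layout arithmetic in this comment checks out: the filter payload is everything but the 5 trailing metadata bytes, so total_bits = (len - 5) * 8, and since total_bits = num_lines * CACHE_LINE_SIZE * 8, the cache line size can be recovered by division. A small sketch under those stated assumptions:

```cpp
#include <cstddef>
#include <cstdint>

// Recover the cache line size a full Bloom filter was built with.
// Assumed layout, per the comment in the hunk above:
//   [filter bits][1 byte num_probes][4 bytes num_lines]
uint32_t RecoverCacheLineSize(size_t len, uint32_t num_lines) {
  if (num_lines == 0 || len < 5) {
    return 0;  // malformed filter
  }
  uint32_t total_bits = static_cast<uint32_t>((len - 5) * 8);
  return total_bits / (num_lines * 8);  // bytes per cache line
}

// Example: len = 4101 bytes, num_lines = 64:
//   total_bits = 4096 * 8 = 32768, and 32768 / (64 * 8) = 64-byte lines.
```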

util/env_hdfs.cc

@@ -562,7 +562,7 @@ Status HdfsEnv::GetFileModificationTime(const std::string& fname,
 }

 // The rename is not atomic. HDFS does not allow a renaming if the
-// target already exists. So, we delete the target before attemting the
+// target already exists. So, we delete the target before attempting the
 // rename.
 Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) {
   hdfsDelete(fileSys_, target.c_str(), 1);
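The non-atomicity the comment warns about is real: between the delete and the rename another writer can recreate the target, and a crash in between loses the old target without installing the new one. A plausible reconstruction of the function body from the libhdfs calls visible in the hunk (IOError is an assumed helper; error handling abbreviated):

```cpp
Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) {
  // Non-atomic: HDFS refuses to rename onto an existing path, so remove
  // any existing target first. A crash here can leave neither file.
  hdfsDelete(fileSys_, target.c_str(), 1);
  if (hdfsRename(fileSys_, src.c_str(), target.c_str()) == 0) {
    return Status::OK();
  }
  return IOError(src, errno);  // assumed helper, as used elsewhere in env_hdfs
}
```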

util/histogram.cc

@@ -19,7 +19,7 @@ namespace rocksdb {
 HistogramBucketMapper::HistogramBucketMapper()
     :
       // Add newer bucket index here.
-      // Should be alwyas added in sorted order.
+      // Should be always added in sorted order.
       // If you change this, you also need to change
       // size of array buckets_ in HistogramImpl
       bucketValues_(

util/histogram_test.cc

@@ -33,7 +33,7 @@ TEST_F(HistogramTest, BasicOperation) {
     ASSERT_TRUE(percentile99 >= percentile85);
   }

-  ASSERT_EQ(histogram.Average(), 50.5);  // avg is acurately caluclated.
+  ASSERT_EQ(histogram.Average(), 50.5);  // avg is acurately calculated.
 }

 TEST_F(HistogramTest, EmptyHistogram) {
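The asserted 50.5 is consistent with the histogram being populated with the values 1 through 100 (an assumption inferred from the assert, not shown in the hunk): the mean is 5050 / 100 = 50.5. A one-line check:

```cpp
#include <cassert>

int main() {
  double sum = 0;
  for (int v = 1; v <= 100; ++v) sum += v;  // 1 + 2 + ... + 100 = 5050
  assert(sum / 100 == 50.5);                // matches histogram.Average()
  return 0;
}
```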

utilities/backupable/backupable_db.cc

@@ -419,7 +419,7 @@ BackupEngineImpl::BackupEngineImpl(Env* db_env,
                                    &backuped_file_infos_, backup_env_)))));
   }

-  if (options_.destroy_old_data) {  // Destory old data
+  if (options_.destroy_old_data) {  // Destroy old data
     assert(!read_only_);
     Log(options_.info_log,
         "Backup Engine started with destroy_old_data == true, deleting all "
