Use pre-increment instead of post-increment for iterators (#5296)

Summary:
The Google C++ style guide recommends pre-increment over post-increment for iterators: https://google.github.io/styleguide/cppguide.html#Preincrement_and_Predecrement. This change replaces every occurrence of ` it++` with ` ++it` where the variable is an iterator, so it covers the cases where iterators are named `it`.
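
For context beyond the patch itself: post-increment must return the iterator's old value, so `operator++(int)` makes a copy before advancing, whereas pre-increment advances in place and returns a reference. Compilers usually optimize the dead copy away for simple iterators, so the change is largely stylistic, but the pre-increment form is never worse. A minimal standalone sketch of the two forms (illustrative only, not code from this patch):

```cpp
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<int, std::string> m = {{1, "one"}, {2, "two"}, {3, "three"}};

  // Post-increment: it++ must copy the iterator, advance the original,
  // and return the copy, which is then discarded here.
  for (auto it = m.begin(); it != m.end(); it++) {
    std::cout << it->first << ' ';
  }
  std::cout << '\n';

  // Pre-increment: ++it advances in place and returns a reference;
  // no temporary is created. This is the form the style guide prefers
  // whenever the returned value is unused.
  for (auto it = m.begin(); it != m.end(); ++it) {
    std::cout << it->first << ' ';
  }
  std::cout << '\n';
  return 0;
}
```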
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5296

Differential Revision: D15301256

Pulled By: tfersch

fbshipit-source-id: 2803483c1392504ad3b281d21db615429c71114b
Branch: main
Commit: a42757607d (parent 189e711b37)
Author: Thomas Fersch, committed by Facebook Github Bot
Files changed (10):
1. db/compaction_job.cc (2 lines)
2. db/db_impl.h (2 lines)
3. db/db_impl_compaction_flush.cc (10 lines)
4. db/memtable_list.cc (2 lines)
5. db/prefix_test.cc (2 lines)
6. utilities/transactions/pessimistic_transaction_db.cc (4 lines)
7. utilities/transactions/transaction_test.cc (4 lines)
8. utilities/transactions/write_prepared_transaction_test.cc (6 lines)
9. utilities/transactions/write_prepared_txn_db.cc (4 lines)
10. utilities/transactions/write_unprepared_txn_db.cc (2 lines)

db/compaction_job.cc
@@ -519,7 +519,7 @@ void CompactionJob::GenSubcompactionBoundaries() {
   auto* v = compact_->compaction->input_version();
   for (auto it = bounds.begin();;) {
     const Slice a = *it;
-    it++;
+    ++it;
     if (it == bounds.end()) {
       break;

db/db_impl.h
@@ -725,7 +725,7 @@ class DBImpl : public DB {
   void DeleteAllRecoveredTransactions() {
     for (auto it = recovered_transactions_.begin();
-         it != recovered_transactions_.end(); it++) {
+         it != recovered_transactions_.end(); ++it) {
       delete it->second;
     }
     recovered_transactions_.clear();

db/db_impl_compaction_flush.cc
@@ -2794,7 +2794,7 @@ void DBImpl::RemoveManualCompaction(DBImpl::ManualCompactionState* m) {
       it = manual_compaction_dequeue_.erase(it);
       return;
     }
-    it++;
+    ++it;
   }
   assert(false);
   return;
@@ -2815,7 +2815,7 @@ bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) {
   bool seen = false;
   while (it != manual_compaction_dequeue_.end()) {
     if (m == (*it)) {
-      it++;
+      ++it;
       seen = true;
       continue;
     } else if (MCOverlap(m, (*it)) && (!seen && !(*it)->in_progress)) {
@@ -2824,7 +2824,7 @@ bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) {
       // and (*it) is ahead in the queue and is not yet in progress
       return true;
     }
-    it++;
+    ++it;
   }
   return false;
 }
@@ -2842,7 +2842,7 @@ bool DBImpl::HaveManualCompaction(ColumnFamilyData* cfd) {
       // in progress
       return true;
     }
-    it++;
+    ++it;
   }
   return false;
 }
@@ -2855,7 +2855,7 @@ bool DBImpl::HasExclusiveManualCompaction() {
     if ((*it)->exclusive) {
      return true;
     }
-    it++;
+    ++it;
   }
   return false;
 }

db/memtable_list.cc
@@ -437,7 +437,7 @@ Status MemTableList::TryInstallMemtableFlushResults(
       ++mem_id;
     }
   } else {
-    for (auto it = current_->memlist_.rbegin(); batch_count-- > 0; it++) {
+    for (auto it = current_->memlist_.rbegin(); batch_count-- > 0; ++it) {
      MemTable* m = *it;
      // commit failed. setup state so that we can flush again.
      ROCKS_LOG_BUFFER(log_buffer, "Level-0 commit table #%" PRIu64

db/prefix_test.cc
@@ -751,7 +751,7 @@ TEST_F(PrefixTest, PrefixSeekModePrev) {
   for (size_t k = 0; k < 9; k++) {
     if (rnd.OneIn(2) || it == whole_map.begin()) {
       iter->Next();
-      it++;
+      ++it;
       if (FLAGS_enable_print) {
         std::cout << "Next >> ";
       }

utilities/transactions/pessimistic_transaction_db.cc
@@ -121,7 +121,7 @@ Status PessimisticTransactionDB::Initialize(
   assert(dbimpl != nullptr);
   auto rtrxs = dbimpl->recovered_transactions();
-  for (auto it = rtrxs.begin(); it != rtrxs.end(); it++) {
+  for (auto it = rtrxs.begin(); it != rtrxs.end(); ++it) {
     auto recovered_trx = it->second;
     assert(recovered_trx);
     assert(recovered_trx->batches_.size() == 1);
@@ -594,7 +594,7 @@ void PessimisticTransactionDB::GetAllPreparedTransactions(
   assert(transv);
   transv->clear();
   std::lock_guard<std::mutex> lock(name_map_mutex_);
-  for (auto it = transactions_.begin(); it != transactions_.end(); it++) {
+  for (auto it = transactions_.begin(); it != transactions_.end(); ++it) {
     if (it->second->GetState() == Transaction::PREPARED) {
       transv->push_back(it->second);
     }

utilities/transactions/transaction_test.cc
@@ -567,7 +567,7 @@ TEST_P(TransactionTest, DeadlockCycleShared) {
     TransactionID leaf_id =
         dlock_entry[dlock_entry.size() - 1].m_txn_id - offset_root;
-    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
+    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); ++it) {
       auto dl_node = *it;
       ASSERT_EQ(dl_node.m_txn_id, offset_root + leaf_id);
       ASSERT_EQ(dl_node.m_cf_id, 0);
@@ -774,7 +774,7 @@ TEST_P(TransactionStressTest, DeadlockCycle) {
   }
   // Iterates backwards over path verifying decreasing txn_ids.
-  for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
+  for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); ++it) {
     auto dl_node = *it;
     ASSERT_EQ(dl_node.m_txn_id, len + curr_txn_id - 1);
     ASSERT_EQ(dl_node.m_cf_id, 0);

utilities/transactions/write_prepared_transaction_test.cc
@@ -1099,7 +1099,7 @@ TEST_P(SnapshotConcurrentAccessTest, SnapshotConcurrentAccessTest) {
     new_snapshots.push_back(snapshots[old_snapshots.size() + i]);
   }
   for (auto it = common_snapshots.begin(); it != common_snapshots.end();
-       it++) {
+       ++it) {
     auto snapshot = *it;
     // Create a commit entry that is around the snapshot and thus should
     // be not be discarded
@@ -1166,12 +1166,12 @@ TEST_P(WritePreparedTransactionTest, AdvanceMaxEvictedSeqBasicTest) {
   // b. delayed prepared should contain every txn <= max and prepared should
   // only contain txns > max
   auto it = initial_prepared.begin();
-  for (; it != initial_prepared.end() && *it <= new_max; it++) {
+  for (; it != initial_prepared.end() && *it <= new_max; ++it) {
     ASSERT_EQ(1, wp_db->delayed_prepared_.erase(*it));
   }
   ASSERT_TRUE(wp_db->delayed_prepared_.empty());
   for (; it != initial_prepared.end() && !wp_db->prepared_txns_.empty();
-       it++, wp_db->prepared_txns_.pop()) {
+       ++it, wp_db->prepared_txns_.pop()) {
     ASSERT_EQ(*it, wp_db->prepared_txns_.top());
   }
   ASSERT_TRUE(it == initial_prepared.end());

utilities/transactions/write_prepared_txn_db.cc
@@ -798,7 +798,7 @@ void WritePreparedTxnDB::UpdateSnapshots(
   // afterwards.
   size_t i = 0;
   auto it = snapshots.begin();
-  for (; it != snapshots.end() && i < SNAPSHOT_CACHE_SIZE; it++, i++) {
+  for (; it != snapshots.end() && i < SNAPSHOT_CACHE_SIZE; ++it, ++i) {
     snapshot_cache_[i].store(*it, std::memory_order_release);
     TEST_IDX_SYNC_POINT("WritePreparedTxnDB::UpdateSnapshots:p:", ++sync_i);
     TEST_IDX_SYNC_POINT("WritePreparedTxnDB::UpdateSnapshots:s:", sync_i);
@@ -812,7 +812,7 @@ void WritePreparedTxnDB::UpdateSnapshots(
   }
 #endif
   snapshots_.clear();
-  for (; it != snapshots.end(); it++) {
+  for (; it != snapshots.end(); ++it) {
     // Insert them to a vector that is less efficient to access
     // concurrently
     snapshots_.push_back(*it);

utilities/transactions/write_unprepared_txn_db.cc
@@ -46,7 +46,7 @@ Status WriteUnpreparedTxnDB::RollbackRecoveredTransaction(
   };
   // Iterate starting with largest sequence number.
-  for (auto it = rtxn->batches_.rbegin(); it != rtxn->batches_.rend(); it++) {
+  for (auto it = rtxn->batches_.rbegin(); it != rtxn->batches_.rend(); ++it) {
     auto last_visible_txn = it->first - 1;
     const auto& batch = it->second.batch_;
     WriteBatch rollback_batch;
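
For contrast, a case this patch correctly leaves out of scope: when the value returned by the increment is actually consumed, post-increment is the right tool. A hypothetical sketch (the container and helper below are illustrative, not from RocksDB):

```cpp
#include <cassert>
#include <map>
#include <string>

// Erase all entries with empty values while iterating. std::map::erase
// invalidates only the erased iterator, so it++ is load-bearing here:
// it returns a copy pointing at the element to erase while the original
// iterator has already moved past it.
void EraseEmptyValues(std::map<int, std::string>* m) {
  for (auto it = m->begin(); it != m->end();) {
    if (it->second.empty()) {
      m->erase(it++);  // old value consumed by erase
    } else {
      ++it;  // nothing consumes the result, so pre-increment
    }
  }
}

int main() {
  std::map<int, std::string> m = {{1, "a"}, {2, ""}, {3, "c"}};
  EraseEmptyValues(&m);
  assert(m.size() == 2);
  return 0;
}
```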
