Use pre-increment instead of post-increment for iterators (#5296)

Summary:
The Google C++ style guide recommends pre-increment over post-increment for iterators: https://google.github.io/styleguide/cppguide.html#Preincrement_and_Predecrement. This change replaces every instance of ' it++' with ' ++it' where the type is an iterator, so it covers the loops whose iterator variable is named 'it'.
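As background, here is a minimal standalone sketch (not part of this commit; the container and names are purely illustrative) of why the guideline exists: a canonical post-increment must copy the iterator in order to return its prior value, whereas pre-increment advances in place. Compilers usually optimize the copy away for trivial iterators, but the pre-increment form is never worse:

#include <map>
#include <string>

int main() {
  std::map<int, std::string> m = {{1, "a"}, {2, "b"}};
  // Preferred: operator++() advances the iterator in place and returns *this.
  for (auto it = m.begin(); it != m.end(); ++it) {
    (void)it->second;  // use the element
  }
  // By contrast, a canonical operator++(int) is implemented roughly as
  //   Iter operator++(int) { Iter tmp = *this; ++*this; return tmp; }
  // so 'it++' in a loop header like the one above constructs a temporary
  // whose value is immediately discarded.
  return 0;
}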
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5296

Differential Revision: D15301256

Pulled By: tfersch

fbshipit-source-id: 2803483c1392504ad3b281d21db615429c71114b
Branch: main
Author: Thomas Fersch (committed by Facebook Github Bot)
Parent: 189e711b37
Commit: a42757607d
10 changed files:

  1. db/compaction_job.cc (2 changed lines)
  2. db/db_impl.h (2 changed lines)
  3. db/db_impl_compaction_flush.cc (10 changed lines)
  4. db/memtable_list.cc (2 changed lines)
  5. db/prefix_test.cc (2 changed lines)
  6. utilities/transactions/pessimistic_transaction_db.cc (4 changed lines)
  7. utilities/transactions/transaction_test.cc (4 changed lines)
  8. utilities/transactions/write_prepared_transaction_test.cc (6 changed lines)
  9. utilities/transactions/write_prepared_txn_db.cc (4 changed lines)
  10. utilities/transactions/write_unprepared_txn_db.cc (2 changed lines)

db/compaction_job.cc
@@ -519,7 +519,7 @@ void CompactionJob::GenSubcompactionBoundaries() {
     auto* v = compact_->compaction->input_version();
     for (auto it = bounds.begin();;) {
       const Slice a = *it;
-      it++;
+      ++it;
       if (it == bounds.end()) {
         break;

db/db_impl.h
@@ -725,7 +725,7 @@ class DBImpl : public DB {
   void DeleteAllRecoveredTransactions() {
     for (auto it = recovered_transactions_.begin();
-         it != recovered_transactions_.end(); it++) {
+         it != recovered_transactions_.end(); ++it) {
       delete it->second;
     }
     recovered_transactions_.clear();

db/db_impl_compaction_flush.cc
@@ -2794,7 +2794,7 @@ void DBImpl::RemoveManualCompaction(DBImpl::ManualCompactionState* m) {
       it = manual_compaction_dequeue_.erase(it);
       return;
     }
-    it++;
+    ++it;
   }
   assert(false);
   return;
@@ -2815,7 +2815,7 @@ bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) {
   bool seen = false;
   while (it != manual_compaction_dequeue_.end()) {
     if (m == (*it)) {
-      it++;
+      ++it;
       seen = true;
       continue;
     } else if (MCOverlap(m, (*it)) && (!seen && !(*it)->in_progress)) {
@@ -2824,7 +2824,7 @@ bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) {
       // and (*it) is ahead in the queue and is not yet in progress
       return true;
     }
-    it++;
+    ++it;
   }
   return false;
 }
@@ -2842,7 +2842,7 @@ bool DBImpl::HaveManualCompaction(ColumnFamilyData* cfd) {
       // in progress
       return true;
     }
-    it++;
+    ++it;
   }
   return false;
 }
@@ -2855,7 +2855,7 @@ bool DBImpl::HasExclusiveManualCompaction() {
     if ((*it)->exclusive) {
       return true;
     }
-    it++;
+    ++it;
   }
   return false;
 }

db/memtable_list.cc
@@ -437,7 +437,7 @@ Status MemTableList::TryInstallMemtableFlushResults(
         ++mem_id;
       }
     } else {
-      for (auto it = current_->memlist_.rbegin(); batch_count-- > 0; it++) {
+      for (auto it = current_->memlist_.rbegin(); batch_count-- > 0; ++it) {
         MemTable* m = *it;
         // commit failed. setup state so that we can flush again.
         ROCKS_LOG_BUFFER(log_buffer, "Level-0 commit table #%" PRIu64

db/prefix_test.cc
@@ -751,7 +751,7 @@ TEST_F(PrefixTest, PrefixSeekModePrev) {
     for (size_t k = 0; k < 9; k++) {
       if (rnd.OneIn(2) || it == whole_map.begin()) {
         iter->Next();
-        it++;
+        ++it;
         if (FLAGS_enable_print) {
           std::cout << "Next >> ";
         }

utilities/transactions/pessimistic_transaction_db.cc
@@ -121,7 +121,7 @@ Status PessimisticTransactionDB::Initialize(
   assert(dbimpl != nullptr);
   auto rtrxs = dbimpl->recovered_transactions();
-  for (auto it = rtrxs.begin(); it != rtrxs.end(); it++) {
+  for (auto it = rtrxs.begin(); it != rtrxs.end(); ++it) {
     auto recovered_trx = it->second;
     assert(recovered_trx);
     assert(recovered_trx->batches_.size() == 1);
@@ -594,7 +594,7 @@ void PessimisticTransactionDB::GetAllPreparedTransactions(
   assert(transv);
   transv->clear();
   std::lock_guard<std::mutex> lock(name_map_mutex_);
-  for (auto it = transactions_.begin(); it != transactions_.end(); it++) {
+  for (auto it = transactions_.begin(); it != transactions_.end(); ++it) {
     if (it->second->GetState() == Transaction::PREPARED) {
       transv->push_back(it->second);
     }

utilities/transactions/transaction_test.cc
@@ -567,7 +567,7 @@ TEST_P(TransactionTest, DeadlockCycleShared) {
     TransactionID leaf_id =
         dlock_entry[dlock_entry.size() - 1].m_txn_id - offset_root;
-    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
+    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); ++it) {
       auto dl_node = *it;
       ASSERT_EQ(dl_node.m_txn_id, offset_root + leaf_id);
       ASSERT_EQ(dl_node.m_cf_id, 0);
@@ -774,7 +774,7 @@ TEST_P(TransactionStressTest, DeadlockCycle) {
     }
     // Iterates backwards over path verifying decreasing txn_ids.
-    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
+    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); ++it) {
       auto dl_node = *it;
       ASSERT_EQ(dl_node.m_txn_id, len + curr_txn_id - 1);
       ASSERT_EQ(dl_node.m_cf_id, 0);

utilities/transactions/write_prepared_transaction_test.cc
@@ -1099,7 +1099,7 @@ TEST_P(SnapshotConcurrentAccessTest, SnapshotConcurrentAccessTest) {
       new_snapshots.push_back(snapshots[old_snapshots.size() + i]);
     }
     for (auto it = common_snapshots.begin(); it != common_snapshots.end();
-         it++) {
+         ++it) {
       auto snapshot = *it;
       // Create a commit entry that is around the snapshot and thus should
       // be not be discarded
@@ -1166,12 +1166,12 @@ TEST_P(WritePreparedTransactionTest, AdvanceMaxEvictedSeqBasicTest) {
     // b. delayed prepared should contain every txn <= max and prepared should
     // only contain txns > max
     auto it = initial_prepared.begin();
-    for (; it != initial_prepared.end() && *it <= new_max; it++) {
+    for (; it != initial_prepared.end() && *it <= new_max; ++it) {
       ASSERT_EQ(1, wp_db->delayed_prepared_.erase(*it));
     }
     ASSERT_TRUE(wp_db->delayed_prepared_.empty());
     for (; it != initial_prepared.end() && !wp_db->prepared_txns_.empty();
-         it++, wp_db->prepared_txns_.pop()) {
+         ++it, wp_db->prepared_txns_.pop()) {
       ASSERT_EQ(*it, wp_db->prepared_txns_.top());
     }
     ASSERT_TRUE(it == initial_prepared.end());

utilities/transactions/write_prepared_txn_db.cc
@@ -798,7 +798,7 @@ void WritePreparedTxnDB::UpdateSnapshots(
   // afterwards.
   size_t i = 0;
   auto it = snapshots.begin();
-  for (; it != snapshots.end() && i < SNAPSHOT_CACHE_SIZE; it++, i++) {
+  for (; it != snapshots.end() && i < SNAPSHOT_CACHE_SIZE; ++it, ++i) {
     snapshot_cache_[i].store(*it, std::memory_order_release);
     TEST_IDX_SYNC_POINT("WritePreparedTxnDB::UpdateSnapshots:p:", ++sync_i);
     TEST_IDX_SYNC_POINT("WritePreparedTxnDB::UpdateSnapshots:s:", sync_i);
@@ -812,7 +812,7 @@ void WritePreparedTxnDB::UpdateSnapshots(
   }
 #endif
   snapshots_.clear();
-  for (; it != snapshots.end(); it++) {
+  for (; it != snapshots.end(); ++it) {
     // Insert them to a vector that is less efficient to access
     // concurrently
     snapshots_.push_back(*it);

utilities/transactions/write_unprepared_txn_db.cc
@@ -46,7 +46,7 @@ Status WriteUnpreparedTxnDB::RollbackRecoveredTransaction(
   };
   // Iterate starting with largest sequence number.
-  for (auto it = rtxn->batches_.rbegin(); it != rtxn->batches_.rend(); it++) {
+  for (auto it = rtxn->batches_.rbegin(); it != rtxn->batches_.rend(); ++it) {
     auto last_visible_txn = it->first - 1;
     const auto& batch = it->second.batch_;
     WriteBatch rollback_batch;
