From e63350e726d62144037d889c348cf007deda7a80 Mon Sep 17 00:00:00 2001
From: Manuel Ung
Date: Sat, 19 Nov 2016 11:34:26 -0800
Subject: [PATCH] Use more efficient hash map for deadlock detection

Summary:
Currently, deadlock cycles are held in std::unordered_map. The problem is that
it allocates/deallocates memory on every insertion/deletion, which limits
throughput because this expensive operation is performed while holding a
global mutex. Fix this by using a vector that caches its memory instead.

Running the deadlock stress test, this change increased throughput from
39k txns/s to 49k txns/s. The effect is more noticeable in MyRocks.

Closes https://github.com/facebook/rocksdb/pull/1545

Differential Revision: D4205662

Pulled By: lth

fbshipit-source-id: ff990e4
---
 util/hash_map.h                                | 67 +++++++++++++++++++
 .../transactions/transaction_lock_mgr.cc      | 27 +++++---
 utilities/transactions/transaction_lock_mgr.h |  5 +-
 3 files changed, 86 insertions(+), 13 deletions(-)
 create mode 100644 util/hash_map.h

diff --git a/util/hash_map.h b/util/hash_map.h
new file mode 100644
index 000000000..d39f59226
--- /dev/null
+++ b/util/hash_map.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. This
+// source code is licensed under the BSD-style license found in the LICENSE
+// file in the root directory of this source tree. An additional grant of
+// patent rights can be found in the PATENTS file in the same directory.
+//
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <utility>
+
+#include "util/autovector.h"
+
+namespace rocksdb {
+
+// This is similar to std::unordered_map, except that it tries to avoid
+// allocating or deallocating memory as much as possible. With
+// std::unordered_map, an allocation/deallocation is made for every insertion
+// or deletion because of the requirement that iterators remain valid even
+// with insertions or deletions. This means that the hash chains will be
+// implemented as linked lists.
+//
+// This implementation uses autovector as hash chains instead.
+//
+template <class K, class V, uint32_t size = 256>
+class HashMap {
+  std::array<autovector<std::pair<K, V>, 1>, size> table_;
+
+ public:
+  bool Contains(K key) {
+    auto& bucket = table_[key % size];
+    auto it = std::find_if(
+        bucket.begin(), bucket.end(),
+        [key](const std::pair<K, V>& p) { return p.first == key; });
+    return it != bucket.end();
+  }
+
+  void Insert(K key, V value) {
+    auto& bucket = table_[key % size];
+    bucket.push_back({key, value});
+  }
+
+  void Delete(K key) {
+    auto& bucket = table_[key % size];
+    auto it = std::find_if(
+        bucket.begin(), bucket.end(),
+        [key](const std::pair<K, V>& p) { return p.first == key; });
+    if (it != bucket.end()) {
+      auto last = bucket.end() - 1;
+      if (it != last) {
+        *it = *last;
+      }
+      bucket.pop_back();
+    }
+  }
+
+  V& Get(K key) {
+    auto& bucket = table_[key % size];
+    auto it = std::find_if(
+        bucket.begin(), bucket.end(),
+        [key](const std::pair<K, V>& p) { return p.first == key; });
+    return it->second;
+  }
+};
+
+}  // namespace rocksdb
diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc
index 792e014b1..8a49b221c 100644
--- a/utilities/transactions/transaction_lock_mgr.cc
+++ b/utilities/transactions/transaction_lock_mgr.cc
@@ -351,12 +351,12 @@ void TransactionLockMgr::DecrementWaiters(const TransactionImpl* txn,
 void TransactionLockMgr::DecrementWaitersImpl(const TransactionImpl* txn,
                                               TransactionID wait_id) {
   auto id = txn->GetID();
-  assert(wait_txn_map_.count(id) > 0);
-  wait_txn_map_.erase(id);
+  assert(wait_txn_map_.Contains(id));
+  wait_txn_map_.Delete(id);
 
-  rev_wait_txn_map_[wait_id]--;
-  if (rev_wait_txn_map_[wait_id] == 0) {
-    rev_wait_txn_map_.erase(wait_id);
+  rev_wait_txn_map_.Get(wait_id)--;
+  if (rev_wait_txn_map_.Get(wait_id) == 0) {
+    rev_wait_txn_map_.Delete(wait_id);
   }
 }
 
@@ -364,12 +364,17 @@ bool TransactionLockMgr::IncrementWaiters(const TransactionImpl* txn,
                                           TransactionID wait_id) {
   auto id = txn->GetID();
   std::lock_guard<std::mutex> lock(wait_txn_map_mutex_);
-  assert(wait_txn_map_.count(id) == 0);
-  wait_txn_map_[id] = wait_id;
-  rev_wait_txn_map_[wait_id]++;
+  assert(!wait_txn_map_.Contains(id));
+  wait_txn_map_.Insert(id, wait_id);
+
+  if (rev_wait_txn_map_.Contains(wait_id)) {
+    rev_wait_txn_map_.Get(wait_id)++;
+  } else {
+    rev_wait_txn_map_.Insert(wait_id, 1);
+  }
 
   // No deadlock if nobody is waiting on self.
-  if (rev_wait_txn_map_.count(id) == 0) {
+  if (!rev_wait_txn_map_.Contains(id)) {
     return false;
   }
 
@@ -378,10 +383,10 @@ bool TransactionLockMgr::IncrementWaiters(const TransactionImpl* txn,
     if (next == id) {
       DecrementWaitersImpl(txn, wait_id);
       return true;
-    } else if (wait_txn_map_.count(next) == 0) {
+    } else if (!wait_txn_map_.Contains(next)) {
       return false;
     } else {
-      next = wait_txn_map_[next];
+      next = wait_txn_map_.Get(next);
     }
   }
 
diff --git a/utilities/transactions/transaction_lock_mgr.h b/utilities/transactions/transaction_lock_mgr.h
index da9913e16..ea0352d53 100644
--- a/utilities/transactions/transaction_lock_mgr.h
+++ b/utilities/transactions/transaction_lock_mgr.h
@@ -13,6 +13,7 @@
 #include <vector>
 
 #include "rocksdb/utilities/transaction.h"
+#include "util/hash_map.h"
 #include "util/instrumented_mutex.h"
 #include "util/thread_local.h"
 #include "utilities/transactions/transaction_impl.h"
@@ -88,9 +89,9 @@ class TransactionLockMgr {
   std::mutex wait_txn_map_mutex_;
 
   // Maps from waitee -> number of waiters.
-  std::unordered_map<TransactionID, int> rev_wait_txn_map_;
+  HashMap<TransactionID, int> rev_wait_txn_map_;
   // Maps from waiter -> waitee.
-  std::unordered_map<TransactionID, TransactionID> wait_txn_map_;
+  HashMap<TransactionID, TransactionID> wait_txn_map_;
 
   // Used to allocate mutexes/condvars to use when locking keys
   std::shared_ptr<TransactionDBMutexFactory> mutex_factory_;
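
Usage sketch (illustrative, not part of the patch): the snippet below exercises
the new HashMap the same way the lock manager's deadlock check does, assuming a
RocksDB source tree with util/hash_map.h and util/autovector.h on the include
path. The file name, the transaction IDs, the walk's depth limit, and the
defaulted bucket count relied on here are assumptions for the example only.

// deadlock_map_example.cc -- illustrative sketch only.
#include <cassert>
#include <cstdint>

#include "util/hash_map.h"

using TransactionID = uint64_t;

int main() {
  // waiter -> waitee edges, held in the allocation-free map.
  rocksdb::HashMap<TransactionID, TransactionID> wait_txn_map;
  // waitee -> number of waiters.
  rocksdb::HashMap<TransactionID, int> rev_wait_txn_map;

  // Txn 1 starts waiting on txn 2: record the edge and bump the waiter
  // count, the same bookkeeping IncrementWaiters performs.
  wait_txn_map.Insert(1, 2);
  if (rev_wait_txn_map.Contains(2)) {
    rev_wait_txn_map.Get(2)++;
  } else {
    rev_wait_txn_map.Insert(2, 1);
  }

  // Suppose txn 2 now wants to wait on txn 1. Walking waiter -> waitee
  // edges from txn 1 leads back to txn 2: the cycle 2 -> 1 -> 2.
  TransactionID id = 2;
  TransactionID next = 1;
  bool deadlock = false;
  for (int i = 0; i < 50; i++) {  // bounded walk; the limit is arbitrary here
    if (next == id) {
      deadlock = true;
      break;
    } else if (!wait_txn_map.Contains(next)) {
      break;  // chain ended without looping back: no deadlock
    } else {
      next = wait_txn_map.Get(next);
    }
  }
  assert(deadlock);

  // Txn 1 stops waiting: undo the bookkeeping, as DecrementWaitersImpl does.
  wait_txn_map.Delete(1);
  rev_wait_txn_map.Get(2)--;
  if (rev_wait_txn_map.Get(2) == 0) {
    rev_wait_txn_map.Delete(2);
  }
  return 0;
}

Note that Delete moves the bucket's last element into the erased slot and pops
the back, so buckets shrink without returning their storage to the allocator,
which is the allocation-avoidance the commit message describes.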