Fix data races caught by tsan

Summary:
This fixes the data races reported by TSAN (ThreadSanitizer) in:
- write_callback_test
- persistent_cache_test.*
by making the racily accessed test flag and cache statistics counters std::atomic.
Closes https://github.com/facebook/rocksdb/pull/2339

Differential Revision: D5101190

Pulled By: sagar0

fbshipit-source-id: 537e19ed05272b1f34cfbf793aa822b2264a1643
commit 228f49d20a
parent 4c9d2b1046
Author: Sagar Vemuri
Committed by: Facebook Github Bot
Changed files:
  db/write_callback_test.cc (17 lines changed)
  utilities/persistent_cache/block_cache_tier.h (9 lines changed)
  utilities/persistent_cache/volatile_tier_impl.h (8 lines changed)
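
Note (editor): for context on the class of bug being fixed, TSAN flags any unsynchronized concurrent access to a plain field where at least one access is a write. Below is a minimal standalone sketch of the racy pattern and of the std::atomic fix the diffs below apply; it is illustrative only and not part of the patch.

#include <atomic>
#include <cstdint>
#include <thread>

struct Stats {
  uint64_t plain_hits = 0;               // racy: concurrent ++ from two threads
  std::atomic<uint64_t> atomic_hits{0};  // race-free: atomic read-modify-write
};

int main() {
  Stats s;
  auto bump = [&s] {
    for (int i = 0; i < 100000; ++i) {
      // ++s.plain_hits;  // uncommenting this line reproduces a tsan report
      ++s.atomic_hits;    // the pattern the diffs below switch to
    }
  };
  std::thread t1(bump), t2(bump);
  t1.join();
  t2.join();
  return 0;
}

Built with -fsanitize=thread (e.g. g++ -std=c++11 -fsanitize=thread -pthread race.cc), the plain counter produces a "data race" report; the atomic version runs clean.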

diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc
@@ -7,6 +7,7 @@
 #ifndef ROCKSDB_LITE
+#include <atomic>
 #include <functional>
 #include <string>
 #include <utility>
@ -65,11 +66,19 @@ class WriteCallbackTestWriteCallback2 : public WriteCallback {
class MockWriteCallback : public WriteCallback { class MockWriteCallback : public WriteCallback {
public: public:
bool should_fail_ = false; bool should_fail_ = false;
bool was_called_ = false;
bool allow_batching_ = false; bool allow_batching_ = false;
std::atomic<bool> was_called_{false};
MockWriteCallback() {}
MockWriteCallback(const MockWriteCallback& other) {
should_fail_ = other.should_fail_;
allow_batching_ = other.allow_batching_;
was_called_.store(other.was_called_.load());
}
Status Callback(DB* db) override { Status Callback(DB* db) override {
was_called_ = true; was_called_.store(true);
if (should_fail_) { if (should_fail_) {
return Status::Busy(); return Status::Busy();
} else { } else {
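
Note (editor): the hunk above also adds a copy constructor because std::atomic<T> is neither copy-constructible nor copy-assignable, so turning was_called_ into an atomic deletes MockWriteCallback's implicit copy operations, presumably needed because the test copies these structs. A minimal sketch of the constraint and the load()/store() workaround (illustrative, not from the patch):

#include <atomic>

struct Flag {
  std::atomic<bool> v{false};
};
// Flag a; Flag b(a);  // error: std::atomic's copy constructor is deleted

struct CopyableFlag {
  std::atomic<bool> v{false};
  CopyableFlag() {}
  // Copy the contained value rather than the atomic object itself,
  // which is exactly the approach the hunk above takes.
  CopyableFlag(const CopyableFlag& other) { v.store(other.v.load()); }
};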
@@ -92,7 +101,7 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
   void Clear() {
     kvs_.clear();
     write_batch_.Clear();
-    callback_.was_called_ = false;
+    callback_.was_called_.store(false);
   }
 
   MockWriteCallback callback_;
@@ -265,7 +274,7 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
         // check for keys
         string value;
         for (auto& w : write_group) {
-          ASSERT_TRUE(w.callback_.was_called_);
+          ASSERT_TRUE(w.callback_.was_called_.load());
           for (auto& kvp : w.kvs_) {
             if (w.callback_.should_fail_) {
               ASSERT_TRUE(

@ -12,6 +12,7 @@
#include <unistd.h> #include <unistd.h>
#endif // ! OS_WIN #endif // ! OS_WIN
#include <atomic>
#include <list> #include <list>
#include <memory> #include <memory>
#include <set> #include <set>
@@ -123,10 +124,10 @@ class BlockCacheTier : public PersistentCacheTier {
   HistogramImpl read_hit_latency_;
   HistogramImpl read_miss_latency_;
   HistogramImpl write_latency_;
-  uint64_t cache_hits_ = 0;
-  uint64_t cache_misses_ = 0;
-  uint64_t cache_errors_ = 0;
-  uint64_t insert_dropped_ = 0;
+  std::atomic<uint64_t> cache_hits_{0};
+  std::atomic<uint64_t> cache_misses_{0};
+  std::atomic<uint64_t> cache_errors_{0};
+  std::atomic<uint64_t> insert_dropped_{0};
 
   double CacheHitPct() const {
     const auto lookups = cache_hits_ + cache_misses_;
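
Note (editor): CacheHitPct() itself did not need to change, since std::atomic<uint64_t> converts implicitly to uint64_t through a sequentially consistent load, so cache_hits_ + cache_misses_ still compiles and auto deduces a plain uint64_t. The two loads are not one atomic snapshot, which is acceptable for an approximate hit-rate statistic. A sketch of the semantics (HitPct is a hypothetical stand-in, not the RocksDB method):

#include <atomic>
#include <cstdint>

double HitPct(const std::atomic<uint64_t>& hits,
              const std::atomic<uint64_t>& misses) {
  const uint64_t h = hits;              // implicit seq_cst load
  const uint64_t lookups = h + misses;  // second, independent load
  // The counters may advance between the two loads; for statistics that is fine.
  return lookups != 0 ? (100.0 * h) / lookups : 0.0;
}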

diff --git a/utilities/persistent_cache/volatile_tier_impl.h b/utilities/persistent_cache/volatile_tier_impl.h
@@ -110,10 +110,10 @@ class VolatileCacheTier : public PersistentCacheTier {
   };
 
   struct Statistics {
-    uint64_t cache_misses_ = 0;
-    uint64_t cache_hits_ = 0;
-    uint64_t cache_inserts_ = 0;
-    uint64_t cache_evicts_ = 0;
+    std::atomic<uint64_t> cache_misses_{0};
+    std::atomic<uint64_t> cache_hits_{0};
+    std::atomic<uint64_t> cache_inserts_{0};
+    std::atomic<uint64_t> cache_evicts_{0};
 
     double CacheHitPct() const {
       auto lookups = cache_hits_ + cache_misses_;
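
Note (editor): to reproduce or verify fixes like this, RocksDB's Makefile supports a ThreadSanitizer build via the COMPILE_WITH_TSAN=1 environment variable, e.g. COMPILE_WITH_TSAN=1 make write_callback_test persistent_cache_test, then running the resulting test binaries. The exact target names here are an assumption inferred from the test names in the summary.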
