diff --git a/db/table_cache.cc b/db/table_cache.cc
index 4628a3735..1c37f74ab 100644
--- a/db/table_cache.cc
+++ b/db/table_cache.cc
@@ -62,6 +62,8 @@ void AppendVarint64(IterKey* key, uint64_t v) {
 
 }  // namespace
 
+const int kLoadConcurency = 128;
+
 TableCache::TableCache(const ImmutableCFOptions& ioptions,
                        const FileOptions& file_options, Cache* const cache,
                        BlockCacheTracer* const block_cache_tracer)
@@ -69,7 +71,8 @@ TableCache::TableCache(const ImmutableCFOptions& ioptions,
       file_options_(file_options),
       cache_(cache),
       immortal_tables_(false),
-      block_cache_tracer_(block_cache_tracer) {
+      block_cache_tracer_(block_cache_tracer),
+      loader_mutex_(kLoadConcurency, GetSliceNPHash64) {
   if (ioptions_.row_cache) {
     // If the same cache is shared by multiple instances, we need to
     // disambiguate its entries.
@@ -155,6 +158,13 @@ Status TableCache::FindTable(const FileOptions& file_options,
     if (no_io) {  // Don't do IO and return a not-found status
       return Status::Incomplete("Table not found in table_cache, no_io is set");
     }
+    MutexLock load_lock(loader_mutex_.get(key));
+    // We check the cache again under loading mutex
+    *handle = cache_->Lookup(key);
+    if (*handle != nullptr) {
+      return s;
+    }
+
     std::unique_ptr<TableReader> table_reader;
     s = GetTableReader(file_options, internal_comparator, fd,
                        false /* sequential mode */, record_read_stats,
diff --git a/db/table_cache.h b/db/table_cache.h
index 45d4f2998..71724808e 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -221,6 +221,7 @@ class TableCache {
   std::string row_cache_id_;
   bool immortal_tables_;
   BlockCacheTracer* const block_cache_tracer_;
+  Striped<port::Mutex, Slice> loader_mutex_;
 };
 
 }  // namespace ROCKSDB_NAMESPACE
diff --git a/util/mutexlock.h b/util/mutexlock.h
index 91ba4fda7..379a0eaad 100644
--- a/util/mutexlock.h
+++ b/util/mutexlock.h
@@ -132,4 +132,55 @@ class SpinMutex {
   std::atomic<bool> locked_;
 };
 
+// We want to prevent false sharing
+template <class T>
+struct ALIGN_AS(CACHE_LINE_SIZE) LockData {
+  T lock_;
+};
+
+//
+// Inspired by Guava: https://github.com/google/guava/wiki/StripedExplained
+// A striped Lock. This offers the underlying lock striping similar
+// to that of ConcurrentHashMap in a reusable form, and extends it for
+// semaphores and read-write locks. Conceptually, lock striping is the
+// technique of dividing a lock into many stripes, increasing the granularity
+// of a single lock and allowing independent operations to lock different
+// stripes and proceed concurrently, instead of creating contention for a
+// single lock.
+//
+template <class T, class P>
+class Striped {
+ public:
+  Striped(size_t stripes, std::function<uint64_t(const P &)> hash)
+      : stripes_(stripes), hash_(hash) {
+    locks_ = reinterpret_cast<LockData<T> *>(
+        port::cacheline_aligned_alloc(sizeof(LockData<T>) * stripes));
+    for (size_t i = 0; i < stripes; i++) {
+      new (&locks_[i]) LockData<T>();
+    }
+  }
+
+  virtual ~Striped() {
+    if (locks_ != nullptr) {
+      assert(stripes_ > 0);
+      for (size_t i = 0; i < stripes_; i++) {
+        locks_[i].~LockData<T>();
+      }
+      port::cacheline_aligned_free(locks_);
+    }
+  }
+
+  T *get(const P &key) {
+    uint64_t h = hash_(key);
+    size_t index = h % stripes_;
+    return &reinterpret_cast<LockData<T> *>(&locks_[index])->lock_;
+  }
+
+ private:
+  size_t stripes_;
+  LockData<T> *locks_;
+  std::function<uint64_t(const P &)> hash_;
+};
+
 }  // namespace ROCKSDB_NAMESPACE
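
Reviewer note: the FindTable() change above follows a check / lock / re-check pattern:
look up the cache without any per-key lock, and only on a miss take the striped mutex
for that key, re-check the cache under it, and perform the expensive table open only if
the entry is still absent. Below is a minimal standalone sketch of that pattern,
assuming std::mutex and std::unordered_map in place of RocksDB's Cache and port::Mutex;
every name in it (DemoTableCache, TableHandle) is an illustrative stand-in, not an API
from this patch.

#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

struct TableHandle {
  std::string file;  // stand-in for a fully opened table reader
};

class DemoTableCache {
 public:
  explicit DemoTableCache(size_t stripes) : stripe_mutexes_(stripes) {}

  std::shared_ptr<TableHandle> FindTable(const std::string& key) {
    // 1. Optimistic lookup; the common (hit) path takes no per-key lock.
    if (auto handle = Lookup(key)) {
      return handle;
    }
    // 2. Miss: serialize loaders of this key on its stripe's mutex.
    std::mutex* stripe =
        &stripe_mutexes_[std::hash<std::string>{}(key) % stripe_mutexes_.size()];
    std::lock_guard<std::mutex> load_guard(*stripe);
    // 3. Re-check under the loading mutex: another thread may have finished
    //    the open while we were waiting on the stripe.
    if (auto handle = Lookup(key)) {
      return handle;
    }
    // 4. Still absent: do the expensive open exactly once for this key.
    auto handle = std::make_shared<TableHandle>();
    handle->file = key;  // pretend this is the slow file open
    std::lock_guard<std::mutex> map_guard(map_mutex_);
    cache_.emplace(key, handle);
    return handle;
  }

 private:
  std::shared_ptr<TableHandle> Lookup(const std::string& key) {
    std::lock_guard<std::mutex> map_guard(map_mutex_);
    auto it = cache_.find(key);
    return it == cache_.end() ? nullptr : it->second;
  }

  std::mutex map_mutex_;  // guards the demo map; RocksDB's Cache::Lookup is thread-safe
  std::unordered_map<std::string, std::shared_ptr<TableHandle>> cache_;
  std::vector<std::mutex> stripe_mutexes_;  // per-stripe loading mutexes
};

Threads that miss on different keys almost always hash to different stripes and load in
parallel; only threads racing to open the same file queue up, and all but the first find
the entry on the re-check instead of opening the file again.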
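
The LockData wrapper in util/mutexlock.h pads each stripe's lock out to its own cache
line, so taking or releasing one stripe does not invalidate the line holding a
neighboring stripe. A hedged equivalent in portable C++ is sketched below; the 64-byte
constant and the PaddedMutex name are assumptions for illustration, whereas the patch
itself relies on RocksDB's ALIGN_AS/CACHE_LINE_SIZE macros and
port::cacheline_aligned_alloc.

#include <cstddef>
#include <mutex>

// Assumed cache-line size; RocksDB derives CACHE_LINE_SIZE per platform.
constexpr std::size_t kCacheLine = 64;

// Each mutex gets (at least) a full cache line to itself, so contention on
// stripe i does not cause false sharing with stripe i + 1 on another core.
struct alignas(kCacheLine) PaddedMutex {
  std::mutex lock;
};

// sizeof is rounded up to the alignment, so an array of PaddedMutex places
// consecutive elements on distinct cache lines.
static_assert(sizeof(PaddedMutex) % kCacheLine == 0, "padding broken");
static_assert(alignof(PaddedMutex) == kCacheLine, "alignment broken");

The patch allocates its aligned array with port::cacheline_aligned_alloc plus placement
new rather than a plain new[], since operator new is only required to honor over-aligned
types from C++17 onward.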