Make adaptivity of LRU cache mutexes configurable (#5054)

Summary:
The patch adds a new config option to LRUCacheOptions that enables
users to choose whether to use an adaptive mutex for the LRU block
cache (on platforms where adaptive mutexes are supported). The default
is true if RocksDB is compiled with -DROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX,
false otherwise.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5054

Differential Revision: D14542749

Pulled By: ltamasi

fbshipit-source-id: 0065715ab6cf91f10444b737fed8c8aee6a8a0d2
main
Levi Tamasi 6 years ago committed by Facebook Github Bot
parent 1721635f76
commit 34f8ac0c99
  1. 21
      cache/lru_cache.cc
  2. 5
      cache/lru_cache.h
  3. 5
      cache/lru_cache_test.cc
  4. 17
      include/rocksdb/cache.h
  5. 15
      port/port_posix.cc
  6. 17
      port/port_posix.h
  7. 3
      port/win/port_win.cc
  8. 4
      port/win/port_win.h

21
cache/lru_cache.cc vendored

@@ -100,14 +100,16 @@ void LRUHandleTable::Resize() {
} }
LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit, LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
double high_pri_pool_ratio) double high_pri_pool_ratio,
bool use_adaptive_mutex)
: capacity_(0), : capacity_(0),
high_pri_pool_usage_(0), high_pri_pool_usage_(0),
strict_capacity_limit_(strict_capacity_limit), strict_capacity_limit_(strict_capacity_limit),
high_pri_pool_ratio_(high_pri_pool_ratio), high_pri_pool_ratio_(high_pri_pool_ratio),
high_pri_pool_capacity_(0), high_pri_pool_capacity_(0),
usage_(0), usage_(0),
lru_usage_(0) { lru_usage_(0),
mutex_(use_adaptive_mutex) {
// Make empty circular linked list // Make empty circular linked list
lru_.next = &lru_; lru_.next = &lru_;
lru_.prev = &lru_; lru_.prev = &lru_;
@@ -462,7 +464,8 @@ std::string LRUCacheShard::GetPrintableOptions() const {
LRUCache::LRUCache(size_t capacity, int num_shard_bits, LRUCache::LRUCache(size_t capacity, int num_shard_bits,
bool strict_capacity_limit, double high_pri_pool_ratio, bool strict_capacity_limit, double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> allocator) std::shared_ptr<MemoryAllocator> allocator,
bool use_adaptive_mutex)
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit, : ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
std::move(allocator)) { std::move(allocator)) {
num_shards_ = 1 << num_shard_bits; num_shards_ = 1 << num_shard_bits;
@@ -471,7 +474,8 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits,
size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_; size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
for (int i = 0; i < num_shards_; i++) { for (int i = 0; i < num_shards_; i++) {
new (&shards_[i]) new (&shards_[i])
LRUCacheShard(per_shard, strict_capacity_limit, high_pri_pool_ratio); LRUCacheShard(per_shard, strict_capacity_limit, high_pri_pool_ratio,
use_adaptive_mutex);
} }
} }
@@ -540,13 +544,15 @@ std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits, return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
cache_opts.strict_capacity_limit, cache_opts.strict_capacity_limit,
cache_opts.high_pri_pool_ratio, cache_opts.high_pri_pool_ratio,
cache_opts.memory_allocator); cache_opts.memory_allocator,
cache_opts.use_adaptive_mutex);
} }
std::shared_ptr<Cache> NewLRUCache( std::shared_ptr<Cache> NewLRUCache(
size_t capacity, int num_shard_bits, bool strict_capacity_limit, size_t capacity, int num_shard_bits, bool strict_capacity_limit,
double high_pri_pool_ratio, double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator) { std::shared_ptr<MemoryAllocator> memory_allocator,
bool use_adaptive_mutex) {
if (num_shard_bits >= 20) { if (num_shard_bits >= 20) {
return nullptr; // the cache cannot be sharded into too many fine pieces return nullptr; // the cache cannot be sharded into too many fine pieces
} }
@@ -559,7 +565,8 @@ std::shared_ptr<Cache> NewLRUCache(
} }
return std::make_shared<LRUCache>(capacity, num_shard_bits, return std::make_shared<LRUCache>(capacity, num_shard_bits,
strict_capacity_limit, high_pri_pool_ratio, strict_capacity_limit, high_pri_pool_ratio,
std::move(memory_allocator)); std::move(memory_allocator),
use_adaptive_mutex);
} }
} // namespace rocksdb } // namespace rocksdb

5
cache/lru_cache.h vendored

@@ -168,7 +168,7 @@ class LRUHandleTable {
class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard : public CacheShard { class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard : public CacheShard {
public: public:
LRUCacheShard(size_t capacity, bool strict_capacity_limit, LRUCacheShard(size_t capacity, bool strict_capacity_limit,
double high_pri_pool_ratio); double high_pri_pool_ratio, bool use_adaptive_mutex);
virtual ~LRUCacheShard(); virtual ~LRUCacheShard();
// Separate from constructor so caller can easily make an array of LRUCache // Separate from constructor so caller can easily make an array of LRUCache
@@ -288,7 +288,8 @@ class LRUCache : public ShardedCache {
public: public:
LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit, LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
double high_pri_pool_ratio, double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr); std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
bool use_adaptive_mutex = kDefaultToAdaptiveMutex);
virtual ~LRUCache(); virtual ~LRUCache();
virtual const char* Name() const override { return "LRUCache"; } virtual const char* Name() const override { return "LRUCache"; }
virtual CacheShard* GetShard(int shard) override; virtual CacheShard* GetShard(int shard) override;

@@ -25,12 +25,13 @@ class LRUCacheTest : public testing::Test {
} }
} }
void NewCache(size_t capacity, double high_pri_pool_ratio = 0.0) { void NewCache(size_t capacity, double high_pri_pool_ratio = 0.0,
bool use_adaptive_mutex = kDefaultToAdaptiveMutex) {
DeleteCache(); DeleteCache();
cache_ = reinterpret_cast<LRUCacheShard*>( cache_ = reinterpret_cast<LRUCacheShard*>(
port::cacheline_aligned_alloc(sizeof(LRUCacheShard))); port::cacheline_aligned_alloc(sizeof(LRUCacheShard)));
new (cache_) LRUCacheShard(capacity, false /*strict_capacity_limit*/, new (cache_) LRUCacheShard(capacity, false /*strict_capacity_limit*/,
high_pri_pool_ratio); high_pri_pool_ratio, use_adaptive_mutex);
} }
void Insert(const std::string& key, void Insert(const std::string& key,

@@ -34,6 +34,8 @@ namespace rocksdb {
class Cache; class Cache;
extern const bool kDefaultToAdaptiveMutex;
struct LRUCacheOptions { struct LRUCacheOptions {
// Capacity of the cache. // Capacity of the cache.
size_t capacity = 0; size_t capacity = 0;
@@ -68,15 +70,23 @@ struct LRUCacheOptions {
// internally (currently only XPRESS). // internally (currently only XPRESS).
std::shared_ptr<MemoryAllocator> memory_allocator; std::shared_ptr<MemoryAllocator> memory_allocator;
// Whether to use adaptive mutexes for cache shards. Note that adaptive
// mutexes need to be supported by the platform in order for this to have any
// effect. The default value is true if RocksDB is compiled with
// -DROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX, false otherwise.
bool use_adaptive_mutex = kDefaultToAdaptiveMutex;
LRUCacheOptions() {} LRUCacheOptions() {}
LRUCacheOptions(size_t _capacity, int _num_shard_bits, LRUCacheOptions(size_t _capacity, int _num_shard_bits,
bool _strict_capacity_limit, double _high_pri_pool_ratio, bool _strict_capacity_limit, double _high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> _memory_allocator = nullptr) std::shared_ptr<MemoryAllocator> _memory_allocator = nullptr,
bool _use_adaptive_mutex = kDefaultToAdaptiveMutex)
: capacity(_capacity), : capacity(_capacity),
num_shard_bits(_num_shard_bits), num_shard_bits(_num_shard_bits),
strict_capacity_limit(_strict_capacity_limit), strict_capacity_limit(_strict_capacity_limit),
high_pri_pool_ratio(_high_pri_pool_ratio), high_pri_pool_ratio(_high_pri_pool_ratio),
memory_allocator(std::move(_memory_allocator)) {} memory_allocator(std::move(_memory_allocator)),
use_adaptive_mutex(_use_adaptive_mutex) {}
}; };
// Create a new cache with a fixed size capacity. The cache is sharded // Create a new cache with a fixed size capacity. The cache is sharded
@@ -90,7 +100,8 @@ struct LRUCacheOptions {
extern std::shared_ptr<Cache> NewLRUCache( extern std::shared_ptr<Cache> NewLRUCache(
size_t capacity, int num_shard_bits = -1, size_t capacity, int num_shard_bits = -1,
bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.0, bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.0,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr); std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
bool use_adaptive_mutex = kDefaultToAdaptiveMutex);
extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts); extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts);

@@ -25,6 +25,21 @@
#include "util/logging.h" #include "util/logging.h"
namespace rocksdb { namespace rocksdb {
// We want to give users opportunity to default all the mutexes to adaptive if
// not specified otherwise. This enables a quick way to conduct various
// performance related experiments.
//
// NB! Support for adaptive mutexes is turned on by defining
// ROCKSDB_PTHREAD_ADAPTIVE_MUTEX during the compilation. If you use RocksDB
// build environment then this happens automatically; otherwise it's up to the
// consumer to define the identifier.
#ifdef ROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX
extern const bool kDefaultToAdaptiveMutex = true;
#else
extern const bool kDefaultToAdaptiveMutex = false;
#endif
namespace port { namespace port {
static int PthreadCall(const char* label, int result) { static int PthreadCall(const char* label, int result) {

@@ -82,6 +82,9 @@
#endif #endif
namespace rocksdb { namespace rocksdb {
extern const bool kDefaultToAdaptiveMutex;
namespace port { namespace port {
// For use at db/file_indexer.h kLevelMaxIndex // For use at db/file_indexer.h kLevelMaxIndex
@@ -100,19 +103,7 @@ class CondVar;
class Mutex { class Mutex {
public: public:
// We want to give users opportunity to default all the mutexes to adaptive if explicit Mutex(bool adaptive = kDefaultToAdaptiveMutex);
// not specified otherwise. This enables a quick way to conduct various
// performance related experiments.
//
// NB! Support for adaptive mutexes is turned on by defining
// ROCKSDB_PTHREAD_ADAPTIVE_MUTEX during the compilation. If you use RocksDB
// build environment then this happens automatically; otherwise it's up to the
// consumer to define the identifier.
#ifdef ROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX
explicit Mutex(bool adaptive = true);
#else
explicit Mutex(bool adaptive = false);
#endif
~Mutex(); ~Mutex();
void Lock(); void Lock();

@@ -36,6 +36,9 @@
#include "util/logging.h" #include "util/logging.h"
namespace rocksdb { namespace rocksdb {
extern const bool kDefaultToAdaptiveMutex = false;
namespace port { namespace port {
#ifdef ROCKSDB_WINDOWS_UTF8_FILENAMES #ifdef ROCKSDB_WINDOWS_UTF8_FILENAMES

@@ -78,6 +78,8 @@ namespace rocksdb {
#define PREFETCH(addr, rw, locality) #define PREFETCH(addr, rw, locality)
extern const bool kDefaultToAdaptiveMutex;
namespace port { namespace port {
// VS < 2015 // VS < 2015
@@ -127,7 +129,7 @@ class CondVar;
class Mutex { class Mutex {
public: public:
/* implicit */ Mutex(bool adaptive = false) /* implicit */ Mutex(bool adaptive = kDefaultToAdaptiveMutex)
#ifndef NDEBUG #ifndef NDEBUG
: locked_(false) : locked_(false)
#endif #endif

Loading…
Cancel
Save