|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
//
|
|
|
|
// A Cache is an interface that maps keys to values. It has internal
|
|
|
|
// synchronization and may be safely accessed concurrently from
|
|
|
|
// multiple threads. It may automatically evict entries to make room
|
|
|
|
// for new entries. Values have a specified charge against the cache
|
|
|
|
// capacity. For example, a cache where the values are variable
|
|
|
|
// length strings, may use the length of the string as the charge for
|
|
|
|
// the string.
|
|
|
|
//
|
|
|
|
// A builtin cache implementation with a least-recently-used eviction
|
|
|
|
// policy is provided. Clients may use their own implementations if
|
|
|
|
// they want something more sophisticated (like scan-resistance, a
|
|
|
|
// custom eviction policy, variable cache sizing, etc.)
|
|
|
|
|
|
|
|
#ifndef STORAGE_ROCKSDB_INCLUDE_CACHE_H_
|
|
|
|
#define STORAGE_ROCKSDB_INCLUDE_CACHE_H_
|
|
|
|
|
|
|
|
#include <memory>
|
|
|
|
#include <stdint.h>
|
|
|
|
#include "rocksdb/slice.h"
|
|
|
|
|
|
|
|
namespace rocksdb {
|
|
|
|
|
|
|
|
using std::shared_ptr;
|
|
|
|
|
|
|
|
class Cache;
|
|
|
|
|
|
|
|
// Create a new cache with a fixed size capacity. The cache is sharded
// to 2^numShardBits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard.
//
// The overload without the numShardBits parameter uses a default of 4
// shard bits (i.e. 16 shards).
extern shared_ptr<Cache> NewLRUCache(size_t capacity);
extern shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits);
|
|
|
|
|
|
|
|
class Cache {
|
|
|
|
public:
|
|
|
|
Cache() { }
|
|
|
|
|
|
|
|
// Destroys all existing entries by calling the "deleter"
|
|
|
|
// function that was passed to the constructor.
|
|
|
|
virtual ~Cache();
|
|
|
|
|
|
|
|
// Opaque handle to an entry stored in the cache.
|
|
|
|
struct Handle { };
|
|
|
|
|
|
|
|
// Insert a mapping from key->value into the cache and assign it
|
|
|
|
// the specified charge against the total cache capacity.
|
|
|
|
//
|
|
|
|
// Returns a handle that corresponds to the mapping. The caller
|
|
|
|
// must call this->Release(handle) when the returned mapping is no
|
|
|
|
// longer needed.
|
|
|
|
//
|
|
|
|
// When the inserted entry is no longer needed, the key and
|
|
|
|
// value will be passed to "deleter".
|
|
|
|
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
|
|
|
|
void (*deleter)(const Slice& key, void* value)) = 0;
|
|
|
|
|
|
|
|
// If the cache has no mapping for "key", returns nullptr.
|
|
|
|
//
|
|
|
|
// Else return a handle that corresponds to the mapping. The caller
|
|
|
|
// must call this->Release(handle) when the returned mapping is no
|
|
|
|
// longer needed.
|
|
|
|
virtual Handle* Lookup(const Slice& key) = 0;
|
|
|
|
|
|
|
|
// Release a mapping returned by a previous Lookup().
|
|
|
|
// REQUIRES: handle must not have been released yet.
|
|
|
|
// REQUIRES: handle must have been returned by a method on *this.
|
|
|
|
virtual void Release(Handle* handle) = 0;
|
|
|
|
|
|
|
|
// Return the value encapsulated in a handle returned by a
|
|
|
|
// successful Lookup().
|
|
|
|
// REQUIRES: handle must not have been released yet.
|
|
|
|
// REQUIRES: handle must have been returned by a method on *this.
|
|
|
|
virtual void* Value(Handle* handle) = 0;
|
|
|
|
|
|
|
|
// If the cache contains entry for key, erase it. Note that the
|
|
|
|
// underlying entry will be kept around until all existing handles
|
|
|
|
// to it have been released.
|
|
|
|
virtual void Erase(const Slice& key) = 0;
|
|
|
|
|
|
|
|
// Return a new numeric id. May be used by multiple clients who are
|
|
|
|
// sharing the same cache to partition the key space. Typically the
|
|
|
|
// client will allocate a new id at startup and prepend the id to
|
|
|
|
// its cache keys.
|
|
|
|
virtual uint64_t NewId() = 0;
|
|
|
|
|
|
|
|
// sets the maximum configured capacity of the cache. When the new
|
|
|
|
// capacity is less than the old capacity and the existing usage is
|
|
|
|
// greater than new capacity, the implementation will do its best job to
|
|
|
|
// purge the released entries from the cache in order to lower the usage
|
|
|
|
virtual void SetCapacity(size_t capacity) = 0;
|
|
|
|
|
|
|
|
// returns the maximum configured capacity of the cache
|
|
|
|
virtual size_t GetCapacity() const = 0;
|
|
|
|
|
|
|
|
// returns the memory size for the entries residing in the cache.
|
|
|
|
virtual size_t GetUsage() const = 0;
|
|
|
|
|
|
|
|
// returns the memory size for a specific entry in the cache.
|
|
|
|
virtual size_t GetUsage(Handle* handle) const = 0;
|
|
|
|
|
|
|
|
// returns the memory size for the entries in use by the system
|
|
|
|
virtual size_t GetPinnedUsage() const = 0;
|
|
|
|
|
Add a call DisownData() to Cache, which should speed up shutdown
Summary: On a shutdown, freeing memory takes a long time. If we're shutting down, we don't really care about memory leaks. I added a call to Cache that will avoid freeing all objects in cache.
Test Plan:
I created a script to test the speedup and demonstrate how to use the call: https://phabricator.fb.com/P3864368
Clean shutdown took 7.2 seconds, while fast and dirty one took 6.3 seconds. Unfortunately, the speedup is not that big, but should be bigger with bigger block_cache. I have set up the capacity to 80GB, but the script filled up only ~7GB.
Reviewers: dhruba, haobo, MarkCallaghan, xjin
Reviewed By: dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15069
11 years ago
|
|
|
// Call this on shutdown if you want to speed it up. Cache will disown
|
|
|
|
// any underlying data and will not free it on delete. This call will leak
|
|
|
|
// memory - call this only if you're shutting down the process.
|
|
|
|
// Any attempts of using cache after this call will fail terribly.
|
|
|
|
// Always delete the DB object before calling this method!
|
|
|
|
virtual void DisownData() {
|
|
|
|
// default implementation is noop
|
|
|
|
};
|
|
|
|
|
|
|
|
// Apply callback to all entries in the cache
|
|
|
|
// If thread_safe is true, it will also lock the accesses. Otherwise, it will
|
|
|
|
// access the cache without the lock held
|
|
|
|
virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
|
|
|
|
bool thread_safe) = 0;
|
|
|
|
|
|
|
|
private:
|
|
|
|
void LRU_Remove(Handle* e);
|
|
|
|
void LRU_Append(Handle* e);
|
|
|
|
void Unref(Handle* e);
|
|
|
|
|
|
|
|
// No copying allowed
|
|
|
|
Cache(const Cache&);
|
|
|
|
void operator=(const Cache&);
|
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace rocksdb
|
|
|
|
|
|
|
|
#endif  // STORAGE_ROCKSDB_INCLUDE_CACHE_H_
|