// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include <vector>

#include "cache/cache_entry_roles.h"
#include "cache/cache_key.h"
#include "rocksdb/cache.h"
#include "rocksdb/slice.h"
#include "rocksdb/status.h"
#include "util/coding.h"

namespace ROCKSDB_NAMESPACE {

// CacheReservationManager is an interface for reserving cache space for
// memory used by its clients.
class CacheReservationManager {
 public:
  // CacheReservationHandle manages the lifetime of a cache reservation
  // for an incremental amount of memory used (i.e., incremental_memory_used).
  class CacheReservationHandle {
   public:
    virtual ~CacheReservationHandle() {}
  };

  virtual ~CacheReservationManager() {}

  virtual Status UpdateCacheReservation(std::size_t new_memory_used) = 0;

  // TODO(hx235): replace the usage of
  // `UpdateCacheReservation(memory_used_delta, increase)` with
  // `UpdateCacheReservation(new_memory_used)` so that we only have one
  // `UpdateCacheReservation` function.
  virtual Status UpdateCacheReservation(std::size_t memory_used_delta,
                                        bool increase) = 0;

  virtual Status MakeCacheReservation(
      std::size_t incremental_memory_used,
      std::unique_ptr<CacheReservationManager::CacheReservationHandle>
          *handle) = 0;

  virtual std::size_t GetTotalReservedCacheSize() = 0;

  virtual std::size_t GetTotalMemoryUsed() = 0;
};
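
// Two implementations of this interface follow: CacheReservationManagerImpl
// (single-threaded) and ConcurrentCacheReservationManager (a thread-safe
// wrapper around any CacheReservationManager).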

// CacheReservationManagerImpl implements interface CacheReservationManager
// for reserving cache space for the memory used by inserting/releasing dummy
// entries in the cache.
//
// This class is NOT thread-safe, except that GetTotalReservedCacheSize()
// can be called without external synchronization.
template <CacheEntryRole R>
class CacheReservationManagerImpl
    : public CacheReservationManager,
      public std::enable_shared_from_this<CacheReservationManagerImpl<R>> {
 public:
  class CacheReservationHandle
      : public CacheReservationManager::CacheReservationHandle {
   public:
    CacheReservationHandle(
        std::size_t incremental_memory_used,
        std::shared_ptr<CacheReservationManagerImpl> cache_res_mgr);
    ~CacheReservationHandle() override;

   private:
    std::size_t incremental_memory_used_;
    std::shared_ptr<CacheReservationManagerImpl> cache_res_mgr_;
  };

  // Construct a CacheReservationManagerImpl.
  // @param cache The cache where dummy entries are inserted and released for
  //              reserving cache space.
  // @param delayed_decrease If set to true, dummy entries won't be released
  //                         immediately when memory usage decreases.
  //                         Instead, they will be released when memory usage
  //                         drops below 3/4 of what has been reserved so far.
  //                         This saves some future dummy entry insertions
  //                         when memory usage increases are likely to happen
  //                         in the near future.
  //
  // REQUIRES: cache is not nullptr
  explicit CacheReservationManagerImpl(std::shared_ptr<Cache> cache,
                                       bool delayed_decrease = false);
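
  // Example (an illustrative sketch, not part of the API; assumes an LRU
  // cache created with NewLRUCache() from rocksdb/cache.h and the kMisc
  // cache entry role):
  //
  //   std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024);
  //   auto cache_res_mgr = std::make_shared<
  //       CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
  //       cache, true /* delayed_decrease */);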

  // no copy constructor, copy assignment, move constructor, move assignment
  CacheReservationManagerImpl(const CacheReservationManagerImpl &) = delete;
  CacheReservationManagerImpl &operator=(const CacheReservationManagerImpl &) =
      delete;
  CacheReservationManagerImpl(CacheReservationManagerImpl &&) = delete;
  CacheReservationManagerImpl &operator=(CacheReservationManagerImpl &&) =
      delete;

  ~CacheReservationManagerImpl() override;

  // One of the two ways of reserving/releasing cache space,
  // see MakeCacheReservation() for the other.
  //
  // Use ONLY one of these two ways to prevent unexpected behavior.
  //
  // Inserts and releases dummy entries in the cache so that the total size
  // of dummy entries equals the least multiple of kSizeDummyEntry greater
  // than or equal to new_memory_used.
  //
  // Inserts dummy entries if new_memory_used > cache_allocated_size_;
  //
  // Releases dummy entries if new_memory_used < cache_allocated_size_
  // (and new_memory_used < cache_allocated_size_ * 3/4
  // when delayed_decrease is set to true);
  //
  // Keeps dummy entries the same if (1) new_memory_used ==
  // cache_allocated_size_ or (2) new_memory_used is in the interval
  // [cache_allocated_size_ * 3/4, cache_allocated_size_) when
  // delayed_decrease is set to true.
  //
  // @param new_memory_used The number of bytes used by new memory.
  //        The most recent new_memory_used passed in will be returned
  //        by GetTotalMemoryUsed() even when the call returns a non-ok
  //        status.
  //
  // Since the class is NOT thread-safe, external synchronization on the
  // order of calling UpdateCacheReservation() is needed if you want
  // GetTotalMemoryUsed() to indeed return the latest memory used.
  //
  // @return On inserting dummy entries, it returns Status::OK() if all dummy
  //         entry insertions succeed.
  //         Otherwise, it returns the first non-ok status;
  //         On releasing dummy entries, it always returns Status::OK().
  //         On keeping dummy entries the same, it always returns
  //         Status::OK().
  Status UpdateCacheReservation(std::size_t new_memory_used) override;
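
  // For intuition, a hypothetical walkthrough of the rounding behavior
  // (numbers chosen for illustration; kSizeDummyEntry is 256KB below):
  //
  //   cache_res_mgr->UpdateCacheReservation(300 << 10);  // 300KB used
  //   // Reserved size is the least multiple of 256KB >= 300KB, i.e. two
  //   // dummy entries:
  //   //   GetTotalReservedCacheSize() == 512KB
  //   //   GetTotalMemoryUsed() == 300KB
  //
  //   cache_res_mgr->UpdateCacheReservation(100 << 10);  // drops to 100KB
  //   // One dummy entry is released (100KB < 3/4 * 512KB = 384KB, so this
  //   // happens even with delayed_decrease set to true):
  //   //   GetTotalReservedCacheSize() == 256KB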

  Status UpdateCacheReservation(std::size_t /* memory_used_delta */,
                                bool /* increase */) override {
    return Status::NotSupported();
  }

  // One of the two ways of reserving cache space; releasing is done through
  // destruction of the CacheReservationHandle.
  // See UpdateCacheReservation() for the other way.
  //
  // Use ONLY one of these two ways to prevent unexpected behavior.
  //
  // Inserts dummy entries in the cache for the incremental memory usage
  // so that the total size of dummy entries equals the least multiple of
  // kSizeDummyEntry greater than or equal to the total memory used.
  //
  // A CacheReservationHandle is returned as an output parameter.
  // The reserved dummy entries are automatically released on the destruction
  // of this handle, which achieves better RAII per cache reservation.
  //
  // WARNING: Deallocate all the handles of the CacheReservationManager object
  //          before deallocating the object to prevent unexpected behavior.
  //
  // @param incremental_memory_used The number of bytes increased in memory
  //        usage.
  //
  //        Calling GetTotalMemoryUsed() afterward will return the total
  //        memory increased by this number, even when calling
  //        MakeCacheReservation() returns a non-ok status.
  //
  // Since the class is NOT thread-safe, external synchronization in
  // calling MakeCacheReservation() is needed if you want
  // GetTotalMemoryUsed() to indeed return the latest memory used.
  //
  // @param handle A pointer to std::unique_ptr<CacheReservationHandle> that
  //        manages the lifetime of the cache reservation represented by the
  //        handle.
  //
  // @return It returns Status::OK() if all dummy entry insertions succeed.
  //         Otherwise, it returns the first non-ok status.
  //
  // REQUIRES: handle != nullptr
  Status MakeCacheReservation(
      std::size_t incremental_memory_used,
      std::unique_ptr<CacheReservationManager::CacheReservationHandle> *handle)
      override;
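
  // Example (an illustrative sketch continuing the construction example
  // above; `filter_memory` is a hypothetical number of bytes being charged):
  //
  //   std::unique_ptr<CacheReservationManager::CacheReservationHandle> handle;
  //   Status s = cache_res_mgr->MakeCacheReservation(filter_memory, &handle);
  //   if (s.ok()) {
  //     // ... use the memory; the reservation is held by `handle` ...
  //   }
  //   handle.reset();  // or let it go out of scope: the dummy entries
  //                    // backing this reservation are released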

  // Returns the size of the cache (which is a multiple of kSizeDummyEntry)
  // successfully reserved by calling UpdateCacheReservation().
  //
  // When UpdateCacheReservation() returns a non-ok status, calling
  // GetTotalReservedCacheSize() after that might return a slightly smaller
  // number than the actual reserved cache size, because the returned number
  // is always a multiple of kSizeDummyEntry and the cache may become full in
  // the middle of inserting a dummy entry.
  std::size_t GetTotalReservedCacheSize() override;

  // Returns the latest total memory used indicated by the most recent call of
  // UpdateCacheReservation(std::size_t new_memory_used).
  std::size_t GetTotalMemoryUsed() override;

  static constexpr std::size_t GetDummyEntrySize() { return kSizeDummyEntry; }

  // For testing only - it is to help ensure the NoopDeleterForRole<R>
  // accessed from CacheReservationManagerImpl and the one accessed from the
  // test are from the same translation unit
  static Cache::DeleterFn TEST_GetNoopDeleterForRole();

 private:
  static constexpr std::size_t kSizeDummyEntry = 256 * 1024;

  Slice GetNextCacheKey();

  Status ReleaseCacheReservation(std::size_t incremental_memory_used);
  Status IncreaseCacheReservation(std::size_t new_mem_used);
  Status DecreaseCacheReservation(std::size_t new_mem_used);

  std::shared_ptr<Cache> cache_;
  bool delayed_decrease_;
  std::atomic<std::size_t> cache_allocated_size_;
  std::size_t memory_used_;
  std::vector<Cache::Handle *> dummy_handles_;
  CacheKey cache_key_;
};
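
// ConcurrentCacheReservationManager is a thread-safe wrapper that delegates
// to an underlying CacheReservationManager, serializing all operations with
// an internal mutex.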
class ConcurrentCacheReservationManager
    : public CacheReservationManager,
      public std::enable_shared_from_this<ConcurrentCacheReservationManager> {
 public:
  class CacheReservationHandle
      : public CacheReservationManager::CacheReservationHandle {
   public:
    CacheReservationHandle(
        std::shared_ptr<ConcurrentCacheReservationManager> cache_res_mgr,
        std::unique_ptr<CacheReservationManager::CacheReservationHandle>
            cache_res_handle) {
      assert(cache_res_mgr && cache_res_handle);
      cache_res_mgr_ = cache_res_mgr;
      cache_res_handle_ = std::move(cache_res_handle);
    }

    ~CacheReservationHandle() override {
      std::lock_guard<std::mutex> lock(cache_res_mgr_->cache_res_mgr_mu_);
      cache_res_handle_.reset();
    }

   private:
    std::shared_ptr<ConcurrentCacheReservationManager> cache_res_mgr_;
    std::unique_ptr<CacheReservationManager::CacheReservationHandle>
        cache_res_handle_;
  };

  explicit ConcurrentCacheReservationManager(
      std::shared_ptr<CacheReservationManager> cache_res_mgr) {
    cache_res_mgr_ = std::move(cache_res_mgr);
  }

  ConcurrentCacheReservationManager(const ConcurrentCacheReservationManager &) =
      delete;
  ConcurrentCacheReservationManager &operator=(
      const ConcurrentCacheReservationManager &) = delete;
  ConcurrentCacheReservationManager(ConcurrentCacheReservationManager &&) =
      delete;
  ConcurrentCacheReservationManager &operator=(
      ConcurrentCacheReservationManager &&) = delete;

  ~ConcurrentCacheReservationManager() override {}

  inline Status UpdateCacheReservation(std::size_t new_memory_used) override {
    std::lock_guard<std::mutex> lock(cache_res_mgr_mu_);
    return cache_res_mgr_->UpdateCacheReservation(new_memory_used);
  }

  inline Status UpdateCacheReservation(std::size_t memory_used_delta,
                                       bool increase) override {
    std::lock_guard<std::mutex> lock(cache_res_mgr_mu_);
    std::size_t total_mem_used = cache_res_mgr_->GetTotalMemoryUsed();
    Status s;
    if (!increase) {
      assert(total_mem_used >= memory_used_delta);
      s = cache_res_mgr_->UpdateCacheReservation(total_mem_used -
                                                 memory_used_delta);
    } else {
      s = cache_res_mgr_->UpdateCacheReservation(total_mem_used +
                                                 memory_used_delta);
    }
    return s;
  }

  inline Status MakeCacheReservation(
      std::size_t incremental_memory_used,
      std::unique_ptr<CacheReservationManager::CacheReservationHandle> *handle)
      override {
    std::unique_ptr<CacheReservationManager::CacheReservationHandle>
        wrapped_handle;
    Status s;
    {
      std::lock_guard<std::mutex> lock(cache_res_mgr_mu_);
      s = cache_res_mgr_->MakeCacheReservation(incremental_memory_used,
                                               &wrapped_handle);
    }
    (*handle).reset(
        new ConcurrentCacheReservationManager::CacheReservationHandle(
            std::enable_shared_from_this<
                ConcurrentCacheReservationManager>::shared_from_this(),
            std::move(wrapped_handle)));
    return s;
  }

  inline std::size_t GetTotalReservedCacheSize() override {
    return cache_res_mgr_->GetTotalReservedCacheSize();
  }

  inline std::size_t GetTotalMemoryUsed() override {
    std::lock_guard<std::mutex> lock(cache_res_mgr_mu_);
    return cache_res_mgr_->GetTotalMemoryUsed();
  }

 private:
  std::mutex cache_res_mgr_mu_;
  std::shared_ptr<CacheReservationManager> cache_res_mgr_;
};
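
// Example (an illustrative sketch, not part of this header; wraps the
// single-threaded manager so multiple threads can share one reservation
// budget, reusing `cache` from the earlier example):
//
//   auto concurrent_mgr = std::make_shared<ConcurrentCacheReservationManager>(
//       std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
//           cache));
//   // Safe to call from multiple threads:
//   Status s = concurrent_mgr->UpdateCacheReservation(
//       64 << 10 /* memory_used_delta */, true /* increase */);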

}  // namespace ROCKSDB_NAMESPACE