// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/block_based/block_based_table_builder.h"

#include <assert.h>
#include <stdio.h>

#include <atomic>
#include <list>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <unordered_map>
#include <utility>

#include "cache/cache_entry_roles.h"
#include "cache/cache_reservation_manager.h"
#include "db/dbformat.h"
#include "index_builder.h"
#include "logging/logging.h"
#include "memory/memory_allocator.h"
#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/flush_block_policy.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/table.h"
#include "rocksdb/types.h"
#include "table/block_based/block.h"
#include "table/block_based/block_based_filter_block.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/block_builder.h"
#include "table/block_based/block_like_traits.h"
#include "table/block_based/filter_block.h"
#include "table/block_based/filter_policy_internal.h"
#include "table/block_based/full_filter_block.h"
#include "table/block_based/partitioned_filter_block.h"
#include "table/format.h"
#include "table/table_builder.h"
#include "util/coding.h"
#include "util/compression.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/work_queue.h"

namespace ROCKSDB_NAMESPACE {

extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;

// Without an anonymous namespace here, we fail the -Wmissing-prototypes
// warning.
namespace {

// Create a filter block builder based on its type.
FilterBlockBuilder* CreateFilterBlockBuilder(
    const ImmutableCFOptions& /*opt*/, const MutableCFOptions& mopt,
    const FilterBuildingContext& context,
    const bool use_delta_encoding_for_index_values,
    PartitionedIndexBuilder* const p_index_builder) {
  const BlockBasedTableOptions& table_opt = context.table_options;
  assert(table_opt.filter_policy);  // precondition

  FilterBitsBuilder* filter_bits_builder =
      BloomFilterPolicy::GetBuilderFromContext(context);
  if (filter_bits_builder == nullptr) {
    return new BlockBasedFilterBlockBuilder(mopt.prefix_extractor.get(),
                                            table_opt);
  } else {
    if (table_opt.partition_filters) {
      assert(p_index_builder != nullptr);
      // After the filter builder requests a partition cut, it takes until the
      // index builder actually cuts the partition -- at the end of a data
      // block that may hold many keys -- so we use the lower bound as the
      // partition size.
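      // Illustrative example (hypothetical values): with
      // metadata_block_size = 4096 and block_size_deviation = 10, the
      // lower-bound partition size computed below is
      //   (4096 * (100 - 10) + 99) / 100 = 3687 bytes,
      // i.e. metadata_block_size shrunk by the allowed deviation, rounded up.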
      assert(table_opt.block_size_deviation <= 100);
      auto partition_size =
          static_cast<uint32_t>(((table_opt.metadata_block_size *
                                  (100 - table_opt.block_size_deviation)) +
                                 99) /
                                100);
      partition_size = std::max(partition_size, static_cast<uint32_t>(1));
      return new PartitionedFilterBlockBuilder(
          mopt.prefix_extractor.get(), table_opt.whole_key_filtering,
          filter_bits_builder, table_opt.index_block_restart_interval,
          use_delta_encoding_for_index_values, p_index_builder, partition_size);
    } else {
      return new FullFilterBlockBuilder(mopt.prefix_extractor.get(),
                                        table_opt.whole_key_filtering,
                                        filter_bits_builder);
    }
  }
}

bool GoodCompressionRatio(size_t compressed_size, size_t raw_size) {
  // Check whether the compressed output is more than 12.5% smaller than the
  // raw input.
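  // For example (illustrative): with raw_size = 4096, compressed_size must be
  // below 4096 - 4096 / 8 = 3584 bytes for the compressed block to be kept;
  // otherwise the caller (CompressBlock()) falls back to the uncompressed
  // block.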
  return compressed_size < raw_size - (raw_size / 8u);
}

}  // namespace

// format_version is the block format as defined in include/rocksdb/table.h
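// A short contract sketch for the function below: CompressBlock compresses
// `raw` using the algorithm described by `info`; on success it sets `*type`
// to that algorithm and returns the contents of `*compressed_output`. If
// compression is disabled, unsupported, fails, or does not achieve a good
// ratio, it falls back to returning `raw` with `*type = kNoCompression`.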
Slice CompressBlock(const Slice& raw, const CompressionInfo& info,
                    CompressionType* type, uint32_t format_version,
                    bool do_sample, std::string* compressed_output,
                    std::string* sampled_output_fast,
                    std::string* sampled_output_slow) {
  assert(type);
  assert(compressed_output);
  assert(compressed_output->empty());

  // If requested, we sample one in every N blocks with a
  // fast and a slow compression algorithm and report the stats.
  // Users can use these stats to decide whether it is worthwhile
  // to enable compression, and they also get a hint about which
  // compression algorithm will be beneficial.
  if (do_sample && info.SampleForCompression() &&
      Random::GetTLSInstance()->OneIn(
          static_cast<int>(info.SampleForCompression()))) {
    // Sampling with a fast compression algorithm
    if (sampled_output_fast && (LZ4_Supported() || Snappy_Supported())) {
      CompressionType c =
          LZ4_Supported() ? kLZ4Compression : kSnappyCompression;
      CompressionContext context(c);
      CompressionOptions options;
      CompressionInfo info_tmp(options, context,
                               CompressionDict::GetEmptyDict(), c,
                               info.SampleForCompression());

      CompressData(raw, info_tmp, GetCompressFormatForVersion(format_version),
                   sampled_output_fast);
    }

    // Sampling with a slow but high-compression algorithm
    if (sampled_output_slow && (ZSTD_Supported() || Zlib_Supported())) {
      CompressionType c = ZSTD_Supported() ? kZSTD : kZlibCompression;
      CompressionContext context(c);
      CompressionOptions options;
      CompressionInfo info_tmp(options, context,
                               CompressionDict::GetEmptyDict(), c,
                               info.SampleForCompression());

      CompressData(raw, info_tmp, GetCompressFormatForVersion(format_version),
                   sampled_output_slow);
    }
  }

  if (info.type() == kNoCompression) {
    *type = kNoCompression;
    return raw;
  }

  // Actually compress the data; if the compression method is not supported,
  // or the compression fails etc., just fall back to uncompressed
  if (!CompressData(raw, info, GetCompressFormatForVersion(format_version),
                    compressed_output)) {
    *type = kNoCompression;
    return raw;
  }

  // Check the compression ratio; if it's not good enough, just fall back to
  // uncompressed
  if (!GoodCompressionRatio(compressed_output->size(), raw.size())) {
    *type = kNoCompression;
    return raw;
  }

  *type = info.type();
  return *compressed_output;
}

// kBlockBasedTableMagicNumber was picked by running
//    echo rocksdb.table.block_based | sha1sum
// and taking the leading 64 bits.
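// For illustration, the derivation looks like (the leading 16 hex digits of
// the digest give the 64-bit constant below):
//    $ echo rocksdb.table.block_based | sha1sum
//    88e241b785f4cff7...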
// Please note that kBlockBasedTableMagicNumber may also be accessed by other
// .cc files, so it is declared extern in the header; but to get the space
// allocated, it must be defined non-extern in exactly one place, which is
// here.
const uint64_t kBlockBasedTableMagicNumber = 0x88e241b785f4cff7ull;
// We also support reading and writing legacy block based table format (for
// backwards compatibility)
const uint64_t kLegacyBlockBasedTableMagicNumber = 0xdb4775248b80fb57ull;

// A collector that collects properties of interest to the block-based table.
// For now this class looks heavy-weight since we only write one additional
// property.
// But in the foreseeable future, we will add more and more properties that
// are specific to the block-based table.
class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector
    : public IntTblPropCollector {
 public:
  explicit BlockBasedTablePropertiesCollector(
      BlockBasedTableOptions::IndexType index_type, bool whole_key_filtering,
      bool prefix_filtering)
      : index_type_(index_type),
        whole_key_filtering_(whole_key_filtering),
        prefix_filtering_(prefix_filtering) {}

  Status InternalAdd(const Slice& /*key*/, const Slice& /*value*/,
                     uint64_t /*file_size*/) override {
    // Intentionally left blank. We have no interest in collecting stats for
    // individual key/value pairs.
    return Status::OK();
  }

  virtual void BlockAdd(uint64_t /* block_raw_bytes */,
                        uint64_t /* block_compressed_bytes_fast */,
                        uint64_t /* block_compressed_bytes_slow */) override {
    // Intentionally left blank. We have no interest in collecting stats for
    // blocks.
    return;
  }

  Status Finish(UserCollectedProperties* properties) override {
    std::string val;
    PutFixed32(&val, static_cast<uint32_t>(index_type_));
    properties->insert({BlockBasedTablePropertyNames::kIndexType, val});
    properties->insert({BlockBasedTablePropertyNames::kWholeKeyFiltering,
                        whole_key_filtering_ ? kPropTrue : kPropFalse});
    properties->insert({BlockBasedTablePropertyNames::kPrefixFiltering,
                        prefix_filtering_ ? kPropTrue : kPropFalse});
    return Status::OK();
  }

  // The name of the properties collector can be used for debugging purposes.
  const char* Name() const override {
    return "BlockBasedTablePropertiesCollector";
  }

  UserCollectedProperties GetReadableProperties() const override {
    // Intentionally left blank.
    return UserCollectedProperties();
  }

 private:
  BlockBasedTableOptions::IndexType index_type_;
  bool whole_key_filtering_;
  bool prefix_filtering_;
};

struct BlockBasedTableBuilder::Rep {
  const ImmutableOptions ioptions;
  const MutableCFOptions moptions;
  const BlockBasedTableOptions table_options;
  const InternalKeyComparator& internal_comparator;
  WritableFileWriter* file;
  std::atomic<uint64_t> offset;
  size_t alignment;
  BlockBuilder data_block;
  // Buffers uncompressed data blocks to replay later. Needed when
  // compression dictionary is enabled so we can finalize the dictionary before
  // compressing any data blocks.
  std::vector<std::string> data_block_buffers;
  BlockBuilder range_del_block;

  InternalKeySliceTransform internal_prefix_transform;
  std::unique_ptr<IndexBuilder> index_builder;
  PartitionedIndexBuilder* p_index_builder_ = nullptr;

  std::string last_key;
  const Slice* first_key_in_next_block = nullptr;
  CompressionType compression_type;
  uint64_t sample_for_compression;
  std::atomic<uint64_t> compressible_input_data_bytes;
  std::atomic<uint64_t> uncompressible_input_data_bytes;
  std::atomic<uint64_t> sampled_input_data_bytes;
  std::atomic<uint64_t> sampled_output_slow_data_bytes;
  std::atomic<uint64_t> sampled_output_fast_data_bytes;
  CompressionOptions compression_opts;
  std::unique_ptr<CompressionDict> compression_dict;
  std::vector<std::unique_ptr<CompressionContext>> compression_ctxs;
  std::vector<std::unique_ptr<UncompressionContext>> verify_ctxs;
  std::unique_ptr<UncompressionDict> verify_dict;

  size_t data_begin_offset = 0;

  TableProperties props;

  // States of the builder.
  //
  // - `kBuffered`: This is the initial state where zero or more data blocks are
  //   accumulated uncompressed in-memory. From this state, call
  //   `EnterUnbuffered()` to finalize the compression dictionary if enabled,
  //   compress/write out any buffered blocks, and proceed to the `kUnbuffered`
  //   state.
  //
  // - `kUnbuffered`: This is the state when compression dictionary is finalized
  //   either because it wasn't enabled in the first place or it's been created
  //   from sampling previously buffered data. In this state, blocks are simply
  //   compressed/written out as they fill up. From this state, call `Finish()`
  //   to complete the file (write meta-blocks, etc.), or `Abandon()` to delete
  //   the partially created file.
  //
  // - `kClosed`: This indicates either `Finish()` or `Abandon()` has been
  //   called, so the table builder is no longer usable. We must be in this
  //   state by the time the destructor runs.
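  //
  // Typical lifecycle (sketch):
  //   kBuffered --EnterUnbuffered()--> kUnbuffered --Finish()/Abandon()-->
  //   kClosed
  // When dictionary compression is not enabled (max_dict_bytes == 0), the
  // builder is constructed directly in the `kUnbuffered` state (see the
  // `state` initializer in the constructor below).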
  enum class State {
    kBuffered,
    kUnbuffered,
    kClosed,
  };
  State state;
  // `kBuffered` state is allowed only as long as the buffering of uncompressed
  // data blocks (see `data_block_buffers`) does not exceed `buffer_limit`.
  uint64_t buffer_limit;
  std::unique_ptr<CacheReservationManager>
      compression_dict_buffer_cache_res_mgr;
  const bool use_delta_encoding_for_index_values;
  std::unique_ptr<FilterBlockBuilder> filter_builder;
  char cache_key_prefix[BlockBasedTable::kMaxCacheKeyPrefixSize];
  size_t cache_key_prefix_size;
  char compressed_cache_key_prefix[BlockBasedTable::kMaxCacheKeyPrefixSize];
  size_t compressed_cache_key_prefix_size;
  const TableFileCreationReason reason;

  BlockHandle pending_handle;  // Handle to add to index block

  std::string compressed_output;
  std::unique_ptr<FlushBlockPolicy> flush_block_policy;

  std::vector<std::unique_ptr<IntTblPropCollector>> table_properties_collectors;

  std::unique_ptr<ParallelCompressionRep> pc_rep;

  uint64_t get_offset() { return offset.load(std::memory_order_relaxed); }
  void set_offset(uint64_t o) { offset.store(o, std::memory_order_relaxed); }

  bool IsParallelCompressionEnabled() const {
    return compression_opts.parallel_threads > 1;
  }

  Status GetStatus() {
    // We need to make modifications of status visible when status_ok is set
    // to false, and this is ensured by status_mutex, so no special memory
    // order for status_ok is required.
    if (status_ok.load(std::memory_order_relaxed)) {
      return Status::OK();
    } else {
      return CopyStatus();
    }
  }

  Status CopyStatus() {
    std::lock_guard<std::mutex> lock(status_mutex);
    return status;
  }

  IOStatus GetIOStatus() {
    // We need to make modifications of io_status visible when status_ok is set
    // to false, and this is ensured by io_status_mutex, so no special memory
    // order for io_status_ok is required.
    if (io_status_ok.load(std::memory_order_relaxed)) {
      return IOStatus::OK();
    } else {
      return CopyIOStatus();
    }
  }

  IOStatus CopyIOStatus() {
    std::lock_guard<std::mutex> lock(io_status_mutex);
    return io_status;
  }

  // Never erase an existing status that is not OK.
  void SetStatus(Status s) {
    if (!s.ok() && status_ok.load(std::memory_order_relaxed)) {
      // Locking is overkill for the non-parallel
      // (compression_opts.parallel_threads == 1) case, but since it is
      // unlikely that s is not OK, we accept this cost for simplicity.
      std::lock_guard<std::mutex> lock(status_mutex);
      status = s;
      status_ok.store(false, std::memory_order_relaxed);
    }
  }

  // Never erase an existing I/O status that is not OK.
  void SetIOStatus(IOStatus ios) {
    if (!ios.ok() && io_status_ok.load(std::memory_order_relaxed)) {
      // Locking is overkill for the non-parallel
      // (compression_opts.parallel_threads == 1) case, but since it is
      // unlikely that ios is not OK, we accept this cost for simplicity.
      std::lock_guard<std::mutex> lock(io_status_mutex);
      io_status = ios;
      io_status_ok.store(false, std::memory_order_relaxed);
    }
  }

  Rep(const BlockBasedTableOptions& table_opt, const TableBuilderOptions& tbo,
      WritableFileWriter* f)
      : ioptions(tbo.ioptions),
        moptions(tbo.moptions),
        table_options(table_opt),
        internal_comparator(tbo.internal_comparator),
        file(f),
        offset(0),
        alignment(table_options.block_align
                      ? std::min(table_options.block_size, kDefaultPageSize)
                      : 0),
        data_block(table_options.block_restart_interval,
                   table_options.use_delta_encoding,
                   false /* use_value_delta_encoding */,
                   tbo.internal_comparator.user_comparator()
                           ->CanKeysWithDifferentByteContentsBeEqual()
                       ? BlockBasedTableOptions::kDataBlockBinarySearch
                       : table_options.data_block_index_type,
                   table_options.data_block_hash_table_util_ratio),
        range_del_block(1 /* block_restart_interval */),
        internal_prefix_transform(tbo.moptions.prefix_extractor.get()),
        compression_type(tbo.compression_type),
        sample_for_compression(tbo.moptions.sample_for_compression),
        compressible_input_data_bytes(0),
        uncompressible_input_data_bytes(0),
        sampled_input_data_bytes(0),
        sampled_output_slow_data_bytes(0),
        sampled_output_fast_data_bytes(0),
        compression_opts(tbo.compression_opts),
        compression_dict(),
        compression_ctxs(tbo.compression_opts.parallel_threads),
        verify_ctxs(tbo.compression_opts.parallel_threads),
        verify_dict(),
        state((tbo.compression_opts.max_dict_bytes > 0) ? State::kBuffered
                                                        : State::kUnbuffered),
        use_delta_encoding_for_index_values(table_opt.format_version >= 4 &&
                                            !table_opt.block_align),
        cache_key_prefix_size(0),
        compressed_cache_key_prefix_size(0),
        reason(tbo.reason),
        flush_block_policy(
            table_options.flush_block_policy_factory->NewFlushBlockPolicy(
                table_options, data_block)),
        status_ok(true),
        io_status_ok(true) {
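    // `buffer_limit` caps how much uncompressed data-block memory may be
    // buffered while in the `kBuffered` state. It is effectively the minimum
    // of the target file size and CompressionOptions::max_dict_buffer_bytes,
    // where a value of zero in either option imposes no limit from that
    // option.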
    if (tbo.target_file_size == 0) {
      buffer_limit = compression_opts.max_dict_buffer_bytes;
    } else if (compression_opts.max_dict_buffer_bytes == 0) {
      buffer_limit = tbo.target_file_size;
    } else {
      buffer_limit = std::min(tbo.target_file_size,
                              compression_opts.max_dict_buffer_bytes);
    }
|
|
|
|
if (table_options.no_block_cache || table_options.block_cache == nullptr) {
|
|
|
|
compression_dict_buffer_cache_res_mgr.reset(nullptr);
|
|
|
|
} else {
|
|
|
|
compression_dict_buffer_cache_res_mgr.reset(
|
|
|
|
new CacheReservationManager(table_options.block_cache));
|
|
|
|
}
|
|
|
|
for (uint32_t i = 0; i < compression_opts.parallel_threads; i++) {
|
|
|
|
compression_ctxs[i].reset(new CompressionContext(compression_type));
|
|
|
|
}
|
|
|
|
if (table_options.index_type ==
|
|
|
|
BlockBasedTableOptions::kTwoLevelIndexSearch) {
|
|
|
|
p_index_builder_ = PartitionedIndexBuilder::CreateIndexBuilder(
|
|
|
|
&internal_comparator, use_delta_encoding_for_index_values,
|
|
|
|
table_options);
|
|
|
|
index_builder.reset(p_index_builder_);
|
|
|
|
} else {
|
|
|
|
index_builder.reset(IndexBuilder::CreateIndexBuilder(
|
|
|
|
table_options.index_type, &internal_comparator,
|
|
|
|
&this->internal_prefix_transform, use_delta_encoding_for_index_values,
|
|
|
|
table_options));
|
|
|
|
}
|
Add more LSM info to FilterBuildingContext (#8246)
Summary:
Add `num_levels`, `is_bottommost`, and table file creation
`reason` to `FilterBuildingContext`, in anticipation of more powerful
Bloom-like filter support.
To support this, added `is_bottommost` and `reason` to
`TableBuilderOptions`, which allowed removing `reason` parameter from
`rocksdb::BuildTable`.
I attempted to remove `skip_filters` from `TableBuilderOptions`, because
filter construction decisions should arise from options, not one-off
parameters. I could not completely remove it because the public API for
SstFileWriter takes a `skip_filters` parameter, and translating this
into an option change would mean awkwardly replacing the table_factory
if it is a BlockBasedTableFactory, with a new filter_policy=nullptr option.
I marked this public skip_filters option as deprecated because of this
oddity. (skip_filters on the read side probably makes sense.)
At least `skip_filters` is now largely hidden for users of
`TableBuilderOptions` and is no longer used for implementing the
optimize_filters_for_hits option. Bringing the logic for that option
closer to handling of FilterBuildingContext makes it more obvious that
these two are using the same notion of "bottommost." (Planned:
configuration options for Bloom-like filters that generalize
`optimize_filters_for_hits`)
Recommended follow-up: Try to get away from "bottommost level" naming of
things, which is inaccurate (see
VersionStorageInfo::RangeMightExistAfterSortedRun), and move to
"bottommost run" or just "bottommost."
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8246
Test Plan:
extended an existing unit test to exercise and check various
filter building contexts. Also, existing tests for
optimize_filters_for_hits validate some of the "bottommost" handling,
which is now closely connected to FilterBuildingContext::is_bottommost
through TableBuilderOptions::is_bottommost
Reviewed By: mrambacher
Differential Revision: D28099346
Pulled By: pdillinger
fbshipit-source-id: 2c1072e29c24d4ac404c761a7b7663292372600a
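For context, the knobs this selection logic reacts to are set on the user side roughly as follows (an illustrative sketch, not part of this change):

#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options options;
rocksdb::BlockBasedTableOptions bbto;
// Without a filter policy, the "Null filter_policy -> no filter" branch runs.
bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbto));
// With this set, bottommost files skip filter generation (first branch below).
options.optimize_filters_for_hits = true;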
    if (ioptions.optimize_filters_for_hits && tbo.is_bottommost) {
      // Apply optimize_filters_for_hits setting here when applicable by
      // skipping filter generation
      filter_builder.reset();
    } else if (tbo.skip_filters) {
      // For SstFileWriter skip_filters
      filter_builder.reset();
    } else if (!table_options.filter_policy) {
      // Null filter_policy -> no filter
      filter_builder.reset();
    } else {
      FilterBuildingContext filter_context(table_options);

      filter_context.info_log = ioptions.logger;
      filter_context.column_family_name = tbo.column_family_name;
      filter_context.reason = reason;

      // Only populate other fields if known to be in LSM rather than
      // generating external SST file
      if (reason != TableFileCreationReason::kMisc) {
        filter_context.compaction_style = ioptions.compaction_style;
        filter_context.num_levels = ioptions.num_levels;
        filter_context.level_at_creation = tbo.level_at_creation;
        filter_context.is_bottommost = tbo.is_bottommost;
        assert(filter_context.level_at_creation < filter_context.num_levels);
      }

      filter_builder.reset(CreateFilterBlockBuilder(
          ioptions, moptions, filter_context,
          use_delta_encoding_for_index_values, p_index_builder_));
    }

    assert(tbo.int_tbl_prop_collector_factories);
TablePropertiesCollectorFactory
Summary:
This diff addresses task #4296714 and rethinks how users provide us with TablePropertiesCollectors as part of Options.
Here's description of task #4296714:
I'm debugging #4295529 and noticed that our count of the user property kDeletedKeys is wrong. We're sharing one single InternalKeyPropertiesCollector with all table builders. In LOG files, we're outputting the number of kDeletedKeys as if it belonged to a single table, while it's actually the total count of deleted keys since creation of the DB.
For example, this table has 3155 entries and 1391828 deleted keys.
The problem with the current approach is that we call methods on a single TablePropertiesCollector for all the tables we create. Even worse, we could do it from multiple threads at the same time, and TablePropertiesCollector has no way of knowing which table we're calling it for.
Good part: Looks like nobody inside Facebook is using Options::table_properties_collectors. This means we should be able to painlessly change the API.
In this change, I introduce TablePropertiesCollectorFactory. For every table we create, we call `CreateTablePropertiesCollector`, which creates a TablePropertiesCollector for a single table. We then use it sequentially from a single thread, which means it doesn't have to be thread-safe.
Test Plan:
Added a test in table_properties_collector_test that fails on master (build two tables, assert that kDeletedKeys count is correct for the second one).
Also, all other tests
Reviewers: sdong, dhruba, haobo, kailiu
Reviewed By: kailiu
CC: leveldb
Differential Revision: https://reviews.facebook.net/D18579
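A minimal sketch of the resulting pattern (a hypothetical collector written against the public TablePropertiesCollector API as I understand it; not code from this diff):

#include <cstdint>
#include <string>

#include "rocksdb/table_properties.h"

class CountingCollector : public rocksdb::TablePropertiesCollector {
 public:
  rocksdb::Status AddUserKey(const rocksdb::Slice& /*key*/,
                             const rocksdb::Slice& /*value*/,
                             rocksdb::EntryType /*type*/,
                             rocksdb::SequenceNumber /*seq*/,
                             uint64_t /*file_size*/) override {
    ++count_;
    return rocksdb::Status::OK();
  }
  rocksdb::Status Finish(rocksdb::UserCollectedProperties* props) override {
    props->insert({"my.key.count", std::to_string(count_)});
    return rocksdb::Status::OK();
  }
  rocksdb::UserCollectedProperties GetReadableProperties() const override {
    return {{"my.key.count", std::to_string(count_)}};
  }
  const char* Name() const override { return "CountingCollector"; }

 private:
  uint64_t count_ = 0;
};

class CountingCollectorFactory
    : public rocksdb::TablePropertiesCollectorFactory {
 public:
  rocksdb::TablePropertiesCollector* CreateTablePropertiesCollector(
      rocksdb::TablePropertiesCollectorFactory::Context /*context*/) override {
    // One fresh collector per table, used from a single thread.
    return new CountingCollector();
  }
  const char* Name() const override { return "CountingCollectorFactory"; }
};

The factory would be registered via Options::table_properties_collector_factories; the loop below then asks each registered factory for a new collector for every table it builds.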
    for (auto& factory : *tbo.int_tbl_prop_collector_factories) {
      assert(factory);
      table_properties_collectors.emplace_back(
          factory->CreateIntTblPropCollector(tbo.column_family_id,
                                             tbo.level_at_creation));
    }
    table_properties_collectors.emplace_back(
        new BlockBasedTablePropertiesCollector(
            table_options.index_type, table_options.whole_key_filtering,
            moptions.prefix_extractor != nullptr));
    if (table_options.verify_compression) {
      for (uint32_t i = 0; i < compression_opts.parallel_threads; i++) {
        verify_ctxs[i].reset(new UncompressionContext(compression_type));
      }
    }

    // These are only needed for populating table properties
    props.column_family_id = tbo.column_family_id;
    props.column_family_name = tbo.column_family_name;
    props.creation_time = tbo.creation_time;
    props.oldest_key_time = tbo.oldest_key_time;
    props.file_creation_time = tbo.file_creation_time;
    props.orig_file_number = tbo.cur_file_num;
    props.db_id = tbo.db_id;
    props.db_session_id = tbo.db_session_id;
    props.db_host_id = ioptions.db_host_id;
    if (!ReifyDbHostIdProperty(ioptions.env, &props.db_host_id).ok()) {
      ROCKS_LOG_INFO(ioptions.logger, "db_host_id property will not be set");
    }
  }

  Rep(const Rep&) = delete;
  Rep& operator=(const Rep&) = delete;

 private:
  // Synchronize status & io_status accesses across threads from main thread,
  // compression thread and write thread in parallel compression.
  std::mutex status_mutex;
  std::atomic<bool> status_ok;
  Status status;
  std::mutex io_status_mutex;
  std::atomic<bool> io_status_ok;
  IOStatus io_status;
};

// ParallelCompressionRep holds the state shared between the block-emitting
// thread, the compression threads and the write thread when parallel
// compression is enabled (compression_opts.parallel_threads > 1).
struct BlockBasedTableBuilder::ParallelCompressionRep {
  // Keys is a wrapper of vector of strings avoiding
  // releasing string memories during vector clear()
  // in order to save memory allocation overhead
  class Keys {
   public:
    Keys() : keys_(kKeysInitSize), size_(0) {}
    void PushBack(const Slice& key) {
      if (size_ == keys_.size()) {
        keys_.emplace_back(key.data(), key.size());
      } else {
        keys_[size_].assign(key.data(), key.size());
      }
      size_++;
    }
    void SwapAssign(std::vector<std::string>& keys) {
      size_ = keys.size();
      std::swap(keys_, keys);
    }
    void Clear() { size_ = 0; }
    size_t Size() { return size_; }
    std::string& Back() { return keys_[size_ - 1]; }
    std::string& operator[](size_t idx) {
      assert(idx < size_);
      return keys_[idx];
    }

   private:
    const size_t kKeysInitSize = 32;
    std::vector<std::string> keys_;
    size_t size_;
  };
  std::unique_ptr<Keys> curr_block_keys;

  class BlockRepSlot;

  // BlockRep instances are fetched from and recycled to
  // block_rep_pool during parallel compression.
  struct BlockRep {
    Slice contents;
    Slice compressed_contents;
    std::unique_ptr<std::string> data;
    std::unique_ptr<std::string> compressed_data;
    CompressionType compression_type;
    std::unique_ptr<std::string> first_key_in_next_block;
    std::unique_ptr<Keys> keys;
    std::unique_ptr<BlockRepSlot> slot;
    Status status;
  };
  // Use a vector of BlockRep as a buffer for a determined number
  // of BlockRep structures. All data referenced by pointers in
  // BlockRep will be freed when this vector is destructed.
  using BlockRepBuffer = std::vector<BlockRep>;
  BlockRepBuffer block_rep_buf;
  // Use a thread-safe queue for concurrent access from block
  // building thread and writer thread.
  using BlockRepPool = WorkQueue<BlockRep*>;
  BlockRepPool block_rep_pool;

  // Use BlockRepSlot to keep block order in write thread.
  // slot_ will pass references to BlockRep
  class BlockRepSlot {
   public:
    BlockRepSlot() : slot_(1) {}
    template <typename T>
    void Fill(T&& rep) {
      slot_.push(std::forward<T>(rep));
    }
    void Take(BlockRep*& rep) { slot_.pop(rep); }

   private:
    // slot_ will pass references to BlockRep in block_rep_buf,
    // and those references are always valid before the destruction of
    // block_rep_buf.
    WorkQueue<BlockRep*> slot_;
  };

  // Compression queue will pass references to BlockRep in block_rep_buf,
  // and those references are always valid before the destruction of
  // block_rep_buf.
  using CompressQueue = WorkQueue<BlockRep*>;
  CompressQueue compress_queue;
  std::vector<port::Thread> compress_thread_pool;

  // Write queue will pass references to BlockRep::slot in block_rep_buf,
  // and those references are always valid before the corresponding
  // BlockRep::slot is destructed, which is before the destruction of
  // block_rep_buf.
  using WriteQueue = WorkQueue<BlockRepSlot*>;
  WriteQueue write_queue;
  std::unique_ptr<port::Thread> write_thread;

  // Estimate output file size when parallel compression is enabled. This is
  // necessary because compression & flush are no longer synchronized,
  // and BlockBasedTableBuilder::FileSize() is no longer accurate.
  // memory_order_relaxed suffices because accurate statistics is not required.
  class FileSizeEstimator {
   public:
    explicit FileSizeEstimator()
        : raw_bytes_compressed(0),
          raw_bytes_curr_block(0),
          raw_bytes_curr_block_set(false),
          raw_bytes_inflight(0),
          blocks_inflight(0),
          curr_compression_ratio(0),
          estimated_file_size(0) {}

    // Estimate file size when a block is about to be emitted to
    // compression thread
    void EmitBlock(uint64_t raw_block_size, uint64_t curr_file_size) {
      uint64_t new_raw_bytes_inflight =
          raw_bytes_inflight.fetch_add(raw_block_size,
                                       std::memory_order_relaxed) +
          raw_block_size;

      uint64_t new_blocks_inflight =
          blocks_inflight.fetch_add(1, std::memory_order_relaxed) + 1;

      estimated_file_size.store(
          curr_file_size +
              static_cast<uint64_t>(
                  static_cast<double>(new_raw_bytes_inflight) *
                  curr_compression_ratio.load(std::memory_order_relaxed)) +
              new_blocks_inflight * kBlockTrailerSize,
          std::memory_order_relaxed);
    }

    // Estimate file size when a block is already reaped from
    // compression thread
    void ReapBlock(uint64_t compressed_block_size, uint64_t curr_file_size) {
      assert(raw_bytes_curr_block_set);

      uint64_t new_raw_bytes_compressed =
          raw_bytes_compressed + raw_bytes_curr_block;
      assert(new_raw_bytes_compressed > 0);

      curr_compression_ratio.store(
          (curr_compression_ratio.load(std::memory_order_relaxed) *
               raw_bytes_compressed +
           compressed_block_size) /
              static_cast<double>(new_raw_bytes_compressed),
          std::memory_order_relaxed);
      raw_bytes_compressed = new_raw_bytes_compressed;

      uint64_t new_raw_bytes_inflight =
          raw_bytes_inflight.fetch_sub(raw_bytes_curr_block,
                                       std::memory_order_relaxed) -
          raw_bytes_curr_block;

      uint64_t new_blocks_inflight =
          blocks_inflight.fetch_sub(1, std::memory_order_relaxed) - 1;

      estimated_file_size.store(
          curr_file_size +
              static_cast<uint64_t>(
                  static_cast<double>(new_raw_bytes_inflight) *
                  curr_compression_ratio.load(std::memory_order_relaxed)) +
              new_blocks_inflight * kBlockTrailerSize,
          std::memory_order_relaxed);

      raw_bytes_curr_block_set = false;
    }

    void SetEstimatedFileSize(uint64_t size) {
      estimated_file_size.store(size, std::memory_order_relaxed);
    }

    uint64_t GetEstimatedFileSize() {
      return estimated_file_size.load(std::memory_order_relaxed);
    }

    void SetCurrBlockRawSize(uint64_t size) {
      raw_bytes_curr_block = size;
      raw_bytes_curr_block_set = true;
    }

   private:
    // Raw bytes compressed so far.
    uint64_t raw_bytes_compressed;
    // Size of current block being appended.
    uint64_t raw_bytes_curr_block;
    // Whether raw_bytes_curr_block has been set for next
    // ReapBlock call.
    bool raw_bytes_curr_block_set;
    // Raw bytes under compression and not appended yet.
    std::atomic<uint64_t> raw_bytes_inflight;
    // Number of blocks under compression and not appended yet.
    std::atomic<uint64_t> blocks_inflight;
    // Current compression ratio, maintained by BGWorkWriteRawBlock.
    std::atomic<double> curr_compression_ratio;
    // Estimated SST file size.
    std::atomic<uint64_t> estimated_file_size;
  };
  FileSizeEstimator file_size_estimator;

  // Facilities used for waiting for first block completion. We need to wait
  // for the completion of first block compression and flush to get a non-zero
  // compression ratio.
  std::atomic<bool> first_block_processed;
  std::condition_variable first_block_cond;
  std::mutex first_block_mutex;

  explicit ParallelCompressionRep(uint32_t parallel_threads)
      : curr_block_keys(new Keys()),
        block_rep_buf(parallel_threads),
        block_rep_pool(parallel_threads),
        compress_queue(parallel_threads),
        write_queue(parallel_threads),
        first_block_processed(false) {
    for (uint32_t i = 0; i < parallel_threads; i++) {
      block_rep_buf[i].contents = Slice();
      block_rep_buf[i].compressed_contents = Slice();
      block_rep_buf[i].data.reset(new std::string());
      block_rep_buf[i].compressed_data.reset(new std::string());
      block_rep_buf[i].compression_type = CompressionType();
      block_rep_buf[i].first_key_in_next_block.reset(new std::string());
      block_rep_buf[i].keys.reset(new Keys());
      block_rep_buf[i].slot.reset(new BlockRepSlot());
      block_rep_buf[i].status = Status::OK();
      block_rep_pool.push(&block_rep_buf[i]);
    }
  }

  ~ParallelCompressionRep() { block_rep_pool.finish(); }

  // Make a block prepared to be emitted to compression thread
  // Used in non-buffered mode
  BlockRep* PrepareBlock(CompressionType compression_type,
                         const Slice* first_key_in_next_block,
                         BlockBuilder* data_block) {
    BlockRep* block_rep =
        PrepareBlockInternal(compression_type, first_key_in_next_block);
    assert(block_rep != nullptr);
    data_block->SwapAndReset(*(block_rep->data));
    block_rep->contents = *(block_rep->data);
    std::swap(block_rep->keys, curr_block_keys);
    curr_block_keys->Clear();
    return block_rep;
  }

  // Used in EnterUnbuffered
  BlockRep* PrepareBlock(CompressionType compression_type,
                         const Slice* first_key_in_next_block,
                         std::string* data_block,
                         std::vector<std::string>* keys) {
    BlockRep* block_rep =
        PrepareBlockInternal(compression_type, first_key_in_next_block);
    assert(block_rep != nullptr);
    std::swap(*(block_rep->data), *data_block);
    block_rep->contents = *(block_rep->data);
    block_rep->keys->SwapAssign(*keys);
    return block_rep;
  }

  // Emit a block to compression thread
  void EmitBlock(BlockRep* block_rep) {
    assert(block_rep != nullptr);
    assert(block_rep->status.ok());
    if (!write_queue.push(block_rep->slot.get())) {
      return;
    }
    if (!compress_queue.push(block_rep)) {
      return;
    }

    if (!first_block_processed.load(std::memory_order_relaxed)) {
      std::unique_lock<std::mutex> lock(first_block_mutex);
      first_block_cond.wait(lock, [this] {
        return first_block_processed.load(std::memory_order_relaxed);
      });
    }
  }

  // Reap a block from compression thread
  void ReapBlock(BlockRep* block_rep) {
    assert(block_rep != nullptr);
    block_rep->compressed_data->clear();
    block_rep_pool.push(block_rep);

    if (!first_block_processed.load(std::memory_order_relaxed)) {
      std::lock_guard<std::mutex> lock(first_block_mutex);
      first_block_processed.store(true, std::memory_order_relaxed);
      first_block_cond.notify_one();
    }
  }

 private:
  BlockRep* PrepareBlockInternal(CompressionType compression_type,
                                 const Slice* first_key_in_next_block) {
    BlockRep* block_rep = nullptr;
    block_rep_pool.pop(block_rep);
    assert(block_rep != nullptr);

    assert(block_rep->data);

    block_rep->compression_type = compression_type;

    if (first_key_in_next_block == nullptr) {
      block_rep->first_key_in_next_block.reset(nullptr);
    } else {
      block_rep->first_key_in_next_block->assign(
          first_key_in_next_block->data(), first_key_in_next_block->size());
    }

    return block_rep;
  }
};

BlockBasedTableBuilder::BlockBasedTableBuilder(
    const BlockBasedTableOptions& table_options, const TableBuilderOptions& tbo,
    WritableFileWriter* file) {
  BlockBasedTableOptions sanitized_table_options(table_options);
  if (sanitized_table_options.format_version == 0 &&
      sanitized_table_options.checksum != kCRC32c) {
    ROCKS_LOG_WARN(
        tbo.ioptions.logger,
        "Silently converting format_version to 1 because checksum is "
        "non-default");
    // silently convert format_version to 1 to keep consistent with current
    // behavior
    sanitized_table_options.format_version = 1;
  }

  rep_ = new Rep(sanitized_table_options, tbo, file);

  if (rep_->filter_builder != nullptr) {
    rep_->filter_builder->StartBlock(0);
  }

  SetupCacheKeyPrefix(tbo);

  if (rep_->IsParallelCompressionEnabled()) {
    StartParallelCompression();
  }
}

Reduce scope of compression dictionary to single SST (#4952)
Summary:
Our previous approach was to train one compression dictionary per compaction, using the first output SST to train the dictionary and then applying it to subsequent SSTs in the same compaction. While this was great for minimizing CPU/memory/I/O overhead, it did not achieve good compression ratios in practice. In our most promising potential use case, moderate reductions in a dictionary's scope make a major difference in compression ratio.
So, this PR changes compression dictionary to be scoped per-SST. It accepts the tradeoff during table building to use more memory and CPU. Important changes include:
- The `BlockBasedTableBuilder` has a new state when dictionary compression is in-use: `kBuffered`. In that state it accumulates uncompressed data in-memory whenever `Add` is called.
- After accumulating target file size bytes or calling `BlockBasedTableBuilder::Finish`, a `BlockBasedTableBuilder` moves to the `kUnbuffered` state. The transition (`EnterUnbuffered()`) involves sampling the buffered data, training a dictionary, and compressing/writing out all buffered data. In the `kUnbuffered` state, a `BlockBasedTableBuilder` behaves the same as before -- blocks are compressed/written out as soon as they fill up.
- Samples are now whole uncompressed data blocks, except the final sample may be a partial data block so we don't breach the user's configured `max_dict_bytes` or `zstd_max_train_bytes`. The dictionary trainer is supposed to work better when we pass it real units of compression. Previously we were passing 64-byte KV samples which was not realistic.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4952
Differential Revision: D13967980
Pulled By: ajkr
fbshipit-source-id: 82bea6f7537e1529c7a1a4cdee84585f5949300f
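A highly simplified sketch of the two-state flow being described (the names and structure are illustrative only, not the real member functions):

#include <cstdint>
#include <string>
#include <vector>

enum class BuilderState { kBuffered, kUnbuffered };

// Rough shape of the behavior: buffer raw data blocks until a limit is hit,
// then build the dictionary once and stream every later block straight out.
void OnBlockFinishedSketch(BuilderState& state,
                           std::vector<std::string>& buffered,
                           std::string&& block, uint64_t buffer_limit) {
  if (state == BuilderState::kBuffered) {
    buffered.emplace_back(std::move(block));
    uint64_t total = 0;
    for (const auto& b : buffered) total += b.size();
    if (buffer_limit != 0 && total > buffer_limit) {
      // EnterUnbuffered() in the real code: sample `buffered`, train the
      // dictionary, then compress and write out everything buffered so far.
      state = BuilderState::kUnbuffered;
    }
  } else {
    // kUnbuffered: compress and write the block immediately, reusing the
    // previously built dictionary.
  }
}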
BlockBasedTableBuilder::~BlockBasedTableBuilder() {
  // Catch errors where caller forgot to call Finish()
  assert(rep_->state == Rep::State::kClosed);
  delete rep_;
}

void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
  Rep* r = rep_;
  assert(rep_->state != Rep::State::kClosed);
  if (!ok()) return;
  ValueType value_type = ExtractValueType(key);
  if (IsValueType(value_type)) {
#ifndef NDEBUG
    if (r->props.num_entries > r->props.num_range_deletions) {
      assert(r->internal_comparator.Compare(key, Slice(r->last_key)) > 0);
    }
#endif  // !NDEBUG

    auto should_flush = r->flush_block_policy->Update(key, value);
    if (should_flush) {
      assert(!r->data_block.empty());
      r->first_key_in_next_block = &key;
      Flush();
      if (r->state == Rep::State::kBuffered) {
        bool exceeds_buffer_limit =
            (r->buffer_limit != 0 && r->data_begin_offset > r->buffer_limit);
        bool exceeds_global_block_cache_limit = false;

        // Increase cache reservation for the last buffered data block
        // only if the block is not going to be unbuffered immediately
        // and there exists a cache reservation manager
        if (!exceeds_buffer_limit &&
            r->compression_dict_buffer_cache_res_mgr != nullptr) {
          Status s =
              r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation<
                  CacheEntryRole::kCompressionDictionaryBuildingBuffer>(
                  r->data_begin_offset);
          exceeds_global_block_cache_limit = s.IsIncomplete();
        }

        if (exceeds_buffer_limit || exceeds_global_block_cache_limit) {
          EnterUnbuffered();
        }
      }

      // Add item to index block.
      // We do not emit the index entry for a block until we have seen the
      // first key for the next data block. This allows us to use shorter
      // keys in the index block. For example, consider a block boundary
      // between the keys "the quick brown fox" and "the who". We can use
      // "the r" as the key for the index block entry since it is >= all
      // entries in the first block and < all entries in subsequent
      // blocks.
      if (ok() && r->state == Rep::State::kUnbuffered) {
        if (r->IsParallelCompressionEnabled()) {
          r->pc_rep->curr_block_keys->Clear();
        } else {
          r->index_builder->AddIndexEntry(&r->last_key, &key,
                                          r->pending_handle);
        }
      }
    }

    // Note: PartitionedFilterBlockBuilder requires key being added to filter
    // builder after being added to index builder.
    if (r->state == Rep::State::kUnbuffered) {
      if (r->IsParallelCompressionEnabled()) {
        r->pc_rep->curr_block_keys->PushBack(key);
      } else {
        if (r->filter_builder != nullptr) {
          size_t ts_sz =
              r->internal_comparator.user_comparator()->timestamp_size();
          r->filter_builder->Add(ExtractUserKeyAndStripTimestamp(key, ts_sz));
        }
      }
    }

    r->data_block.AddWithLastKey(key, value, r->last_key);
    r->last_key.assign(key.data(), key.size());
    if (r->state == Rep::State::kBuffered) {
      // Buffered keys will be replayed from data_block_buffers during
      // `Finish()` once compression dictionary has been finalized.
    } else {
      if (!r->IsParallelCompressionEnabled()) {
        r->index_builder->OnKeyAdded(key);
      }
    }
    // TODO offset passed in is not accurate for parallel compression case
    NotifyCollectTableCollectorsOnAdd(key, value, r->get_offset(),
                                      r->table_properties_collectors,
                                      r->ioptions.logger);

  } else if (value_type == kTypeRangeDeletion) {
    r->range_del_block.Add(key, value);
    // TODO offset passed in is not accurate for parallel compression case
    NotifyCollectTableCollectorsOnAdd(key, value, r->get_offset(),
                                      r->table_properties_collectors,
                                      r->ioptions.logger);
  } else {
    assert(false);
  }

  r->props.num_entries++;
  r->props.raw_key_size += key.size();
  r->props.raw_value_size += value.size();
  if (value_type == kTypeDeletion || value_type == kTypeSingleDeletion) {
    r->props.num_deletions++;
  } else if (value_type == kTypeRangeDeletion) {
    r->props.num_deletions++;
    r->props.num_range_deletions++;
  } else if (value_type == kTypeMerge) {
    r->props.num_merge_operands++;
  }
}

void BlockBasedTableBuilder::Flush() {
  Rep* r = rep_;
  assert(rep_->state != Rep::State::kClosed);
  if (!ok()) return;
  if (r->data_block.empty()) return;
  if (r->IsParallelCompressionEnabled() &&
      r->state == Rep::State::kUnbuffered) {
    r->data_block.Finish();
    ParallelCompressionRep::BlockRep* block_rep = r->pc_rep->PrepareBlock(
        r->compression_type, r->first_key_in_next_block, &(r->data_block));
    assert(block_rep != nullptr);
    r->pc_rep->file_size_estimator.EmitBlock(block_rep->data->size(),
                                             r->get_offset());
    r->pc_rep->EmitBlock(block_rep);
  } else {
    WriteBlock(&r->data_block, &r->pending_handle, BlockType::kData);
  }
}

Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, the dictionary is computed by randomly sampling the first
output file in each subcompaction. The sampling intervals are pre-computed
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, the dictionary is read from the metablock, if it exists, and
loaded into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
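The interval pre-computation described above can be pictured with a small standalone sketch (illustrative only; names and structure are not the code used in this file):

#include <cstdint>
#include <random>
#include <vector>

// Pick sample offsets up front, assuming the file will reach max_file_size.
// Offsets that land past the actual end-of-file are simply skipped later,
// as described above.
std::vector<uint64_t> PrecomputeSampleOffsets(uint64_t max_file_size,
                                              uint64_t sample_len,
                                              size_t num_samples) {
  std::vector<uint64_t> offsets;
  if (sample_len == 0 || max_file_size < sample_len) return offsets;
  std::mt19937_64 rng(/*seed=*/0);
  std::uniform_int_distribution<uint64_t> dist(0, max_file_size - sample_len);
  for (size_t i = 0; i < num_samples; ++i) {
    offsets.push_back(dist(rng));  // each sample reads sample_len bytes here
  }
  return offsets;
}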
void BlockBasedTableBuilder::WriteBlock(BlockBuilder* block,
                                        BlockHandle* handle,
                                        BlockType block_type) {
  block->Finish();
  std::string raw_block_contents;
Two performance improvements in BlockBuilder (#9039)
Summary:
Primarily, this change reserves space in the std::string for building
the next block once a block is finished, using `block_size` as the
reservation size. Note: we also tried reusing the same std::string in the
common "unbuffered" path, but that showed no benefit or regression.
Secondarily, this slightly reduces the work in resetting `restarts_`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9039
Test Plan:
TEST_TMPDIR=/dev/shm/rocksdb1 ./db_bench -benchmarks=fillseq -memtablerep=vector -allow_concurrent_memtable_write=false -num=50000000
Compiled with DEBUG_LEVEL=0
Test and control runs were run simultaneously for better accuracy; units = ops/sec
Run 1, Primary change only: 292697 vs. 280267 (+4.4%)
Run 2, Primary change only: 288763 vs. 279621 (+3.3%)
Run 1, Secondary change only: 260065 vs. 254232 (+2.3%)
Run 2, Secondary change only: 275925 vs. 272248 (+1.4%)
Run 1, Both changes: 284890 vs. 270372 (+5.3%)
Run 2, Both changes: 263511 vs. 258188 (+2.0%)
Reviewed By: zhichao-cao
Differential Revision: D31701253
Pulled By: pdillinger
fbshipit-source-id: 7e40810afbb98e6b6446955e77bda59e69b19ffd
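The reservation trick amounts to the usual reserve-then-swap pattern; a standalone illustration (not the builder code itself):

#include <string>
#include <utility>

// Pre-size the buffer that will receive the next block, then swap it with the
// builder's internal buffer: the caller gets the finished block, and the
// builder keeps appending into storage whose capacity was already reserved.
void TakeFinishedBlock(std::string& builder_buffer, std::string& out,
                       size_t block_size) {
  out.clear();
  out.reserve(block_size);
  std::swap(out, builder_buffer);
}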
3 years ago
|
|
|
raw_block_contents.reserve(rep_->table_options.block_size);
|
|
|
|
block->SwapAndReset(raw_block_contents);
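// SwapAndReset() hands over the finished block's buffer and reinitializes
// the builder, saving an extra allocation and memcpy compared to copying
// the block contents out.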
|
|
|
|
if (rep_->state == Rep::State::kBuffered) {
|
|
|
|
assert(block_type == BlockType::kData);
|
|
|
|
rep_->data_block_buffers.emplace_back(std::move(raw_block_contents));
|
|
|
|
rep_->data_begin_offset += rep_->data_block_buffers.back().size();
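// In the kBuffered state (used while collecting samples for the per-SST
// compression dictionary) data blocks are kept uncompressed in
// data_block_buffers; they are compressed and written out later, when
// EnterUnbuffered() switches the builder to kUnbuffered.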
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
WriteBlock(raw_block_contents, handle, block_type);
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
|
|
|
|
BlockHandle* handle,
|
|
|
|
BlockType block_type) {
|
|
|
|
Rep* r = rep_;
|
|
|
|
assert(r->state == Rep::State::kUnbuffered);
|
|
|
|
Slice block_contents;
|
|
|
|
CompressionType type;
|
|
|
|
Status compress_status;
|
|
|
|
bool is_data_block = block_type == BlockType::kData;
|
|
|
|
CompressAndVerifyBlock(raw_block_contents, is_data_block,
|
|
|
|
*(r->compression_ctxs[0]), r->verify_ctxs[0].get(),
|
|
|
|
&(r->compressed_output), &(block_contents), &type,
|
|
|
|
&compress_status);
|
|
|
|
r->SetStatus(compress_status);
|
|
|
|
if (!ok()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
WriteRawBlock(block_contents, type, handle, block_type, &raw_block_contents);
|
|
|
|
r->compressed_output.clear();
|
|
|
|
if (is_data_block) {
|
|
|
|
if (r->filter_builder != nullptr) {
|
|
|
|
r->filter_builder->StartBlock(r->get_offset());
|
|
|
|
}
|
|
|
|
r->props.data_size = r->get_offset();
|
|
|
|
++r->props.num_data_blocks;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::BGWorkCompression(
|
|
|
|
const CompressionContext& compression_ctx,
|
|
|
|
UncompressionContext* verify_ctx) {
|
|
|
|
ParallelCompressionRep::BlockRep* block_rep = nullptr;
|
|
|
|
while (rep_->pc_rep->compress_queue.pop(block_rep)) {
|
|
|
|
assert(block_rep != nullptr);
|
|
|
|
CompressAndVerifyBlock(block_rep->contents, true, /* is_data_block*/
|
|
|
|
compression_ctx, verify_ctx,
|
|
|
|
block_rep->compressed_data.get(),
|
|
|
|
&block_rep->compressed_contents,
|
|
|
|
&(block_rep->compression_type), &block_rep->status);
|
|
|
|
block_rep->slot->Fill(block_rep);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::CompressAndVerifyBlock(
|
|
|
|
const Slice& raw_block_contents, bool is_data_block,
|
|
|
|
const CompressionContext& compression_ctx, UncompressionContext* verify_ctx,
|
|
|
|
std::string* compressed_output, Slice* block_contents,
|
|
|
|
CompressionType* type, Status* out_status) {
|
|
|
|
// File format contains a sequence of blocks where each block has:
|
|
|
|
// block_data: uint8[n]
|
|
|
|
// type: uint8
|
|
|
|
// crc: uint32
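// The type byte and the 32-bit checksum together form the
// kBlockTrailerSize (5-byte) trailer that follows block_data.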
|
|
|
|
Rep* r = rep_;
|
|
|
|
bool is_status_ok = ok();
|
|
|
|
if (!r->IsParallelCompressionEnabled()) {
|
|
|
|
assert(is_status_ok);
|
|
|
|
}
|
|
|
|
|
|
|
|
*type = r->compression_type;
|
|
|
|
uint64_t sample_for_compression = r->sample_for_compression;
|
|
|
|
bool abort_compression = false;
|
|
|
|
|
|
|
|
StopWatchNano timer(
|
|
|
|
r->ioptions.clock,
|
|
|
|
ShouldReportDetailedTime(r->ioptions.env, r->ioptions.stats));
|
|
|
|
|
|
|
|
if (is_status_ok && raw_block_contents.size() < kCompressionSizeLimit) {
|
|
|
|
if (is_data_block) {
|
|
|
|
r->compressible_input_data_bytes.fetch_add(raw_block_contents.size(),
|
|
|
|
std::memory_order_relaxed);
|
|
|
|
}
|
|
|
|
const CompressionDict* compression_dict;
|
|
|
|
if (!is_data_block || r->compression_dict == nullptr) {
|
|
|
|
compression_dict = &CompressionDict::GetEmptyDict();
|
|
|
|
} else {
|
|
|
|
compression_dict = r->compression_dict.get();
|
|
|
|
}
|
|
|
|
assert(compression_dict != nullptr);
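// Only data blocks are compressed with the per-SST dictionary (when one
// has been trained); index, filter, and other meta blocks always use the
// empty dictionary.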
|
|
|
|
CompressionInfo compression_info(r->compression_opts, compression_ctx,
|
|
|
|
*compression_dict, *type,
|
|
|
|
sample_for_compression);
|
|
|
|
|
|
|
|
std::string sampled_output_fast;
|
|
|
|
std::string sampled_output_slow;
|
|
|
|
*block_contents = CompressBlock(
|
|
|
|
raw_block_contents, compression_info, type,
|
|
|
|
r->table_options.format_version, is_data_block /* do_sample */,
|
|
|
|
compressed_output, &sampled_output_fast, &sampled_output_slow);
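// Note: CompressBlock() may fall back to returning the raw contents and
// setting *type to kNoCompression when compression is disabled or the
// compressed result is not sufficiently smaller than the input.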
|
|
|
|
|
|
|
|
if (sampled_output_slow.size() > 0 || sampled_output_fast.size() > 0) {
|
|
|
|
// Currently compression sampling is only enabled for data blocks.
|
|
|
|
assert(is_data_block);
|
|
|
|
r->sampled_input_data_bytes.fetch_add(raw_block_contents.size(),
|
|
|
|
std::memory_order_relaxed);
|
|
|
|
r->sampled_output_slow_data_bytes.fetch_add(sampled_output_slow.size(),
|
|
|
|
std::memory_order_relaxed);
|
|
|
|
r->sampled_output_fast_data_bytes.fetch_add(sampled_output_fast.size(),
|
|
|
|
std::memory_order_relaxed);
|
|
|
|
}
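// These sampled byte counters feed the slow/fast compression estimated
// data size properties computed in WritePropertiesBlock().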
|
|
|
|
// notify collectors on block add
|
|
|
|
NotifyCollectTableCollectorsOnBlockAdd(
|
|
|
|
r->table_properties_collectors, raw_block_contents.size(),
|
|
|
|
sampled_output_fast.size(), sampled_output_slow.size());
|
|
|
|
|
|
|
|
// Some of the compression algorithms are known to be unreliable. If
|
|
|
|
// the verify_compression flag is set then try to de-compress the
|
|
|
|
// compressed data and compare to the input.
|
|
|
|
if (*type != kNoCompression && r->table_options.verify_compression) {
|
|
|
|
// Retrieve the uncompressed contents into a new buffer
|
|
|
|
const UncompressionDict* verify_dict;
|
|
|
|
if (!is_data_block || r->verify_dict == nullptr) {
|
|
|
|
verify_dict = &UncompressionDict::GetEmptyDict();
|
|
|
|
} else {
|
|
|
|
verify_dict = r->verify_dict.get();
|
|
|
|
}
|
|
|
|
assert(verify_dict != nullptr);
|
|
|
|
BlockContents contents;
|
|
|
|
UncompressionInfo uncompression_info(*verify_ctx, *verify_dict,
|
|
|
|
r->compression_type);
|
|
|
|
Status stat = UncompressBlockContentsForCompressionType(
|
|
|
|
uncompression_info, block_contents->data(), block_contents->size(),
|
|
|
|
&contents, r->table_options.format_version, r->ioptions);
|
|
|
|
|
|
|
|
if (stat.ok()) {
|
|
|
|
bool compressed_ok = contents.data.compare(raw_block_contents) == 0;
|
|
|
|
if (!compressed_ok) {
|
|
|
|
// The result of the compression was invalid. abort.
|
|
|
|
abort_compression = true;
|
|
|
|
ROCKS_LOG_ERROR(r->ioptions.logger,
|
|
|
|
"Decompressed block did not match raw block");
|
|
|
|
*out_status =
|
|
|
|
Status::Corruption("Decompressed block did not match raw block");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Decompression reported an error. abort.
|
|
|
|
*out_status = Status::Corruption(std::string("Could not decompress: ") +
|
|
|
|
stat.getState());
|
|
|
|
abort_compression = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Block is too big to be compressed.
|
|
|
|
if (is_data_block) {
|
|
|
|
r->uncompressible_input_data_bytes.fetch_add(raw_block_contents.size(),
|
|
|
|
std::memory_order_relaxed);
|
|
|
|
}
|
|
|
|
abort_compression = true;
|
|
|
|
}
|
|
|
|
if (is_data_block) {
|
|
|
|
r->uncompressible_input_data_bytes.fetch_add(kBlockTrailerSize,
|
|
|
|
std::memory_order_relaxed);
|
|
|
|
}
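// The block trailer is never compressed, so for data blocks it is always
// counted as uncompressible input.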
|
|
|
|
|
|
|
|
// Abort compression if the block is too big, or did not pass
|
|
|
|
// verification.
|
|
|
|
if (abort_compression) {
|
|
|
|
RecordTick(r->ioptions.stats, NUMBER_BLOCK_NOT_COMPRESSED);
|
|
|
|
*type = kNoCompression;
|
|
|
|
*block_contents = raw_block_contents;
|
|
|
|
} else if (*type != kNoCompression) {
|
|
|
|
if (ShouldReportDetailedTime(r->ioptions.env, r->ioptions.stats)) {
|
|
|
|
RecordTimeToHistogram(r->ioptions.stats, COMPRESSION_TIMES_NANOS,
|
|
|
|
timer.ElapsedNanos());
|
|
|
|
}
|
|
|
|
RecordInHistogram(r->ioptions.stats, BYTES_COMPRESSED,
|
|
|
|
raw_block_contents.size());
|
|
|
|
RecordTick(r->ioptions.stats, NUMBER_BLOCK_COMPRESSED);
|
|
|
|
} else if (*type != r->compression_type) {
|
|
|
|
RecordTick(r->ioptions.stats, NUMBER_BLOCK_NOT_COMPRESSED);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
|
|
|
|
CompressionType type,
|
|
|
|
BlockHandle* handle,
|
|
|
|
BlockType block_type,
|
|
|
|
const Slice* raw_block_contents) {
|
|
|
|
Rep* r = rep_;
|
|
|
|
bool is_data_block = block_type == BlockType::kData;
|
|
|
|
Status s = Status::OK();
|
|
|
|
IOStatus io_s = IOStatus::OK();
|
|
|
|
StopWatch sw(r->ioptions.clock, r->ioptions.stats, WRITE_RAW_BLOCK_MICROS);
|
|
|
|
handle->set_offset(r->get_offset());
|
|
|
|
handle->set_size(block_contents.size());
|
|
|
|
assert(status().ok());
|
|
|
|
assert(io_status().ok());
|
|
|
|
io_s = r->file->Append(block_contents);
|
|
|
|
if (io_s.ok()) {
|
|
|
|
std::array<char, kBlockTrailerSize> trailer;
|
|
|
|
trailer[0] = type;
|
|
|
|
uint32_t checksum = ComputeBuiltinChecksumWithLastByte(
|
|
|
|
r->table_options.checksum, block_contents.data(), block_contents.size(),
|
|
|
|
/*last_byte*/ type);
|
|
|
|
EncodeFixed32(trailer.data() + 1, checksum);
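// Trailer layout: trailer[0] is the compression type byte and
// trailer[1..4] hold the checksum, which covers the block contents plus
// the type byte.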
|
|
|
|
|
|
|
|
assert(io_s.ok());
|
|
|
|
TEST_SYNC_POINT_CALLBACK(
|
|
|
|
"BlockBasedTableBuilder::WriteRawBlock:TamperWithChecksum",
|
|
|
|
trailer.data());
|
|
|
|
io_s = r->file->Append(Slice(trailer.data(), trailer.size()));
|
|
|
|
if (io_s.ok()) {
|
|
|
|
assert(s.ok());
|
|
|
|
bool warm_cache;
|
|
|
|
switch (r->table_options.prepopulate_block_cache) {
|
|
|
|
case BlockBasedTableOptions::PrepopulateBlockCache::kFlushOnly:
|
|
|
|
warm_cache = (r->reason == TableFileCreationReason::kFlush);
|
|
|
|
break;
|
|
|
|
case BlockBasedTableOptions::PrepopulateBlockCache::kDisable:
|
|
|
|
warm_cache = false;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
// missing case
|
|
|
|
assert(false);
|
|
|
|
warm_cache = false;
|
|
|
|
}
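// Note: kFlushOnly warms the cache only for files created by flush, on
// the assumption that freshly flushed data is more likely to be read back
// soon than compaction output.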
|
|
|
|
if (warm_cache) {
|
|
|
|
if (type == kNoCompression) {
|
|
|
|
s = InsertBlockInCacheHelper(block_contents, handle, block_type);
|
|
|
|
} else if (raw_block_contents != nullptr) {
|
|
|
|
s = InsertBlockInCacheHelper(*raw_block_contents, handle, block_type);
|
|
|
|
}
|
|
|
|
if (!s.ok()) {
|
|
|
|
r->SetStatus(s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// TODO: Should InsertBlockInCompressedCache take into account the error from
|
|
|
|
// InsertBlockInCache, or ignore and overwrite it?
|
|
|
|
s = InsertBlockInCompressedCache(block_contents, type, handle);
|
|
|
|
if (!s.ok()) {
|
|
|
|
r->SetStatus(s);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
r->SetIOStatus(io_s);
|
|
|
|
}
|
|
|
|
if (s.ok() && io_s.ok()) {
|
|
|
|
r->set_offset(r->get_offset() + block_contents.size() +
|
|
|
|
kBlockTrailerSize);
|
|
|
|
if (r->table_options.block_align && is_data_block) {
|
|
|
|
size_t pad_bytes =
|
|
|
|
(r->alignment - ((block_contents.size() + kBlockTrailerSize) &
|
|
|
|
(r->alignment - 1))) &
|
|
|
|
(r->alignment - 1);
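// pad_bytes is (-(block size + trailer)) mod alignment; the bitmask form
// assumes r->alignment is a power of two. For example, with a 4096-byte
// alignment and a 3500-byte block, 3500 + 5 = 3505 and pad_bytes = 591,
// which brings the next block offset to 4096.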
|
|
|
|
io_s = r->file->Pad(pad_bytes);
|
|
|
|
if (io_s.ok()) {
|
|
|
|
r->set_offset(r->get_offset() + pad_bytes);
|
|
|
|
} else {
|
|
|
|
r->SetIOStatus(io_s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (r->IsParallelCompressionEnabled()) {
|
|
|
|
if (is_data_block) {
|
|
|
|
r->pc_rep->file_size_estimator.ReapBlock(block_contents.size(),
|
|
|
|
r->get_offset());
|
|
|
|
} else {
|
|
|
|
r->pc_rep->file_size_estimator.SetEstimatedFileSize(r->get_offset());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
r->SetIOStatus(io_s);
|
|
|
|
}
|
|
|
|
if (!io_s.ok() && s.ok()) {
|
|
|
|
r->SetStatus(io_s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::BGWorkWriteRawBlock() {
|
|
|
|
Rep* r = rep_;
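// A single write thread consumes slots in the order blocks were emitted,
// so file layout, index entries, and filter additions stay in key order
// even though compression runs on multiple threads.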
|
|
|
|
ParallelCompressionRep::BlockRepSlot* slot = nullptr;
|
|
|
|
ParallelCompressionRep::BlockRep* block_rep = nullptr;
|
|
|
|
while (r->pc_rep->write_queue.pop(slot)) {
|
|
|
|
assert(slot != nullptr);
|
|
|
|
slot->Take(block_rep);
|
|
|
|
assert(block_rep != nullptr);
|
|
|
|
if (!block_rep->status.ok()) {
|
|
|
|
r->SetStatus(block_rep->status);
|
|
|
|
// Reap the block so that a blocked Flush(), if there is one, can finish;
|
|
|
|
// Flush() will notice !ok() next time.
|
|
|
|
block_rep->status = Status::OK();
|
|
|
|
r->pc_rep->ReapBlock(block_rep);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t i = 0; i < block_rep->keys->Size(); i++) {
|
|
|
|
auto& key = (*block_rep->keys)[i];
|
|
|
|
if (r->filter_builder != nullptr) {
|
|
|
|
size_t ts_sz =
|
|
|
|
r->internal_comparator.user_comparator()->timestamp_size();
|
|
|
|
r->filter_builder->Add(ExtractUserKeyAndStripTimestamp(key, ts_sz));
|
|
|
|
}
|
|
|
|
r->index_builder->OnKeyAdded(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
r->pc_rep->file_size_estimator.SetCurrBlockRawSize(block_rep->data->size());
|
|
|
|
WriteRawBlock(block_rep->compressed_contents, block_rep->compression_type,
|
|
|
|
&r->pending_handle, BlockType::kData, &block_rep->contents);
|
|
|
|
if (!ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (r->filter_builder != nullptr) {
|
|
|
|
r->filter_builder->StartBlock(r->get_offset());
|
|
|
|
}
|
|
|
|
r->props.data_size = r->get_offset();
|
|
|
|
++r->props.num_data_blocks;
|
|
|
|
|
|
|
|
if (block_rep->first_key_in_next_block == nullptr) {
|
|
|
|
r->index_builder->AddIndexEntry(&(block_rep->keys->Back()), nullptr,
|
|
|
|
r->pending_handle);
|
|
|
|
} else {
|
|
|
|
Slice first_key_in_next_block =
|
|
|
|
Slice(*block_rep->first_key_in_next_block);
|
|
|
|
r->index_builder->AddIndexEntry(&(block_rep->keys->Back()),
|
|
|
|
&first_key_in_next_block,
|
|
|
|
r->pending_handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
r->pc_rep->ReapBlock(block_rep);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::StartParallelCompression() {
|
|
|
|
rep_->pc_rep.reset(
|
|
|
|
new ParallelCompressionRep(rep_->compression_opts.parallel_threads));
|
|
|
|
rep_->pc_rep->compress_thread_pool.reserve(
|
|
|
|
rep_->compression_opts.parallel_threads);
|
|
|
|
for (uint32_t i = 0; i < rep_->compression_opts.parallel_threads; i++) {
|
|
|
|
rep_->pc_rep->compress_thread_pool.emplace_back([this, i] {
|
|
|
|
BGWorkCompression(*(rep_->compression_ctxs[i]),
|
|
|
|
rep_->verify_ctxs[i].get());
|
|
|
|
});
|
|
|
|
}
|
|
|
|
rep_->pc_rep->write_thread.reset(
|
|
|
|
new port::Thread([this] { BGWorkWriteRawBlock(); }));
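// compression_opts.parallel_threads workers pull from compress_queue and
// a single writer thread drains write_queue, so compression is parallel
// while writes remain ordered.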
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::StopParallelCompression() {
|
|
|
|
rep_->pc_rep->compress_queue.finish();
|
|
|
|
for (auto& thread : rep_->pc_rep->compress_thread_pool) {
|
|
|
|
thread.join();
|
|
|
|
}
|
|
|
|
rep_->pc_rep->write_queue.finish();
|
|
|
|
rep_->pc_rep->write_thread->join();
|
|
|
|
}
|
|
|
|
|
|
|
|
Status BlockBasedTableBuilder::status() const { return rep_->GetStatus(); }
|
|
|
|
|
|
|
|
IOStatus BlockBasedTableBuilder::io_status() const {
|
|
|
|
return rep_->GetIOStatus();
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
// Delete the entry resided in the cache.
|
|
|
|
template <class Entry>
|
|
|
|
void DeleteEntryCached(const Slice& /*key*/, void* value) {
|
|
|
|
auto entry = reinterpret_cast<Entry*>(value);
|
|
|
|
delete entry;
|
|
|
|
}
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Helper function to set up the cache key's prefix for the Table.
|
|
|
|
void BlockBasedTableBuilder::SetupCacheKeyPrefix(
|
|
|
|
const TableBuilderOptions& tbo) {
|
|
|
|
// FIXME: Unify with BlockBasedTable::SetupCacheKeyPrefix
|
|
|
|
if (rep_->table_options.block_cache.get() != nullptr) {
|
|
|
|
BlockBasedTable::GenerateCachePrefix<Cache, FSWritableFile>(
|
|
|
|
rep_->table_options.block_cache.get(), rep_->file->writable_file(),
|
|
|
|
&rep_->cache_key_prefix[0], &rep_->cache_key_prefix_size,
|
|
|
|
tbo.db_session_id, tbo.cur_file_num);
|
|
|
|
}
|
|
|
|
if (rep_->table_options.block_cache_compressed.get() != nullptr) {
|
|
|
|
BlockBasedTable::GenerateCachePrefix<Cache, FSWritableFile>(
|
|
|
|
rep_->table_options.block_cache_compressed.get(),
|
|
|
|
rep_->file->writable_file(), &rep_->compressed_cache_key_prefix[0],
|
|
|
|
&rep_->compressed_cache_key_prefix_size, tbo.db_session_id,
|
|
|
|
tbo.cur_file_num);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Make a copy of the block contents and insert into compressed block cache
|
|
|
|
//
|
|
|
|
Status BlockBasedTableBuilder::InsertBlockInCompressedCache(
|
|
|
|
const Slice& block_contents, const CompressionType type,
|
|
|
|
const BlockHandle* handle) {
|
|
|
|
Rep* r = rep_;
|
|
|
|
Cache* block_cache_compressed = r->table_options.block_cache_compressed.get();
|
|
|
|
Status s;
|
|
|
|
if (type != kNoCompression && block_cache_compressed != nullptr) {
|
|
|
|
size_t size = block_contents.size();
|
|
|
|
|
|
|
|
auto ubuf =
|
|
|
|
AllocateBlock(size + 1, block_cache_compressed->memory_allocator());
|
|
|
|
memcpy(ubuf.get(), block_contents.data(), size);
|
|
|
|
ubuf[size] = type;
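// The compression type is stored in the byte just past the payload
// (mirroring the first byte of the on-disk trailer) so the read path can
// recover it when decompressing the cached block.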
|
|
|
|
|
|
|
|
BlockContents* block_contents_to_cache =
|
|
|
|
new BlockContents(std::move(ubuf), size);
|
|
|
|
#ifndef NDEBUG
|
|
|
|
block_contents_to_cache->is_raw_block = true;
|
|
|
|
#endif // NDEBUG
|
|
|
|
|
|
|
|
// make cache key by appending the file offset to the cache prefix id
|
|
|
|
char* end = EncodeVarint64(
|
|
|
|
r->compressed_cache_key_prefix + r->compressed_cache_key_prefix_size,
|
|
|
|
handle->offset());
|
|
|
|
Slice key(r->compressed_cache_key_prefix,
|
|
|
|
static_cast<size_t>(end - r->compressed_cache_key_prefix));
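// The cache key is the per-file prefix followed by the varint-encoded
// offset of the block within the file.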
|
|
|
|
|
|
|
|
s = block_cache_compressed->Insert(
|
|
|
|
key, block_contents_to_cache,
|
|
|
|
block_contents_to_cache->ApproximateMemoryUsage(),
|
|
|
|
&DeleteEntryCached<BlockContents>);
|
|
|
|
if (s.ok()) {
|
|
|
|
RecordTick(rep_->ioptions.stats, BLOCK_CACHE_COMPRESSED_ADD);
|
|
|
|
} else {
|
|
|
|
RecordTick(rep_->ioptions.stats, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
|
|
|
|
}
|
|
|
|
// Invalidate OS cache.
|
|
|
|
r->file->InvalidateCache(static_cast<size_t>(r->get_offset()), size)
|
|
|
|
.PermitUncheckedError();
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
Status BlockBasedTableBuilder::InsertBlockInCacheHelper(
|
|
|
|
const Slice& block_contents, const BlockHandle* handle,
|
|
|
|
BlockType block_type) {
|
|
|
|
Status s;
|
|
|
|
if (block_type == BlockType::kData || block_type == BlockType::kIndex) {
|
|
|
|
s = InsertBlockInCache<Block>(block_contents, handle, block_type);
|
|
|
|
} else if (block_type == BlockType::kFilter) {
|
|
|
|
if (rep_->filter_builder->IsBlockBased()) {
|
|
|
|
s = InsertBlockInCache<Block>(block_contents, handle, block_type);
|
|
|
|
} else {
|
|
|
|
s = InsertBlockInCache<ParsedFullFilterBlock>(block_contents, handle,
|
|
|
|
block_type);
|
|
|
|
}
|
|
|
|
} else if (block_type == BlockType::kCompressionDictionary) {
|
|
|
|
s = InsertBlockInCache<UncompressionDict>(block_contents, handle,
|
|
|
|
block_type);
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename TBlocklike>
|
|
|
|
Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents,
|
|
|
|
const BlockHandle* handle,
|
|
|
|
BlockType block_type) {
|
|
|
|
// Uncompressed regular block cache
|
|
|
|
Cache* block_cache = rep_->table_options.block_cache.get();
|
|
|
|
Status s;
|
|
|
|
if (block_cache != nullptr) {
|
|
|
|
size_t size = block_contents.size();
|
|
|
|
auto buf = AllocateBlock(size, block_cache->memory_allocator());
|
|
|
|
memcpy(buf.get(), block_contents.data(), size);
|
|
|
|
BlockContents results(std::move(buf), size);
|
|
|
|
|
|
|
|
char
|
|
|
|
cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
Slice key = BlockBasedTable::GetCacheKey(rep_->cache_key_prefix,
|
|
|
|
rep_->cache_key_prefix_size,
|
|
|
|
*handle, cache_key);
|
|
|
|
|
|
|
|
const size_t read_amp_bytes_per_bit =
|
|
|
|
rep_->table_options.read_amp_bytes_per_bit;
|
|
|
|
|
|
|
|
// TODO(akanksha): Dedup the code below by calling
|
|
|
|
// BlockBasedTable::PutDataBlockToCache.
|
|
|
|
std::unique_ptr<TBlocklike> block_holder(
|
|
|
|
BlocklikeTraits<TBlocklike>::Create(
|
|
|
|
std::move(results), read_amp_bytes_per_bit,
|
|
|
|
rep_->ioptions.statistics.get(),
|
|
|
|
false /*rep_->blocks_definitely_zstd_compressed*/,
|
|
|
|
rep_->table_options.filter_policy.get()));
|
|
|
|
|
|
|
|
assert(block_holder->own_bytes());
|
|
|
|
size_t charge = block_holder->ApproximateMemoryUsage();
|
|
|
|
s = block_cache->Insert(
|
|
|
|
key, block_holder.get(),
|
|
|
|
BlocklikeTraits<TBlocklike>::GetCacheItemHelper(block_type), charge,
|
|
|
|
nullptr, Cache::Priority::LOW);
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
// Release ownership of block_holder.
|
|
|
|
block_holder.release();
|
|
|
|
BlockBasedTable::UpdateCacheInsertionMetrics(
|
|
|
|
block_type, nullptr /*get_context*/, charge, s.IsOkOverwritten(),
|
|
|
|
rep_->ioptions.stats);
|
|
|
|
} else {
|
|
|
|
RecordTick(rep_->ioptions.stats, BLOCK_CACHE_ADD_FAILURES);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::WriteFilterBlock(
|
|
|
|
MetaIndexBuilder* meta_index_builder) {
|
|
|
|
BlockHandle filter_block_handle;
|
|
|
|
bool empty_filter_block =
|
|
|
|
(rep_->filter_builder == nullptr || rep_->filter_builder->IsEmpty());
|
|
|
|
if (ok() && !empty_filter_block) {
|
|
|
|
rep_->props.num_filter_entries +=
|
|
|
|
rep_->filter_builder->EstimateEntriesAdded();
|
|
|
|
Status s = Status::Incomplete();
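// Full filters produce their entire contents in a single Finish() call;
// partitioned filters keep returning Status::Incomplete(), and each
// iteration writes out one partition.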
|
|
|
|
while (ok() && s.IsIncomplete()) {
|
|
|
|
// filter_data is used to store the transferred filter data payload from
|
|
|
|
// FilterBlockBuilder and deallocate the payload by going out of scope.
|
|
|
|
// Otherwise, the payload will unnecessarily remain until
|
|
|
|
// BlockBasedTableBuilder is deallocated.
|
|
|
|
//
|
|
|
|
// See FilterBlockBuilder::Finish() for more on the difference in
|
|
|
|
// transferred filter data payload among different FilterBlockBuilder
|
|
|
|
// subtypes.
|
|
|
|
std::unique_ptr<const char[]> filter_data;
|
|
|
|
Slice filter_content =
|
|
|
|
rep_->filter_builder->Finish(filter_block_handle, &s, &filter_data);
|
|
|
|
assert(s.ok() || s.IsIncomplete());
|
|
|
|
rep_->props.filter_size += filter_content.size();
|
|
|
|
WriteRawBlock(filter_content, kNoCompression, &filter_block_handle,
|
|
|
|
BlockType::kFilter);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (ok() && !empty_filter_block) {
|
|
|
|
// Add mapping from "<filter_block_prefix>.Name" to location
|
|
|
|
// of filter data.
|
|
|
|
std::string key;
|
|
|
|
if (rep_->filter_builder->IsBlockBased()) {
|
|
|
|
key = BlockBasedTable::kFilterBlockPrefix;
|
|
|
|
} else {
|
|
|
|
key = rep_->table_options.partition_filters
|
|
|
|
? BlockBasedTable::kPartitionedFilterBlockPrefix
|
|
|
|
: BlockBasedTable::kFullFilterBlockPrefix;
|
|
|
|
}
|
|
|
|
key.append(rep_->table_options.filter_policy->Name());
|
|
|
|
meta_index_builder->Add(key, filter_block_handle);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::WriteIndexBlock(
|
|
|
|
MetaIndexBuilder* meta_index_builder, BlockHandle* index_block_handle) {
|
|
|
|
IndexBuilder::IndexBlocks index_blocks;
|
|
|
|
auto index_builder_status = rep_->index_builder->Finish(&index_blocks);
|
|
|
|
if (index_builder_status.IsIncomplete()) {
|
|
|
|
// If we have more than one index partition then meta_blocks are not
|
|
|
|
// supported for the index. Currently meta_blocks are used only by
|
|
|
|
// HashIndexBuilder which is not multi-partition.
|
|
|
|
assert(index_blocks.meta_blocks.empty());
|
|
|
|
} else if (ok() && !index_builder_status.ok()) {
|
|
|
|
rep_->SetStatus(index_builder_status);
|
|
|
|
}
|
|
|
|
if (ok()) {
|
|
|
|
for (const auto& item : index_blocks.meta_blocks) {
|
|
|
|
BlockHandle block_handle;
|
|
|
|
WriteBlock(item.second, &block_handle, BlockType::kIndex);
|
|
|
|
if (!ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
meta_index_builder->Add(item.first, block_handle);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (ok()) {
|
|
|
|
if (rep_->table_options.enable_index_compression) {
|
|
|
|
WriteBlock(index_blocks.index_block_contents, index_block_handle,
|
|
|
|
BlockType::kIndex);
|
|
|
|
} else {
|
|
|
|
WriteRawBlock(index_blocks.index_block_contents, kNoCompression,
|
|
|
|
index_block_handle, BlockType::kIndex);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// If there are more index partitions, finish them and write them out
|
|
|
|
if (index_builder_status.IsIncomplete()) {
|
|
|
|
Status s = Status::Incomplete();
|
|
|
|
while (ok() && s.IsIncomplete()) {
|
|
|
|
s = rep_->index_builder->Finish(&index_blocks, *index_block_handle);
|
|
|
|
if (!s.ok() && !s.IsIncomplete()) {
|
|
|
|
rep_->SetStatus(s);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (rep_->table_options.enable_index_compression) {
|
|
|
|
WriteBlock(index_blocks.index_block_contents, index_block_handle,
|
|
|
|
BlockType::kIndex);
|
|
|
|
} else {
|
|
|
|
WriteRawBlock(index_blocks.index_block_contents, kNoCompression,
|
|
|
|
index_block_handle, BlockType::kIndex);
|
|
|
|
}
|
|
|
|
// The last index_block_handle will be for the partition index block
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::WritePropertiesBlock(
|
|
|
|
MetaIndexBuilder* meta_index_builder) {
|
|
|
|
BlockHandle properties_block_handle;
|
|
|
|
if (ok()) {
|
|
|
|
PropertyBlockBuilder property_block_builder;
|
|
|
|
rep_->props.filter_policy_name =
|
|
|
|
rep_->table_options.filter_policy != nullptr
|
|
|
|
? rep_->table_options.filter_policy->Name()
|
|
|
|
: "";
|
|
|
|
rep_->props.index_size =
|
|
|
|
rep_->index_builder->IndexSize() + kBlockTrailerSize;
|
|
|
|
rep_->props.comparator_name = rep_->ioptions.user_comparator != nullptr
|
|
|
|
? rep_->ioptions.user_comparator->Name()
|
|
|
|
: "nullptr";
|
|
|
|
rep_->props.merge_operator_name =
|
|
|
|
rep_->ioptions.merge_operator != nullptr
|
|
|
|
? rep_->ioptions.merge_operator->Name()
|
|
|
|
: "nullptr";
|
|
|
|
rep_->props.compression_name =
|
|
|
|
CompressionTypeToString(rep_->compression_type);
|
|
|
|
rep_->props.compression_options =
|
|
|
|
CompressionOptionsToString(rep_->compression_opts);
|
|
|
|
rep_->props.prefix_extractor_name =
|
|
|
|
rep_->moptions.prefix_extractor != nullptr
|
|
|
|
? rep_->moptions.prefix_extractor->AsString()
|
|
|
|
: "nullptr";
|
|
|
|
std::string property_collectors_names = "[";
|
|
|
|
for (size_t i = 0;
|
|
|
|
i < rep_->ioptions.table_properties_collector_factories.size(); ++i) {
|
|
|
|
if (i != 0) {
|
|
|
|
property_collectors_names += ",";
|
|
|
|
}
|
|
|
|
property_collectors_names +=
|
|
|
|
rep_->ioptions.table_properties_collector_factories[i]->Name();
|
|
|
|
}
|
|
|
|
property_collectors_names += "]";
|
|
|
|
rep_->props.property_collectors_names = property_collectors_names;
|
|
|
|
if (rep_->table_options.index_type ==
|
|
|
|
BlockBasedTableOptions::kTwoLevelIndexSearch) {
|
|
|
|
assert(rep_->p_index_builder_ != nullptr);
|
|
|
|
rep_->props.index_partitions = rep_->p_index_builder_->NumPartitions();
|
|
|
|
rep_->props.top_level_index_size =
|
|
|
|
rep_->p_index_builder_->TopLevelIndexSize(rep_->offset);
|
|
|
|
}
|
|
|
|
rep_->props.index_key_is_user_key =
|
|
|
|
!rep_->index_builder->seperator_is_key_plus_seq();
|
|
|
|
rep_->props.index_value_is_delta_encoded =
|
|
|
|
rep_->use_delta_encoding_for_index_values;
|
|
|
|
if (rep_->sampled_input_data_bytes > 0) {
|
|
|
|
rep_->props.slow_compression_estimated_data_size = static_cast<uint64_t>(
|
|
|
|
static_cast<double>(rep_->sampled_output_slow_data_bytes) /
|
|
|
|
rep_->sampled_input_data_bytes *
|
|
|
|
rep_->compressible_input_data_bytes +
|
|
|
|
rep_->uncompressible_input_data_bytes + 0.5);
|
|
|
|
rep_->props.fast_compression_estimated_data_size = static_cast<uint64_t>(
|
|
|
|
static_cast<double>(rep_->sampled_output_fast_data_bytes) /
|
|
|
|
rep_->sampled_input_data_bytes *
|
|
|
|
rep_->compressible_input_data_bytes +
|
|
|
|
rep_->uncompressible_input_data_bytes + 0.5);
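// Estimated size = compressible_input * (sampled_output / sampled_input)
// + uncompressible_input, rounded to the nearest byte. For example,
// sampling 1 MB of input down to 400 KB (ratio 0.4) with 100 MB of
// compressible and 2 MB of uncompressible input yields an estimate of
// 0.4 * 100 MB + 2 MB = 42 MB.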
|
|
|
|
} else if (rep_->sample_for_compression > 0) {
|
|
|
|
// We tried to sample but none were found. Assume worst-case (compression
|
|
|
|
// ratio 1.0) so data is complete and aggregatable.
|
|
|
|
rep_->props.slow_compression_estimated_data_size =
|
|
|
|
rep_->compressible_input_data_bytes +
|
|
|
|
rep_->uncompressible_input_data_bytes;
|
|
|
|
rep_->props.fast_compression_estimated_data_size =
|
|
|
|
rep_->compressible_input_data_bytes +
|
|
|
|
rep_->uncompressible_input_data_bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add basic properties
|
|
|
|
property_block_builder.AddTableProperty(rep_->props);
|
|
|
|
|
|
|
|
// Add user collected properties
|
|
|
|
NotifyCollectTableCollectorsOnFinish(rep_->table_properties_collectors,
|
|
|
|
rep_->ioptions.logger,
|
|
|
|
&property_block_builder);
|
|
|
|
|
|
|
|
WriteRawBlock(property_block_builder.Finish(), kNoCompression,
|
|
|
|
&properties_block_handle, BlockType::kProperties);
|
|
|
|
}
|
|
|
|
if (ok()) {
|
|
|
|
#ifndef NDEBUG
|
|
|
|
{
|
|
|
|
uint64_t props_block_offset = properties_block_handle.offset();
|
|
|
|
uint64_t props_block_size = properties_block_handle.size();
|
|
|
|
TEST_SYNC_POINT_CALLBACK(
|
|
|
|
"BlockBasedTableBuilder::WritePropertiesBlock:GetPropsBlockOffset",
|
|
|
|
&props_block_offset);
|
|
|
|
TEST_SYNC_POINT_CALLBACK(
|
|
|
|
"BlockBasedTableBuilder::WritePropertiesBlock:GetPropsBlockSize",
|
|
|
|
&props_block_size);
|
|
|
|
}
|
|
|
|
#endif // !NDEBUG
|
|
|
|
meta_index_builder->Add(kPropertiesBlock, properties_block_handle);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTableBuilder::WriteCompressionDictBlock(
|
|
|
|
MetaIndexBuilder* meta_index_builder) {
|
|
|
|
if (rep_->compression_dict != nullptr &&
|
|
|
|
rep_->compression_dict->GetRawDict().size()) {
|
|
|
|
BlockHandle compression_dict_block_handle;
|
|
|
|
if (ok()) {
|
|
|
|
WriteRawBlock(rep_->compression_dict->GetRawDict(), kNoCompression,
|
|
|
|
&compression_dict_block_handle,
|
|
|
|
BlockType::kCompressionDictionary);
|
#ifndef NDEBUG
      Slice compression_dict = rep_->compression_dict->GetRawDict();
      TEST_SYNC_POINT_CALLBACK(
          "BlockBasedTableBuilder::WriteCompressionDictBlock:RawDict",
          &compression_dict);
#endif  // NDEBUG
    }
    if (ok()) {
      meta_index_builder->Add(kCompressionDictBlock,
                              compression_dict_block_handle);
    }
  }
}

void BlockBasedTableBuilder::WriteRangeDelBlock(
    MetaIndexBuilder* meta_index_builder) {
  if (ok() && !rep_->range_del_block.empty()) {
    BlockHandle range_del_block_handle;
    WriteRawBlock(rep_->range_del_block.Finish(), kNoCompression,
                  &range_del_block_handle, BlockType::kRangeDeletion);
    meta_index_builder->Add(kRangeDelBlock, range_del_block_handle);
  }
}

void BlockBasedTableBuilder::WriteFooter(BlockHandle& metaindex_block_handle,
                                         BlockHandle& index_block_handle) {
  Rep* r = rep_;
  // No need to write out new footer if we're using default checksum.
  // We're writing the legacy magic number because we want old versions of
  // RocksDB to be able to read files generated with a new release (just in
  // case somebody wants to roll back after an upgrade).
  // TODO(icanadi) at some point in the future, when we're absolutely sure
  // nobody will roll back to RocksDB 2.x versions, retire the legacy magic
  // number and always write new table files with the new magic number.
  bool legacy = (r->table_options.format_version == 0);
  // this is guaranteed by BlockBasedTableBuilder's constructor
  assert(r->table_options.checksum == kCRC32c ||
         r->table_options.format_version != 0);
  Footer footer(
      legacy ? kLegacyBlockBasedTableMagicNumber : kBlockBasedTableMagicNumber,
      r->table_options.format_version);
  footer.set_metaindex_handle(metaindex_block_handle);
  footer.set_index_handle(index_block_handle);
  footer.set_checksum(r->table_options.checksum);
  std::string footer_encoding;
  footer.EncodeTo(&footer_encoding);
  assert(ok());
  IOStatus ios = r->file->Append(footer_encoding);
  if (ios.ok()) {
    r->set_offset(r->get_offset() + footer_encoding.size());
  } else {
    r->SetIOStatus(ios);
    r->SetStatus(ios);
  }
}

void BlockBasedTableBuilder::EnterUnbuffered() {
  Rep* r = rep_;
  assert(r->state == Rep::State::kBuffered);
  r->state = Rep::State::kUnbuffered;
  const size_t kSampleBytes = r->compression_opts.zstd_max_train_bytes > 0
                                  ? r->compression_opts.zstd_max_train_bytes
                                  : r->compression_opts.max_dict_bytes;
  const size_t kNumBlocksBuffered = r->data_block_buffers.size();
  if (kNumBlocksBuffered == 0) {
    // The below code is neither safe nor necessary for handling zero data
    // blocks.
    return;
  }

  // Abstract algebra teaches us that a finite cyclic group (such as the
  // additive group of integers modulo N) can be generated by a number that is
  // coprime with N. Since N is variable (number of buffered data blocks), we
  // must then pick a prime number in order to guarantee coprimeness with any
  // N.
  //
  // One downside of this approach is the spread will be poor when
  // `kPrimeGeneratorRemainder` is close to zero or close to
  // `kNumBlocksBuffered`.
  //
  // Picked a random number between one and one trillion and then chose the
  // next prime number greater than or equal to it.
  const uint64_t kPrimeGenerator = 545055921143ull;
  // Can avoid repeated division by just adding the remainder repeatedly.
  const size_t kPrimeGeneratorRemainder = static_cast<size_t>(
      kPrimeGenerator % static_cast<uint64_t>(kNumBlocksBuffered));
  const size_t kInitSampleIdx = kNumBlocksBuffered / 2;
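  // Worked example (editorial addition, illustrative only): with
  // kNumBlocksBuffered = 10, kPrimeGeneratorRemainder = 545055921143 % 10 = 3
  // and kInitSampleIdx = 5, the loop below visits buffers
  // 5, 8, 1, 4, 7, 0, 3, 6, 9, 2 -- every buffered block exactly once, in
  // scattered order -- stopping early once kSampleBytes of samples have been
  // collected.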

  std::string compression_dict_samples;
  std::vector<size_t> compression_dict_sample_lens;
  size_t buffer_idx = kInitSampleIdx;
  for (size_t i = 0;
       i < kNumBlocksBuffered && compression_dict_samples.size() < kSampleBytes;
       ++i) {
    size_t copy_len = std::min(kSampleBytes - compression_dict_samples.size(),
                               r->data_block_buffers[buffer_idx].size());
    compression_dict_samples.append(r->data_block_buffers[buffer_idx], 0,
                                    copy_len);
    compression_dict_sample_lens.emplace_back(copy_len);

    buffer_idx += kPrimeGeneratorRemainder;
    if (buffer_idx >= kNumBlocksBuffered) {
      buffer_idx -= kNumBlocksBuffered;
    }
  }

  // final data block flushed, now we can generate dictionary from the samples.
  // OK if compression_dict_samples is empty, we'll just get empty dictionary.
  std::string dict;
  if (r->compression_opts.zstd_max_train_bytes > 0) {
    dict = ZSTD_TrainDictionary(compression_dict_samples,
                                compression_dict_sample_lens,
                                r->compression_opts.max_dict_bytes);
  } else {
    dict = std::move(compression_dict_samples);
  }
  r->compression_dict.reset(new CompressionDict(dict, r->compression_type,
                                                r->compression_opts.level));
  r->verify_dict.reset(new UncompressionDict(
      dict, r->compression_type == kZSTD ||
                r->compression_type == kZSTDNotFinalCompression));

  auto get_iterator_for_block = [&r](size_t i) {
    auto& data_block = r->data_block_buffers[i];
    assert(!data_block.empty());

    Block reader{BlockContents{data_block}};
    DataBlockIter* iter = reader.NewDataIterator(
        r->internal_comparator.user_comparator(), kDisableGlobalSequenceNumber);

    iter->SeekToFirst();
    assert(iter->Valid());
    return std::unique_ptr<DataBlockIter>(iter);
  };
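
  // Editorial note: the buffered blocks were serialized without being indexed
  // or filtered, so each one is decoded again below to replay its keys into
  // the filter and index builders (or to hand them to the parallel
  // compression pipeline) before the block is finally compressed and written
  // out.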

  std::unique_ptr<DataBlockIter> iter = nullptr, next_block_iter = nullptr;

  for (size_t i = 0; ok() && i < r->data_block_buffers.size(); ++i) {
    if (iter == nullptr) {
      iter = get_iterator_for_block(i);
      assert(iter != nullptr);
    }

    if (i + 1 < r->data_block_buffers.size()) {
      next_block_iter = get_iterator_for_block(i + 1);
    }

    auto& data_block = r->data_block_buffers[i];

    if (r->IsParallelCompressionEnabled()) {
      Slice first_key_in_next_block;
      const Slice* first_key_in_next_block_ptr = &first_key_in_next_block;
      if (i + 1 < r->data_block_buffers.size()) {
        assert(next_block_iter != nullptr);
        first_key_in_next_block = next_block_iter->key();
      } else {
        first_key_in_next_block_ptr = r->first_key_in_next_block;
      }

      std::vector<std::string> keys;
      for (; iter->Valid(); iter->Next()) {
        keys.emplace_back(iter->key().ToString());
      }

      ParallelCompressionRep::BlockRep* block_rep = r->pc_rep->PrepareBlock(
          r->compression_type, first_key_in_next_block_ptr, &data_block, &keys);

      assert(block_rep != nullptr);
      r->pc_rep->file_size_estimator.EmitBlock(block_rep->data->size(),
                                               r->get_offset());
      r->pc_rep->EmitBlock(block_rep);
    } else {
      for (; iter->Valid(); iter->Next()) {
        Slice key = iter->key();
        if (r->filter_builder != nullptr) {
          size_t ts_sz =
              r->internal_comparator.user_comparator()->timestamp_size();
          r->filter_builder->Add(ExtractUserKeyAndStripTimestamp(key, ts_sz));
        }
        r->index_builder->OnKeyAdded(key);
      }
      WriteBlock(Slice(data_block), &r->pending_handle, BlockType::kData);
      if (ok() && i + 1 < r->data_block_buffers.size()) {
        assert(next_block_iter != nullptr);
        Slice first_key_in_next_block = next_block_iter->key();

        Slice* first_key_in_next_block_ptr = &first_key_in_next_block;

        iter->SeekToLast();
        std::string last_key = iter->key().ToString();
        r->index_builder->AddIndexEntry(&last_key, first_key_in_next_block_ptr,
                                        r->pending_handle);
      }
    }
    std::swap(iter, next_block_iter);
  }

  r->data_block_buffers.clear();
  r->data_begin_offset = 0;
  // Release all reserved cache for data block buffers
  if (r->compression_dict_buffer_cache_res_mgr != nullptr) {
    Status s = r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation<
        CacheEntryRole::kCompressionDictionaryBuildingBuffer>(
        r->data_begin_offset);
    s.PermitUncheckedError();
  }
}

Status BlockBasedTableBuilder::Finish() {
  Rep* r = rep_;
  assert(r->state != Rep::State::kClosed);
  bool empty_data_block = r->data_block.empty();
  r->first_key_in_next_block = nullptr;
  Flush();
  if (r->state == Rep::State::kBuffered) {
    EnterUnbuffered();
  }
  if (r->IsParallelCompressionEnabled()) {
    StopParallelCompression();
#ifndef NDEBUG
    for (const auto& br : r->pc_rep->block_rep_buf) {
      assert(br.status.ok());
    }
#endif  // !NDEBUG
  } else {
    // To make sure the properties block records an accurate index block size,
    // we finish writing all index entries first.
    if (ok() && !empty_data_block) {
      r->index_builder->AddIndexEntry(
          &r->last_key, nullptr /* no next data block */, r->pending_handle);
    }
  }

  // Write meta blocks, metaindex block and footer in the following order.
  //    1. [meta block: filter]
  //    2. [meta block: index]
  //    3. [meta block: compression dictionary]
  //    4. [meta block: range deletion tombstone]
  //    5. [meta block: properties]
  //    6. [metaindex block]
  //    7. Footer
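  //
  // Illustrative sketch (editorial addition, derived from the write order
  // listed above; data blocks were written earlier and sit at the front of
  // the file):
  //
  //   [data block 1] ... [data block N]
  //   [filter block] [index block] [compression dict block]
  //   [range deletion block] [properties block]
  //   [metaindex block]
  //   [footer]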
  BlockHandle metaindex_block_handle, index_block_handle;
  MetaIndexBuilder meta_index_builder;
  WriteFilterBlock(&meta_index_builder);
  WriteIndexBlock(&meta_index_builder, &index_block_handle);
  WriteCompressionDictBlock(&meta_index_builder);
  WriteRangeDelBlock(&meta_index_builder);
  WritePropertiesBlock(&meta_index_builder);
  if (ok()) {
    // flush the meta index block
    WriteRawBlock(meta_index_builder.Finish(), kNoCompression,
                  &metaindex_block_handle, BlockType::kMetaIndex);
  }
  if (ok()) {
    WriteFooter(metaindex_block_handle, index_block_handle);
  }
  r->state = Rep::State::kClosed;
  r->SetStatus(r->CopyIOStatus());
  Status ret_status = r->CopyStatus();
  assert(!ret_status.ok() || io_status().ok());
  return ret_status;
}

void BlockBasedTableBuilder::Abandon() {
  assert(rep_->state != Rep::State::kClosed);
  if (rep_->IsParallelCompressionEnabled()) {
    StopParallelCompression();
  }
  rep_->state = Rep::State::kClosed;
  rep_->CopyStatus().PermitUncheckedError();
  rep_->CopyIOStatus().PermitUncheckedError();
}

uint64_t BlockBasedTableBuilder::NumEntries() const {
  return rep_->props.num_entries;
}

bool BlockBasedTableBuilder::IsEmpty() const {
  return rep_->props.num_entries == 0 && rep_->props.num_range_deletions == 0;
}

uint64_t BlockBasedTableBuilder::FileSize() const { return rep_->offset; }

uint64_t BlockBasedTableBuilder::EstimatedFileSize() const {
  if (rep_->IsParallelCompressionEnabled()) {
    // Use compression ratio so far and inflight raw bytes to estimate
    // final SST size.
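    //
    // Illustrative arithmetic (editorial addition): if 10 MB of raw data has
    // compressed to 4 MB so far (ratio 0.4) and 2 MB of raw data is still in
    // flight, the estimate is roughly
    //   current file size + 0.4 * 2 MB = current file size + 0.8 MB.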
    return rep_->pc_rep->file_size_estimator.GetEstimatedFileSize();
  } else {
    return FileSize();
  }
}

bool BlockBasedTableBuilder::NeedCompact() const {
  for (const auto& collector : rep_->table_properties_collectors) {
    if (collector->NeedCompact()) {
      return true;
    }
  }
  return false;
}

Add more table properties to EventLogger
Summary:
Example output:
{"time_micros": 1431463794310521, "job": 353, "event": "table_file_creation", "file_number": 387, "file_size": 86937, "table_info": {"data_size": "81801", "index_size": "9751", "filter_size": "0", "raw_key_size": "23448", "raw_average_key_size": "24.000000", "raw_value_size": "990571", "raw_average_value_size": "1013.890481", "num_data_blocks": "245", "num_entries": "977", "filter_policy_name": "", "kDeletedKeys": "0"}}
Also fixed a bug where BuildTable() in recovery was passing Env::IOHigh argument into paranoid_checks_file parameter.
Test Plan: make check + check out the output in the log
Reviewers: sdong, rven, yhchiang
Reviewed By: yhchiang
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D38343
10 years ago
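A minimal, hedged sketch of how a caller might consume the properties assembled by `GetTableProperties()` below (the `builder` variable is an assumption for illustration; kept as a comment):

//   rocksdb::TableProperties props = builder->GetTableProperties();
//   // Built-in properties:
//   std::cerr << "entries=" << props.num_entries
//             << " data_size=" << props.data_size << "\n";
//   // Properties contributed by TablePropertiesCollector instances:
//   for (const auto& kv : props.user_collected_properties) {
//     std::cerr << kv.first << "=" << kv.second << "\n";
//   }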

TableProperties BlockBasedTableBuilder::GetTableProperties() const {
  TableProperties ret = rep_->props;
  for (const auto& collector : rep_->table_properties_collectors) {
    for (const auto& prop : collector->GetReadableProperties()) {
      ret.readable_properties.insert(prop);
    }
    collector->Finish(&ret.user_collected_properties).PermitUncheckedError();
  }
  return ret;
}

std::string BlockBasedTableBuilder::GetFileChecksum() const {
  if (rep_->file != nullptr) {
    return rep_->file->GetFileChecksum();
  } else {
    return kUnknownFileChecksum;
  }
}

const char* BlockBasedTableBuilder::GetFileChecksumFuncName() const {
  if (rep_->file != nullptr) {
    return rep_->file->GetFileChecksumFuncName();
  } else {
    return kUnknownFileChecksumFuncName;
  }
}

const std::string BlockBasedTable::kFilterBlockPrefix = "filter.";
const std::string BlockBasedTable::kFullFilterBlockPrefix = "fullfilter.";
const std::string BlockBasedTable::kPartitionedFilterBlockPrefix =
    "partitionedfilter.";

}  // namespace ROCKSDB_NAMESPACE