// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cctype>
#include <cinttypes>
#include <cstring>
#include <unordered_map>

#include "cache/lru_cache.h"
#include "cache/sharded_cache.h"
#include "options/options_helper.h"
#include "options/options_parser.h"
#include "port/port.h"
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"
#include "rocksdb/file_checksum.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/utilities/leveldb_options.h"
#include "rocksdb/utilities/object_registry.h"
#include "rocksdb/utilities/options_type.h"
#include "table/block_based/filter_policy_internal.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/random.h"
#include "util/stderr_logger.h"
#include "util/string_util.h"
#include "utilities/merge_operators/bytesxor.h"
#include "utilities/merge_operators/sortlist.h"
#include "utilities/merge_operators/string_append/stringappend.h"
#include "utilities/merge_operators/string_append/stringappend2.h"

#ifndef GFLAGS
bool FLAGS_enable_print = false;
#else
#include "util/gflags_compat.h"
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
DEFINE_bool(enable_print, false, "Print options generated to console.");
#endif  // GFLAGS

namespace ROCKSDB_NAMESPACE {

class OptionsTest : public testing::Test {};

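// A minimal TableFactory stub that is never registered with the object
// registry; tests can use it to exercise how the options code handles a
// table factory it does not recognize (its reader/builder hooks are inert).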
class UnregisteredTableFactory : public TableFactory {
 public:
  UnregisteredTableFactory() {}
  const char* Name() const override { return "Unregistered"; }
  using TableFactory::NewTableReader;
  Status NewTableReader(const ReadOptions&, const TableReaderOptions&,
                        std::unique_ptr<RandomAccessFileReader>&&, uint64_t,
                        std::unique_ptr<TableReader>*, bool) const override {
    return Status::NotSupported();
  }
  TableBuilder* NewTableBuilder(const TableBuilderOptions&,
                                WritableFileWriter*) const override {
    return nullptr;
  }
};

#ifndef ROCKSDB_LITE // GetOptionsFromMap is not supported in ROCKSDB_LITE
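// Builds string-keyed maps of column family and DB options and verifies that
// the map-based option parsers (GetColumnFamilyOptionsFromMap and its DB
// counterpart) translate each entry into the corresponding typed field.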
TEST_F(OptionsTest, GetOptionsFromMapTest) {
  std::unordered_map<std::string, std::string> cf_options_map = {
      {"write_buffer_size", "1"},
      {"max_write_buffer_number", "2"},
      {"min_write_buffer_number_to_merge", "3"},
      {"max_write_buffer_number_to_maintain", "99"},
      {"max_write_buffer_size_to_maintain", "-99999"},
      {"compression", "kSnappyCompression"},
      {"compression_per_level",
       "kNoCompression:"
       "kSnappyCompression:"
       "kZlibCompression:"
       "kBZip2Compression:"
       "kLZ4Compression:"
       "kLZ4HCCompression:"
       "kXpressCompression:"
       "kZSTD:"
       "kZSTDNotFinalCompression"},
      {"bottommost_compression", "kLZ4Compression"},
      {"bottommost_compression_opts", "5:6:7:8:10:true"},
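      // compression_opts / bottommost_compression_opts take a colon-separated
      // shorthand; judging from the assertions below, the positional fields
      // are window_bits:level:strategy:max_dict_bytes:zstd_max_train_bytes:
      // parallel_threads:enabled:max_dict_buffer_bytes:use_zstd_dict_trainer.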
      {"compression_opts", "4:5:6:7:8:2:true:100:false"},
      {"num_levels", "8"},
      {"level0_file_num_compaction_trigger", "8"},
      {"level0_slowdown_writes_trigger", "9"},
      {"level0_stop_writes_trigger", "10"},
      {"target_file_size_base", "12"},
      {"target_file_size_multiplier", "13"},
      {"max_bytes_for_level_base", "14"},
      {"level_compaction_dynamic_level_bytes", "true"},
      {"max_bytes_for_level_multiplier", "15.0"},
      {"max_bytes_for_level_multiplier_additional", "16:17:18"},
      {"max_compaction_bytes", "21"},
      {"hard_pending_compaction_bytes_limit", "211"},
      {"arena_block_size", "22"},
      {"disable_auto_compactions", "true"},
      {"compaction_style", "kCompactionStyleLevel"},
      {"compaction_pri", "kOldestSmallestSeqFirst"},
      {"verify_checksums_in_compaction", "false"},
      {"compaction_options_fifo", "23"},
      {"max_sequential_skip_in_iterations", "24"},
      {"inplace_update_support", "true"},
      {"report_bg_io_stats", "true"},
      {"compaction_measure_io_stats", "false"},
      {"purge_redundant_kvs_while_flush", "false"},
      {"inplace_update_num_locks", "25"},
      {"memtable_prefix_bloom_size_ratio", "0.26"},
      {"memtable_whole_key_filtering", "true"},
      {"memtable_huge_page_size", "28"},
      {"bloom_locality", "29"},
      {"max_successive_merges", "30"},
      {"min_partial_merge_operands", "31"},
      {"prefix_extractor", "fixed:31"},
      {"experimental_mempurge_threshold", "0.003"},
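      // Size-valued entries below such as "1K" and "1G" rely on the option
      // string parser expanding K/M/G suffixes into byte counts.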
      {"optimize_filters_for_hits", "true"},
      {"enable_blob_files", "true"},
      {"min_blob_size", "1K"},
      {"blob_file_size", "1G"},
      {"blob_compression_type", "kZSTD"},
      {"enable_blob_garbage_collection", "true"},
      {"blob_garbage_collection_age_cutoff", "0.5"},
      {"blob_garbage_collection_force_threshold", "0.75"},
      {"blob_compaction_readahead_size", "256K"},
      {"blob_file_starting_level", "1"},
      {"prepopulate_blob_cache", "kDisable"},
      {"last_level_temperature", "kWarm"},
  };

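  // DBOptions expressed as strings, mirroring the column family map above.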
  std::unordered_map<std::string, std::string> db_options_map = {
      {"create_if_missing", "false"},
      {"create_missing_column_families", "true"},
      {"error_if_exists", "false"},
      {"paranoid_checks", "true"},
      {"track_and_verify_wals_in_manifest", "true"},
      {"verify_sst_unique_id_in_manifest", "true"},
      {"max_open_files", "32"},
      {"max_total_wal_size", "33"},
      {"use_fsync", "true"},
      {"db_log_dir", "/db_log_dir"},
      {"wal_dir", "/wal_dir"},
      {"delete_obsolete_files_period_micros", "34"},
      {"max_background_compactions", "35"},
      {"max_background_flushes", "36"},
      {"max_log_file_size", "37"},
      {"log_file_time_to_roll", "38"},
      {"keep_log_file_num", "39"},
      {"recycle_log_file_num", "5"},
      {"max_manifest_file_size", "40"},
      {"table_cache_numshardbits", "41"},
      {"WAL_ttl_seconds", "43"},
      {"WAL_size_limit_MB", "44"},
      {"manifest_preallocation_size", "45"},
      {"allow_mmap_reads", "true"},
      {"allow_mmap_writes", "false"},
      {"use_direct_reads", "false"},
      {"use_direct_io_for_flush_and_compaction", "false"},
      {"is_fd_close_on_exec", "true"},
      {"skip_log_error_on_recovery", "false"},
      {"stats_dump_period_sec", "46"},
      {"stats_persist_period_sec", "57"},
      {"persist_stats_to_disk", "false"},
      {"stats_history_buffer_size", "69"},
      {"advise_random_on_open", "true"},
      {"use_adaptive_mutex", "false"},
      {"compaction_readahead_size", "100"},
      {"random_access_max_buffer_size", "3145728"},
      {"writable_file_max_buffer_size", "314159"},
      {"bytes_per_sync", "47"},
      {"wal_bytes_per_sync", "48"},
      {"strict_bytes_per_sync", "true"},
      {"preserve_deletes", "false"},
  };

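  // Parse the string maps under two ConfigOptions profiles: "exact" demands
  // an exact sanity match and rejects unknown options, while "loose" only
  // requires loose compatibility and ignores unknown options.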
  ColumnFamilyOptions base_cf_opt;
  ColumnFamilyOptions new_cf_opt;
  ConfigOptions exact, loose;
  exact.input_strings_escaped = false;
  exact.ignore_unknown_options = false;
  exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
  loose.sanity_level = ConfigOptions::kSanityLevelLooselyCompatible;
  loose.input_strings_escaped = false;
  loose.ignore_unknown_options = true;

  ASSERT_OK(GetColumnFamilyOptionsFromMap(exact, base_cf_opt, cf_options_map,
                                          &new_cf_opt));
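  // Each typed field should now reflect the string value supplied in
  // cf_options_map above.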
  ASSERT_EQ(new_cf_opt.write_buffer_size, 1U);
  ASSERT_EQ(new_cf_opt.max_write_buffer_number, 2);
  ASSERT_EQ(new_cf_opt.min_write_buffer_number_to_merge, 3);
  ASSERT_EQ(new_cf_opt.max_write_buffer_number_to_maintain, 99);
  ASSERT_EQ(new_cf_opt.max_write_buffer_size_to_maintain, -99999);
  ASSERT_EQ(new_cf_opt.compression, kSnappyCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level.size(), 9U);
  ASSERT_EQ(new_cf_opt.compression_per_level[0], kNoCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[1], kSnappyCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[2], kZlibCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[3], kBZip2Compression);
  ASSERT_EQ(new_cf_opt.compression_per_level[4], kLZ4Compression);
  ASSERT_EQ(new_cf_opt.compression_per_level[5], kLZ4HCCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[6], kXpressCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[7], kZSTD);
  ASSERT_EQ(new_cf_opt.compression_per_level[8], kZSTDNotFinalCompression);
  ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
  ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
  ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
  ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7u);
  ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
  ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads, 2u);
  ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
Summary:
An untrained dictionary is currently simply the concatenation of several samples. The ZSTD API, ZDICT_finalizeDictionary(), can improve such a dictionary's effectiveness at low cost. This PR changes how dictionary is created by calling the ZSTD ZDICT_finalizeDictionary() API instead of creating raw content dictionary (when max_dict_buffer_bytes > 0), and pass in all buffered uncompressed data blocks as samples.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9857
Test Plan:
#### db_bench test for cpu/memory of compression+decompression and space saving on synthetic data:
Set up: change the parameter [here](https://github.com/facebook/rocksdb/blob/fb9a167a55e0970b1ef6f67c1600c8d9c4c6114f/tools/db_bench_tool.cc#L1766) to 16384 to make synthetic data more compressible.
```
# linked local ZSTD with version 1.5.2
# DEBUG_LEVEL=0 ROCKSDB_NO_FBCODE=1 ROCKSDB_DISABLE_ZSTD=1 EXTRA_CXXFLAGS="-DZSTD_STATIC_LINKING_ONLY -DZSTD -I/data/users/changyubi/install/include/" EXTRA_LDFLAGS="-L/data/users/changyubi/install/lib/ -l:libzstd.a" make -j32 db_bench
dict_bytes=16384
train_bytes=1048576
echo "========== No Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== Raw Content Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench_main -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== FinalizeDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== TrainDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
# Result: TrainDictionary is much better on space saving, but FinalizeDictionary seems to use less memory.
# before compression data size: 1.2GB
dict_bytes=16384
max_dict_buffer_bytes = 1048576
space cpu/memory
No Dictionary 468M 14.93user 1.00system 0:15.92elapsed 100%CPU (0avgtext+0avgdata 23904maxresident)k
Raw Dictionary 251M 15.81user 0.80system 0:16.56elapsed 100%CPU (0avgtext+0avgdata 156808maxresident)k
FinalizeDictionary 236M 11.93user 0.64system 0:12.56elapsed 100%CPU (0avgtext+0avgdata 89548maxresident)k
TrainDictionary 84M 7.29user 0.45system 0:07.75elapsed 100%CPU (0avgtext+0avgdata 97288maxresident)k
```
#### Benchmark on 10 sample SST files for spacing saving and CPU time on compression:
FinalizeDictionary is comparable to TrainDictionary in terms of space saving, and takes less time in compression.
```
dict_bytes=16384
train_bytes=1048576
for sst_file in `ls ../temp/myrock-sst/`
do
echo "********** $sst_file **********"
echo "========== No Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD
echo "========== Raw Content Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes
echo "========== FinalizeDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes --compression_use_zstd_finalize_dict
echo "========== TrainDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes
done
010240.sst (Size/Time) 011029.sst 013184.sst 021552.sst 185054.sst 185137.sst 191666.sst 7560381.sst 7604174.sst 7635312.sst
No Dictionary 28165569 / 2614419 32899411 / 2976832 32977848 / 3055542 31966329 / 2004590 33614351 / 1755877 33429029 / 1717042 33611933 / 1776936 33634045 / 2771417 33789721 / 2205414 33592194 / 388254
Raw Content Dictionary 28019950 / 2697961 33748665 / 3572422 33896373 / 3534701 26418431 / 2259658 28560825 / 1839168 28455030 / 1846039 28494319 / 1861349 32391599 / 3095649 33772142 / 2407843 33592230 / 474523
FinalizeDictionary 27896012 / 2650029 33763886 / 3719427 33904283 / 3552793 26008225 / 2198033 28111872 / 1869530 28014374 / 1789771 28047706 / 1848300 32296254 / 3204027 33698698 / 2381468 33592344 / 517433
TrainDictionary 28046089 / 2740037 33706480 / 3679019 33885741 / 3629351 25087123 / 2204558 27194353 / 1970207 27234229 / 1896811 27166710 / 1903119 32011041 / 3322315 32730692 / 2406146 33608631 / 570593
```
#### Decompression/Read test:
With FinalizeDictionary/TrainDictionary, some data structure used for decompression are in stored in dictionary, so they are expected to be faster in terms of decompression/reads.
```
dict_bytes=16384
train_bytes=1048576
echo "No Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=0 > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=0 2>&1 | grep MB/s
echo "Raw Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes 2>&1 | grep MB/s
echo "FinalizeDict"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false 2>&1 | grep MB/s
echo "Train Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes 2>&1 | grep MB/s
No Dictionary
readrandom : 12.183 micros/op 82082 ops/sec 12.183 seconds 1000000 operations; 9.1 MB/s (1000000 of 1000000 found)
Raw Dictionary
readrandom : 12.314 micros/op 81205 ops/sec 12.314 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
FinalizeDict
readrandom : 9.787 micros/op 102180 ops/sec 9.787 seconds 1000000 operations; 11.3 MB/s (1000000 of 1000000 found)
Train Dictionary
readrandom : 9.698 micros/op 103108 ops/sec 9.699 seconds 1000000 operations; 11.4 MB/s (1000000 of 1000000 found)
```
Reviewed By: ajkr
Differential Revision: D35720026
Pulled By: cbi42
fbshipit-source-id: 24d230fdff0fd28a1bb650658798f00dfcfb2a1f
3 years ago
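For reference, here is a minimal sketch (not part of this test) of how the dictionary settings exercised above map onto a column family's `CompressionOptions`. Only the option field names come from the assertions in this test; the helper name and the specific byte values are illustrative assumptions.
```cpp
#include "rocksdb/options.h"

using namespace ROCKSDB_NAMESPACE;

// Hypothetical helper; only the compression_opts fields are taken from this test.
ColumnFamilyOptions MakeZstdDictOptions() {
  ColumnFamilyOptions cf_opts;
  cf_opts.compression = kZSTD;
  cf_opts.compression_opts.max_dict_bytes = 16 * 1024;          // cap on dictionary size
  cf_opts.compression_opts.zstd_max_train_bytes = 1024 * 1024;  // sampling budget for dictionary generation
  cf_opts.compression_opts.use_zstd_dict_trainer = false;       // use the ZDICT_finalizeDictionary() path
  return cf_opts;
}
```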
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_buffer_bytes, 100u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.use_zstd_dict_trainer, false);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression, kLZ4Compression);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 10u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
CompressionOptions().parallel_threads);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
CompressionOptions().use_zstd_dict_trainer);
|
|
|
|
ASSERT_EQ(new_cf_opt.num_levels, 8);
|
|
|
|
ASSERT_EQ(new_cf_opt.level0_file_num_compaction_trigger, 8);
|
|
|
|
ASSERT_EQ(new_cf_opt.level0_slowdown_writes_trigger, 9);
|
|
|
|
ASSERT_EQ(new_cf_opt.level0_stop_writes_trigger, 10);
|
|
|
|
ASSERT_EQ(new_cf_opt.target_file_size_base, static_cast<uint64_t>(12));
|
|
|
|
ASSERT_EQ(new_cf_opt.target_file_size_multiplier, 13);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_base, 14U);
|
|
|
|
ASSERT_EQ(new_cf_opt.level_compaction_dynamic_level_bytes, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier, 15.0);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional.size(), 3U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[0], 16);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[1], 17);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[2], 18);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_compaction_bytes, 21);
|
|
|
|
ASSERT_EQ(new_cf_opt.hard_pending_compaction_bytes_limit, 211);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 22U);
|
|
|
|
ASSERT_EQ(new_cf_opt.disable_auto_compactions, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.compaction_style, kCompactionStyleLevel);
|
|
|
|
ASSERT_EQ(new_cf_opt.compaction_pri, kOldestSmallestSeqFirst);
|
|
|
|
ASSERT_EQ(new_cf_opt.compaction_options_fifo.max_table_files_size,
|
|
|
|
static_cast<uint64_t>(23));
|
|
|
|
ASSERT_EQ(new_cf_opt.max_sequential_skip_in_iterations,
|
|
|
|
static_cast<uint64_t>(24));
|
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_support, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 25U);
|
|
|
|
ASSERT_EQ(new_cf_opt.memtable_prefix_bloom_size_ratio, 0.26);
|
|
|
|
ASSERT_EQ(new_cf_opt.memtable_whole_key_filtering, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.memtable_huge_page_size, 28U);
|
|
|
|
ASSERT_EQ(new_cf_opt.bloom_locality, 29U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_successive_merges, 30U);
|
|
|
|
ASSERT_TRUE(new_cf_opt.prefix_extractor != nullptr);
|
|
|
|
ASSERT_EQ(new_cf_opt.optimize_filters_for_hits, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.prefix_extractor->AsString(), "rocksdb.FixedPrefix.31");
|
|
|
|
ASSERT_EQ(new_cf_opt.experimental_mempurge_threshold, 0.003);
|
|
|
|
ASSERT_EQ(new_cf_opt.enable_blob_files, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.min_blob_size, 1ULL << 10);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_file_size, 1ULL << 30);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_compression_type, kZSTD);
|
|
|
|
ASSERT_EQ(new_cf_opt.enable_blob_garbage_collection, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_garbage_collection_age_cutoff, 0.5);
|
Make it possible to force the garbage collection of the oldest blob files (#8994)
Summary:
The current BlobDB garbage collection logic works by relocating the valid
blobs from the oldest blob files as they are encountered during compaction,
and cleaning up blob files once they contain nothing but garbage. However,
with sufficiently skewed workloads, it is theoretically possible to end up in a
situation when few or no compactions get scheduled for the SST files that contain
references to the oldest blob files, which can lead to increased space amp due
to the lack of GC.
In order to efficiently handle such workloads, the patch adds a new BlobDB
configuration option called `blob_garbage_collection_force_threshold`,
which signals to BlobDB to schedule targeted compactions for the SST files
that keep alive the oldest batch of blob files if the overall ratio of garbage in
the given blob files meets the threshold *and* all the given blob files are
eligible for GC based on `blob_garbage_collection_age_cutoff`. (For example,
if the new option is set to 0.9, targeted compactions will get scheduled if the
sum of garbage bytes meets or exceeds 90% of the sum of total bytes in the
oldest blob files, assuming all affected blob files are below the age-based cutoff.)
The net result of these targeted compactions is that the valid blobs in the oldest
blob files are relocated and the oldest blob files themselves cleaned up (since
*all* SST files that rely on them get compacted away).
These targeted compactions are similar to periodic compactions in the sense
that they force certain SST files that otherwise would not get picked up to undergo
compaction and also in the sense that instead of merging files from multiple levels,
they target a single file. (Note: such compactions might still include neighboring files
from the same level due to the need for a "clean cut" boundary, but they never
include any files from any other level.)
This functionality is currently only supported with the leveled compaction style
and is inactive by default (since the default value is set to 1.0, i.e. 100%).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8994
Test Plan: Ran `make check` and tested using `db_bench` and the stress/crash tests.
Reviewed By: riversand963
Differential Revision: D31489850
Pulled By: ltamasi
fbshipit-source-id: 44057d511726a0e2a03c5d9313d7511b3f0c4eab
3 years ago
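For reference, a minimal sketch (assumed helper, illustrative values) of setting the blob garbage collection knobs described above directly on a `ColumnFamilyOptions`; the field names match the assertions that follow, everything else is an assumption.
```cpp
#include "rocksdb/options.h"

using namespace ROCKSDB_NAMESPACE;

// Hypothetical helper; only the blob_* fields are taken from this test.
ColumnFamilyOptions MakeBlobGcOptions() {
  ColumnFamilyOptions cf_opts;
  cf_opts.enable_blob_files = true;
  cf_opts.enable_blob_garbage_collection = true;
  // Consider the oldest 50% of blob files for garbage collection.
  cf_opts.blob_garbage_collection_age_cutoff = 0.5;
  // Schedule targeted compactions once garbage reaches 75% of those files' bytes.
  cf_opts.blob_garbage_collection_force_threshold = 0.75;
  return cf_opts;
}
```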
|
|
|
ASSERT_EQ(new_cf_opt.blob_garbage_collection_force_threshold, 0.75);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_compaction_readahead_size, 262144);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_file_starting_level, 1);
|
|
|
|
ASSERT_EQ(new_cf_opt.prepopulate_blob_cache, PrepopulateBlobCache::kDisable);
|
|
|
|
ASSERT_EQ(new_cf_opt.last_level_temperature, Temperature::kWarm);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_temperature, Temperature::kWarm);
|
|
|
|
|
|
|
|
cf_options_map["write_buffer_size"] = "hello";
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(exact, base_cf_opt, cf_options_map,
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
cf_options_map["write_buffer_size"] = "1";
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(exact, base_cf_opt, cf_options_map,
|
|
|
|
&new_cf_opt));
|
|
|
|
|
|
|
|
cf_options_map["unknown_option"] = "1";
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(exact, base_cf_opt, cf_options_map,
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
// ignore_unknown_options=true;input_strings_escaped=false
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(loose, base_cf_opt, cf_options_map,
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyCFOptions(loose, base_cf_opt, new_cf_opt));
|
|
|
|
ASSERT_NOK(
|
|
|
|
RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
DBOptions base_db_opt;
|
|
|
|
DBOptions new_db_opt;
|
|
|
|
ASSERT_OK(
|
|
|
|
GetDBOptionsFromMap(exact, base_db_opt, db_options_map, &new_db_opt));
|
|
|
|
ASSERT_EQ(new_db_opt.create_if_missing, false);
|
|
|
|
ASSERT_EQ(new_db_opt.create_missing_column_families, true);
|
|
|
|
ASSERT_EQ(new_db_opt.error_if_exists, false);
|
|
|
|
ASSERT_EQ(new_db_opt.paranoid_checks, true);
|
|
|
|
ASSERT_EQ(new_db_opt.track_and_verify_wals_in_manifest, true);
|
|
|
|
ASSERT_EQ(new_db_opt.verify_sst_unique_id_in_manifest, true);
|
|
|
|
ASSERT_EQ(new_db_opt.max_open_files, 32);
|
|
|
|
ASSERT_EQ(new_db_opt.max_total_wal_size, static_cast<uint64_t>(33));
|
|
|
|
ASSERT_EQ(new_db_opt.use_fsync, true);
|
|
|
|
ASSERT_EQ(new_db_opt.db_log_dir, "/db_log_dir");
|
|
|
|
ASSERT_EQ(new_db_opt.wal_dir, "/wal_dir");
|
|
|
|
ASSERT_EQ(new_db_opt.delete_obsolete_files_period_micros,
|
|
|
|
static_cast<uint64_t>(34));
|
|
|
|
ASSERT_EQ(new_db_opt.max_background_compactions, 35);
|
|
|
|
ASSERT_EQ(new_db_opt.max_background_flushes, 36);
|
|
|
|
ASSERT_EQ(new_db_opt.max_log_file_size, 37U);
|
|
|
|
ASSERT_EQ(new_db_opt.log_file_time_to_roll, 38U);
|
|
|
|
ASSERT_EQ(new_db_opt.keep_log_file_num, 39U);
|
|
|
|
ASSERT_EQ(new_db_opt.recycle_log_file_num, 5U);
|
|
|
|
ASSERT_EQ(new_db_opt.max_manifest_file_size, static_cast<uint64_t>(40));
|
|
|
|
ASSERT_EQ(new_db_opt.table_cache_numshardbits, 41);
|
|
|
|
ASSERT_EQ(new_db_opt.WAL_ttl_seconds, static_cast<uint64_t>(43));
|
|
|
|
ASSERT_EQ(new_db_opt.WAL_size_limit_MB, static_cast<uint64_t>(44));
|
|
|
|
ASSERT_EQ(new_db_opt.manifest_preallocation_size, 45U);
|
|
|
|
ASSERT_EQ(new_db_opt.allow_mmap_reads, true);
|
|
|
|
ASSERT_EQ(new_db_opt.allow_mmap_writes, false);
|
|
|
|
ASSERT_EQ(new_db_opt.use_direct_reads, false);
|
|
|
|
ASSERT_EQ(new_db_opt.use_direct_io_for_flush_and_compaction, false);
|
|
|
|
ASSERT_EQ(new_db_opt.is_fd_close_on_exec, true);
|
|
|
|
ASSERT_EQ(new_db_opt.stats_dump_period_sec, 46U);
|
|
|
|
ASSERT_EQ(new_db_opt.stats_persist_period_sec, 57U);
|
|
|
|
ASSERT_EQ(new_db_opt.persist_stats_to_disk, false);
|
|
|
|
ASSERT_EQ(new_db_opt.stats_history_buffer_size, 69U);
|
|
|
|
ASSERT_EQ(new_db_opt.advise_random_on_open, true);
|
|
|
|
ASSERT_EQ(new_db_opt.use_adaptive_mutex, false);
|
|
|
|
ASSERT_EQ(new_db_opt.compaction_readahead_size, 100);
|
|
|
|
ASSERT_EQ(new_db_opt.random_access_max_buffer_size, 3145728);
|
|
|
|
ASSERT_EQ(new_db_opt.writable_file_max_buffer_size, 314159);
|
|
|
|
ASSERT_EQ(new_db_opt.bytes_per_sync, static_cast<uint64_t>(47));
|
|
|
|
ASSERT_EQ(new_db_opt.wal_bytes_per_sync, static_cast<uint64_t>(48));
|
|
|
|
ASSERT_EQ(new_db_opt.strict_bytes_per_sync, true);
|
|
|
|
|
|
|
|
db_options_map["max_open_files"] = "hello";
|
|
|
|
Status s =
|
|
|
|
GetDBOptionsFromMap(exact, base_db_opt, db_options_map, &new_db_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(loose, base_db_opt, new_db_opt));
|
|
|
|
|
|
|
|
// unknown options should fail parsing without ignore_unknown_options = true
|
|
|
|
db_options_map["unknown_db_option"] = "1";
|
|
|
|
s = GetDBOptionsFromMap(exact, base_db_opt, db_options_map, &new_db_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
|
|
|
|
ASSERT_OK(
|
|
|
|
GetDBOptionsFromMap(loose, base_db_opt, db_options_map, &new_db_opt));
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(loose, base_db_opt, new_db_opt));
|
|
|
|
ASSERT_NOK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
}
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE // GetColumnFamilyOptionsFromString is not supported in
|
|
|
|
// ROCKSDB_LITE
|
|
|
|
TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) {
|
|
|
|
ColumnFamilyOptions base_cf_opt;
|
|
|
|
ColumnFamilyOptions new_cf_opt;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
|
|
|
|
base_cf_opt.table_factory.reset();
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt, "",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "write_buffer_size=5", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 5U);
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory == nullptr);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "write_buffer_size=6;", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 6U);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, " write_buffer_size = 7 ", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 7U);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, " write_buffer_size = 8 ; ", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 8U);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=9;max_write_buffer_number=10", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 9U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 10);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=11; max_write_buffer_number = 12 ;", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 11U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 12);
|
|
|
|
// Wrong name "max_write_buffer_number_"
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number_=14;", &new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
// Comparator from object registry
|
|
|
|
std::string kCompName = "reverse_comp";
|
|
|
|
ObjectLibrary::Default()->AddFactory<const Comparator>(
|
|
|
|
kCompName,
|
|
|
|
[](const std::string& /*name*/,
|
|
|
|
std::unique_ptr<const Comparator>* /*guard*/,
|
|
|
|
std::string* /* errmsg */) { return ReverseBytewiseComparator(); });
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
|
|
|
|
"comparator=" + kCompName + ";",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.comparator, ReverseBytewiseComparator());
|
|
|
|
|
|
|
|
// MergeOperator from object registry
|
|
|
|
std::unique_ptr<BytesXOROperator> bxo(new BytesXOROperator());
|
|
|
|
std::string kMoName = bxo->Name();
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
|
|
|
|
"merge_operator=" + kMoName + ";",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_EQ(kMoName, std::string(new_cf_opt.merge_operator->Name()));
|
|
|
|
|
|
|
|
// Wrong key/value pair
|
|
|
|
Status s = GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number;", &new_cf_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
// Error Parsing value
|
|
|
|
s = GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number=;", &new_cf_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
// Missing option name
|
|
|
|
s = GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "write_buffer_size=13; =100;", &new_cf_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
const uint64_t kilo = 1024UL;
|
|
|
|
const uint64_t mega = 1024 * kilo;
|
|
|
|
const uint64_t giga = 1024 * mega;
|
|
|
|
const uint64_t tera = 1024 * giga;
|
|
|
|
|
|
|
|
// Units (k)
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "max_write_buffer_number=15K", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 15 * kilo);
|
|
|
|
// Units (m)
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"max_write_buffer_number=16m;inplace_update_num_locks=17M", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16 * mega);
|
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17u * mega);
|
|
|
|
// Units (g)
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=18g;prefix_extractor=capped:8;"
|
|
|
|
"arena_block_size=19G",
|
|
|
|
&new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 18 * giga);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 19 * giga);
|
|
|
|
ASSERT_TRUE(new_cf_opt.prefix_extractor.get() != nullptr);
|
|
|
|
ASSERT_EQ(new_cf_opt.prefix_extractor->AsString(), "rocksdb.CappedPrefix.8");
|
|
|
|
|
|
|
|
// Units (t)
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "write_buffer_size=20t;arena_block_size=21T",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 20 * tera);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 21 * tera);
|
|
|
|
|
|
|
|
// Nested block based table options
|
|
|
|
// Empty
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={};arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Non-empty
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_cache=1M;block_size=4;};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Last one
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_cache=1M;block_size=4;}",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Mismatch curly braces
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={{{block_size=4;};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
// Unexpected chars after closing curly brace
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_size=4;}};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_size=4;}xdfa;"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_size=4;}xdfa",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
// Invalid block based table option
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={xx_block_size=4;}",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
|
|
|
|
"optimize_filters_for_hits=true",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
|
|
|
|
"optimize_filters_for_hits=false",
|
|
|
|
&new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
|
|
|
|
"optimize_filters_for_hits=junk",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
|
|
|
|
new_cf_opt));
|
|
|
|
|
|
|
|
// Nested plain table options
|
|
|
|
// Empty
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"plain_table_factory={};arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");
|
|
|
|
// Non-empty
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"plain_table_factory={user_key_len=66;bloom_bits_per_key=20;};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");
|
|
|
|
|
|
|
|
// memtable factory
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"memtable=skip_list:10;arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.memtable_factory != nullptr);
|
|
|
|
ASSERT_EQ(std::string(new_cf_opt.memtable_factory->Name()), "SkipListFactory");
|
|
|
|
ASSERT_TRUE(new_cf_opt.memtable_factory->IsInstanceOf("SkipListFactory"));
|
|
|
|
|
|
|
|
// blob cache
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"blob_cache={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;high_pri_pool_ratio=0.5;};",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_NE(new_cf_opt.blob_cache, nullptr);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_cache->GetCapacity(), 1024UL * 1024UL);
|
|
|
|
ASSERT_EQ(static_cast<ShardedCacheBase*>(new_cf_opt.blob_cache.get())
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_cache->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(static_cast<LRUCache*>(new_cf_opt.blob_cache.get())
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, CompressionOptionsFromString) {
|
|
|
|
ColumnFamilyOptions base_cf_opt;
|
|
|
|
ColumnFamilyOptions new_cf_opt;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
std::string opts_str;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
CompressionOptions dflt;
|
|
|
|
// Test with some optional values removed....
|
|
|
|
ASSERT_OK(
|
|
|
|
GetColumnFamilyOptionsFromString(config_options, ColumnFamilyOptions(),
|
|
|
|
"compression_opts=3:4:5; "
|
|
|
|
"bottommost_compression_opts=4:5:6:7",
|
|
|
|
&base_cf_opt));
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.window_bits, 3);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.level, 4);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.strategy, 5);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.max_dict_bytes, dflt.max_dict_bytes);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.zstd_max_train_bytes,
|
|
|
|
dflt.zstd_max_train_bytes);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.parallel_threads,
|
|
|
|
dflt.parallel_threads);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.enabled, dflt.enabled);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.use_zstd_dict_trainer,
|
|
|
|
dflt.use_zstd_dict_trainer);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.window_bits, 4);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.level, 5);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.strategy, 6);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.max_dict_bytes, 7u);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.zstd_max_train_bytes,
|
|
|
|
dflt.zstd_max_train_bytes);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
dflt.parallel_threads);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.enabled, dflt.enabled);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
dflt.use_zstd_dict_trainer);
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(),
|
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
Summary:
An untrained dictionary is currently simply the concatenation of several samples. The ZSTD API, ZDICT_finalizeDictionary(), can improve such a dictionary's effectiveness at low cost. This PR changes how dictionary is created by calling the ZSTD ZDICT_finalizeDictionary() API instead of creating raw content dictionary (when max_dict_buffer_bytes > 0), and pass in all buffered uncompressed data blocks as samples.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9857
Test Plan:
#### db_bench test for cpu/memory of compression+decompression and space saving on synthetic data:
Set up: change the parameter [here](https://github.com/facebook/rocksdb/blob/fb9a167a55e0970b1ef6f67c1600c8d9c4c6114f/tools/db_bench_tool.cc#L1766) to 16384 to make synthetic data more compressible.
```
# linked local ZSTD with version 1.5.2
# DEBUG_LEVEL=0 ROCKSDB_NO_FBCODE=1 ROCKSDB_DISABLE_ZSTD=1 EXTRA_CXXFLAGS="-DZSTD_STATIC_LINKING_ONLY -DZSTD -I/data/users/changyubi/install/include/" EXTRA_LDFLAGS="-L/data/users/changyubi/install/lib/ -l:libzstd.a" make -j32 db_bench
dict_bytes=16384
train_bytes=1048576
echo "========== No Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== Raw Content Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench_main -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== FinalizeDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== TrainDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
# Result: TrainDictionary is much better at space saving, but FinalizeDictionary seems to use less memory.
# before compression data size: 1.2GB
dict_bytes=16384
max_dict_buffer_bytes = 1048576
space cpu/memory
No Dictionary 468M 14.93user 1.00system 0:15.92elapsed 100%CPU (0avgtext+0avgdata 23904maxresident)k
Raw Dictionary 251M 15.81user 0.80system 0:16.56elapsed 100%CPU (0avgtext+0avgdata 156808maxresident)k
FinalizeDictionary 236M 11.93user 0.64system 0:12.56elapsed 100%CPU (0avgtext+0avgdata 89548maxresident)k
TrainDictionary 84M 7.29user 0.45system 0:07.75elapsed 100%CPU (0avgtext+0avgdata 97288maxresident)k
```
#### Benchmark on 10 sample SST files for space saving and CPU time on compression:
FinalizeDictionary is comparable to TrainDictionary in terms of space saving, and takes less time to compress.
```
dict_bytes=16384
train_bytes=1048576
for sst_file in `ls ../temp/myrock-sst/`
do
echo "********** $sst_file **********"
echo "========== No Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD
echo "========== Raw Content Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes
echo "========== FinalizeDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes --compression_use_zstd_finalize_dict
echo "========== TrainDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes
done
010240.sst (Size/Time) 011029.sst 013184.sst 021552.sst 185054.sst 185137.sst 191666.sst 7560381.sst 7604174.sst 7635312.sst
No Dictionary 28165569 / 2614419 32899411 / 2976832 32977848 / 3055542 31966329 / 2004590 33614351 / 1755877 33429029 / 1717042 33611933 / 1776936 33634045 / 2771417 33789721 / 2205414 33592194 / 388254
Raw Content Dictionary 28019950 / 2697961 33748665 / 3572422 33896373 / 3534701 26418431 / 2259658 28560825 / 1839168 28455030 / 1846039 28494319 / 1861349 32391599 / 3095649 33772142 / 2407843 33592230 / 474523
FinalizeDictionary 27896012 / 2650029 33763886 / 3719427 33904283 / 3552793 26008225 / 2198033 28111872 / 1869530 28014374 / 1789771 28047706 / 1848300 32296254 / 3204027 33698698 / 2381468 33592344 / 517433
TrainDictionary 28046089 / 2740037 33706480 / 3679019 33885741 / 3629351 25087123 / 2204558 27194353 / 1970207 27234229 / 1896811 27166710 / 1903119 32011041 / 3322315 32730692 / 2406146 33608631 / 570593
```
#### Decompression/Read test:
With FinalizeDictionary/TrainDictionary, some of the data structures used for decompression are stored in the dictionary, so decompression/reads are expected to be faster.
```
dict_bytes=16384
train_bytes=1048576
echo "No Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=0 > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=0 2>&1 | grep MB/s
echo "Raw Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes 2>&1 | grep MB/s
echo "FinalizeDict"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false 2>&1 | grep MB/s
echo "Train Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes 2>&1 | grep MB/s
No Dictionary
readrandom : 12.183 micros/op 82082 ops/sec 12.183 seconds 1000000 operations; 9.1 MB/s (1000000 of 1000000 found)
Raw Dictionary
readrandom : 12.314 micros/op 81205 ops/sec 12.314 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
FinalizeDict
readrandom : 9.787 micros/op 102180 ops/sec 9.787 seconds 1000000 operations; 11.3 MB/s (1000000 of 1000000 found)
Train Dictionary
readrandom : 9.698 micros/op 103108 ops/sec 9.699 seconds 1000000 operations; 11.4 MB/s (1000000 of 1000000 found)
```
Reviewed By: ajkr
Differential Revision: D35720026
Pulled By: cbi42
fbshipit-source-id: 24d230fdff0fd28a1bb650658798f00dfcfb2a1f
3 years ago
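As a hedged aside (not part of the test below), the ColumnFamilyOptions configuration corresponding to the "FinalizeDictionary" db_bench runs in the annotation above would look roughly as follows; the function name is hypothetical, while the option fields are the ones exercised by the assertions in this file.
```
// Sketch only: mirrors -compression_max_dict_bytes=16384,
// -compression_zstd_max_train_bytes=1048576 and
// -compression_use_zstd_dict_trainer=false from the db_bench runs above.
#include "rocksdb/options.h"

ROCKSDB_NAMESPACE::ColumnFamilyOptions MakeFinalizeDictCFOptions() {
  ROCKSDB_NAMESPACE::ColumnFamilyOptions cf_opts;
  cf_opts.compression = ROCKSDB_NAMESPACE::kZSTD;
  cf_opts.compression_opts.max_dict_bytes = 16384;
  cf_opts.compression_opts.zstd_max_train_bytes = 1048576;
  // With a nonzero zstd_max_train_bytes, disabling the trainer selects the
  // ZDICT_finalizeDictionary() path introduced by this change.
  cf_opts.compression_opts.use_zstd_dict_trainer = false;
  return cf_opts;
}
```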
|
|
|
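// The colon-separated compression_opts form maps, in order, to:
// window_bits:level:strategy:max_dict_bytes:zstd_max_train_bytes:
// parallel_threads:enabled:max_dict_buffer_bytes:use_zstd_dict_trainer,
// as the assertions below verify.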
"compression_opts=4:5:6:7:8:9:true:10:false; "
|
|
|
|
"bottommost_compression_opts=5:6:7:8:9:false",
|
|
|
|
&base_cf_opt));
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.window_bits, 4);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.level, 5);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.strategy, 6);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.max_dict_bytes, 7u);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.parallel_threads, 9u);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.enabled, true);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.max_dict_buffer_bytes, 10u);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.use_zstd_dict_trainer, false);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.strategy, 7);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 9u);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
dflt.parallel_threads);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.enabled, false);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
dflt.use_zstd_dict_trainer);
|
|
|
|
|
|
|
|
ASSERT_OK(
|
|
|
|
GetStringFromColumnFamilyOptions(config_options, base_cf_opt, &opts_str));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(), opts_str, &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads, 9u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_buffer_bytes, 10u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.use_zstd_dict_trainer, false);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 9u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
dflt.parallel_threads);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, false);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
dflt.use_zstd_dict_trainer);
|
|
|
|
|
|
|
|
// Test as struct values
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(),
|
|
|
|
"compression_opts={window_bits=5; level=6; strategy=7; max_dict_bytes=8;"
|
|
|
|
"zstd_max_train_bytes=9;parallel_threads=10;enabled=true;use_zstd_dict_"
|
|
|
|
"trainer=false}; "
|
|
|
|
"bottommost_compression_opts={window_bits=4; level=5; strategy=6;"
|
|
|
|
" max_dict_bytes=7;zstd_max_train_bytes=8;parallel_threads=9;"
|
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
Summary:
An untrained dictionary is currently simply the concatenation of several samples. The ZSTD API, ZDICT_finalizeDictionary(), can improve such a dictionary's effectiveness at low cost. This PR changes how dictionary is created by calling the ZSTD ZDICT_finalizeDictionary() API instead of creating raw content dictionary (when max_dict_buffer_bytes > 0), and pass in all buffered uncompressed data blocks as samples.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9857
Test Plan:
#### db_bench test for cpu/memory of compression+decompression and space saving on synthetic data:
Set up: change the parameter [here](https://github.com/facebook/rocksdb/blob/fb9a167a55e0970b1ef6f67c1600c8d9c4c6114f/tools/db_bench_tool.cc#L1766) to 16384 to make synthetic data more compressible.
```
# linked local ZSTD with version 1.5.2
# DEBUG_LEVEL=0 ROCKSDB_NO_FBCODE=1 ROCKSDB_DISABLE_ZSTD=1 EXTRA_CXXFLAGS="-DZSTD_STATIC_LINKING_ONLY -DZSTD -I/data/users/changyubi/install/include/" EXTRA_LDFLAGS="-L/data/users/changyubi/install/lib/ -l:libzstd.a" make -j32 db_bench
dict_bytes=16384
train_bytes=1048576
echo "========== No Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== Raw Content Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench_main -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== FinalizeDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== TrainDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
# Result: TrainDictionary is much better on space saving, but FinalizeDictionary seems to use less memory.
# before compression data size: 1.2GB
dict_bytes=16384
max_dict_buffer_bytes = 1048576
space cpu/memory
No Dictionary 468M 14.93user 1.00system 0:15.92elapsed 100%CPU (0avgtext+0avgdata 23904maxresident)k
Raw Dictionary 251M 15.81user 0.80system 0:16.56elapsed 100%CPU (0avgtext+0avgdata 156808maxresident)k
FinalizeDictionary 236M 11.93user 0.64system 0:12.56elapsed 100%CPU (0avgtext+0avgdata 89548maxresident)k
TrainDictionary 84M 7.29user 0.45system 0:07.75elapsed 100%CPU (0avgtext+0avgdata 97288maxresident)k
```
#### Benchmark on 10 sample SST files for space saving and CPU time on compression:
FinalizeDictionary is comparable to TrainDictionary in terms of space saving, and takes less time to compress.
```
dict_bytes=16384
train_bytes=1048576
for sst_file in `ls ../temp/myrock-sst/`
do
echo "********** $sst_file **********"
echo "========== No Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD
echo "========== Raw Content Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes
echo "========== FinalizeDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes --compression_use_zstd_finalize_dict
echo "========== TrainDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes
done
010240.sst (Size/Time) 011029.sst 013184.sst 021552.sst 185054.sst 185137.sst 191666.sst 7560381.sst 7604174.sst 7635312.sst
No Dictionary 28165569 / 2614419 32899411 / 2976832 32977848 / 3055542 31966329 / 2004590 33614351 / 1755877 33429029 / 1717042 33611933 / 1776936 33634045 / 2771417 33789721 / 2205414 33592194 / 388254
Raw Content Dictionary 28019950 / 2697961 33748665 / 3572422 33896373 / 3534701 26418431 / 2259658 28560825 / 1839168 28455030 / 1846039 28494319 / 1861349 32391599 / 3095649 33772142 / 2407843 33592230 / 474523
FinalizeDictionary 27896012 / 2650029 33763886 / 3719427 33904283 / 3552793 26008225 / 2198033 28111872 / 1869530 28014374 / 1789771 28047706 / 1848300 32296254 / 3204027 33698698 / 2381468 33592344 / 517433
TrainDictionary 28046089 / 2740037 33706480 / 3679019 33885741 / 3629351 25087123 / 2204558 27194353 / 1970207 27234229 / 1896811 27166710 / 1903119 32011041 / 3322315 32730692 / 2406146 33608631 / 570593
```
#### Decompression/Read test:
With FinalizeDictionary/TrainDictionary, some data structures used for decompression are stored in the dictionary, so decompression/reads are expected to be faster.
```
dict_bytes=16384
train_bytes=1048576
echo "No Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=0 > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=0 2>&1 | grep MB/s
echo "Raw Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes 2>&1 | grep MB/s
echo "FinalizeDict"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false 2>&1 | grep MB/s
echo "Train Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes 2>&1 | grep MB/s
No Dictionary
readrandom : 12.183 micros/op 82082 ops/sec 12.183 seconds 1000000 operations; 9.1 MB/s (1000000 of 1000000 found)
Raw Dictionary
readrandom : 12.314 micros/op 81205 ops/sec 12.314 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
FinalizeDict
readrandom : 9.787 micros/op 102180 ops/sec 9.787 seconds 1000000 operations; 11.3 MB/s (1000000 of 1000000 found)
Train Dictionary
readrandom : 9.698 micros/op 103108 ops/sec 9.699 seconds 1000000 operations; 11.4 MB/s (1000000 of 1000000 found)
```
Reviewed By: ajkr
Differential Revision: D35720026
Pulled By: cbi42
fbshipit-source-id: 24d230fdff0fd28a1bb650658798f00dfcfb2a1f
3 years ago
|
|
|
"enabled=false;use_zstd_dict_trainer=true}; ",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.strategy, 7);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 9u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads, 10u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.use_zstd_dict_trainer, false);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 4);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 7u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads, 9u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, false);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer, true);
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"compression_opts={window_bits=4; strategy=5;};"
|
|
|
|
"bottommost_compression_opts={level=6; strategy=7;}",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.strategy, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
|
|
|
|
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.level,
|
|
|
|
base_cf_opt.compression_opts.level);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes,
|
|
|
|
base_cf_opt.compression_opts.max_dict_bytes);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes,
|
|
|
|
base_cf_opt.compression_opts.zstd_max_train_bytes);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads,
|
|
|
|
base_cf_opt.compression_opts.parallel_threads);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.enabled,
|
|
|
|
base_cf_opt.compression_opts.enabled);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits,
|
|
|
|
base_cf_opt.bottommost_compression_opts.window_bits);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes,
|
|
|
|
base_cf_opt.bottommost_compression_opts.max_dict_bytes);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes,
|
|
|
|
base_cf_opt.bottommost_compression_opts.zstd_max_train_bytes);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
base_cf_opt.bottommost_compression_opts.parallel_threads);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled,
|
|
|
|
base_cf_opt.bottommost_compression_opts.enabled);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
base_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer);
|
|
|
|
|
|
|
|
// Test a few individual struct values
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"compression_opts.enabled=false; "
|
|
|
|
"bottommost_compression_opts.enabled=true; ",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.enabled, false);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, true);
|
|
|
|
|
|
|
|
// Now test some illegal values
|
|
|
|
ConfigOptions ignore;
|
|
|
|
ignore.ignore_unknown_options = true;
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(),
|
|
|
|
"compression_opts=5:6:7:8:9:x:false", &base_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
ignore, ColumnFamilyOptions(), "compression_opts=5:6:7:8:9:x:false",
|
|
|
|
&base_cf_opt));
|
Limit buffering for collecting samples for compression dictionary (#7970)
Summary:
For dictionary compression, we need to collect some representative samples of the data to be compressed, which we use to either generate or train (when `CompressionOptions::zstd_max_train_bytes > 0`) a dictionary. Previously, the strategy was to buffer all the data blocks during flush, and up to the target file size during compaction. That strategy allowed us to randomly pick samples from as wide a range as possible that'd be guaranteed to land in a single output file.
However, some users try to make huge files in memory-constrained environments, where this strategy can cause OOM. This PR introduces an option, `CompressionOptions::max_dict_buffer_bytes`, that limits how many bytes of data blocks are buffered before we switch to unbuffered mode (which means creating the per-SST dictionary, writing out the buffered data, and compressing/writing new blocks as soon as they are built). The limit is not strict, since we currently buffer more than just data blocks -- keys are buffered as well -- but it is a step toward giving users predictable memory usage.
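As a hedged illustration (the values are placeholders, not taken from this PR), the new limit sits next to the existing dictionary knobs:
```
#include "rocksdb/options.h"

// Illustrative sketch: cap the bytes of data blocks buffered for dictionary
// sampling before the table builder switches to unbuffered mode.
rocksdb::ColumnFamilyOptions MakeBoundedDictBufferOptions() {
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.compression = rocksdb::kZSTD;
  cf_opts.compression_opts.max_dict_bytes = 16 * 1024;
  cf_opts.compression_opts.max_dict_buffer_bytes = 4 * 1024 * 1024;
  return cf_opts;
}
```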
Related changes include:
- Changed sampling for dictionary compression to select unique data blocks when there is limited availability of data blocks
- Made use of `BlockBuilder::SwapAndReset()` to save an allocation+memcpy when buffering data blocks for building a dictionary
- Changed `ParseBoolean()` to accept an input containing characters after the boolean. This is necessary since, with this PR, a value for `CompressionOptions::enabled` is no longer necessarily the final component in the `CompressionOptions` string (see the sketch below).
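A minimal sketch of that case, reusing an option string this test exercises below (the trailing component after the boolean is assumed here to be `max_dict_buffer_bytes`):
```
#include <cassert>

#include "rocksdb/convenience.h"
#include "rocksdb/options.h"

// Illustrative sketch: the positional compression_opts form may now carry a
// component after the "enabled" boolean (the final "8" below), which is why
// ParseBoolean() must tolerate trailing characters.
void ParsePositionalCompressionOpts() {
  rocksdb::ColumnFamilyOptions base, parsed;
  rocksdb::Status s = rocksdb::GetColumnFamilyOptionsFromString(
      base, "compression_opts=1:2:3:4:5:6:true:8", &parsed);
  assert(s.ok());
}
```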
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7970
Test Plan:
- updated `CompressionOptions` unit tests to verify limit is respected (to the extent expected in the current implementation) in various scenarios of flush/compaction to bottommost/non-bottommost level
- looked at jemalloc heap profiles right before and after switching to unbuffered mode during flush/compaction. Verified memory usage in buffering is proportional to the limit set.
Reviewed By: pdillinger
Differential Revision: D26467994
Pulled By: ajkr
fbshipit-source-id: 3da4ef9fba59974e4ef40e40c01611002c861465
4 years ago
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(),
|
|
|
|
"compression_opts=1:2:3:4:5:6:true:8", &base_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
ignore, ColumnFamilyOptions(), "compression_opts=1:2:3:4:5:6:true:8",
|
|
|
|
&base_cf_opt));
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(),
|
|
|
|
"compression_opts=1:2:3:4:5:6:true:8:9", &base_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
ignore, ColumnFamilyOptions(), "compression_opts=1:2:3:4:5:6:true:8:9",
|
|
|
|
&base_cf_opt));
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(), "compression_opts={unknown=bad;}",
|
|
|
|
&base_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(ignore, ColumnFamilyOptions(),
|
|
|
|
"compression_opts={unknown=bad;}",
|
|
|
|
&base_cf_opt));
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(), "compression_opts.unknown=bad",
|
|
|
|
&base_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(ignore, ColumnFamilyOptions(),
|
|
|
|
"compression_opts.unknown=bad",
|
|
|
|
&base_cf_opt));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, OldInterfaceTest) {
|
|
|
|
ColumnFamilyOptions base_cf_opt;
|
|
|
|
ColumnFamilyOptions new_cf_opt;
|
|
|
|
ConfigOptions exact;
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
base_cf_opt,
|
|
|
|
"write_buffer_size=18;prefix_extractor=capped:8;"
|
|
|
|
"arena_block_size=19",
|
|
|
|
&new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 18);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 19);
|
|
|
|
ASSERT_TRUE(new_cf_opt.prefix_extractor.get() != nullptr);
|
|
|
|
|
|
|
|
// And with a bad option
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={xx_block_size=4;}",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
std::unordered_map<std::string, std::string> cf_options_map = {
|
|
|
|
{"write_buffer_size", "1"},
|
|
|
|
{"max_write_buffer_number", "2"},
|
|
|
|
{"min_write_buffer_number_to_merge", "3"},
|
|
|
|
};
|
|
|
|
ASSERT_OK(
|
|
|
|
GetColumnFamilyOptionsFromMap(base_cf_opt, cf_options_map, &new_cf_opt));
|
|
|
|
cf_options_map["unknown_option"] = "1";
|
|
|
|
ASSERT_NOK(
|
|
|
|
GetColumnFamilyOptionsFromMap(base_cf_opt, cf_options_map, &new_cf_opt));
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(base_cf_opt, cf_options_map,
|
|
|
|
&new_cf_opt, true, true));
|
|
|
|
|
|
|
|
DBOptions base_db_opt;
|
|
|
|
DBOptions new_db_opt;
|
|
|
|
std::unordered_map<std::string, std::string> db_options_map = {
|
|
|
|
{"create_if_missing", "false"},
|
|
|
|
{"create_missing_column_families", "true"},
|
|
|
|
{"error_if_exists", "false"},
|
|
|
|
{"paranoid_checks", "true"},
|
|
|
|
{"track_and_verify_wals_in_manifest", "true"},
|
|
|
|
{"verify_sst_unique_id_in_manifest", "true"},
|
|
|
|
{"max_open_files", "32"},
|
|
|
|
};
|
|
|
|
ASSERT_OK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt));
|
|
|
|
ASSERT_EQ(new_db_opt.create_if_missing, false);
|
|
|
|
ASSERT_EQ(new_db_opt.create_missing_column_families, true);
|
|
|
|
ASSERT_EQ(new_db_opt.error_if_exists, false);
|
|
|
|
ASSERT_EQ(new_db_opt.paranoid_checks, true);
|
|
|
|
ASSERT_EQ(new_db_opt.track_and_verify_wals_in_manifest, true);
|
|
|
|
ASSERT_EQ(new_db_opt.verify_sst_unique_id_in_manifest, true);
|
|
|
|
ASSERT_EQ(new_db_opt.max_open_files, 32);
|
|
|
|
db_options_map["unknown_option"] = "1";
|
|
|
|
Status s = GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
ASSERT_OK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt, true,
|
|
|
|
true));
|
|
|
|
ASSERT_OK(GetDBOptionsFromString(
|
|
|
|
base_db_opt,
|
|
|
|
"create_if_missing=false;error_if_exists=false;max_open_files=42;",
|
|
|
|
&new_db_opt));
|
|
|
|
ASSERT_EQ(new_db_opt.create_if_missing, false);
|
|
|
|
ASSERT_EQ(new_db_opt.error_if_exists, false);
|
|
|
|
ASSERT_EQ(new_db_opt.max_open_files, 42);
|
|
|
|
s = GetDBOptionsFromString(
|
|
|
|
base_db_opt,
|
|
|
|
"create_if_missing=false;error_if_exists=false;max_open_files=42;"
|
|
|
|
"unknown_option=1;",
|
|
|
|
&new_db_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE // GetBlockBasedTableOptionsFromString is not supported
|
|
|
|
TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
|
|
|
|
BlockBasedTableOptions table_opt;
|
|
|
|
BlockBasedTableOptions new_opt;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
config_options.ignore_unsupported_options = false;
|
|
|
|
|
|
|
|
// make sure default values are overwritten by something else
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;index_type=kHashSearch;"
|
|
|
|
"checksum=kxxHash;"
|
|
|
|
"block_cache=1M;block_cache_compressed=1k;block_size=1024;"
|
|
|
|
"block_size_deviation=8;block_restart_interval=4;"
|
|
|
|
"format_version=5;whole_key_filtering=1;"
|
Detect (new) Bloom/Ribbon Filter construction corruption (#9342)
Summary:
Note: rebase on and merge after https://github.com/facebook/rocksdb/pull/9349, https://github.com/facebook/rocksdb/pull/9345, (optional) https://github.com/facebook/rocksdb/pull/9393
**Context:**
(Quoted from pdillinger) Layers of information during new Bloom/Ribbon Filter construction in building block-based tables include the following:
a) set of keys to add to filter
b) set of hashes to add to filter (64-bit hash applied to each key)
c) set of Bloom indices to set in filter, with duplicates
d) set of Bloom indices to set in filter, deduplicated
e) final filter and its checksum
This PR aims to detect corruption (e.g., unexpected hardware/software corruption of data structures residing in memory for a long time) from b) to e) and leaves a) as future work at the application level.
- b)'s corruption is detected by verifying the xor checksum of the hash entries, calculated as the entries accumulate before being added to the filter (i.e., `XXPH3FilterBitsBuilder::MaybeVerifyHashEntriesChecksum()`).
- c) - e)'s corruption is detected by verifying that the hash entries indeed exist in the constructed filter, by re-querying these hash entries in the filter (i.e., `FilterBitsBuilder::MaybePostVerify()`) after computing the block checksum (except for PartitionFilter, where this is done right after each `FilterBitsBuilder::Finish` for impl simplicity - see code comment for more). For this stage of detection, we assume hash entries are not corrupted after checking on b), since the time interval from b) to c) is relatively short IMO.
Option to enable this feature of detection is `BlockBasedTableOptions::detect_filter_construct_corruption` which is false by default.
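For illustration, a minimal sketch of turning the option on (not part of this PR's diff):
```
#include "rocksdb/options.h"
#include "rocksdb/table.h"

// Illustrative sketch: opt in to filter construction corruption detection
// (it is off by default).
rocksdb::ColumnFamilyOptions MakeVerifyingFilterCfOptions() {
  rocksdb::BlockBasedTableOptions bbto;
  bbto.detect_filter_construct_corruption = true;
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbto));
  return cf_opts;
}
```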
**Summary:**
- Implemented new functions `XXPH3FilterBitsBuilder::MaybeVerifyHashEntriesChecksum()` and `FilterBitsBuilder::MaybePostVerify()`
- Ensured hash entries, final filter and banding and their [cache reservation ](https://github.com/facebook/rocksdb/issues/9073) are released properly despite corruption
- See [Filter.construction.artifacts.release.point.pdf ](https://github.com/facebook/rocksdb/files/7923487/Design.Filter.construction.artifacts.release.point.pdf) for high-level design
- Bundled and refactored the hash entries' related artifacts in XXPH3FilterBitsBuilder into `HashEntriesInfo` for better control over the lifetime of these artifacts during `SwapEntires`, `ResetEntries`
- Ensured RocksDB block-based table builder calls `FilterBitsBuilder::MaybePostVerify()` after constructing the filter by `FilterBitsBuilder::Finish()`
- When encountering such filter construction corruption, stop writing the filter content to files and mark such a block-based table building non-ok by storing the corruption status in the builder.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9342
Test Plan:
- Added new unit test `DBFilterConstructionCorruptionTestWithParam.DetectCorruption`
- Included this new feature in `DBFilterConstructionReserveMemoryTestWithParam.ReserveMemory` as this feature heavily touches ReserveMemory's impl
- For the fallback case, I ran `./filter_bench -impl=3 -detect_filter_construct_corruption=true -reserve_table_builder_memory=true -strict_capacity_limit=true -quick -runs 10 | grep 'Build avg'` to make sure nothing breaks.
- Added to `filter_bench`: increased filter construction time by **30%**, mostly by `MaybePostVerify()`
- FastLocalBloom
- Before change: `./filter_bench -impl=2 -quick -runs 10 | grep 'Build avg'`: **28.86643s**
- After change:
- `./filter_bench -impl=2 -detect_filter_construct_corruption=false -quick -runs 10 | grep 'Build avg'` (expect a tiny increase due to MaybePostVerify is always called regardless): **27.6644s (-4% perf improvement might be due to now we don't drop bloom hash entry in `AddAllEntries` along iteration but in bulk later, same with the bypassing-MaybePostVerify case below)**
- `./filter_bench -impl=2 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'` (expect acceptable increase): **34.41159s (+20%)**
- `./filter_bench -impl=2 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'` (by-passing MaybePostVerify, expect minor increase): **27.13431s (-6%)**
- Standard128Ribbon
- Before change: `./filter_bench -impl=3 -quick -runs 10 | grep 'Build avg'`: **122.5384s**
- After change:
- `./filter_bench -impl=3 -detect_filter_construct_corruption=false -quick -runs 10 | grep 'Build avg'` (expect a tiny increase due to MaybePostVerify is always called regardless - verified by removing MaybePostVerify under this case and found only +-1ns difference): **124.3588s (+2%)**
- `./filter_bench -impl=3 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'`(expect acceptable increase): **159.4946s (+30%)**
- `./filter_bench -impl=3 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'`(by-passing MaybePostVerify, expect minor increase) : **125.258s (+2%)**
- Added to `db_stress`: `make crash_test`, `./db_stress --detect_filter_construct_corruption=true`
- Manually smoke-tested: manually corrupted the filter construction in some db level tests with basic PUT and background flush. As expected, the error did get returned to users in subsequent PUT and Flush status.
Reviewed By: pdillinger
Differential Revision: D33746928
Pulled By: hx235
fbshipit-source-id: cb056426be5a7debc1cd16f23bc250f36a08ca57
3 years ago
|
|
|
"filter_policy=bloomfilter:4.567:false;detect_filter_construct_"
|
|
|
|
"corruption=true;"
|
|
|
|
// A bug caused read_amp_bytes_per_bit to be a large integer in OPTIONS
|
|
|
|
// file generated by 6.10 to 6.14. Though bug is fixed in these releases,
|
|
|
|
// we need to handle the case of loading OPTIONS file generated before the
|
|
|
|
// fix.
|
|
|
|
"read_amp_bytes_per_bit=17179869185;",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(new_opt.index_type, BlockBasedTableOptions::kHashSearch);
|
|
|
|
ASSERT_EQ(new_opt.checksum, ChecksumType::kxxHash);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL);
|
|
|
|
ASSERT_EQ(new_opt.block_size, 1024UL);
|
|
|
|
ASSERT_EQ(new_opt.block_size_deviation, 8);
|
|
|
|
ASSERT_EQ(new_opt.block_restart_interval, 4);
|
|
|
|
ASSERT_EQ(new_opt.format_version, 5U);
|
|
|
|
ASSERT_EQ(new_opt.whole_key_filtering, true);
|
Detect (new) Bloom/Ribbon Filter construction corruption (#9342)
Summary:
Note: rebase on and merge after https://github.com/facebook/rocksdb/pull/9349, https://github.com/facebook/rocksdb/pull/9345, (optional) https://github.com/facebook/rocksdb/pull/9393
**Context:**
(Quoted from pdillinger) Layers of information during new Bloom/Ribbon Filter construction in building block-based tables include the following:
a) set of keys to add to filter
b) set of hashes to add to filter (64-bit hash applied to each key)
c) set of Bloom indices to set in filter, with duplicates
d) set of Bloom indices to set in filter, deduplicated
e) final filter and its checksum
This PR aims to detect corruption (e.g., unexpected hardware/software corruption of data structures residing in memory for a long time) from b) to e), and leaves a) as future work at the application level.
- Corruption in b) is detected by verifying the xor checksum of the hash entries, calculated as the entries accumulate before being added to the filter (i.e., `XXPH3FilterBitsBuilder::MaybeVerifyHashEntriesChecksum()`).
- Corruption in c) - e) is detected by verifying that the hash entries indeed exist in the constructed filter, by re-querying these hash entries in the filter (i.e., `FilterBitsBuilder::MaybePostVerify()`) after computing the block checksum (except for PartitionFilter, where this is done right after each `FilterBitsBuilder::Finish` for implementation simplicity - see code comment for more). For this stage of detection, we assume hash entries are not corrupted after the check in b), since the time interval from b) to c) is relatively short.
Option to enable this feature of detection is `BlockBasedTableOptions::detect_filter_construct_corruption` which is false by default.
**Summary:**
- Implemented new functions `XXPH3FilterBitsBuilder::MaybeVerifyHashEntriesChecksum()` and `FilterBitsBuilder::MaybePostVerify()`
- Ensured hash entries, the final filter, the banding, and their [cache reservation](https://github.com/facebook/rocksdb/issues/9073) are released properly despite corruption
- See [Filter.construction.artifacts.release.point.pdf](https://github.com/facebook/rocksdb/files/7923487/Design.Filter.construction.artifacts.release.point.pdf) for the high-level design
- Bundled and refactored the hash-entry-related artifacts in XXPH3FilterBitsBuilder into `HashEntriesInfo` for better control over the lifetime of these artifacts during `SwapEntires`, `ResetEntries`
- Ensured the RocksDB block-based table builder calls `FilterBitsBuilder::MaybePostVerify()` after constructing the filter via `FilterBitsBuilder::Finish()`
- When such filter construction corruption is encountered, stop writing the filter content to files and mark the block-based table build as non-OK by storing the corruption status in the builder.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9342
Test Plan:
- Added new unit test `DBFilterConstructionCorruptionTestWithParam.DetectCorruption`
- Included this new feature in `DBFilterConstructionReserveMemoryTestWithParam.ReserveMemory` as this feature heavily touches ReserveMemory's implementation
- For the fallback case, I ran `./filter_bench -impl=3 -detect_filter_construct_corruption=true -reserve_table_builder_memory=true -strict_capacity_limit=true -quick -runs 10 | grep 'Build avg'` to make sure nothing breaks.
- Added to `filter_bench`: this feature increased filter construction time by **30%**, mostly due to `MaybePostVerify()`
- FastLocalBloom
- Before change: `./filter_bench -impl=2 -quick -runs 10 | grep 'Build avg'`: **28.86643s**
- After change:
- `./filter_bench -impl=2 -detect_filter_construct_corruption=false -quick -runs 10 | grep 'Build avg'` (expect a tiny increase because MaybePostVerify is always called regardless): **27.6644s (the ~4% improvement is likely because we no longer drop Bloom hash entries one by one while iterating in `AddAllEntries` but in bulk later; the same applies to the MaybePostVerify-bypassing case below)**
- `./filter_bench -impl=2 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'` (expect an acceptable increase): **34.41159s (+20%)**
- `./filter_bench -impl=2 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'` (bypassing MaybePostVerify, expect a minor increase): **27.13431s (-6%)**
- Standard128Ribbon
- Before change: `./filter_bench -impl=3 -quick -runs 10 | grep 'Build avg'`: **122.5384s**
- After change:
- `./filter_bench -impl=3 -detect_filter_construct_corruption=false -quick -runs 10 | grep 'Build avg'` (expect a tiny increase because MaybePostVerify is always called regardless - verified by removing MaybePostVerify in this case and finding only a ±1ns difference): **124.3588s (+2%)**
- `./filter_bench -impl=3 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'` (expect an acceptable increase): **159.4946s (+30%)**
- `./filter_bench -impl=3 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'` (bypassing MaybePostVerify, expect a minor increase): **125.258s (+2%)**
- Added to `db_stress`: `make crash_test`, `./db_stress --detect_filter_construct_corruption=true`
- Manually smoke-tested: manually corrupted the filter construction in some DB-level tests with basic Put and background flush. As expected, the error was returned to users in the subsequent Put and Flush status.
Reviewed By: pdillinger
Differential Revision: D33746928
Pulled By: hx235
fbshipit-source-id: cb056426be5a7debc1cd16f23bc250f36a08ca57
3 years ago
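Below is a minimal sketch (not part of this test) of turning on the detection described above, using the same options-string path this test exercises; the option name and the `GetBlockBasedTableOptionsFromString` signature match the assertions below, while the exact include paths are assumptions.
```cpp
#include <cassert>

#include "rocksdb/convenience.h"
#include "rocksdb/table.h"

void EnableFilterConstructionCorruptionDetection() {
  rocksdb::ConfigOptions cfg;
  rocksdb::BlockBasedTableOptions base, parsed;
  // Same parsing path as the assertions below.
  rocksdb::Status s = rocksdb::GetBlockBasedTableOptionsFromString(
      cfg, base, "detect_filter_construct_corruption=true;", &parsed);
  assert(s.ok() && parsed.detect_filter_construct_corruption);
  // Equivalent direct configuration on the table options:
  base.detect_filter_construct_corruption = true;
}
```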
|
|
|
ASSERT_EQ(new_opt.detect_filter_construct_corruption, true);
|
|
|
|
ASSERT_TRUE(new_opt.filter_policy != nullptr);
|
|
|
|
auto bfp = new_opt.filter_policy->CheckedCast<BloomFilterPolicy>();
|
|
|
|
ASSERT_NE(bfp, nullptr);
|
Experimental (production candidate) SST schema for Ribbon filter (#7658)
Summary:
Added experimental public API for Ribbon filter:
NewExperimentalRibbonFilterPolicy(). This experimental API will
take a "Bloom equivalent" bits per key, and configure the Ribbon
filter for the same FP rate as Bloom would have but ~30% space
savings. (Note: optimize_filters_for_memory is not yet implemented
for Ribbon filter. That can be added with no effect on schema.)
Internally, the Ribbon filter is configured using a "one_in_fp_rate"
value, which is 1 over desired FP rate. For example, use 100 for 1%
FP rate. I'm expecting this will be used in the future for configuring
Bloom-like filters, as I expect people to more commonly hold constant
the filter accuracy and change the space vs. time trade-off, rather than
hold constant the space (per key) and change the accuracy vs. time
trade-off, though we might make that available.
### Benchmarking
```
$ ./filter_bench -impl=2 -quick -m_keys_total_max=200 -average_keys_per_filter=100000 -net_includes_hashing
Building...
Build avg ns/key: 34.1341
Number of filters: 1993
Total size (MB): 238.488
Reported total allocated memory (MB): 262.875
Reported internal fragmentation: 10.2255%
Bits/key stored: 10.0029
----------------------------
Mixed inside/outside queries...
Single filter net ns/op: 18.7508
Random filter net ns/op: 258.246
Average FP rate %: 0.968672
----------------------------
Done. (For more info, run with -legend or -help.)
$ ./filter_bench -impl=3 -quick -m_keys_total_max=200 -average_keys_per_filter=100000 -net_includes_hashing
Building...
Build avg ns/key: 130.851
Number of filters: 1993
Total size (MB): 168.166
Reported total allocated memory (MB): 183.211
Reported internal fragmentation: 8.94626%
Bits/key stored: 7.05341
----------------------------
Mixed inside/outside queries...
Single filter net ns/op: 58.4523
Random filter net ns/op: 363.717
Average FP rate %: 0.952978
----------------------------
Done. (For more info, run with -legend or -help.)
```
168.166 / 238.488 = 0.705 -> 29.5% space reduction
130.851 / 34.1341 = 3.83x construction time for this Ribbon filter vs. latest Bloom filter (could make that as little as about 2.5x for less space reduction)
### Working around a hashing "flaw"
bloom_test discovered a flaw in the simple hashing applied in
StandardHasher when num_starts == 1 (num_slots == 128), showing an
excessively high FP rate. The problem is that when many entries, on the
order of number of hash bits or kCoeffBits, are associated with the same
start location, the correlation between the CoeffRow and ResultRow (for
efficiency) can lead to a solution that is "universal," or nearly so, for
entries mapping to that start location. (Normally, variance in start
location breaks the effective association between CoeffRow and
ResultRow; the same value for CoeffRow is effectively different if start
locations are different.) Without kUseSmash and with num_starts > 1 (thus
num_starts ~= num_slots), this flaw should be completely irrelevant. Even
with 10M slots, the chance of a single slot having 16 (or more)
entries mapping to it--not enough to cause an FP problem, which would be local
to that slot if it happened--is about 1 in millions. This spreadsheet formula
shows that: =1/(10000000*(1 - POISSON(15, 1, TRUE)))
As kUseSmash==false (the setting for Standard128RibbonBitsBuilder) is
intended for CPU efficiency of filters with many more entries/slots than
kCoeffBits, a very reasonable work-around is to disallow num_starts==1
when !kUseSmash, by making the minimum non-zero number of slots
2*kCoeffBits. This is the work-around I've applied. This also means that
the new Ribbon filter schema (Standard128RibbonBitsBuilder) is not
space-efficient for less than a few hundred entries. Because of this, I
have made it fall back on constructing a Bloom filter, under existing
schema, when that is more space efficient for small filters. (We can
change this in the future if we want.)
TODO: better unit tests for this case in ribbon_test, and probably
update StandardHasher for kUseSmash case so that it can scale nicely to
small filters.
### Other related changes
* Add Ribbon filter to stress/crash test
* Add Ribbon filter to filter_bench as -impl=3
* Add option string support, as in "filter_policy=experimental_ribbon:5.678;"
where 5.678 is the Bloom equivalent bits per key.
* Rename internal mode BloomFilterPolicy::kAuto to kAutoBloom
* Add a general BuiltinFilterBitsBuilder::CalculateNumEntry based on
binary searching CalculateSpace (inefficient), so that subclasses
(especially experimental ones) don't have to provide an efficient
implementation inverting CalculateSpace.
* Minor refactor FastLocalBloomBitsBuilder for new base class
XXH3pFilterBitsBuilder shared with new Standard128RibbonBitsBuilder,
which allows the latter to fall back on Bloom construction in some
extreme cases.
* Mostly updated bloom_test for Ribbon filter, though a test like
FullBloomTest::Schema is a next TODO to ensure schema stability
(in case this becomes production-ready schema as it is).
* Add some APIs to ribbon_impl.h for configuring Ribbon filters.
Although these are reasonably covered by bloom_test, TODO more unit
tests in ribbon_test
* Added a "tool" FindOccupancyForSuccessRate to ribbon_test to get data
for constructing the linear approximations in GetNumSlotsFor95PctSuccess.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7658
Test Plan:
Some unit tests updated but other testing is left TODO. This
is considered experimental but laying down schema compatibility as early
as possible in case it proves production-quality. Also tested in
stress/crash test.
Reviewed By: jay-zhuang
Differential Revision: D24899349
Pulled By: pdillinger
fbshipit-source-id: 9715f3e6371c959d923aea8077c9423c7a9f82b8
4 years ago
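As a rough illustration of the `one_in_fp_rate` equivalence described above (not the library's exact formula), the standard optimal-Bloom approximation fp ≈ 0.6185^(bits/key) maps a Bloom-equivalent bits/key to an approximate one_in_fp_rate; the bits/key value here is a hypothetical example:
```cpp
#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical Bloom-equivalent bits/key, like the values used by the
  // ribbon option strings in this test (e.g. "ribbonfilter:5.678").
  double bloom_equivalent_bits_per_key = 9.9;
  // Optimal-Bloom approximation: fp ~= 0.6185 ^ (bits/key).
  double approx_fp_rate = std::pow(0.6185, bloom_equivalent_bits_per_key);
  double one_in_fp_rate = 1.0 / approx_fp_rate;  // ~100 corresponds to ~1% FP
  std::printf("fp ~= %.3f%%, one_in_fp_rate ~= %.0f\n",
              100.0 * approx_fp_rate, one_in_fp_rate);
  return 0;
}
```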
|
|
|
EXPECT_EQ(bfp->GetMillibitsPerKey(), 4567);
|
|
|
|
EXPECT_EQ(bfp->GetWholeBitsPerKey(), 5);
|
|
|
|
// Verify that only the lower 32bits are stored in
|
|
|
|
// new_opt.read_amp_bytes_per_bit.
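// (17179869185 = 4 * 2^32 + 1, so the stored lower 32 bits equal 1.)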
|
|
|
|
EXPECT_EQ(1U, new_opt.read_amp_bytes_per_bit);
|
|
|
|
|
|
|
|
// unknown option
|
|
|
|
Status s = GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;index_type=kBinarySearch;"
|
|
|
|
"bad_option=1",
|
|
|
|
&new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
ASSERT_EQ(static_cast<bool>(table_opt.cache_index_and_filter_blocks),
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized index type
|
|
|
|
s = GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;index_type=kBinarySearchXX", &new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized checksum type
|
|
|
|
ASSERT_NOK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;checksum=kxxHashXX", &new_opt));
|
|
|
|
ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized filter policy name
|
|
|
|
s = GetBlockBasedTableOptionsFromString(config_options, table_opt,
|
|
|
|
"filter_policy=bloomfilterxx:4:true",
|
|
|
|
&new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
Hide deprecated, inefficient block-based filter from public API (#9535)
Summary:
This change removes the ability to configure the deprecated,
inefficient block-based filter in the public API. Options that would
have enabled it now use "full" (and optionally partitioned) filters.
Existing block-based filters can still be read and used, and a "back
door" way to build them still exists, for testing and in case of trouble.
About the only way this removal would cause an issue for users is if
temporary memory for filter construction greatly increases. In
HISTORY.md we suggest a few possible mitigations: partitioned filters,
smaller SST files, or setting reserve_table_builder_memory=true.
Or users who have customized a FilterPolicy using the
CreateFilter/KeyMayMatch mechanism removed in https://github.com/facebook/rocksdb/issues/9501 will have to upgrade
their code. (It's long past time for people to move to the new
builder/reader customization interface.)
This change also introduces some internal-use-only configuration strings
for testing specific filter implementations while bypassing some
compatibility / intelligence logic. This is intended to hint at a path
toward making FilterPolicy Customizable, but it also gives us a "back
door" way to configure block-based filter.
Aside: updated db_bench so that -readonly implies -use_existing_db
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9535
Test Plan:
Unit tests updated. Specifically,
* BlockBasedTableTest.BlockReadCountTest is tweaked to validate the back
door configuration interface and ignoring of `use_block_based_builder`.
* BlockBasedTableTest.TracingGetTest is migrated from testing
block-based filter access pattern to full filter access pattern, by
re-ordering some things.
* Options test (pretty self-explanatory)
Performance test - create with `./db_bench -db=/dev/shm/rocksdb1 -bloom_bits=10 -cache_index_and_filter_blocks=1 -benchmarks=fillrandom -num=10000000 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0` with and without `-use_block_based_filter`, which creates a DB with 21 SST files in L0. Read with `./db_bench -db=/dev/shm/rocksdb1 -readonly -bloom_bits=10 -cache_index_and_filter_blocks=1 -benchmarks=readrandom -num=10000000 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0 -duration=30`
Without -use_block_based_filter: readrandom 464 ops/sec, 689280 KB DB
With -use_block_based_filter: readrandom 169 ops/sec, 690996 KB DB
No consistent difference with fillrandom
Reviewed By: jay-zhuang
Differential Revision: D34153871
Pulled By: pdillinger
fbshipit-source-id: 31f4a933c542f8f09aca47fa64aec67832a69738
3 years ago
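A minimal sketch (assumed setup, not from this test) of the post-change way to configure a Bloom filter: since `use_block_based_builder` is now ignored, the (full-filter) policy is simply created directly.
```cpp
#include "rocksdb/filter_policy.h"
#include "rocksdb/table.h"

void ConfigureFullBloomFilter(rocksdb::BlockBasedTableOptions* bbto) {
  // Same effect as the "filter_policy=bloomfilter:4:true" option string
  // parsed below: the trailing use_block_based_builder flag is ignored.
  bbto->filter_policy.reset(
      rocksdb::NewBloomFilterPolicy(4.0 /* bits per key */));
}
```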
|
|
|
|
|
|
|
// missing bits per key
|
|
|
|
s = GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=bloomfilter", &new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
|
|
|
|
// Used to be rejected, now accepted
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=bloomfilter:4", &new_opt));
|
|
|
|
bfp = dynamic_cast<const BloomFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(bfp->GetMillibitsPerKey(), 4000);
|
|
|
|
EXPECT_EQ(bfp->GetWholeBitsPerKey(), 4);
|
|
|
|
|
|
|
|
// use_block_based_builder=true now ignored in public API (same as false)
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=bloomfilter:4:true", &new_opt));
|
|
|
|
bfp = dynamic_cast<const BloomFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(bfp->GetMillibitsPerKey(), 4000);
|
|
|
|
EXPECT_EQ(bfp->GetWholeBitsPerKey(), 4);
|
|
|
|
|
|
|
|
// Test configuring using other internal names
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"filter_policy=rocksdb.internal.LegacyBloomFilter:3", &new_opt));
|
Remove deprecated block-based filter (#10184)
Summary:
In https://github.com/facebook/rocksdb/issues/9535, release 7.0, we hid the old block-based filter from being created using
the public API, because of its inefficiency. Although we normally maintain read compatibility
on old DBs forever, filters are not required for reading a DB, only for optimizing read
performance. Thus, it should be acceptable to remove this code and the substantial
maintenance burden it carries as useful features are developed and validated (such
as user timestamp).
This change completely removes the code for reading and writing the old block-based
filters, net removing about 1370 lines of code no longer needed. Options removed from
testing / benchmarking tools. Its prior existence is only evident in a couple of places:
* `CacheEntryRole::kDeprecatedFilterBlock` - We can update this public API enum in
a major release to minimize source code incompatibilities.
* A warning is logged when an old table file is opened that used the old block-based
filter. This is provided as a courtesy, and would be a pain to unit test, so manual testing
should suffice. Unfortunately, sst_dump does not tell you whether a file uses
block-based filter, and the structure of the code makes it very difficult to fix.
* To detect that case, `kObsoleteFilterBlockPrefix` (renamed from `kFilterBlockPrefix`)
for metaindex is maintained (for now).
Other notes:
* In some cases where numbers are associated with filter configurations, we have had to
update the assigned numbers so that they all correspond to something that exists.
* Fixed potential stat counting bug by assuming `filter_checked = false` for cases
like `filter == nullptr` rather than assuming `filter_checked = true`
* Removed obsolete `block_offset` and `prefix_extractor` parameters from several
functions.
* Removed some unnecessary checks `if (!table_prefix_extractor() && !prefix_extractor)`
because the caller guarantees the prefix extractor exists and is compatible
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10184
Test Plan:
tests updated, manually test new warning in LOG using base version to
generate a DB
Reviewed By: riversand963
Differential Revision: D37212647
Pulled By: pdillinger
fbshipit-source-id: 06ee020d8de3b81260ffc36ad0c1202cbf463a80
3 years ago
|
|
|
auto builtin =
|
|
|
|
dynamic_cast<const BuiltinFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(builtin->GetId(), "rocksdb.internal.LegacyBloomFilter:3");
|
|
|
|
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"filter_policy=rocksdb.internal.FastLocalBloomFilter:1.234", &new_opt));
|
|
|
|
builtin =
|
|
|
|
dynamic_cast<const BuiltinFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(builtin->GetId(), "rocksdb.internal.FastLocalBloomFilter:1.234");
|
|
|
|
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"filter_policy=rocksdb.internal.Standard128RibbonFilter:1.234",
|
|
|
|
&new_opt));
|
|
|
|
builtin =
|
|
|
|
dynamic_cast<const BuiltinFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(builtin->GetId(), "rocksdb.internal.Standard128RibbonFilter:1.234");
|
|
|
|
|
Add Bloom/Ribbon hybrid API support (#8679)
Summary:
This is essentially resurrection and fixing of the part of
https://github.com/facebook/rocksdb/issues/8198 that was reverted in https://github.com/facebook/rocksdb/issues/8212, using data added in https://github.com/facebook/rocksdb/issues/8246. Basically,
when configuring Ribbon filter, you can specify an LSM level before which
Bloom will be used instead of Ribbon. But Bloom is only considered for
Leveled and Universal compaction styles and files going into a known LSM
level. This way, SST file writer, FIFO compaction, etc. use Ribbon filter as
you would expect with NewRibbonFilterPolicy.
So that this can be controlled with a single int value and so that flushes
can be distinguished from intra-L0, we consider flush to go to level -1 for
the purposes of this option. (Explained in API comment.)
I also expect the most common and recommended Ribbon configuration to
use Bloom during flush, to minimize slowing down writes and because according
to my estimates, Ribbon only pays off if the structure lives in memory for
more than an hour. Thus, I have changed the default for NewRibbonFilterPolicy
to be this mild hybrid configuration. I don't really want to add something like
NewHybridFilterPolicy because at least the mild hybrid configuration (Bloom for
flush, Ribbon otherwise) should be considered a natural choice.
C APIs also updated, but because they don't support overloading,
rocksdb_filterpolicy_create_ribbon is kept pure ribbon for clarity and
rocksdb_filterpolicy_create_ribbon_hybrid must be called for a hybrid
configuration. While touching C API, I changed bits per key options from
int to double.
BuiltinFilterPolicy is needed so that LevelThresholdFilterPolicy doesn't inherit
unused fields from BloomFilterPolicy.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8679
Test Plan: new + updated tests, including crash test
Reviewed By: jay-zhuang
Differential Revision: D30445797
Pulled By: pdillinger
fbshipit-source-id: 6f5aeddfd6d79f7e55493b563c2d1d2d568892e1
3 years ago
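A minimal sketch of the hybrid configuration described above, assuming the `NewRibbonFilterPolicy(bloom_equivalent_bits_per_key, bloom_before_level)` overload; it parallels the `ribbonfilter:6.789:5` option string checked further below.
```cpp
#include "rocksdb/filter_policy.h"
#include "rocksdb/table.h"

void ConfigureHybridRibbon(rocksdb::BlockBasedTableOptions* bbto) {
  // Bloom filters for levels below 5 (flush counts as level -1), Ribbon
  // filters from level 5 onward; roughly equivalent to the option string
  // "filter_policy=ribbonfilter:6.789:5".
  bbto->filter_policy.reset(
      rocksdb::NewRibbonFilterPolicy(6.789, /*bloom_before_level=*/5));
}
```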
|
|
|
// Ribbon filter policy (no Bloom hybrid)
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=ribbonfilter:5.678:-1;",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.filter_policy != nullptr);
|
|
|
|
auto rfp =
|
|
|
|
dynamic_cast<const RibbonFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(rfp->GetMillibitsPerKey(), 5678);
|
|
|
|
EXPECT_EQ(rfp->GetBloomBeforeLevel(), -1);
|
|
|
|
|
|
|
|
// Ribbon filter policy (default Bloom hybrid)
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=ribbonfilter:6.789;",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.filter_policy != nullptr);
|
|
|
|
rfp = dynamic_cast<const RibbonFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(rfp->GetMillibitsPerKey(), 6789);
|
|
|
|
EXPECT_EQ(rfp->GetBloomBeforeLevel(), 0);
|
|
|
|
|
|
|
|
// Ribbon filter policy (custom Bloom hybrid)
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=ribbonfilter:6.789:5;",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.filter_policy != nullptr);
|
|
|
|
rfp = dynamic_cast<const RibbonFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(rfp->GetMillibitsPerKey(), 6789);
|
|
|
|
EXPECT_EQ(rfp->GetBloomBeforeLevel(), 5);
|
|
|
|
|
|
|
|
// Check block cache options are overwritten when specified
|
|
|
|
// in new format as a struct.
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"block_cache={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;high_pri_pool_ratio=0.5;};"
|
|
|
|
"block_cache_compressed={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;high_pri_pool_ratio=0.5;}",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
|
|
|
|
new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
|
|
|
|
new_opt.block_cache_compressed)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
|
|
|
|
new_opt.block_cache_compressed)->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
|
|
|
|
// Set only block cache capacity. Check other values are
|
|
|
|
// reset to default values.
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"block_cache={capacity=2M};"
|
|
|
|
"block_cache_compressed={capacity=2M}",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 2*1024UL*1024UL);
|
|
|
|
// Default values
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
GetDefaultCacheShardBits(new_opt.block_cache->GetCapacity()));
|
|
|
|
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 2*1024UL*1024UL);
|
|
|
|
// Default values
|
|
|
|
ASSERT_EQ(
|
|
|
|
std::dynamic_pointer_cast<ShardedCacheBase>(
|
|
|
|
new_opt.block_cache_compressed)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
GetDefaultCacheShardBits(new_opt.block_cache_compressed->GetCapacity()));
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
|
|
|
|
// Set a couple of block cache options.
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"block_cache={num_shard_bits=5;high_pri_pool_ratio=0.5;};"
|
|
|
|
"block_cache_compressed={num_shard_bits=5;"
|
|
|
|
"high_pri_pool_ratio=0.0;}",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 0);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
5);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
|
|
|
|
new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 0);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
|
|
|
|
new_opt.block_cache_compressed)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
5);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.0);
|
|
|
|
|
|
|
|
// Set a couple of block cache options.
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"block_cache={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;};"
|
|
|
|
"block_cache_compressed={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;}",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
|
|
|
|
new_opt.block_cache_compressed)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=rocksdb.BloomFilter:1.234",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.filter_policy != nullptr);
|
|
|
|
ASSERT_TRUE(
|
|
|
|
new_opt.filter_policy->IsInstanceOf(BloomFilterPolicy::kClassName()));
|
|
|
|
ASSERT_TRUE(
|
|
|
|
new_opt.filter_policy->IsInstanceOf(BloomFilterPolicy::kNickName()));
|
|
|
|
|
|
|
|
// Ribbon filter policy alternative name
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=rocksdb.RibbonFilter:6.789:5;",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.filter_policy != nullptr);
|
|
|
|
ASSERT_TRUE(
|
|
|
|
new_opt.filter_policy->IsInstanceOf(RibbonFilterPolicy::kClassName()));
|
|
|
|
ASSERT_TRUE(
|
|
|
|
new_opt.filter_policy->IsInstanceOf(RibbonFilterPolicy::kNickName()));
|
|
|
|
}
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE // GetPlainTableOptionsFromString is not supported
|
|
|
|
TEST_F(OptionsTest, GetPlainTableOptionsFromString) {
|
|
|
|
PlainTableOptions table_opt;
|
|
|
|
PlainTableOptions new_opt;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
// make sure default values are overwritten by something else
|
|
|
|
ASSERT_OK(GetPlainTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
|
|
|
|
"index_sparseness=8;huge_page_tlb_size=4;encoding_type=kPrefix;"
|
|
|
|
"full_scan_mode=true;store_index_in_file=true",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_EQ(new_opt.user_key_len, 66u);
|
|
|
|
ASSERT_EQ(new_opt.bloom_bits_per_key, 20);
|
|
|
|
ASSERT_EQ(new_opt.hash_table_ratio, 0.5);
|
|
|
|
ASSERT_EQ(new_opt.index_sparseness, 8);
|
|
|
|
ASSERT_EQ(new_opt.huge_page_tlb_size, 4);
|
|
|
|
ASSERT_EQ(new_opt.encoding_type, EncodingType::kPrefix);
|
|
|
|
ASSERT_TRUE(new_opt.full_scan_mode);
|
|
|
|
ASSERT_TRUE(new_opt.store_index_in_file);
|
|
|
|
|
|
|
|
// unknown option
|
|
|
|
Status s = GetPlainTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
|
|
|
|
"bad_option=1",
|
|
|
|
&new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
|
|
|
|
// unrecognized EncodingType
|
|
|
|
s = GetPlainTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
|
|
|
|
"encoding_type=kPrefixXX",
|
|
|
|
&new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
}
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE // GetMemTableRepFactoryFromString is not supported
|
|
|
|
TEST_F(OptionsTest, GetMemTableRepFactoryFromString) {
|
|
|
|
std::unique_ptr<MemTableRepFactory> new_mem_factory = nullptr;
|
|
|
|
|
|
|
|
ASSERT_OK(GetMemTableRepFactoryFromString("skip_list", &new_mem_factory));
|
|
|
|
ASSERT_OK(GetMemTableRepFactoryFromString("skip_list:16", &new_mem_factory));
|
|
|
|
ASSERT_STREQ(new_mem_factory->Name(), "SkipListFactory");
|
|
|
|
ASSERT_NOK(GetMemTableRepFactoryFromString("skip_list:16:invalid_opt",
|
|
|
|
&new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_OK(GetMemTableRepFactoryFromString("prefix_hash", &new_mem_factory));
|
|
|
|
ASSERT_OK(GetMemTableRepFactoryFromString("prefix_hash:1000",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_STREQ(new_mem_factory->Name(), "HashSkipListRepFactory");
|
|
|
|
ASSERT_NOK(GetMemTableRepFactoryFromString("prefix_hash:1000:invalid_opt",
|
|
|
|
&new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_OK(GetMemTableRepFactoryFromString("hash_linkedlist",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_OK(GetMemTableRepFactoryFromString("hash_linkedlist:1000",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_EQ(std::string(new_mem_factory->Name()), "HashLinkListRepFactory");
|
|
|
|
ASSERT_NOK(GetMemTableRepFactoryFromString("hash_linkedlist:1000:invalid_opt",
|
|
|
|
&new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_OK(GetMemTableRepFactoryFromString("vector", &new_mem_factory));
|
|
|
|
ASSERT_OK(GetMemTableRepFactoryFromString("vector:1024", &new_mem_factory));
|
|
|
|
ASSERT_EQ(std::string(new_mem_factory->Name()), "VectorRepFactory");
|
|
|
|
ASSERT_NOK(GetMemTableRepFactoryFromString("vector:1024:invalid_opt",
|
|
|
|
&new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_NOK(GetMemTableRepFactoryFromString("cuckoo", &new_mem_factory));
|
|
|
|
// The CuckooHash memtable has already been removed.
|
|
|
|
ASSERT_NOK(GetMemTableRepFactoryFromString("cuckoo:1024", &new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_NOK(GetMemTableRepFactoryFromString("bad_factory", &new_mem_factory));
|
|
|
|
}
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, MemTableRepFactoryCreateFromString) {
|
|
|
|
std::unique_ptr<MemTableRepFactory> new_mem_factory = nullptr;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.ignore_unsupported_options = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "skip_list",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "skip_list:16",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_STREQ(new_mem_factory->Name(), "SkipListFactory");
|
|
|
|
ASSERT_TRUE(new_mem_factory->IsInstanceOf("skip_list"));
|
|
|
|
ASSERT_TRUE(new_mem_factory->IsInstanceOf("SkipListFactory"));
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "skip_list:16:invalid_opt", &new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "invalid_opt=10", &new_mem_factory));
|
|
|
|
|
|
|
|
// Test a reset
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_EQ(new_mem_factory, nullptr);
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "invalid_opt=10", &new_mem_factory));
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "id=skip_list; lookahead=32", &new_mem_factory));
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "prefix_hash",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "prefix_hash:1000", &new_mem_factory));
|
|
|
|
ASSERT_STREQ(new_mem_factory->Name(), "HashSkipListRepFactory");
|
|
|
|
ASSERT_TRUE(new_mem_factory->IsInstanceOf("prefix_hash"));
|
|
|
|
ASSERT_TRUE(new_mem_factory->IsInstanceOf("HashSkipListRepFactory"));
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "prefix_hash:1000:invalid_opt", &new_mem_factory));
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options,
|
|
|
|
"id=prefix_hash; bucket_count=32; skiplist_height=64; "
|
|
|
|
"branching_factor=16",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options,
|
|
|
|
"id=prefix_hash; bucket_count=32; skiplist_height=64; "
|
|
|
|
"branching_factor=16; invalid=unknown",
|
|
|
|
&new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "hash_linkedlist", &new_mem_factory));
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "hash_linkedlist:1000", &new_mem_factory));
|
|
|
|
ASSERT_STREQ(new_mem_factory->Name(), "HashLinkListRepFactory");
|
|
|
|
ASSERT_TRUE(new_mem_factory->IsInstanceOf("hash_linkedlist"));
|
|
|
|
ASSERT_TRUE(new_mem_factory->IsInstanceOf("HashLinkListRepFactory"));
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "hash_linkedlist:1000:invalid_opt", &new_mem_factory));
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options,
|
|
|
|
"id=hash_linkedlist; bucket_count=32; threshold=64; huge_page_size=16; "
|
|
|
|
"logging_threshold=12; log_when_flash=true",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options,
|
|
|
|
"id=hash_linkedlist; bucket_count=32; threshold=64; huge_page_size=16; "
|
|
|
|
"logging_threshold=12; log_when_flash=true; invalid=unknown",
|
|
|
|
&new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "vector",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "vector:1024",
|
|
|
|
&new_mem_factory));
|
|
|
|
ASSERT_STREQ(new_mem_factory->Name(), "VectorRepFactory");
|
|
|
|
ASSERT_TRUE(new_mem_factory->IsInstanceOf("vector"));
|
|
|
|
ASSERT_TRUE(new_mem_factory->IsInstanceOf("VectorRepFactory"));
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "vector:1024:invalid_opt", &new_mem_factory));
|
|
|
|
ASSERT_OK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "id=vector; count=42", &new_mem_factory));
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(
|
|
|
|
config_options, "id=vector; invalid=unknown", &new_mem_factory));
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(config_options, "cuckoo",
|
|
|
|
&new_mem_factory));
|
|
|
|
// The CuckooHash memtable has already been removed.
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(config_options, "cuckoo:1024",
|
|
|
|
&new_mem_factory));
|
|
|
|
|
|
|
|
ASSERT_NOK(MemTableRepFactory::CreateFromString(config_options, "bad_factory",
|
|
|
|
&new_mem_factory));
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE // GetOptionsFromString is not supported in RocksDB Lite
|
|
|
|
class CustomEnv : public EnvWrapper {
|
|
|
|
public:
|
|
|
|
explicit CustomEnv(Env* _target) : EnvWrapper(_target) {}
|
|
|
|
static const char* kClassName() { return "CustomEnv"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
};
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, GetOptionsFromStringTest) {
|
|
|
|
Options base_options, new_options;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
|
|
|
|
base_options.write_buffer_size = 20;
|
|
|
|
base_options.min_write_buffer_number_to_merge = 15;
|
|
|
|
BlockBasedTableOptions block_based_table_options;
|
|
|
|
block_based_table_options.cache_index_and_filter_blocks = true;
|
|
|
|
base_options.table_factory.reset(
|
|
|
|
NewBlockBasedTableFactory(block_based_table_options));
|
|
|
|
|
|
|
|
// Register an Env with object registry.
|
|
|
|
ObjectLibrary::Default()->AddFactory<Env>(
|
|
|
|
CustomEnv::kClassName(),
|
|
|
|
[](const std::string& /*name*/, std::unique_ptr<Env>* /*env_guard*/,
|
|
|
|
std::string* /* errmsg */) {
|
|
|
|
static CustomEnv env(Env::Default());
|
|
|
|
return &env;
|
|
|
|
});
|
|
|
|
|
|
|
|
ASSERT_OK(GetOptionsFromString(
|
|
|
|
config_options, base_options,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_cache=1M;block_size=4;};"
|
|
|
|
"compression_opts=4:5:6;create_if_missing=true;max_open_files=1;"
|
|
|
|
"bottommost_compression_opts=5:6:7;create_if_missing=true;max_open_files="
|
|
|
|
"1;"
|
|
|
|
"rate_limiter_bytes_per_sec=1024;env=CustomEnv",
|
|
|
|
&new_options));
|
|
|
|
|
|
|
|
ASSERT_EQ(new_options.compression_opts.window_bits, 4);
|
|
|
|
ASSERT_EQ(new_options.compression_opts.level, 5);
|
|
|
|
ASSERT_EQ(new_options.compression_opts.strategy, 6);
|
|
|
|
ASSERT_EQ(new_options.compression_opts.max_dict_bytes, 0u);
|
|
|
|
ASSERT_EQ(new_options.compression_opts.zstd_max_train_bytes, 0u);
|
|
|
|
ASSERT_EQ(new_options.compression_opts.parallel_threads, 1u);
|
|
|
|
ASSERT_EQ(new_options.compression_opts.enabled, false);
|
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
Summary:
An untrained dictionary is currently simply the concatenation of several samples. The ZSTD API ZDICT_finalizeDictionary() can improve such a dictionary's effectiveness at low cost. This PR changes how the dictionary is created: when max_dict_buffer_bytes > 0, it calls the ZSTD ZDICT_finalizeDictionary() API instead of building a raw content dictionary, and passes in all buffered uncompressed data blocks as samples.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9857
Test Plan:
#### db_bench test for CPU/memory of compression+decompression and space saving on synthetic data:
Set up: change the parameter [here](https://github.com/facebook/rocksdb/blob/fb9a167a55e0970b1ef6f67c1600c8d9c4c6114f/tools/db_bench_tool.cc#L1766) to 16384 to make synthetic data more compressible.
```
# linked local ZSTD with version 1.5.2
# DEBUG_LEVEL=0 ROCKSDB_NO_FBCODE=1 ROCKSDB_DISABLE_ZSTD=1 EXTRA_CXXFLAGS="-DZSTD_STATIC_LINKING_ONLY -DZSTD -I/data/users/changyubi/install/include/" EXTRA_LDFLAGS="-L/data/users/changyubi/install/lib/ -l:libzstd.a" make -j32 db_bench
dict_bytes=16384
train_bytes=1048576
echo "========== No Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== Raw Content Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench_main -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== FinalizeDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== TrainDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
# Result: TrainDictionary is much better on space saving, but FinalizeDictionary seems to use less memory.
# before compression data size: 1.2GB
dict_bytes=16384
max_dict_buffer_bytes = 1048576
space cpu/memory
No Dictionary 468M 14.93user 1.00system 0:15.92elapsed 100%CPU (0avgtext+0avgdata 23904maxresident)k
Raw Dictionary 251M 15.81user 0.80system 0:16.56elapsed 100%CPU (0avgtext+0avgdata 156808maxresident)k
FinalizeDictionary 236M 11.93user 0.64system 0:12.56elapsed 100%CPU (0avgtext+0avgdata 89548maxresident)k
TrainDictionary 84M 7.29user 0.45system 0:07.75elapsed 100%CPU (0avgtext+0avgdata 97288maxresident)k
```
#### Benchmark on 10 sample SST files for space saving and CPU time on compression:
FinalizeDictionary is comparable to TrainDictionary in terms of space saving, and takes less time in compression.
```
dict_bytes=16384
train_bytes=1048576
for sst_file in `ls ../temp/myrock-sst/`
do
echo "********** $sst_file **********"
echo "========== No Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD
echo "========== Raw Content Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes
echo "========== FinalizeDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes --compression_use_zstd_finalize_dict
echo "========== TrainDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes
done
010240.sst (Size/Time) 011029.sst 013184.sst 021552.sst 185054.sst 185137.sst 191666.sst 7560381.sst 7604174.sst 7635312.sst
No Dictionary 28165569 / 2614419 32899411 / 2976832 32977848 / 3055542 31966329 / 2004590 33614351 / 1755877 33429029 / 1717042 33611933 / 1776936 33634045 / 2771417 33789721 / 2205414 33592194 / 388254
Raw Content Dictionary 28019950 / 2697961 33748665 / 3572422 33896373 / 3534701 26418431 / 2259658 28560825 / 1839168 28455030 / 1846039 28494319 / 1861349 32391599 / 3095649 33772142 / 2407843 33592230 / 474523
FinalizeDictionary 27896012 / 2650029 33763886 / 3719427 33904283 / 3552793 26008225 / 2198033 28111872 / 1869530 28014374 / 1789771 28047706 / 1848300 32296254 / 3204027 33698698 / 2381468 33592344 / 517433
TrainDictionary 28046089 / 2740037 33706480 / 3679019 33885741 / 3629351 25087123 / 2204558 27194353 / 1970207 27234229 / 1896811 27166710 / 1903119 32011041 / 3322315 32730692 / 2406146 33608631 / 570593
```
#### Decompression/Read test:
With FinalizeDictionary/TrainDictionary, some data structures used for decompression are stored in the dictionary, so decompression/reads are expected to be faster.
```
dict_bytes=16384
train_bytes=1048576
echo "No Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=0 > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=0 2>&1 | grep MB/s
echo "Raw Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes 2>&1 | grep MB/s
echo "FinalizeDict"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false 2>&1 | grep MB/s
echo "Train Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes 2>&1 | grep MB/s
No Dictionary
readrandom : 12.183 micros/op 82082 ops/sec 12.183 seconds 1000000 operations; 9.1 MB/s (1000000 of 1000000 found)
Raw Dictionary
readrandom : 12.314 micros/op 81205 ops/sec 12.314 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
FinalizeDict
readrandom : 9.787 micros/op 102180 ops/sec 9.787 seconds 1000000 operations; 11.3 MB/s (1000000 of 1000000 found)
Train Dictionary
readrandom : 9.698 micros/op 103108 ops/sec 9.699 seconds 1000000 operations; 11.4 MB/s (1000000 of 1000000 found)
```
Reviewed By: ajkr
Differential Revision: D35720026
Pulled By: cbi42
fbshipit-source-id: 24d230fdff0fd28a1bb650658798f00dfcfb2a1f
3 years ago
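A minimal sketch of setting the dictionary knobs discussed above directly on CompressionOptions, assuming the field names checked by the assertions that follow (max_dict_bytes, zstd_max_train_bytes, use_zstd_dict_trainer); the sizes are illustrative only:
```
#include "rocksdb/options.h"

// Sketch only: field names follow the CompressionOptions struct exercised by
// the assertions below.
void ConfigureZstdDictionary(ROCKSDB_NAMESPACE::Options* opts) {
  opts->compression = ROCKSDB_NAMESPACE::kZSTD;
  // Cap on the final dictionary size; 0 disables dictionary compression.
  opts->compression_opts.max_dict_bytes = 16 * 1024;
  // Cap on the bytes of sample data fed to dictionary generation.
  opts->compression_opts.zstd_max_train_bytes = 1024 * 1024;
  // false selects the cheaper ZDICT_finalizeDictionary() path described
  // above; true (the default checked below) uses the full ZDICT trainer.
  opts->compression_opts.use_zstd_dict_trainer = false;
}
```
The same fields can also be set through the option strings parsed by GetOptionsFromString in this test; note that the colon form compression_opts=4:5:6 used above only covers window_bits:level:strategy, which is why the dictionary-related fields are asserted to be at their defaults below.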
|
|
|
ASSERT_EQ(new_options.compression_opts.use_zstd_dict_trainer, true);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression, kDisableCompressionOption);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression_opts.strategy, 7);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression_opts.max_dict_bytes, 0u);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression_opts.zstd_max_train_bytes, 0u);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression_opts.parallel_threads, 1u);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression_opts.enabled, false);
|
|
|
|
ASSERT_EQ(new_options.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
true);
|
|
|
|
ASSERT_EQ(new_options.write_buffer_size, 10U);
|
|
|
|
ASSERT_EQ(new_options.max_write_buffer_number, 16);
|
|
|
|
const auto new_bbto =
|
|
|
|
new_options.table_factory->GetOptions<BlockBasedTableOptions>();
|
|
|
|
ASSERT_NE(new_bbto, nullptr);
|
|
|
|
ASSERT_EQ(new_bbto->block_cache->GetCapacity(), 1U << 20);
|
|
|
|
ASSERT_EQ(new_bbto->block_size, 4U);
|
|
|
|
// don't overwrite block based table options
|
|
|
|
ASSERT_TRUE(new_bbto->cache_index_and_filter_blocks);
|
|
|
|
|
|
|
|
ASSERT_EQ(new_options.create_if_missing, true);
|
|
|
|
ASSERT_EQ(new_options.max_open_files, 1);
|
|
|
|
ASSERT_TRUE(new_options.rate_limiter.get() != nullptr);
|
|
|
|
Env* newEnv = new_options.env;
|
|
|
|
ASSERT_OK(Env::LoadEnv(CustomEnv::kClassName(), &newEnv));
|
|
|
|
ASSERT_EQ(newEnv, new_options.env);
|
|
|
|
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
// Test that a bad value for a DBOption returns a failure
|
|
|
|
base_options.dump_malloc_stats = false;
|
|
|
|
base_options.write_buffer_size = 1024;
|
|
|
|
Options bad_options = new_options;
|
|
|
|
Status s = GetOptionsFromString(config_options, base_options,
|
|
|
|
"create_if_missing=XX;dump_malloc_stats=true",
|
|
|
|
&bad_options);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
ASSERT_EQ(bad_options.dump_malloc_stats, false);
|
|
|
|
|
|
|
|
bad_options = new_options;
|
|
|
|
s = GetOptionsFromString(config_options, base_options,
|
|
|
|
"write_buffer_size=XX;dump_malloc_stats=true",
|
|
|
|
&bad_options);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
|
|
|
|
ASSERT_EQ(bad_options.dump_malloc_stats, false);
|
|
|
|
|
|
|
|
// Test that a bad value for a TableFactory option returns a failure
|
|
|
|
bad_options = new_options;
|
|
|
|
s = GetOptionsFromString(config_options, base_options,
|
|
|
|
"write_buffer_size=16;dump_malloc_stats=true"
|
|
|
|
"block_based_table_factory={block_size=XX;};",
|
|
|
|
&bad_options);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
ASSERT_EQ(bad_options.dump_malloc_stats, false);
|
|
|
|
ASSERT_EQ(bad_options.write_buffer_size, 1024);
|
|
|
|
|
|
|
|
config_options.ignore_unknown_options = true;
|
|
|
|
ASSERT_OK(GetOptionsFromString(config_options, base_options,
|
|
|
|
"create_if_missing=XX;dump_malloc_stats=true;"
|
|
|
|
"write_buffer_size=XX;"
|
|
|
|
"block_based_table_factory={block_size=XX;};",
|
|
|
|
&bad_options));
|
|
|
|
ASSERT_EQ(bad_options.create_if_missing, base_options.create_if_missing);
|
|
|
|
ASSERT_EQ(bad_options.dump_malloc_stats, true);
|
|
|
|
ASSERT_EQ(bad_options.write_buffer_size, base_options.write_buffer_size);
|
|
|
|
|
|
|
|
// Test the old interface
|
|
|
|
ASSERT_OK(GetOptionsFromString(
|
|
|
|
base_options,
|
|
|
|
"write_buffer_size=22;max_write_buffer_number=33;max_open_files=44;",
|
|
|
|
&new_options));
|
|
|
|
ASSERT_EQ(new_options.write_buffer_size, 22U);
|
|
|
|
ASSERT_EQ(new_options.max_write_buffer_number, 33);
|
|
|
|
ASSERT_EQ(new_options.max_open_files, 44);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, DBOptionsSerialization) {
|
|
|
|
Options base_options, new_options;
|
|
|
|
Random rnd(301);
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
|
|
|
|
// Phase 1: Make big change in base_options
|
|
|
|
test::RandomInitDBOptions(&base_options, &rnd);
|
|
|
|
|
|
|
|
// Phase 2: obtain a string from base_option
|
|
|
|
std::string base_options_file_content;
|
|
|
|
ASSERT_OK(GetStringFromDBOptions(config_options, base_options,
|
|
|
|
&base_options_file_content));
|
|
|
|
|
|
|
|
// Phase 3: Set new_options from the derived string and expect
|
|
|
|
// new_options == base_options
|
|
|
|
ASSERT_OK(GetDBOptionsFromString(config_options, DBOptions(),
|
|
|
|
base_options_file_content, &new_options));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_options, base_options,
|
|
|
|
new_options));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, OptionsComposeDecompose) {
|
|
|
|
// build an Options from DBOptions + CFOptions, then decompose it to verify
|
|
|
|
// we get the same constituent options.
|
|
|
|
DBOptions base_db_opts;
|
|
|
|
ColumnFamilyOptions base_cf_opts;
|
|
|
|
ConfigOptions
|
|
|
|
config_options; // Use default for ignore(false) and check (exact)
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
|
|
|
|
Random rnd(301);
|
|
|
|
test::RandomInitDBOptions(&base_db_opts, &rnd);
|
|
|
|
test::RandomInitCFOptions(&base_cf_opts, base_db_opts, &rnd);
|
|
|
|
|
|
|
|
Options base_opts(base_db_opts, base_cf_opts);
|
|
|
|
DBOptions new_db_opts(base_opts);
|
|
|
|
ColumnFamilyOptions new_cf_opts(base_opts);
|
|
|
|
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_options, base_db_opts,
|
|
|
|
new_db_opts));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opts,
|
|
|
|
new_cf_opts));
|
|
|
|
delete new_cf_opts.compaction_filter;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, DBOptionsComposeImmutable) {
|
|
|
|
// Build a DBOptions from an Immutable/Mutable one and verify that
|
|
|
|
// we get the same constituent options.
|
|
|
|
ConfigOptions config_options;
|
|
|
|
Random rnd(301);
|
|
|
|
DBOptions base_opts, new_opts;
|
|
|
|
test::RandomInitDBOptions(&base_opts, &rnd);
|
|
|
|
MutableDBOptions m_opts(base_opts);
|
|
|
|
ImmutableDBOptions i_opts(base_opts);
|
|
|
|
new_opts = BuildDBOptions(i_opts, m_opts);
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_options, base_opts,
|
|
|
|
new_opts));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, GetMutableDBOptions) {
|
|
|
|
Random rnd(228);
|
|
|
|
DBOptions base_opts;
|
|
|
|
std::string opts_str;
|
|
|
|
std::unordered_map<std::string, std::string> opts_map;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
|
|
|
|
test::RandomInitDBOptions(&base_opts, &rnd);
|
|
|
|
ImmutableDBOptions i_opts(base_opts);
|
|
|
|
MutableDBOptions m_opts(base_opts);
|
|
|
|
MutableDBOptions new_opts;
|
|
|
|
ASSERT_OK(GetStringFromMutableDBOptions(config_options, m_opts, &opts_str));
|
|
|
|
ASSERT_OK(StringToMap(opts_str, &opts_map));
|
|
|
|
ASSERT_OK(GetMutableDBOptionsFromStrings(m_opts, opts_map, &new_opts));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(
|
|
|
|
config_options, base_opts, BuildDBOptions(i_opts, new_opts)));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, CFOptionsComposeImmutable) {
|
|
|
|
// Build a ColumnFamilyOptions from an Immutable/Mutable one and verify that
|
|
|
|
// we get the same constituent options.
|
|
|
|
ConfigOptions config_options;
|
|
|
|
Random rnd(301);
|
|
|
|
ColumnFamilyOptions base_opts, new_opts;
|
|
|
|
DBOptions dummy; // Needed to create ImmutableCFOptions
|
|
|
|
test::RandomInitCFOptions(&base_opts, dummy, &rnd);
|
|
|
|
MutableCFOptions m_opts(base_opts);
|
|
|
|
ImmutableCFOptions i_opts(base_opts);
|
|
|
|
UpdateColumnFamilyOptions(i_opts, &new_opts);
|
|
|
|
UpdateColumnFamilyOptions(m_opts, &new_opts);
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_opts,
|
|
|
|
new_opts));
|
|
|
|
delete new_opts.compaction_filter;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, GetMutableCFOptions) {
|
|
|
|
Random rnd(228);
|
|
|
|
ColumnFamilyOptions base, copy;
|
|
|
|
std::string opts_str;
|
|
|
|
std::unordered_map<std::string, std::string> opts_map;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
DBOptions dummy; // Needed to create ImmutableCFOptions
|
|
|
|
|
|
|
|
test::RandomInitCFOptions(&base, dummy, &rnd);
|
|
|
|
ColumnFamilyOptions result;
|
|
|
|
MutableCFOptions m_opts(base), new_opts;
|
|
|
|
|
|
|
|
ASSERT_OK(GetStringFromMutableCFOptions(config_options, m_opts, &opts_str));
|
|
|
|
ASSERT_OK(StringToMap(opts_str, &opts_map));
|
|
|
|
ASSERT_OK(GetMutableOptionsFromStrings(m_opts, opts_map, nullptr, &new_opts));
|
|
|
|
UpdateColumnFamilyOptions(ImmutableCFOptions(base), ©);
|
|
|
|
UpdateColumnFamilyOptions(new_opts, ©);
|
|
|
|
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base, copy));
|
|
|
|
delete copy.compaction_filter;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, ColumnFamilyOptionsSerialization) {
|
|
|
|
Options options;
|
|
|
|
ColumnFamilyOptions base_opt, new_opt;
|
|
|
|
Random rnd(302);
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
|
|
|
|
// Phase 1: randomly assign base_opt
|
|
|
|
// custom type options
|
|
|
|
test::RandomInitCFOptions(&base_opt, options, &rnd);
|
|
|
|
|
|
|
|
// Phase 2: obtain a string from base_opt
|
|
|
|
std::string base_options_file_content;
|
|
|
|
ASSERT_OK(GetStringFromColumnFamilyOptions(config_options, base_opt,
|
|
|
|
&base_options_file_content));
|
|
|
|
|
|
|
|
// Phase 3: Set new_opt from the derived string and expect
|
|
|
|
// new_opt == base_opt
|
|
|
|
ASSERT_OK(
|
|
|
|
GetColumnFamilyOptionsFromString(config_options, ColumnFamilyOptions(),
|
|
|
|
base_options_file_content, &new_opt));
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyCFOptions(config_options, base_opt, new_opt));
|
|
|
|
if (base_opt.compaction_filter) {
|
|
|
|
delete base_opt.compaction_filter;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, CheckBlockBasedTableOptions) {
|
|
|
|
ColumnFamilyOptions cf_opts;
|
|
|
|
DBOptions db_opts;
|
|
|
|
ConfigOptions config_opts;
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_opts, cf_opts, "prefix_extractor=capped:8", &cf_opts));
|
|
|
|
ASSERT_OK(TableFactory::CreateFromString(config_opts, "BlockBasedTable",
|
|
|
|
&cf_opts.table_factory));
|
|
|
|
ASSERT_NE(cf_opts.table_factory.get(), nullptr);
|
|
|
|
ASSERT_TRUE(cf_opts.table_factory->IsInstanceOf(
|
|
|
|
TableFactory::kBlockBasedTableName()));
|
|
|
|
auto bbto = cf_opts.table_factory->GetOptions<BlockBasedTableOptions>();
|
|
|
|
ASSERT_OK(cf_opts.table_factory->ConfigureFromString(
|
|
|
|
config_opts,
|
|
|
|
"block_cache={capacity=1M;num_shard_bits=4;};"
|
|
|
|
"block_size_deviation=101;"
|
|
|
|
"block_restart_interval=0;"
|
|
|
|
"index_block_restart_interval=5;"
|
|
|
|
"partition_filters=true;"
|
|
|
|
"index_type=kHashSearch;"
|
|
|
|
"no_block_cache=1;"));
|
|
|
|
ASSERT_NE(bbto, nullptr);
|
|
|
|
ASSERT_EQ(bbto->block_cache.get(), nullptr);
|
|
|
|
ASSERT_EQ(bbto->block_size_deviation, 0);
|
|
|
|
ASSERT_EQ(bbto->block_restart_interval, 1);
|
|
|
|
ASSERT_EQ(bbto->index_block_restart_interval, 1);
|
|
|
|
ASSERT_FALSE(bbto->partition_filters);
|
|
|
|
ASSERT_OK(TableFactory::CreateFromString(config_opts, "BlockBasedTable",
|
|
|
|
&cf_opts.table_factory));
|
|
|
|
bbto = cf_opts.table_factory->GetOptions<BlockBasedTableOptions>();
|
|
|
|
|
|
|
|
ASSERT_OK(cf_opts.table_factory->ConfigureFromString(config_opts,
|
|
|
|
"no_block_cache=0;"));
|
|
|
|
ASSERT_NE(bbto->block_cache.get(), nullptr);
|
|
|
|
ASSERT_OK(cf_opts.table_factory->ValidateOptions(db_opts, cf_opts));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, MutableTableOptions) {
|
|
|
|
ConfigOptions config_options;
|
|
|
|
std::shared_ptr<TableFactory> bbtf;
|
|
|
|
bbtf.reset(NewBlockBasedTableFactory());
|
|
|
|
auto bbto = bbtf->GetOptions<BlockBasedTableOptions>();
|
|
|
|
ASSERT_NE(bbto, nullptr);
|
|
|
|
ASSERT_OK(bbtf->ConfigureOption(config_options, "block_align", "true"));
|
|
|
|
ASSERT_OK(bbtf->ConfigureOption(config_options, "block_size", "1024"));
|
|
|
|
ASSERT_EQ(bbto->block_align, true);
|
|
|
|
ASSERT_EQ(bbto->block_size, 1024);
|
|
|
|
ASSERT_OK(bbtf->PrepareOptions(config_options));
|
|
|
|
config_options.mutable_options_only = true;
|
|
|
|
ASSERT_OK(bbtf->ConfigureOption(config_options, "block_size", "1024"));
|
|
|
|
ASSERT_EQ(bbto->block_align, true);
|
|
|
|
ASSERT_NOK(bbtf->ConfigureOption(config_options, "block_align", "false"));
|
|
|
|
ASSERT_OK(bbtf->ConfigureOption(config_options, "block_size", "2048"));
|
|
|
|
ASSERT_EQ(bbto->block_align, true);
|
|
|
|
ASSERT_EQ(bbto->block_size, 2048);
|
|
|
|
|
|
|
|
ColumnFamilyOptions cf_opts;
|
|
|
|
cf_opts.table_factory = bbtf;
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, cf_opts, "block_based_table_factory.block_align=false",
|
|
|
|
&cf_opts));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, cf_opts, "block_based_table_factory.block_size=8192",
|
|
|
|
&cf_opts));
|
|
|
|
ASSERT_EQ(bbto->block_align, true);
|
|
|
|
ASSERT_EQ(bbto->block_size, 8192);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, MutableCFOptions) {
|
|
|
|
ConfigOptions config_options;
|
|
|
|
ColumnFamilyOptions cf_opts;
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, cf_opts,
|
|
|
|
"paranoid_file_checks=true; block_based_table_factory.block_align=false; "
|
|
|
|
"block_based_table_factory.block_size=8192;",
|
|
|
|
&cf_opts));
|
|
|
|
ASSERT_TRUE(cf_opts.paranoid_file_checks);
|
|
|
|
ASSERT_NE(cf_opts.table_factory.get(), nullptr);
|
|
|
|
const auto bbto = cf_opts.table_factory->GetOptions<BlockBasedTableOptions>();
|
|
|
|
ASSERT_NE(bbto, nullptr);
|
|
|
|
ASSERT_EQ(bbto->block_size, 8192);
|
|
|
|
ASSERT_EQ(bbto->block_align, false);
|
|
|
|
std::unordered_map<std::string, std::string> unused_opts;
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts, {{"paranoid_file_checks", "false"}}, &cf_opts));
|
|
|
|
ASSERT_EQ(cf_opts.paranoid_file_checks, false);
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts,
|
|
|
|
{{"block_based_table_factory.block_size", "16384"}}, &cf_opts));
|
|
|
|
ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
|
|
|
|
ASSERT_EQ(bbto->block_size, 16384);
|
|
|
|
|
|
|
|
config_options.mutable_options_only = true;
|
|
|
|
// force_consistency_checks is not mutable
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts, {{"force_consistency_checks", "true"}},
|
|
|
|
&cf_opts));
|
|
|
|
|
|
|
|
// Attempt to change the table. It is not mutable, so this should fail and
|
|
|
|
// leave the original intact
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts, {{"table_factory", "PlainTable"}}, &cf_opts));
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts, {{"table_factory.id", "PlainTable"}}, &cf_opts));
|
|
|
|
ASSERT_NE(cf_opts.table_factory.get(), nullptr);
|
|
|
|
ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
|
|
|
|
|
|
|
|
// Change the block size. Should update the value in the current table
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts,
|
|
|
|
{{"block_based_table_factory.block_size", "8192"}}, &cf_opts));
|
|
|
|
ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
|
|
|
|
ASSERT_EQ(bbto->block_size, 8192);
|
|
|
|
|
|
|
|
// Attempt to turn off block cache fails, as this option is not mutable
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts,
|
|
|
|
{{"block_based_table_factory.no_block_cache", "true"}}, &cf_opts));
|
|
|
|
ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
|
|
|
|
|
|
|
|
// Attempt to change the block size via a config string/map. Should update
|
|
|
|
// the current value
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts,
|
|
|
|
{{"block_based_table_factory", "{block_size=32768}"}}, &cf_opts));
|
|
|
|
ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
|
|
|
|
ASSERT_EQ(bbto->block_size, 32768);
|
|
|
|
|
|
|
|
// Attempt to change the block size and no_block_cache through the map. Should
|
|
|
|
// fail, leaving the old values intact
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(
|
|
|
|
config_options, cf_opts,
|
|
|
|
{{"block_based_table_factory",
|
|
|
|
"{block_size=16384; no_block_cache=true}"}},
|
|
|
|
&cf_opts));
|
|
|
|
ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
|
|
|
|
ASSERT_EQ(bbto->block_size, 32768);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
|
|
|
|
Status StringToMap(
|
|
|
|
const std::string& opts_str,
|
|
|
|
std::unordered_map<std::string, std::string>* opts_map);
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE // StringToMap is not supported in ROCKSDB_LITE
|
|
|
|
TEST_F(OptionsTest, StringToMapTest) {
|
|
|
|
std::unordered_map<std::string, std::string> opts_map;
|
|
|
|
// Regular options
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2=v2;k3=v3", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "v2");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "v3");
|
|
|
|
// Value with '='
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1==v1;k2=v2=;", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "=v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "v2=");
|
|
|
|
// Overwritten option
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k1=v2;k3=v3", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v2");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "v3");
|
|
|
|
// Empty value
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2=;k3=v3;k4=", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
|
|
|
|
ASSERT_EQ(opts_map["k2"], "");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "v3");
|
|
|
|
ASSERT_TRUE(opts_map.find("k4") != opts_map.end());
|
|
|
|
ASSERT_EQ(opts_map["k4"], "");
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2=;k3=v3;k4= ", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
|
|
|
|
ASSERT_EQ(opts_map["k2"], "");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "v3");
|
|
|
|
ASSERT_TRUE(opts_map.find("k4") != opts_map.end());
|
|
|
|
ASSERT_EQ(opts_map["k4"], "");
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2=;k3=", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
|
|
|
|
ASSERT_EQ(opts_map["k2"], "");
|
|
|
|
ASSERT_TRUE(opts_map.find("k3") != opts_map.end());
|
|
|
|
ASSERT_EQ(opts_map["k3"], "");
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2=;k3=;", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
|
|
|
|
ASSERT_EQ(opts_map["k2"], "");
|
|
|
|
ASSERT_TRUE(opts_map.find("k3") != opts_map.end());
|
|
|
|
ASSERT_EQ(opts_map["k3"], "");
|
|
|
|
// Regular nested options
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2={nk1=nv1;nk2=nv2};k3=v3", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "nk1=nv1;nk2=nv2");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "v3");
|
|
|
|
// Multi-level nested options
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2={nk1=nv1;nk2={nnk1=nnk2}};"
|
|
|
|
"k3={nk1={nnk1={nnnk1=nnnv1;nnnk2;nnnv2}}};k4=v4",
|
|
|
|
&opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "nk1=nv1;nk2={nnk1=nnk2}");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "nk1={nnk1={nnnk1=nnnv1;nnnk2;nnnv2}}");
|
|
|
|
ASSERT_EQ(opts_map["k4"], "v4");
|
|
|
|
// Garbage inside curly braces
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2={dfad=};k3={=};k4=v4",
|
|
|
|
&opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "dfad=");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "=");
|
|
|
|
ASSERT_EQ(opts_map["k4"], "v4");
|
|
|
|
// Empty nested options
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2={};", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "");
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2={{{{}}}{}{}};", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "{{{}}}{}{}");
|
|
|
|
// With random spaces
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap(" k1 = v1 ; k2= {nk1=nv1; nk2={nnk1=nnk2}} ; "
|
|
|
|
"k3={ { } }; k4= v4 ",
|
|
|
|
&opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "nk1=nv1; nk2={nnk1=nnk2}");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "{ }");
|
|
|
|
ASSERT_EQ(opts_map["k4"], "v4");
|
|
|
|
|
|
|
|
// Empty key
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2=v2;=", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("=v1;k2=v2", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2v2;", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2=v2;fadfa", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2=v2;;", &opts_map));
|
|
|
|
// Mismatched curly braces
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={;k3=v3", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={{};k3=v3", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={}};k3=v3", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={{}{}}};k3=v3", &opts_map));
|
|
|
|
// However this is valid!
|
|
|
|
opts_map.clear();
|
|
|
|
ASSERT_OK(StringToMap("k1=v1;k2=};k3=v3", &opts_map));
|
|
|
|
ASSERT_EQ(opts_map["k1"], "v1");
|
|
|
|
ASSERT_EQ(opts_map["k2"], "}");
|
|
|
|
ASSERT_EQ(opts_map["k3"], "v3");
|
|
|
|
|
|
|
|
// Invalid chars after closing curly brace
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={{}}{};k3=v3", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={{}}cfda;k3=v3", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={{}} cfda;k3=v3", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={{}} cfda", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={{}}{}", &opts_map));
|
|
|
|
ASSERT_NOK(StringToMap("k1=v1;k2={{dfdl}adfa}{}", &opts_map));
|
|
|
|
}
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE // StringToMap is not supported in ROCKSDB_LITE
|
|
|
|
TEST_F(OptionsTest, StringToMapRandomTest) {
|
|
|
|
std::unordered_map<std::string, std::string> opts_map;
|
|
|
|
// Make sure segfault is not hit by semi-random strings
|
|
|
|
|
|
|
|
std::vector<std::string> bases = {
|
|
|
|
"a={aa={};tt={xxx={}}};c=defff",
|
|
|
|
"a={aa={};tt={xxx={}}};c=defff;d={{}yxx{}3{xx}}",
|
|
|
|
"abc={{}{}{}{{{}}}{{}{}{}{}{}{}{}"};
|
|
|
|
|
|
|
|
for (std::string base : bases) {
|
|
|
|
for (int rand_seed = 301; rand_seed < 401; rand_seed++) {
|
|
|
|
Random rnd(rand_seed);
|
|
|
|
for (int attempt = 0; attempt < 10; attempt++) {
|
|
|
|
std::string str = base;
|
|
|
|
// Replace a random position with a space
|
|
|
|
size_t pos = static_cast<size_t>(
|
|
|
|
rnd.Uniform(static_cast<int>(base.size())));
|
|
|
|
str[pos] = ' ';
|
|
|
|
Status s = StringToMap(str, &opts_map);
|
|
|
|
ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
|
|
|
|
opts_map.clear();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Randomly construct a string
|
|
|
|
std::vector<char> chars = {'{', '}', ' ', '=', ';', 'c'};
|
|
|
|
for (int rand_seed = 301; rand_seed < 1301; rand_seed++) {
|
|
|
|
Random rnd(rand_seed);
|
|
|
|
int len = rnd.Uniform(30);
|
|
|
|
std::string str = "";
|
|
|
|
for (int attempt = 0; attempt < len; attempt++) {
|
|
|
|
// Add a random character
|
|
|
|
size_t pos = static_cast<size_t>(
|
|
|
|
rnd.Uniform(static_cast<int>(chars.size())));
|
|
|
|
str.append(1, chars[pos]);
|
|
|
|
}
|
|
|
|
Status s = StringToMap(str, &opts_map);
|
|
|
|
ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
|
|
|
|
s = StringToMap("name=" + str, &opts_map);
|
|
|
|
ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
|
|
|
|
opts_map.clear();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, GetStringFromCompressionType) {
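  // Each supported CompressionType enum value should serialize to its
  // canonical string name; out-of-range values must be rejected.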
  std::string res;

  ASSERT_OK(GetStringFromCompressionType(&res, kNoCompression));
  ASSERT_EQ(res, "kNoCompression");

  ASSERT_OK(GetStringFromCompressionType(&res, kSnappyCompression));
  ASSERT_EQ(res, "kSnappyCompression");

  ASSERT_OK(GetStringFromCompressionType(&res, kDisableCompressionOption));
  ASSERT_EQ(res, "kDisableCompressionOption");

  ASSERT_OK(GetStringFromCompressionType(&res, kLZ4Compression));
  ASSERT_EQ(res, "kLZ4Compression");

  ASSERT_OK(GetStringFromCompressionType(&res, kZlibCompression));
  ASSERT_EQ(res, "kZlibCompression");

  ASSERT_NOK(
      GetStringFromCompressionType(&res, static_cast<CompressionType>(-10)));
}

TEST_F(OptionsTest, OnlyMutableDBOptions) {
  std::string opt_str;
  Random rnd(302);
  ConfigOptions cfg_opts;
  DBOptions db_opts;
  DBOptions mdb_opts;
  std::unordered_set<std::string> m_names;
  std::unordered_set<std::string> a_names;

  test::RandomInitDBOptions(&db_opts, &rnd);
  auto db_config = DBOptionsAsConfigurable(db_opts);

  // Get all of the DB Option names (mutable or not)
  ASSERT_OK(db_config->GetOptionNames(cfg_opts, &a_names));

  // Get only the mutable options from db_opts and set those in mdb_opts
  cfg_opts.mutable_options_only = true;

  // Get only the Mutable DB Option names
  ASSERT_OK(db_config->GetOptionNames(cfg_opts, &m_names));
  ASSERT_OK(GetStringFromDBOptions(cfg_opts, db_opts, &opt_str));
  ASSERT_OK(GetDBOptionsFromString(cfg_opts, mdb_opts, opt_str, &mdb_opts));
  std::string mismatch;
  // Comparing only the mutable options, the two are equivalent
  auto mdb_config = DBOptionsAsConfigurable(mdb_opts);
  ASSERT_TRUE(mdb_config->AreEquivalent(cfg_opts, db_config.get(), &mismatch));
  ASSERT_TRUE(db_config->AreEquivalent(cfg_opts, mdb_config.get(), &mismatch));

  ASSERT_GT(a_names.size(), m_names.size());
  for (const auto& n : m_names) {
    std::string m, d;
    ASSERT_OK(mdb_config->GetOption(cfg_opts, n, &m));
    ASSERT_OK(db_config->GetOption(cfg_opts, n, &d));
    ASSERT_EQ(m, d);
  }

  cfg_opts.mutable_options_only = false;
  // Comparing all of the options, the two are not equivalent
  ASSERT_FALSE(mdb_config->AreEquivalent(cfg_opts, db_config.get(), &mismatch));
  ASSERT_FALSE(db_config->AreEquivalent(cfg_opts, mdb_config.get(), &mismatch));

  // Make sure there are only mutable options being configured
  ASSERT_OK(GetDBOptionsFromString(cfg_opts, DBOptions(), opt_str, &db_opts));
}

TEST_F(OptionsTest, OnlyMutableCFOptions) {
  std::string opt_str;
  Random rnd(302);
  ConfigOptions cfg_opts;
  DBOptions db_opts;
  ColumnFamilyOptions mcf_opts;
  ColumnFamilyOptions cf_opts;
  std::unordered_set<std::string> m_names;
  std::unordered_set<std::string> a_names;

  test::RandomInitCFOptions(&cf_opts, db_opts, &rnd);
  cf_opts.comparator = ReverseBytewiseComparator();
  auto cf_config = CFOptionsAsConfigurable(cf_opts);

  // Get all of the CF Option names (mutable or not)
  ASSERT_OK(cf_config->GetOptionNames(cfg_opts, &a_names));

  // Get only the mutable options from cf_opts and set those in mcf_opts
  cfg_opts.mutable_options_only = true;
  // Get only the Mutable CF Option names
  ASSERT_OK(cf_config->GetOptionNames(cfg_opts, &m_names));
  ASSERT_OK(GetStringFromColumnFamilyOptions(cfg_opts, cf_opts, &opt_str));
  ASSERT_OK(
      GetColumnFamilyOptionsFromString(cfg_opts, mcf_opts, opt_str, &mcf_opts));
  std::string mismatch;

  auto mcf_config = CFOptionsAsConfigurable(mcf_opts);
  // Comparing only the mutable options, the two are equivalent
  ASSERT_TRUE(mcf_config->AreEquivalent(cfg_opts, cf_config.get(), &mismatch));
  ASSERT_TRUE(cf_config->AreEquivalent(cfg_opts, mcf_config.get(), &mismatch));

  ASSERT_GT(a_names.size(), m_names.size());
  for (const auto& n : m_names) {
    std::string m, d;
    ASSERT_OK(mcf_config->GetOption(cfg_opts, n, &m));
    ASSERT_OK(cf_config->GetOption(cfg_opts, n, &d));
    ASSERT_EQ(m, d);
  }

  cfg_opts.mutable_options_only = false;
  // Comparing all of the options, the two are not equivalent
  ASSERT_FALSE(mcf_config->AreEquivalent(cfg_opts, cf_config.get(), &mismatch));
  ASSERT_FALSE(cf_config->AreEquivalent(cfg_opts, mcf_config.get(), &mismatch));
  delete cf_opts.compaction_filter;

  // Make sure the options string contains only mutable options
  ASSERT_OK(GetColumnFamilyOptionsFromString(cfg_opts, ColumnFamilyOptions(),
                                             opt_str, &cf_opts));
  delete cf_opts.compaction_filter;
}

TEST_F(OptionsTest, SstPartitionerTest) {
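  // An SstPartitionerFactory created via CreateFromString or via an options
  // string should round-trip through GetStringFromColumnFamilyOptions and
  // compare as equivalent; unknown factory properties must be rejected.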
  ConfigOptions cfg_opts;
  ColumnFamilyOptions cf_opts, new_opt;
  std::string opts_str, mismatch;

  ASSERT_OK(SstPartitionerFactory::CreateFromString(
      cfg_opts, SstPartitionerFixedPrefixFactory::kClassName(),
      &cf_opts.sst_partitioner_factory));
  ASSERT_NE(cf_opts.sst_partitioner_factory, nullptr);
  ASSERT_STREQ(cf_opts.sst_partitioner_factory->Name(),
               SstPartitionerFixedPrefixFactory::kClassName());
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      cfg_opts, ColumnFamilyOptions(),
      std::string("sst_partitioner_factory={id=") +
          SstPartitionerFixedPrefixFactory::kClassName() + "; unknown=10;}",
      &cf_opts));
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      cfg_opts, ColumnFamilyOptions(),
      std::string("sst_partitioner_factory={id=") +
          SstPartitionerFixedPrefixFactory::kClassName() + "; length=10;}",
      &cf_opts));
  ASSERT_NE(cf_opts.sst_partitioner_factory, nullptr);
  ASSERT_STREQ(cf_opts.sst_partitioner_factory->Name(),
               SstPartitionerFixedPrefixFactory::kClassName());
  ASSERT_OK(GetStringFromColumnFamilyOptions(cfg_opts, cf_opts, &opts_str));
  ASSERT_OK(
      GetColumnFamilyOptionsFromString(cfg_opts, cf_opts, opts_str, &new_opt));
  ASSERT_NE(new_opt.sst_partitioner_factory, nullptr);
  ASSERT_STREQ(new_opt.sst_partitioner_factory->Name(),
               SstPartitionerFixedPrefixFactory::kClassName());
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(cfg_opts, cf_opts, new_opt));
  ASSERT_TRUE(cf_opts.sst_partitioner_factory->AreEquivalent(
      cfg_opts, new_opt.sst_partitioner_factory.get(), &mismatch));
}

TEST_F(OptionsTest, FileChecksumGenFactoryTest) {
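  // The file checksum generator factory should be settable by name through
  // the options string and round-trip through DBOptions serialization; an
  // unknown factory name must fail to parse.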
  ConfigOptions cfg_opts;
  DBOptions db_opts, new_opt;
  std::string opts_str, mismatch;
  auto factory = GetFileChecksumGenCrc32cFactory();

  cfg_opts.ignore_unsupported_options = false;

  ASSERT_OK(GetStringFromDBOptions(cfg_opts, db_opts, &opts_str));
  ASSERT_OK(GetDBOptionsFromString(cfg_opts, db_opts, opts_str, &new_opt));

  ASSERT_NE(factory, nullptr);
  ASSERT_OK(FileChecksumGenFactory::CreateFromString(
      cfg_opts, factory->Name(), &db_opts.file_checksum_gen_factory));
  ASSERT_NE(db_opts.file_checksum_gen_factory, nullptr);
  ASSERT_STREQ(db_opts.file_checksum_gen_factory->Name(), factory->Name());
  ASSERT_NOK(GetDBOptionsFromString(
      cfg_opts, DBOptions(), "file_checksum_gen_factory=unknown", &db_opts));
  ASSERT_OK(GetDBOptionsFromString(
      cfg_opts, DBOptions(),
      std::string("file_checksum_gen_factory=") + factory->Name(), &db_opts));
  ASSERT_NE(db_opts.file_checksum_gen_factory, nullptr);
  ASSERT_STREQ(db_opts.file_checksum_gen_factory->Name(), factory->Name());

  ASSERT_OK(GetStringFromDBOptions(cfg_opts, db_opts, &opts_str));
  ASSERT_OK(GetDBOptionsFromString(cfg_opts, db_opts, opts_str, &new_opt));
  ASSERT_NE(new_opt.file_checksum_gen_factory, nullptr);
  ASSERT_STREQ(new_opt.file_checksum_gen_factory->Name(), factory->Name());
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(cfg_opts, db_opts, new_opt));
  ASSERT_TRUE(factory->AreEquivalent(
      cfg_opts, new_opt.file_checksum_gen_factory.get(), &mismatch));
  ASSERT_TRUE(db_opts.file_checksum_gen_factory->AreEquivalent(
      cfg_opts, new_opt.file_checksum_gen_factory.get(), &mismatch));
}
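
// A minimal TablePropertiesCollectorFactory stub: it creates no collectors
// and exists only so the registry/serialization tests below have a custom
// factory type with an id-based GetId().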
class TestTablePropertiesCollectorFactory
    : public TablePropertiesCollectorFactory {
 private:
  std::string id_;

 public:
  explicit TestTablePropertiesCollectorFactory(const std::string& id)
      : id_(id) {}
  TablePropertiesCollector* CreateTablePropertiesCollector(
      TablePropertiesCollectorFactory::Context /*context*/) override {
    return nullptr;
  }
  static const char* kClassName() { return "TestCollector"; }
  const char* Name() const override { return kClassName(); }
  std::string GetId() const override {
    return std::string(kClassName()) + ":" + id_;
  }
};

TEST_F(OptionsTest, OptionTablePropertiesTest) {
  ConfigOptions cfg_opts;
  ColumnFamilyOptions orig, copy;
  orig.table_properties_collector_factories.push_back(
      std::make_shared<TestTablePropertiesCollectorFactory>("1"));
  orig.table_properties_collector_factories.push_back(
      std::make_shared<TestTablePropertiesCollectorFactory>("2"));

  // Push two TablePropertiesCollectorFactories then create a new
  // ColumnFamilyOptions based on those settings. The copy should
  // have no collector factories but still match the original
  std::string opts_str;
  ASSERT_OK(GetStringFromColumnFamilyOptions(cfg_opts, orig, &opts_str));
  ASSERT_OK(GetColumnFamilyOptionsFromString(cfg_opts, orig, opts_str, &copy));
  ASSERT_EQ(copy.table_properties_collector_factories.size(), 0);
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(cfg_opts, orig, copy));

  // Now register a TablePropertiesCollectorFactory and repeat the
  // experiment. This time the copy should have the same collector
  // factories as the original
  cfg_opts.registry->AddLibrary("collector")
      ->AddFactory<TablePropertiesCollectorFactory>(
          ObjectLibrary::PatternEntry(
              TestTablePropertiesCollectorFactory::kClassName(), false)
              .AddSeparator(":"),
          [](const std::string& name,
             std::unique_ptr<TablePropertiesCollectorFactory>* guard,
             std::string* /* errmsg */) {
            std::string id = name.substr(
                strlen(TestTablePropertiesCollectorFactory::kClassName()) + 1);
            guard->reset(new TestTablePropertiesCollectorFactory(id));
            return guard->get();
          });

  ASSERT_OK(GetColumnFamilyOptionsFromString(cfg_opts, orig, opts_str, &copy));
  ASSERT_EQ(copy.table_properties_collector_factories.size(), 2);
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(cfg_opts, orig, copy));
}
#endif  // !ROCKSDB_LITE

TEST_F(OptionsTest, ConvertOptionsTest) {
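  // ConvertOptions() should map each LevelDBOptions field onto the
  // corresponding RocksDB Options / BlockBasedTableOptions field, including
  // an 8MB block cache.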
  LevelDBOptions leveldb_opt;
  Options converted_opt = ConvertOptions(leveldb_opt);

  ASSERT_EQ(converted_opt.create_if_missing, leveldb_opt.create_if_missing);
  ASSERT_EQ(converted_opt.error_if_exists, leveldb_opt.error_if_exists);
  ASSERT_EQ(converted_opt.paranoid_checks, leveldb_opt.paranoid_checks);
  ASSERT_EQ(converted_opt.env, leveldb_opt.env);
  ASSERT_EQ(converted_opt.info_log.get(), leveldb_opt.info_log);
  ASSERT_EQ(converted_opt.write_buffer_size, leveldb_opt.write_buffer_size);
  ASSERT_EQ(converted_opt.max_open_files, leveldb_opt.max_open_files);
  ASSERT_EQ(converted_opt.compression, leveldb_opt.compression);

  std::shared_ptr<TableFactory> table_factory = converted_opt.table_factory;
  const auto table_opt = table_factory->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(table_opt, nullptr);

  ASSERT_EQ(table_opt->block_cache->GetCapacity(), 8UL << 20);
  ASSERT_EQ(table_opt->block_size, leveldb_opt.block_size);
  ASSERT_EQ(table_opt->block_restart_interval,
            leveldb_opt.block_restart_interval);
  ASSERT_EQ(table_opt->filter_policy.get(), leveldb_opt.filter_policy);
}
#ifndef ROCKSDB_LITE
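// EventListener with a configurable name, used to exercise listener
// serialization through the ObjectRegistry.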
class TestEventListener : public EventListener {
 private:
  std::string id_;

 public:
  explicit TestEventListener(const std::string& id) : id_("Test" + id) {}
  const char* Name() const override { return id_.c_str(); }
};

static std::unordered_map<std::string, OptionTypeInfo>
    test_listener_option_info = {
        {"s",
         {0, OptionType::kString, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone}},

};

class TestConfigEventListener : public TestEventListener {
 private:
  std::string s_;

 public:
  explicit TestConfigEventListener(const std::string& id)
      : TestEventListener("Config" + id) {
    s_ = id;
    RegisterOptions("Test", &s_, &test_listener_option_info);
  }
};
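
// Registers "Test<arg>" and "TestConfig<arg>" listener factories in the
// object library so GetDBOptionsFromString() can re-create those listeners.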
static int RegisterTestEventListener(ObjectLibrary& library,
                                     const std::string& arg) {
  library.AddFactory<EventListener>(
      "Test" + arg,
      [](const std::string& name, std::unique_ptr<EventListener>* guard,
         std::string* /* errmsg */) {
        guard->reset(new TestEventListener(name.substr(4)));
        return guard->get();
      });
  library.AddFactory<EventListener>(
      "TestConfig" + arg,
      [](const std::string& name, std::unique_ptr<EventListener>* guard,
         std::string* /* errmsg */) {
        guard->reset(new TestConfigEventListener(name.substr(10)));
        return guard->get();
      });
  return 1;
}

TEST_F(OptionsTest, OptionsListenerTest) {
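  // Only listeners whose factories are registered with the ObjectRegistry
  // (the "Test1"/"TestConfig1" ones here) can be re-created when the options
  // string is parsed back into a DBOptions.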
  DBOptions orig, copy;
  orig.listeners.push_back(std::make_shared<TestEventListener>("1"));
  orig.listeners.push_back(std::make_shared<TestEventListener>("2"));
  orig.listeners.push_back(std::make_shared<TestEventListener>(""));
  orig.listeners.push_back(std::make_shared<TestConfigEventListener>("1"));
  orig.listeners.push_back(std::make_shared<TestConfigEventListener>("2"));
  orig.listeners.push_back(std::make_shared<TestConfigEventListener>(""));
  ConfigOptions config_opts(orig);
  config_opts.registry->AddLibrary("listener", RegisterTestEventListener, "1");
  std::string opts_str;
  ASSERT_OK(GetStringFromDBOptions(config_opts, orig, &opts_str));
  ASSERT_OK(GetDBOptionsFromString(config_opts, orig, opts_str, &copy));
  ASSERT_OK(GetStringFromDBOptions(config_opts, copy, &opts_str));
  ASSERT_EQ(
      copy.listeners.size(),
      2);  // The Test{Config}1 Listeners could be loaded but not the others
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_opts, orig, copy));
}
#endif  // ROCKSDB_LITE

#ifndef ROCKSDB_LITE
const static std::string kCustomEnvName = "Custom";
const static std::string kCustomEnvProp = "env=" + kCustomEnvName;

static int RegisterCustomEnv(ObjectLibrary& library, const std::string& arg) {
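  // Registers a factory that hands out a single static CustomEnv instance
  // (wrapping Env::Default()) under the given name.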
  library.AddFactory<Env>(
      arg, [](const std::string& /*name*/, std::unique_ptr<Env>* /*env_guard*/,
              std::string* /* errmsg */) {
        static CustomEnv env(Env::Default());
        return &env;
      });
  return 1;
}

// This test suite tests the old APIs of the Configure options methods.
// Once those APIs are officially deprecated, this test suite can be deleted.
class OptionsOldApiTest : public testing::Test {};

TEST_F(OptionsOldApiTest, GetOptionsFromMapTest) {
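  // Build maps of option name -> value and verify that the old FromMap
  // overloads parse every field as expected.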
  std::unordered_map<std::string, std::string> cf_options_map = {
      {"write_buffer_size", "1"},
      {"max_write_buffer_number", "2"},
      {"min_write_buffer_number_to_merge", "3"},
      {"max_write_buffer_number_to_maintain", "99"},
      {"max_write_buffer_size_to_maintain", "-99999"},
      {"compression", "kSnappyCompression"},
      {"compression_per_level",
       "kNoCompression:"
       "kSnappyCompression:"
       "kZlibCompression:"
       "kBZip2Compression:"
       "kLZ4Compression:"
       "kLZ4HCCompression:"
       "kXpressCompression:"
       "kZSTD:"
       "kZSTDNotFinalCompression"},
      {"bottommost_compression", "kLZ4Compression"},
      {"bottommost_compression_opts", "5:6:7:8:9:true"},
      {"compression_opts", "4:5:6:7:8:9:true:10:false"},
      {"num_levels", "8"},
      {"level0_file_num_compaction_trigger", "8"},
      {"level0_slowdown_writes_trigger", "9"},
      {"level0_stop_writes_trigger", "10"},
      {"target_file_size_base", "12"},
      {"target_file_size_multiplier", "13"},
      {"max_bytes_for_level_base", "14"},
      {"level_compaction_dynamic_level_bytes", "true"},
      {"level_compaction_dynamic_file_size", "true"},
      {"max_bytes_for_level_multiplier", "15.0"},
      {"max_bytes_for_level_multiplier_additional", "16:17:18"},
      {"max_compaction_bytes", "21"},
      {"soft_rate_limit", "1.1"},
      {"hard_rate_limit", "2.1"},
      {"rate_limit_delay_max_milliseconds", "100"},
      {"hard_pending_compaction_bytes_limit", "211"},
      {"arena_block_size", "22"},
      {"disable_auto_compactions", "true"},
      {"compaction_style", "kCompactionStyleLevel"},
      {"compaction_pri", "kOldestSmallestSeqFirst"},
      {"verify_checksums_in_compaction", "false"},
      {"compaction_options_fifo", "23"},
      {"max_sequential_skip_in_iterations", "24"},
      {"inplace_update_support", "true"},
      {"report_bg_io_stats", "true"},
      {"compaction_measure_io_stats", "false"},
      {"purge_redundant_kvs_while_flush", "false"},
      {"inplace_update_num_locks", "25"},
      {"memtable_prefix_bloom_size_ratio", "0.26"},
      {"memtable_whole_key_filtering", "true"},
      {"memtable_huge_page_size", "28"},
      {"bloom_locality", "29"},
      {"max_successive_merges", "30"},
      {"min_partial_merge_operands", "31"},
      {"prefix_extractor", "fixed:31"},
      {"experimental_mempurge_threshold", "0.003"},
      {"optimize_filters_for_hits", "true"},
      {"enable_blob_files", "true"},
      {"min_blob_size", "1K"},
      {"blob_file_size", "1G"},
      {"blob_compression_type", "kZSTD"},
      {"enable_blob_garbage_collection", "true"},
      {"blob_garbage_collection_age_cutoff", "0.5"},
      {"blob_garbage_collection_force_threshold", "0.75"},
      {"blob_compaction_readahead_size", "256K"},
      {"blob_file_starting_level", "1"},
      {"prepopulate_blob_cache", "kDisable"},
      {"last_level_temperature", "kWarm"},
  };

  std::unordered_map<std::string, std::string> db_options_map = {
      {"create_if_missing", "false"},
      {"create_missing_column_families", "true"},
      {"error_if_exists", "false"},
      {"paranoid_checks", "true"},
      {"track_and_verify_wals_in_manifest", "true"},
      {"verify_sst_unique_id_in_manifest", "true"},
      {"max_open_files", "32"},
      {"max_total_wal_size", "33"},
      {"use_fsync", "true"},
      {"db_log_dir", "/db_log_dir"},
      {"wal_dir", "/wal_dir"},
      {"delete_obsolete_files_period_micros", "34"},
      {"max_background_compactions", "35"},
      {"max_background_flushes", "36"},
      {"max_log_file_size", "37"},
      {"log_file_time_to_roll", "38"},
      {"keep_log_file_num", "39"},
      {"recycle_log_file_num", "5"},
      {"max_manifest_file_size", "40"},
      {"table_cache_numshardbits", "41"},
      {"WAL_ttl_seconds", "43"},
      {"WAL_size_limit_MB", "44"},
      {"manifest_preallocation_size", "45"},
      {"allow_mmap_reads", "true"},
      {"allow_mmap_writes", "false"},
      {"use_direct_reads", "false"},
      {"use_direct_io_for_flush_and_compaction", "false"},
      {"is_fd_close_on_exec", "true"},
      {"skip_log_error_on_recovery", "false"},
      {"stats_dump_period_sec", "46"},
      {"stats_persist_period_sec", "57"},
      {"persist_stats_to_disk", "false"},
      {"stats_history_buffer_size", "69"},
      {"advise_random_on_open", "true"},
      {"use_adaptive_mutex", "false"},
      {"compaction_readahead_size", "100"},
      {"random_access_max_buffer_size", "3145728"},
      {"writable_file_max_buffer_size", "314159"},
      {"bytes_per_sync", "47"},
      {"wal_bytes_per_sync", "48"},
      {"strict_bytes_per_sync", "true"},
      {"preserve_deletes", "false"},
  };

  ColumnFamilyOptions base_cf_opt;
  ColumnFamilyOptions new_cf_opt;
  ASSERT_OK(GetColumnFamilyOptionsFromMap(
      base_cf_opt, cf_options_map, &new_cf_opt));
  ASSERT_EQ(new_cf_opt.write_buffer_size, 1U);
  ASSERT_EQ(new_cf_opt.max_write_buffer_number, 2);
  ASSERT_EQ(new_cf_opt.min_write_buffer_number_to_merge, 3);
  ASSERT_EQ(new_cf_opt.max_write_buffer_number_to_maintain, 99);
  ASSERT_EQ(new_cf_opt.max_write_buffer_size_to_maintain, -99999);
  ASSERT_EQ(new_cf_opt.compression, kSnappyCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level.size(), 9U);
  ASSERT_EQ(new_cf_opt.compression_per_level[0], kNoCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[1], kSnappyCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[2], kZlibCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[3], kBZip2Compression);
  ASSERT_EQ(new_cf_opt.compression_per_level[4], kLZ4Compression);
  ASSERT_EQ(new_cf_opt.compression_per_level[5], kLZ4HCCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[6], kXpressCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[7], kZSTD);
  ASSERT_EQ(new_cf_opt.compression_per_level[8], kZSTDNotFinalCompression);
  ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
  ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
  ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
  ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7u);
  ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
  ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads, 9u);
  ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_buffer_bytes, 10u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.use_zstd_dict_trainer, false);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression, kLZ4Compression);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 9u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
CompressionOptions().parallel_threads);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, true);
|
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
Summary:
An untrained dictionary is currently simply the concatenation of several samples. The ZSTD API ZDICT_finalizeDictionary() can improve such a dictionary's effectiveness at low cost. This PR changes how the dictionary is created: it calls the ZSTD ZDICT_finalizeDictionary() API instead of building a raw content dictionary (when max_dict_buffer_bytes > 0), and passes in all buffered uncompressed data blocks as samples. A configuration sketch follows this commit note.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9857
Test Plan:
#### db_bench test for cpu/memory of compression+decompression and space saving on synthetic data:
Set up: change the parameter [here](https://github.com/facebook/rocksdb/blob/fb9a167a55e0970b1ef6f67c1600c8d9c4c6114f/tools/db_bench_tool.cc#L1766) to 16384 to make synthetic data more compressible.
```
# linked local ZSTD with version 1.5.2
# DEBUG_LEVEL=0 ROCKSDB_NO_FBCODE=1 ROCKSDB_DISABLE_ZSTD=1 EXTRA_CXXFLAGS="-DZSTD_STATIC_LINKING_ONLY -DZSTD -I/data/users/changyubi/install/include/" EXTRA_LDFLAGS="-L/data/users/changyubi/install/lib/ -l:libzstd.a" make -j32 db_bench
dict_bytes=16384
train_bytes=1048576
echo "========== No Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== Raw Content Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench_main -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== FinalizeDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== TrainDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
# Result: TrainDictionary is much better on space saving, but FinalizeDictionary seems to use less memory.
# before compression data size: 1.2GB
dict_bytes=16384
max_dict_buffer_bytes = 1048576
space cpu/memory
No Dictionary 468M 14.93user 1.00system 0:15.92elapsed 100%CPU (0avgtext+0avgdata 23904maxresident)k
Raw Dictionary 251M 15.81user 0.80system 0:16.56elapsed 100%CPU (0avgtext+0avgdata 156808maxresident)k
FinalizeDictionary 236M 11.93user 0.64system 0:12.56elapsed 100%CPU (0avgtext+0avgdata 89548maxresident)k
TrainDictionary 84M 7.29user 0.45system 0:07.75elapsed 100%CPU (0avgtext+0avgdata 97288maxresident)k
```
#### Benchmark on 10 sample SST files for spacing saving and CPU time on compression:
FinalizeDictionary is comparable to TrainDictionary in terms of space saving, and takes less time in compression.
```
dict_bytes=16384
train_bytes=1048576
for sst_file in `ls ../temp/myrock-sst/`
do
echo "********** $sst_file **********"
echo "========== No Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD
echo "========== Raw Content Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes
echo "========== FinalizeDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes --compression_use_zstd_finalize_dict
echo "========== TrainDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes
done
010240.sst (Size/Time) 011029.sst 013184.sst 021552.sst 185054.sst 185137.sst 191666.sst 7560381.sst 7604174.sst 7635312.sst
No Dictionary 28165569 / 2614419 32899411 / 2976832 32977848 / 3055542 31966329 / 2004590 33614351 / 1755877 33429029 / 1717042 33611933 / 1776936 33634045 / 2771417 33789721 / 2205414 33592194 / 388254
Raw Content Dictionary 28019950 / 2697961 33748665 / 3572422 33896373 / 3534701 26418431 / 2259658 28560825 / 1839168 28455030 / 1846039 28494319 / 1861349 32391599 / 3095649 33772142 / 2407843 33592230 / 474523
FinalizeDictionary 27896012 / 2650029 33763886 / 3719427 33904283 / 3552793 26008225 / 2198033 28111872 / 1869530 28014374 / 1789771 28047706 / 1848300 32296254 / 3204027 33698698 / 2381468 33592344 / 517433
TrainDictionary 28046089 / 2740037 33706480 / 3679019 33885741 / 3629351 25087123 / 2204558 27194353 / 1970207 27234229 / 1896811 27166710 / 1903119 32011041 / 3322315 32730692 / 2406146 33608631 / 570593
```
#### Decompression/Read test:
With FinalizeDictionary/TrainDictionary, some data structures used for decompression are stored in the dictionary, so decompression/reads are expected to be faster.
```
dict_bytes=16384
train_bytes=1048576
echo "No Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=0 > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=0 2>&1 | grep MB/s
echo "Raw Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes 2>&1 | grep MB/s
echo "FinalizeDict"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false 2>&1 | grep MB/s
echo "Train Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes 2>&1 | grep MB/s
No Dictionary
readrandom : 12.183 micros/op 82082 ops/sec 12.183 seconds 1000000 operations; 9.1 MB/s (1000000 of 1000000 found)
Raw Dictionary
readrandom : 12.314 micros/op 81205 ops/sec 12.314 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
FinalizeDict
readrandom : 9.787 micros/op 102180 ops/sec 9.787 seconds 1000000 operations; 11.3 MB/s (1000000 of 1000000 found)
Train Dictionary
readrandom : 9.698 micros/op 103108 ops/sec 9.699 seconds 1000000 operations; 11.4 MB/s (1000000 of 1000000 found)
```
Reviewed By: ajkr
Differential Revision: D35720026
Pulled By: cbi42
fbshipit-source-id: 24d230fdff0fd28a1bb650658798f00dfcfb2a1f
3 years ago
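For readers mapping the three modes benchmarked above onto the public API, here is a minimal sketch (not part of the test; the helper name and byte budgets are illustrative assumptions) of how CompressionOptions selects between a raw content dictionary, ZDICT_finalizeDictionary, and the full trainer, using the same fields this test asserts on:
```
// Sketch only: field names follow the assertions in this test; the helper
// function and the specific byte budgets are made up for illustration.
#include <rocksdb/options.h>

rocksdb::ColumnFamilyOptions MakeZstdDictOptions() {
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.compression = rocksdb::kZSTD;

  // Dictionary budget shared by all three modes (a raw content dictionary is
  // used when zstd_max_train_bytes stays 0).
  cf_opts.compression_opts.max_dict_bytes = 16 * 1024;

  // Provide up to 1 MB of buffered samples to build the dictionary from.
  cf_opts.compression_opts.zstd_max_train_bytes = 1024 * 1024;

  // false selects ZDICT_finalizeDictionary ("FinalizeDictionary" above);
  // leaving the default (true) selects the full trainer ("TrainDictionary").
  cf_opts.compression_opts.use_zstd_dict_trainer = false;
  return cf_opts;
}
```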
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_buffer_bytes,
|
|
|
|
CompressionOptions().max_dict_buffer_bytes);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
CompressionOptions().use_zstd_dict_trainer);
|
|
|
|
ASSERT_EQ(new_cf_opt.num_levels, 8);
|
|
|
|
ASSERT_EQ(new_cf_opt.level0_file_num_compaction_trigger, 8);
|
|
|
|
ASSERT_EQ(new_cf_opt.level0_slowdown_writes_trigger, 9);
|
|
|
|
ASSERT_EQ(new_cf_opt.level0_stop_writes_trigger, 10);
|
|
|
|
ASSERT_EQ(new_cf_opt.target_file_size_base, static_cast<uint64_t>(12));
|
|
|
|
ASSERT_EQ(new_cf_opt.target_file_size_multiplier, 13);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_base, 14U);
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the size of the largest level and the second-largest one can range from 0 up to the multiplier. This frequently makes the LSM tree irregular and unpredictable, and it can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter, option.level_compaction_use_dynamic_max_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier. A short usage sketch follows this note.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
10 years ago
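As a usage note, the option discussed above can be set directly on ColumnFamilyOptions or through the string-parsing API exercised throughout this test. A small, hypothetical sketch (helper name invented, and assuming the string option name matches the struct field as it does elsewhere in this test):
```
// Sketch only: parses the option from a string and checks the result.
#include <cassert>

#include <rocksdb/convenience.h>
#include <rocksdb/options.h>

void EnableDynamicLevelBytes() {
  rocksdb::ColumnFamilyOptions base, parsed;
  rocksdb::Status s = rocksdb::GetColumnFamilyOptionsFromString(
      base, "level_compaction_dynamic_level_bytes=true", &parsed);
  assert(s.ok());
  assert(parsed.level_compaction_dynamic_level_bytes);
}
```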
|
|
|
ASSERT_EQ(new_cf_opt.level_compaction_dynamic_level_bytes, true);
|
Align compaction output file boundaries to the next level ones (#10655)
Summary:
Try to align the compaction output file boundaries to the next level ones
(grandparent level), to reduce the level compaction write-amplification.
In level compaction, there is "wasted" data at the beginning and end of the
output level files; aligning the file boundaries can avoid such "wasted" compaction.
With this PR, the non-bottommost level file boundaries are aligned to the
next level's boundaries. A file may be cut when it is large enough (at least
50% of target_file_size) and not too large (at most 2x target_file_size).
db_bench shows about 12.56% compaction reduction:
```
TEST_TMPDIR=/data/dbbench2 ./db_bench --benchmarks=fillrandom,readrandom -max_background_jobs=12 -num=400000000 -target_file_size_base=33554432
# baseline:
Flush(GB): cumulative 25.882, interval 7.216
Cumulative compaction: 285.90 GB write, 162.36 MB/s write, 269.68 GB read, 153.15 MB/s read, 2926.7 seconds
# with this change:
Flush(GB): cumulative 25.882, interval 7.753
Cumulative compaction: 249.97 GB write, 141.96 MB/s write, 233.74 GB read, 132.74 MB/s read, 2534.9 seconds
```
The compaction simulator shows a similar result (14% with 100G random data).
As a side effect, with this PR, the SST file size can exceed the
target_file_size, but is capped at 2x target_file_size. And there will be
smaller files. Here are file size statistics when loading 100GB with the target
file size 32MB:
```
baseline this_PR
count 1.656000e+03 1.705000e+03
mean 3.116062e+07 3.028076e+07
std 7.145242e+06 8.046139e+06
```
The feature is enabled by default; to revert to the old behavior, disable it
with `AdvancedColumnFamilyOptions.level_compaction_dynamic_file_size = false`
(a short sketch follows this note). This PR also includes https://github.com/facebook/rocksdb/issues/1963 to cut files before a
skippable grandparent file, which helps use cases where a user adds two or more
non-overlapping data ranges at the same time, since it reduces the overlap of the two datasets in the lower levels.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10655
Reviewed By: cbi42
Differential Revision: D39552321
Pulled By: jay-zhuang
fbshipit-source-id: 640d15f159ab0cd973f2426cfc3af266fc8bdde2
2 years ago
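A short sketch of the revert knob mentioned above; the option is reachable through ColumnFamilyOptions (which derives from AdvancedColumnFamilyOptions), as the assertion below also relies on. The helper name is illustrative:
```
// Sketch only: disables the boundary-aligned file cutting described above.
#include <rocksdb/options.h>

rocksdb::ColumnFamilyOptions DisableDynamicFileSize() {
  rocksdb::ColumnFamilyOptions cf_opts;
  // The feature defaults to true; false restores the old fixed
  // target_file_size cutting behavior.
  cf_opts.level_compaction_dynamic_file_size = false;
  return cf_opts;
}
```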
|
|
|
ASSERT_EQ(new_cf_opt.level_compaction_dynamic_file_size, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier, 15.0);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional.size(), 3U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[0], 16);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[1], 17);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[2], 18);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_compaction_bytes, 21);
|
|
|
|
ASSERT_EQ(new_cf_opt.hard_pending_compaction_bytes_limit, 211);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 22U);
|
|
|
|
ASSERT_EQ(new_cf_opt.disable_auto_compactions, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.compaction_style, kCompactionStyleLevel);
|
|
|
|
ASSERT_EQ(new_cf_opt.compaction_pri, kOldestSmallestSeqFirst);
|
|
|
|
ASSERT_EQ(new_cf_opt.compaction_options_fifo.max_table_files_size,
|
|
|
|
static_cast<uint64_t>(23));
|
|
|
|
ASSERT_EQ(new_cf_opt.max_sequential_skip_in_iterations,
|
|
|
|
static_cast<uint64_t>(24));
|
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_support, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 25U);
|
|
|
|
ASSERT_EQ(new_cf_opt.memtable_prefix_bloom_size_ratio, 0.26);
|
|
|
|
ASSERT_EQ(new_cf_opt.memtable_whole_key_filtering, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.memtable_huge_page_size, 28U);
|
|
|
|
ASSERT_EQ(new_cf_opt.bloom_locality, 29U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_successive_merges, 30U);
|
|
|
|
ASSERT_TRUE(new_cf_opt.prefix_extractor != nullptr);
|
|
|
|
ASSERT_EQ(new_cf_opt.optimize_filters_for_hits, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.prefix_extractor->AsString(), "rocksdb.FixedPrefix.31");
|
|
|
|
ASSERT_EQ(new_cf_opt.experimental_mempurge_threshold, 0.003);
|
|
|
|
ASSERT_EQ(new_cf_opt.enable_blob_files, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.min_blob_size, 1ULL << 10);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_file_size, 1ULL << 30);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_compression_type, kZSTD);
|
|
|
|
ASSERT_EQ(new_cf_opt.enable_blob_garbage_collection, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_garbage_collection_age_cutoff, 0.5);
|
Make it possible to force the garbage collection of the oldest blob files (#8994)
Summary:
The current BlobDB garbage collection logic works by relocating the valid
blobs from the oldest blob files as they are encountered during compaction,
and cleaning up blob files once they contain nothing but garbage. However,
with sufficiently skewed workloads, it is theoretically possible to end up in a
situation when few or no compactions get scheduled for the SST files that contain
references to the oldest blob files, which can lead to increased space amp due
to the lack of GC.
In order to efficiently handle such workloads, the patch adds a new BlobDB
configuration option called `blob_garbage_collection_force_threshold`,
which signals to BlobDB to schedule targeted compactions for the SST files
that keep alive the oldest batch of blob files if the overall ratio of garbage in
the given blob files meets the threshold *and* all the given blob files are
eligible for GC based on `blob_garbage_collection_age_cutoff`. (For example,
if the new option is set to 0.9, targeted compactions will get scheduled if the
sum of garbage bytes meets or exceeds 90% of the sum of total bytes in the
oldest blob files, assuming all affected blob files are below the age-based cutoff.)
The net result of these targeted compactions is that the valid blobs in the oldest
blob files are relocated and the oldest blob files themselves cleaned up (since
*all* SST files that rely on them get compacted away).
These targeted compactions are similar to periodic compactions in the sense
that they force certain SST files that otherwise would not get picked up to undergo
compaction and also in the sense that instead of merging files from multiple levels,
they target a single file. (Note: such compactions might still include neighboring files
from the same level due to the need of having a "clean cut" boundary but they never
include any files from any other level.)
This functionality is currently only supported with the leveled compaction style
and is inactive by default (since the default value is set to 1.0, i.e. 100%).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8994
Test Plan: Ran `make check` and tested using `db_bench` and the stress/crash tests.
Reviewed By: riversand963
Differential Revision: D31489850
Pulled By: ltamasi
fbshipit-source-id: 44057d511726a0e2a03c5d9313d7511b3f0c4eab
3 years ago
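To illustrate how the new threshold combines with the age cutoff, here is a minimal sketch using the same ColumnFamilyOptions fields this test asserts on (the values are arbitrary and the helper name is made up):
```
// Sketch only: enables blob files and targeted GC as described above.
#include <rocksdb/options.h>

rocksdb::ColumnFamilyOptions ConfigureBlobGc() {
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.enable_blob_files = true;
  cf_opts.enable_blob_garbage_collection = true;
  // Only the oldest 50% of blob files (by age) are eligible for GC.
  cf_opts.blob_garbage_collection_age_cutoff = 0.5;
  // Schedule targeted compactions once >= 75% of the bytes in that oldest
  // batch are garbage; the default of 1.0 keeps forced GC inactive.
  cf_opts.blob_garbage_collection_force_threshold = 0.75;
  return cf_opts;
}
```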
|
|
|
ASSERT_EQ(new_cf_opt.blob_garbage_collection_force_threshold, 0.75);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_compaction_readahead_size, 262144);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_file_starting_level, 1);
|
|
|
|
ASSERT_EQ(new_cf_opt.prepopulate_blob_cache, PrepopulateBlobCache::kDisable);
|
|
|
|
ASSERT_EQ(new_cf_opt.last_level_temperature, Temperature::kWarm);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_temperature, Temperature::kWarm);
|
|
|
|
|
|
|
|
cf_options_map["write_buffer_size"] = "hello";
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(
|
|
|
|
base_cf_opt, cf_options_map, &new_cf_opt));
|
|
|
|
ConfigOptions exact, loose;
|
|
|
|
exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
|
|
|
|
loose.sanity_level = ConfigOptions::kSanityLevelLooselyCompatible;
|
|
|
|
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
cf_options_map["write_buffer_size"] = "1";
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(
|
|
|
|
base_cf_opt, cf_options_map, &new_cf_opt));
|
|
|
|
|
|
|
|
cf_options_map["unknown_option"] = "1";
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(
|
|
|
|
base_cf_opt, cf_options_map, &new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(base_cf_opt, cf_options_map,
|
|
|
|
&new_cf_opt,
|
|
|
|
false, /* input_strings_escaped */
|
|
|
|
true /* ignore_unknown_options */));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
|
|
|
|
loose, base_cf_opt, new_cf_opt, nullptr /* new_opt_map */));
|
|
|
|
ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
|
|
|
|
exact /* default for VerifyCFOptions */, base_cf_opt, new_cf_opt, nullptr));
|
|
|
|
|
|
|
|
DBOptions base_db_opt;
|
|
|
|
DBOptions new_db_opt;
|
|
|
|
ASSERT_OK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt));
|
|
|
|
ASSERT_EQ(new_db_opt.create_if_missing, false);
|
|
|
|
ASSERT_EQ(new_db_opt.create_missing_column_families, true);
|
|
|
|
ASSERT_EQ(new_db_opt.error_if_exists, false);
|
|
|
|
ASSERT_EQ(new_db_opt.paranoid_checks, true);
|
|
|
|
ASSERT_EQ(new_db_opt.track_and_verify_wals_in_manifest, true);
|
|
|
|
ASSERT_EQ(new_db_opt.max_open_files, 32);
|
|
|
|
ASSERT_EQ(new_db_opt.max_total_wal_size, static_cast<uint64_t>(33));
|
|
|
|
ASSERT_EQ(new_db_opt.use_fsync, true);
|
|
|
|
ASSERT_EQ(new_db_opt.db_log_dir, "/db_log_dir");
|
|
|
|
ASSERT_EQ(new_db_opt.wal_dir, "/wal_dir");
|
|
|
|
ASSERT_EQ(new_db_opt.delete_obsolete_files_period_micros,
|
|
|
|
static_cast<uint64_t>(34));
|
|
|
|
ASSERT_EQ(new_db_opt.max_background_compactions, 35);
|
|
|
|
ASSERT_EQ(new_db_opt.max_background_flushes, 36);
|
|
|
|
ASSERT_EQ(new_db_opt.max_log_file_size, 37U);
|
|
|
|
ASSERT_EQ(new_db_opt.log_file_time_to_roll, 38U);
|
|
|
|
ASSERT_EQ(new_db_opt.keep_log_file_num, 39U);
|
|
|
|
ASSERT_EQ(new_db_opt.recycle_log_file_num, 5U);
|
|
|
|
ASSERT_EQ(new_db_opt.max_manifest_file_size, static_cast<uint64_t>(40));
|
|
|
|
ASSERT_EQ(new_db_opt.table_cache_numshardbits, 41);
|
|
|
|
ASSERT_EQ(new_db_opt.WAL_ttl_seconds, static_cast<uint64_t>(43));
|
|
|
|
ASSERT_EQ(new_db_opt.WAL_size_limit_MB, static_cast<uint64_t>(44));
|
|
|
|
ASSERT_EQ(new_db_opt.manifest_preallocation_size, 45U);
|
|
|
|
ASSERT_EQ(new_db_opt.allow_mmap_reads, true);
|
|
|
|
ASSERT_EQ(new_db_opt.allow_mmap_writes, false);
|
|
|
|
ASSERT_EQ(new_db_opt.use_direct_reads, false);
|
|
|
|
ASSERT_EQ(new_db_opt.use_direct_io_for_flush_and_compaction, false);
|
|
|
|
ASSERT_EQ(new_db_opt.is_fd_close_on_exec, true);
|
|
|
|
ASSERT_EQ(new_db_opt.stats_dump_period_sec, 46U);
|
|
|
|
ASSERT_EQ(new_db_opt.stats_persist_period_sec, 57U);
|
|
|
|
ASSERT_EQ(new_db_opt.persist_stats_to_disk, false);
|
|
|
|
ASSERT_EQ(new_db_opt.stats_history_buffer_size, 69U);
|
|
|
|
ASSERT_EQ(new_db_opt.advise_random_on_open, true);
|
|
|
|
ASSERT_EQ(new_db_opt.use_adaptive_mutex, false);
|
|
|
|
ASSERT_EQ(new_db_opt.compaction_readahead_size, 100);
|
|
|
|
ASSERT_EQ(new_db_opt.random_access_max_buffer_size, 3145728);
|
|
|
|
ASSERT_EQ(new_db_opt.writable_file_max_buffer_size, 314159);
|
|
|
|
ASSERT_EQ(new_db_opt.bytes_per_sync, static_cast<uint64_t>(47));
|
|
|
|
ASSERT_EQ(new_db_opt.wal_bytes_per_sync, static_cast<uint64_t>(48));
|
Optionally wait on bytes_per_sync to smooth I/O (#5183)
Summary:
The existing implementation does not guarantee bytes reach disk every `bytes_per_sync` when writing SST files, or every `wal_bytes_per_sync` when writing WALs. This can cause confusing behavior for users who enable this feature to avoid large syncs during flush and compaction, but then end up hitting them anyway.
My understanding of the existing behavior is we used `sync_file_range` with `SYNC_FILE_RANGE_WRITE` to submit ranges for async writeback, such that we could continue processing the next range of bytes while that I/O is happening. I believe we can preserve that benefit while also limiting how far the processing can get ahead of the I/O, which prevents huge syncs from happening when the file finishes.
Consider this `sync_file_range` usage: `sync_file_range(fd_, 0, static_cast<off_t>(offset + nbytes), SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE)`. Expanding the range to start at 0 and adding the `SYNC_FILE_RANGE_WAIT_BEFORE` flag causes any pending writeback (like from a previous call to `sync_file_range`) to finish before it proceeds to submit the latest `nbytes` for writeback. The latest `nbytes` are still written back asynchronously, unless processing exceeds I/O speed, in which case the following `sync_file_range` will need to wait on it.
There is a second change in this PR to use `fdatasync` when `sync_file_range` is unavailable (determined statically) or has some known problem with the underlying filesystem (determined dynamically).
The above two changes only apply when the user enables a new option, `strict_bytes_per_sync`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5183
Differential Revision: D14953553
Pulled By: siying
fbshipit-source-id: 445c3862e019fb7b470f9c7f314fc231b62706e9
6 years ago
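For context on the option asserted below, a minimal sketch of opting into the stricter syncing behavior described above (the 1 MB values and the helper name are illustrative assumptions, not recommendations):
```
// Sketch only: caps how far buffered writes may run ahead of writeback.
#include <rocksdb/options.h>

rocksdb::DBOptions ConfigureSmoothSyncs() {
  rocksdb::DBOptions db_opts;
  db_opts.bytes_per_sync = 1024 * 1024;      // SST writeback granularity
  db_opts.wal_bytes_per_sync = 1024 * 1024;  // WAL writeback granularity
  // Also wait on previously submitted ranges so writeback cannot fall
  // arbitrarily far behind the writer.
  db_opts.strict_bytes_per_sync = true;
  return db_opts;
}
```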
|
|
|
ASSERT_EQ(new_db_opt.strict_bytes_per_sync, true);
|
|
|
|
|
|
|
|
db_options_map["max_open_files"] = "hello";
|
|
|
|
ASSERT_NOK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(loose, base_db_opt, new_db_opt));
|
|
|
|
|
|
|
|
// unknown options should fail parsing without ignore_unknown_options = true
|
|
|
|
db_options_map["unknown_db_option"] = "1";
|
|
|
|
ASSERT_NOK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
|
|
|
|
ASSERT_OK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt,
|
|
|
|
false, /* input_strings_escaped */
|
|
|
|
true /* ignore_unknown_options */));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(loose, base_db_opt, new_db_opt));
|
|
|
|
ASSERT_NOK(RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsOldApiTest, GetColumnFamilyOptionsFromStringTest) {
|
|
|
|
ColumnFamilyOptions base_cf_opt;
|
|
|
|
ColumnFamilyOptions new_cf_opt;
|
|
|
|
base_cf_opt.table_factory.reset();
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt, "", &new_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=5", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 5U);
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory == nullptr);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=6;", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 6U);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
" write_buffer_size = 7 ", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 7U);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
" write_buffer_size = 8 ; ", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 8U);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=9;max_write_buffer_number=10", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 9U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 10);
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=11; max_write_buffer_number = 12 ;",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 11U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 12);
|
|
|
|
// Wrong name "max_write_buffer_number_"
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number_=14;",
|
|
|
|
&new_cf_opt));
|
|
|
|
ConfigOptions exact;
|
|
|
|
exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
// Comparator from object registry
|
|
|
|
std::string kCompName = "reverse_comp";
|
|
|
|
ObjectLibrary::Default()->AddFactory<const Comparator>(
|
|
|
|
kCompName,
|
|
|
|
[](const std::string& /*name*/,
|
|
|
|
std::unique_ptr<const Comparator>* /*guard*/,
|
|
|
|
std::string* /* errmsg */) { return ReverseBytewiseComparator(); });
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
base_cf_opt, "comparator=" + kCompName + ";", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.comparator, ReverseBytewiseComparator());
|
|
|
|
|
|
|
|
// MergeOperator from object registry
|
|
|
|
std::unique_ptr<BytesXOROperator> bxo(new BytesXOROperator());
|
|
|
|
std::string kMoName = bxo->Name();
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
base_cf_opt, "merge_operator=" + kMoName + ";", &new_cf_opt));
|
|
|
|
ASSERT_EQ(kMoName, std::string(new_cf_opt.merge_operator->Name()));
|
|
|
|
|
|
|
|
// Wrong key/value pair
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number;", &new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
// Error parsing value
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number=;", &new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
// Missing option name
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=13; =100;", &new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
const uint64_t kilo = 1024UL;
|
|
|
|
const uint64_t mega = 1024 * kilo;
|
|
|
|
const uint64_t giga = 1024 * mega;
|
|
|
|
const uint64_t tera = 1024 * giga;
|
|
|
|
|
|
|
|
// Units (k)
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
base_cf_opt, "max_write_buffer_number=15K", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 15 * kilo);
|
|
|
|
// Units (m)
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"max_write_buffer_number=16m;inplace_update_num_locks=17M",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16 * mega);
|
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17u * mega);
|
|
|
|
// Units (g)
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
base_cf_opt,
|
|
|
|
"write_buffer_size=18g;prefix_extractor=capped:8;"
|
|
|
|
"arena_block_size=19G",
|
|
|
|
&new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 18 * giga);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 19 * giga);
|
|
|
|
ASSERT_TRUE(new_cf_opt.prefix_extractor.get() != nullptr);
|
|
|
|
ASSERT_EQ(new_cf_opt.prefix_extractor->AsString(), "rocksdb.CappedPrefix.8");
|
|
|
|
|
|
|
|
// Units (t)
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=20t;arena_block_size=21T", &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 20 * tera);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 21 * tera);
|
|
|
|
|
|
|
|
// Nested block based table options
|
|
|
|
// Empty
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={};arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Non-empty
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_cache=1M;block_size=4;};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Last one
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_cache=1M;block_size=4;}",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Mismatch curly braces
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={{{block_size=4;};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
// Unexpected chars after closing curly brace
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_size=4;}};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_size=4;}xdfa;"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_size=4;}xdfa",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
// Invalid block based table option
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={xx_block_size=4;}",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"optimize_filters_for_hits=true",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"optimize_filters_for_hits=false",
|
|
|
|
&new_cf_opt));
|
|
|
|
|
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"optimize_filters_for_hits=junk",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
|
|
|
|
|
|
|
// Nested plain table options
|
|
|
|
// Empty
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"plain_table_factory={};arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");
|
|
|
|
// Non-empty
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"plain_table_factory={user_key_len=66;bloom_bits_per_key=20;};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");
|
|
|
|
|
|
|
|
// memtable factory
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"memtable=skip_list:10;arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_TRUE(new_cf_opt.memtable_factory != nullptr);
|
|
|
|
ASSERT_TRUE(new_cf_opt.memtable_factory->IsInstanceOf("SkipListFactory"));
|
|
|
|
|
|
|
|
// blob cache
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
base_cf_opt,
|
|
|
|
"blob_cache={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;high_pri_pool_ratio=0.5;};",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_NE(new_cf_opt.blob_cache, nullptr);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_cache->GetCapacity(), 1024UL * 1024UL);
|
|
|
|
ASSERT_EQ(static_cast<ShardedCacheBase*>(new_cf_opt.blob_cache.get())
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_cache->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(static_cast<LRUCache*>(new_cf_opt.blob_cache.get())
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, SliceTransformCreateFromString) {
|
|
|
|
std::shared_ptr<const SliceTransform> transform = nullptr;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.ignore_unsupported_options = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
|
|
|
|
ASSERT_OK(
|
|
|
|
SliceTransform::CreateFromString(config_options, "fixed:31", &transform));
|
|
|
|
ASSERT_NE(transform, nullptr);
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("capped"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("fixed"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("rocksdb.FixedPrefix"));
|
|
|
|
ASSERT_EQ(transform->GetId(), "rocksdb.FixedPrefix.31");
|
|
|
|
ASSERT_OK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.FixedPrefix.42", &transform));
|
|
|
|
ASSERT_NE(transform, nullptr);
|
|
|
|
ASSERT_EQ(transform->GetId(), "rocksdb.FixedPrefix.42");
|
|
|
|
|
|
|
|
ASSERT_OK(SliceTransform::CreateFromString(config_options, "capped:16",
|
|
|
|
&transform));
|
|
|
|
ASSERT_NE(transform, nullptr);
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("fixed"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("capped"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("rocksdb.CappedPrefix"));
|
|
|
|
ASSERT_EQ(transform->GetId(), "rocksdb.CappedPrefix.16");
|
|
|
|
ASSERT_OK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.CappedPrefix.42", &transform));
|
|
|
|
ASSERT_NE(transform, nullptr);
|
|
|
|
ASSERT_EQ(transform->GetId(), "rocksdb.CappedPrefix.42");
|
|
|
|
|
|
|
|
ASSERT_OK(SliceTransform::CreateFromString(config_options, "rocksdb.Noop",
|
|
|
|
&transform));
|
|
|
|
ASSERT_NE(transform, nullptr);
|
|
|
|
|
|
|
|
ASSERT_NOK(SliceTransform::CreateFromString(config_options,
|
|
|
|
"fixed:21:invalid", &transform));
|
|
|
|
ASSERT_NOK(SliceTransform::CreateFromString(config_options,
|
|
|
|
"capped:21:invalid", &transform));
|
|
|
|
ASSERT_NOK(
|
|
|
|
SliceTransform::CreateFromString(config_options, "fixed", &transform));
|
|
|
|
ASSERT_NOK(
|
|
|
|
SliceTransform::CreateFromString(config_options, "capped", &transform));
|
|
|
|
ASSERT_NOK(
|
|
|
|
SliceTransform::CreateFromString(config_options, "fixed:", &transform));
|
|
|
|
ASSERT_NOK(
|
|
|
|
SliceTransform::CreateFromString(config_options, "capped:", &transform));
|
|
|
|
ASSERT_NOK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.FixedPrefix:42", &transform));
|
|
|
|
ASSERT_NOK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.CappedPrefix:42", &transform));
|
|
|
|
ASSERT_NOK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.FixedPrefix", &transform));
|
|
|
|
ASSERT_NOK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.CappedPrefix", &transform));
|
|
|
|
ASSERT_NOK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.FixedPrefix.", &transform));
|
|
|
|
ASSERT_NOK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.CappedPrefix.", &transform));
|
|
|
|
ASSERT_NOK(
|
|
|
|
SliceTransform::CreateFromString(config_options, "invalid", &transform));
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
ASSERT_OK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.CappedPrefix.11", &transform));
|
|
|
|
ASSERT_NE(transform, nullptr);
|
|
|
|
ASSERT_EQ(transform->GetId(), "rocksdb.CappedPrefix.11");
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("capped"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("capped:11"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("rocksdb.CappedPrefix"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("rocksdb.CappedPrefix.11"));
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("fixed"));
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("fixed:11"));
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("rocksdb.FixedPrefix"));
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("rocksdb.FixedPrefix.11"));
|
|
|
|
|
|
|
|
ASSERT_OK(SliceTransform::CreateFromString(
|
|
|
|
config_options, "rocksdb.FixedPrefix.11", &transform));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("fixed"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("fixed:11"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("rocksdb.FixedPrefix"));
|
|
|
|
ASSERT_TRUE(transform->IsInstanceOf("rocksdb.FixedPrefix.11"));
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("capped"));
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("capped:11"));
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("rocksdb.CappedPrefix"));
|
|
|
|
ASSERT_FALSE(transform->IsInstanceOf("rocksdb.CappedPrefix.11"));
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsOldApiTest, GetBlockBasedTableOptionsFromString) {
|
|
|
|
BlockBasedTableOptions table_opt;
|
|
|
|
BlockBasedTableOptions new_opt;
|
|
|
|
// make sure default values are overwritten by something else
|
Allow fractional bits/key in BloomFilterPolicy (#6092)
Summary:
There's no technological impediment to allowing the Bloom
filter bits/key to be non-integer (fractional/decimal) values, and it
provides finer control over the memory vs. accuracy trade-off. This is
especially handy in using the format_version=5 Bloom filter in place
of the old one, because bits_per_key=9.55 provides the same accuracy as
the old bits_per_key=10.
This change not only requires refining the logic for choosing the best
num_probes for a given bits/key setting, it revealed a flaw in that logic.
As bits/key gets higher, the best num_probes for a cache-local Bloom
filter is closer to bpk / 2 than to bpk * 0.69, the best choice for a
standard Bloom filter. For example, at 16 bits per key, the best
num_probes is 9 (FP rate = 0.0843%) not 11 (FP rate = 0.0884%).
This change fixes and refines that logic (for the format_version=5
Bloom filter only, just in case) based on empirical tests to find
accuracy inflection points between each num_probes.
Although bits_per_key is now specified as a double, the new Bloom
filter converts/rounds this to "millibits / key" for predictable/precise
internal computations. Just in case of unforeseen compatibility
issues, we round to the nearest whole number bits / key for the
legacy Bloom filter, so as not to unlock new behaviors for it.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6092
Test Plan: unit tests included
Differential Revision: D18711313
Pulled By: pdillinger
fbshipit-source-id: 1aa73295f152a995328cb846ef9157ae8a05522a
5 years ago
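A small sketch of the fractional bits/key setting described above, using the public NewBloomFilterPolicy() API rather than the option-string path this test exercises (9.55 is the example value from the summary; the helper name is invented):
```
// Sketch only: bits_per_key is a double and is rounded internally to
// millibits/key (9.55 -> 9550) for the format_version=5 filter.
#include <rocksdb/filter_policy.h>
#include <rocksdb/table.h>

rocksdb::BlockBasedTableOptions MakeFractionalBloomOptions() {
  rocksdb::BlockBasedTableOptions table_opts;
  table_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(9.55));
  return table_opts;
}
```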
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;index_type=kHashSearch;"
|
|
|
|
"checksum=kxxHash;no_block_cache=1;"
|
|
|
|
"block_cache=1M;block_cache_compressed=1k;block_size=1024;"
|
|
|
|
"block_size_deviation=8;block_restart_interval=4;"
|
|
|
|
"format_version=5;whole_key_filtering=1;"
|
|
|
|
"filter_policy=bloomfilter:4.567:false;",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(new_opt.index_type, BlockBasedTableOptions::kHashSearch);
|
|
|
|
ASSERT_EQ(new_opt.checksum, ChecksumType::kxxHash);
|
|
|
|
ASSERT_TRUE(new_opt.no_block_cache);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL);
|
|
|
|
ASSERT_EQ(new_opt.block_size, 1024UL);
|
|
|
|
ASSERT_EQ(new_opt.block_size_deviation, 8);
|
|
|
|
ASSERT_EQ(new_opt.block_restart_interval, 4);
|
|
|
|
ASSERT_EQ(new_opt.format_version, 5U);
|
|
|
|
ASSERT_EQ(new_opt.whole_key_filtering, true);
|
|
|
|
ASSERT_TRUE(new_opt.filter_policy != nullptr);
|
|
|
|
const BloomFilterPolicy* bfp =
|
|
|
|
dynamic_cast<const BloomFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(bfp->GetMillibitsPerKey(), 4567);
|
|
|
|
EXPECT_EQ(bfp->GetWholeBitsPerKey(), 5);
|
|
|
|
|
|
|
|
// unknown option
|
|
|
|
ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;index_type=kBinarySearch;"
|
|
|
|
"bad_option=1",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_EQ(static_cast<bool>(table_opt.cache_index_and_filter_blocks),
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized index type
|
|
|
|
ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;index_type=kBinarySearchXX",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized checksum type
|
|
|
|
ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;checksum=kxxHashXX",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized filter policy name
|
|
|
|
ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;"
|
|
|
|
"filter_policy=bloomfilterxx:4:true",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.filter_policy, new_opt.filter_policy);
|
|
|
|
|
|
|
|
// Used to be rejected, now accepted
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
table_opt, "filter_policy=bloomfilter:4", &new_opt));
|
|
|
|
bfp = dynamic_cast<const BloomFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(bfp->GetMillibitsPerKey(), 4000);
|
|
|
|
EXPECT_EQ(bfp->GetWholeBitsPerKey(), 4);
|
|
|
|
|
|
|
|
// Check block cache options are overwritten when specified
|
|
|
|
// in new format as a struct.
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(table_opt,
|
|
|
|
"block_cache={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;high_pri_pool_ratio=0.5;};"
|
|
|
|
"block_cache_compressed={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;high_pri_pool_ratio=0.5;}",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
|
|
|
|
new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
|
|
|
|
new_opt.block_cache_compressed)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
|
|
|
|
new_opt.block_cache_compressed)->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
|
|
|
|
// Set only block cache capacity. Check other values are
|
|
|
|
// reset to default values.
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(table_opt,
|
|
|
|
"block_cache={capacity=2M};"
|
|
|
|
"block_cache_compressed={capacity=2M}",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 2*1024UL*1024UL);
|
|
|
|
// Default values
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
GetDefaultCacheShardBits(new_opt.block_cache->GetCapacity()));
|
|
|
|
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 2*1024UL*1024UL);
|
|
|
|
// Default values
|
|
|
|
ASSERT_EQ(
|
|
|
|
std::dynamic_pointer_cast<ShardedCacheBase>(
|
|
|
|
new_opt.block_cache_compressed)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
GetDefaultCacheShardBits(new_opt.block_cache_compressed->GetCapacity()));
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
|
|
|
|
// Set couple of block cache options.
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
table_opt,
|
|
|
|
"block_cache={num_shard_bits=5;high_pri_pool_ratio=0.5;};"
|
|
|
|
"block_cache_compressed={num_shard_bits=5;"
|
|
|
|
"high_pri_pool_ratio=0.0;}",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 0);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
5);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
|
|
|
|
new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 0);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
|
|
|
|
new_opt.block_cache_compressed)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
5);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.0);
|
|
|
|
|
|
|
|
// Set couple of block cache options.
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(table_opt,
|
|
|
|
"block_cache={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;};"
|
|
|
|
"block_cache_compressed={capacity=1M;num_shard_bits=4;"
|
|
|
|
"strict_capacity_limit=true;}",
|
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
|
|
|
|
new_opt.block_cache_compressed)
|
|
|
|
->GetNumShardBits(),
|
|
|
|
4);
|
|
|
|
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
|
|
|
|
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
|
|
|
|
->GetHighPriPoolRatio(),
|
|
|
|
0.5);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsOldApiTest, GetPlainTableOptionsFromString) {
  PlainTableOptions table_opt;
  PlainTableOptions new_opt;
  // make sure default values are overwritten by something else
  ASSERT_OK(GetPlainTableOptionsFromString(table_opt,
             "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
             "index_sparseness=8;huge_page_tlb_size=4;encoding_type=kPrefix;"
             "full_scan_mode=true;store_index_in_file=true",
             &new_opt));
  ASSERT_EQ(new_opt.user_key_len, 66u);
  ASSERT_EQ(new_opt.bloom_bits_per_key, 20);
  ASSERT_EQ(new_opt.hash_table_ratio, 0.5);
  ASSERT_EQ(new_opt.index_sparseness, 8);
  ASSERT_EQ(new_opt.huge_page_tlb_size, 4);
  ASSERT_EQ(new_opt.encoding_type, EncodingType::kPrefix);
  ASSERT_TRUE(new_opt.full_scan_mode);
  ASSERT_TRUE(new_opt.store_index_in_file);

  std::unordered_map<std::string, std::string> opt_map;
  ASSERT_OK(StringToMap(
      "user_key_len=55;bloom_bits_per_key=10;huge_page_tlb_size=8;", &opt_map));
  ASSERT_OK(GetPlainTableOptionsFromMap(table_opt, opt_map, &new_opt));
  ASSERT_EQ(new_opt.user_key_len, 55u);
  ASSERT_EQ(new_opt.bloom_bits_per_key, 10);
  ASSERT_EQ(new_opt.huge_page_tlb_size, 8);

  // unknown option
  ASSERT_NOK(GetPlainTableOptionsFromString(table_opt,
             "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
             "bad_option=1",
             &new_opt));

  // unrecognized EncodingType
  ASSERT_NOK(GetPlainTableOptionsFromString(table_opt,
             "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
             "encoding_type=kPrefixXX",
             &new_opt));
}

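// GetOptionsFromString should apply a flat option string on top of a base
// Options object: nested structs such as block_based_table_factory and
// compression_opts are given as their own option strings, unspecified fields
// keep the base values, and env=<name> is resolved through the ObjectLibrary
// registration made inside the test.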
TEST_F(OptionsOldApiTest, GetOptionsFromStringTest) {
  Options base_options, new_options;
  base_options.write_buffer_size = 20;
  base_options.min_write_buffer_number_to_merge = 15;
  BlockBasedTableOptions block_based_table_options;
  block_based_table_options.cache_index_and_filter_blocks = true;
  base_options.table_factory.reset(
      NewBlockBasedTableFactory(block_based_table_options));

  // Register an Env with object registry.
  ObjectLibrary::Default()->AddFactory<Env>(
      "CustomEnvDefault",
      [](const std::string& /*name*/, std::unique_ptr<Env>* /*env_guard*/,
         std::string* /* errmsg */) {
        static CustomEnv env(Env::Default());
        return &env;
      });

  ASSERT_OK(GetOptionsFromString(
      base_options,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "block_based_table_factory={block_cache=1M;block_size=4;};"
      "compression_opts=4:5:6;create_if_missing=true;max_open_files=1;"
      "bottommost_compression_opts=5:6:7;create_if_missing=true;max_open_files="
      "1;"
      "rate_limiter_bytes_per_sec=1024;env=CustomEnvDefault",
      &new_options));

  ASSERT_EQ(new_options.compression_opts.window_bits, 4);
  ASSERT_EQ(new_options.compression_opts.level, 5);
  ASSERT_EQ(new_options.compression_opts.strategy, 6);
  ASSERT_EQ(new_options.compression_opts.max_dict_bytes, 0u);
  ASSERT_EQ(new_options.compression_opts.zstd_max_train_bytes, 0u);
  ASSERT_EQ(new_options.compression_opts.parallel_threads, 1u);
  ASSERT_EQ(new_options.compression_opts.enabled, false);
  ASSERT_EQ(new_options.compression_opts.use_zstd_dict_trainer, true);
  ASSERT_EQ(new_options.bottommost_compression, kDisableCompressionOption);
  ASSERT_EQ(new_options.bottommost_compression_opts.window_bits, 5);
  ASSERT_EQ(new_options.bottommost_compression_opts.level, 6);
  ASSERT_EQ(new_options.bottommost_compression_opts.strategy, 7);
  ASSERT_EQ(new_options.bottommost_compression_opts.max_dict_bytes, 0u);
  ASSERT_EQ(new_options.bottommost_compression_opts.zstd_max_train_bytes, 0u);
  ASSERT_EQ(new_options.bottommost_compression_opts.parallel_threads, 1u);
  ASSERT_EQ(new_options.bottommost_compression_opts.enabled, false);
  ASSERT_EQ(new_options.bottommost_compression_opts.use_zstd_dict_trainer,
            true);
  ASSERT_EQ(new_options.write_buffer_size, 10U);
  ASSERT_EQ(new_options.max_write_buffer_number, 16);

  auto new_block_based_table_options =
      new_options.table_factory->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(new_block_based_table_options, nullptr);
  ASSERT_EQ(new_block_based_table_options->block_cache->GetCapacity(),
            1U << 20);
  ASSERT_EQ(new_block_based_table_options->block_size, 4U);
  // don't overwrite block based table options
  ASSERT_TRUE(new_block_based_table_options->cache_index_and_filter_blocks);

  ASSERT_EQ(new_options.create_if_missing, true);
  ASSERT_EQ(new_options.max_open_files, 1);
  ASSERT_TRUE(new_options.rate_limiter.get() != nullptr);

  Env* newEnv = new_options.env;
  ASSERT_OK(Env::LoadEnv("CustomEnvDefault", &newEnv));
  ASSERT_EQ(newEnv, new_options.env);
}

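// DBOptionsSerialization: round-trip randomly initialized DBOptions through
// GetStringFromDBOptions() and GetDBOptionsFromString(), then check that the
// result matches the original with RocksDBOptionsParser::VerifyDBOptions.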
TEST_F(OptionsOldApiTest, DBOptionsSerialization) {
  Options base_options, new_options;
  Random rnd(301);

  // Phase 1: Make big change in base_options
  test::RandomInitDBOptions(&base_options, &rnd);

  // Phase 2: obtain a string from base_options
  std::string base_options_file_content;
  ASSERT_OK(GetStringFromDBOptions(&base_options_file_content, base_options));

  // Phase 3: Set new_options from the derived string and expect
  // new_options == base_options
  ASSERT_OK(GetDBOptionsFromString(DBOptions(), base_options_file_content,
                                   &new_options));
  ConfigOptions config_options;
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_options, base_options,
                                                  new_options));
}

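// ColumnFamilyOptionsSerialization: the same round trip as above, but for
// ColumnFamilyOptions via GetStringFromColumnFamilyOptions() and
// GetColumnFamilyOptionsFromString(), verified with VerifyCFOptions. Any
// compaction_filter created for base_opt is a raw pointer, so the test
// deletes it at the end.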
TEST_F(OptionsOldApiTest, ColumnFamilyOptionsSerialization) {
  Options options;
  ColumnFamilyOptions base_opt, new_opt;
  Random rnd(302);
  // Phase 1: randomly assign base_opt
  // custom type options
  test::RandomInitCFOptions(&base_opt, options, &rnd);

  // Phase 2: obtain a string from base_opt
  std::string base_options_file_content;
  ASSERT_OK(
      GetStringFromColumnFamilyOptions(&base_options_file_content, base_opt));

  // Phase 3: Set new_opt from the derived string and expect
  // new_opt == base_opt
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      ColumnFamilyOptions(), base_options_file_content, &new_opt));
  ConfigOptions config_options;
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(config_options, base_opt, new_opt));
  if (base_opt.compaction_filter) {
    delete base_opt.compaction_filter;
  }
}
#endif  // !ROCKSDB_LITE

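// The OptionsParserTest fixture below exercises RocksDBOptionsParser against
// hand-written option files stored in an in-memory test::StringFS. The tests
// use a parse-and-verify round trip roughly of the form:
//   RocksDBOptionsParser parser;
//   Status s = parser.Parse(file_name, fs, false, 4096 /* readahead_size */);
//   // ...then compare *parser.db_opt() / *parser.GetCFOptions("default")
//   // against the expected options via RocksDBOptionsParser::Verify*Options().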
#ifndef ROCKSDB_LITE
class OptionsParserTest : public testing::Test {
 public:
  OptionsParserTest() { fs_.reset(new test::StringFS(FileSystem::Default())); }
 protected:
  std::shared_ptr<test::StringFS> fs_;
};

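// Comment: the parser should accept "#"-style comments (whole-line and
// trailing), ignore commented-out statements, and still recover the DBOptions
// values written into the file content below.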
TEST_F(OptionsParserTest, Comment) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[ DBOptions ]\n"
      " # note that we don't support space around \"=\"\n"
      " max_open_files=12345;\n"
      " max_background_flushes=301 # comment after a statement is fine\n"
      " # max_background_flushes=1000 # this line would be ignored\n"
      " # max_background_compactions=2000 # so does this one\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[CFOptions \"default\"] # column family must be specified\n"
      " # in the correct order\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_OK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));

  ConfigOptions exact;
  exact.input_strings_escaped = false;
  exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
  ASSERT_OK(
      RocksDBOptionsParser::VerifyDBOptions(exact, *parser.db_opt(), db_opt));
  ASSERT_EQ(parser.NumColumnFamilies(), 1U);
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
      exact, *parser.GetCFOptions("default"), cf_opt));
}

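// ExtraSpace: the parser should tolerate the extra whitespace in the option
// file below (around section headers, option names, and values) and still
// parse successfully.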
TEST_F(OptionsParserTest, ExtraSpace) {
  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[ Version ]\n"
      " rocksdb_version = 3.14.0 \n"
      " options_file_version=1 # some comment\n"
      "[DBOptions ] # some comment\n"
      "max_open_files=12345 \n"
      " max_background_flushes = 301 \n"
      " max_total_wal_size = 1024 # keep_log_file_num=1000\n"
      " [CFOptions \"default\" ]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_OK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}

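// MissingDBOptions: an options file without a [DBOptions] section should be
// rejected by the parser.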
TEST_F(OptionsParserTest, MissingDBOptions) {
  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[CFOptions \"default\"]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}

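// DoubleDBOptions: a file that declares the [DBOptions] section twice should
// be rejected by the parser.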
TEST_F(OptionsParserTest, DoubleDBOptions) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[DBOptions]\n"
      " max_open_files=12345\n"
      " max_background_flushes=301\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[DBOptions]\n"
      "[CFOptions \"default\"]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
RocksDBOptionsParser parser;
|
|
|
|
ASSERT_NOK(
|
|
|
|
parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
9 years ago
|
|
|
}
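
// An options file without a [CFOptions "default"] section should fail to parse.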
TEST_F(OptionsParserTest, NoDefaultCFOptions) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[DBOptions]\n"
      " max_open_files=12345\n"
      " max_background_flushes=301\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[CFOptions \"something_else\"]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}
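
// The [CFOptions "default"] section must appear before any other CFOptions
// section; otherwise parsing fails.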
TEST_F(OptionsParserTest, DefaultCFOptionsMustBeTheFirst) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[DBOptions]\n"
      " max_open_files=12345\n"
      " max_background_flushes=301\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[CFOptions \"something_else\"]\n"
      " # if a section is blank, we will use the default\n"
      "[CFOptions \"default\"]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}
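
// Repeating the same CFOptions section twice should make parsing fail.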
TEST_F(OptionsParserTest, DuplicateCFOptions) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[DBOptions]\n"
      " max_open_files=12345\n"
      " max_background_flushes=301\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[CFOptions \"default\"]\n"
      "[CFOptions \"something_else\"]\n"
      "[CFOptions \"something_else\"]\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}
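
// Unknown options always fail a strict parse; with ignore_unknown_options set,
// they are tolerated only when the file was written by a newer RocksDB version.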
TEST_F(OptionsParserTest, IgnoreUnknownOptions) {
  for (int case_id = 0; case_id < 5; case_id++) {
    DBOptions db_opt;
    db_opt.max_open_files = 12345;
    db_opt.max_background_flushes = 301;
    db_opt.max_total_wal_size = 1024;
    ColumnFamilyOptions cf_opt;

    std::string version_string;
    bool should_ignore = true;
    if (case_id == 0) {
      // same version
      should_ignore = false;
      version_string = std::to_string(ROCKSDB_MAJOR) + "." +
                       std::to_string(ROCKSDB_MINOR) + ".0";
    } else if (case_id == 1) {
      // higher minor version
      should_ignore = true;
      version_string = std::to_string(ROCKSDB_MAJOR) + "." +
                       std::to_string(ROCKSDB_MINOR + 1) + ".0";
    } else if (case_id == 2) {
      // higher major version.
      should_ignore = true;
      version_string = std::to_string(ROCKSDB_MAJOR + 1) + ".0.0";
    } else if (case_id == 3) {
      // lower minor version
#if ROCKSDB_MINOR == 0
      continue;
#else
      version_string = std::to_string(ROCKSDB_MAJOR) + "." +
                       std::to_string(ROCKSDB_MINOR - 1) + ".0";
      should_ignore = false;
#endif
    } else {
      // lower major version
      should_ignore = false;
      version_string = std::to_string(ROCKSDB_MAJOR - 1) + "." +
                       std::to_string(ROCKSDB_MINOR) + ".0";
    }

    std::string options_file_content =
        "# This is a testing option string.\n"
        "# Currently we only support \"#\" styled comment.\n"
        "\n"
        "[Version]\n"
        " rocksdb_version=" +
        version_string +
        "\n"
        " options_file_version=1\n"
        "[DBOptions]\n"
        " max_open_files=12345\n"
        " max_background_flushes=301\n"
        " max_total_wal_size=1024 # keep_log_file_num=1000\n"
        " unknown_db_option1=321\n"
        " unknown_db_option2=false\n"
        "[CFOptions \"default\"]\n"
        " unknown_cf_option1=hello\n"
        "[CFOptions \"something_else\"]\n"
        " unknown_cf_option2=world\n"
        " # if a section is blank, we will use the default\n";

    const std::string kTestFileName = "test-rocksdb-options.ini";
    auto s = fs_->FileExists(kTestFileName, IOOptions(), nullptr);
    ASSERT_TRUE(s.ok() || s.IsNotFound());
    if (s.ok()) {
      ASSERT_OK(fs_->DeleteFile(kTestFileName, IOOptions(), nullptr));
    }
    ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));

    RocksDBOptionsParser parser;
    ASSERT_NOK(parser.Parse(kTestFileName, fs_.get(), false,
                            4096 /* readahead_size */));
    if (should_ignore) {
      ASSERT_OK(parser.Parse(kTestFileName, fs_.get(),
                             true /* ignore_unknown_options */,
                             4096 /* readahead_size */));
    } else {
      ASSERT_NOK(parser.Parse(kTestFileName, fs_.get(),
                              true /* ignore_unknown_options */,
                              4096 /* readahead_size */));
    }
  }
}
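
// options_file_version must be digits with at most one dot; malformed version
// strings are rejected, well-formed ones are accepted.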
TEST_F(OptionsParserTest, ParseVersion) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string file_template =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.13.1\n"
      " options_file_version=%s\n"
      "[DBOptions]\n"
      "[CFOptions \"default\"]\n";
  const int kLength = 1000;
  char buffer[kLength];
  RocksDBOptionsParser parser;

  const std::vector<std::string> invalid_versions = {
      "a.b.c", "3.2.2b", "3.-12", "3. 1",  // only digits and dots are allowed
      "1.2.3.4",
      "1.2.3",  // can only contain at most one dot
      "0",      // options_file_version must be at least one
      "3..2",
      ".", ".1.2",  // must have at least one digit before each dot
      "1.2.", "1.", "2.34."};  // must have at least one digit after each dot
  for (auto iv : invalid_versions) {
    snprintf(buffer, kLength - 1, file_template.c_str(), iv.c_str());

    parser.Reset();
    ASSERT_OK(fs_->WriteToNewFile(iv, buffer));
    ASSERT_NOK(parser.Parse(iv, fs_.get(), false, 0 /* readahead_size */));
  }

  const std::vector<std::string> valid_versions = {
      "1.232", "100", "3.12", "1", "12.3 ", " 1.25 "};
  for (auto vv : valid_versions) {
    snprintf(buffer, kLength - 1, file_template.c_str(), vv.c_str());
    parser.Reset();
    ASSERT_OK(fs_->WriteToNewFile(vv, buffer));
    ASSERT_OK(parser.Parse(vv, fs_.get(), false, 0 /* readahead_size */));
  }
}
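
// Helper that exercises RocksDBOptionsParser::VerifyCFOptions() on
// pointer-typed options: renaming the merge operator or compaction filter
// factory, or clearing a filter/factory, should flip verification from OK to
// non-OK and back.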
void VerifyCFPointerTypedOptions(
    ColumnFamilyOptions* base_cf_opt, const ColumnFamilyOptions* new_cf_opt,
    const std::unordered_map<std::string, std::string>* new_cf_opt_map) {
  std::string name_buffer;
  ConfigOptions config_options;
  config_options.input_strings_escaped = false;
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, *base_cf_opt,
                                                  *new_cf_opt, new_cf_opt_map));

  // change the name of merge operator back-and-forth
  {
    auto* merge_operator = base_cf_opt->merge_operator
                               ->CheckedCast<test::ChanglingMergeOperator>();
    if (merge_operator != nullptr) {
      name_buffer = merge_operator->Name();
      // change the name and expect non-ok status
      merge_operator->SetName("some-other-name");
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // change the name back and expect ok status
      merge_operator->SetName(name_buffer);
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }

  // change the name of the compaction filter factory back-and-forth
  {
    auto* compaction_filter_factory =
        base_cf_opt->compaction_filter_factory
            ->CheckedCast<test::ChanglingCompactionFilterFactory>();
    if (compaction_filter_factory != nullptr) {
      name_buffer = compaction_filter_factory->Name();
      // change the name and expect non-ok status
      compaction_filter_factory->SetName("some-other-name");
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // change the name back and expect ok status
      compaction_filter_factory->SetName(name_buffer);
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }

  // test by setting compaction_filter to nullptr
  {
    auto* tmp_compaction_filter = base_cf_opt->compaction_filter;
    if (tmp_compaction_filter != nullptr) {
      base_cf_opt->compaction_filter = nullptr;
      // set compaction_filter to nullptr and expect non-ok status
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // set the value back and expect ok status
      base_cf_opt->compaction_filter = tmp_compaction_filter;
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }

  // test by setting table_factory to nullptr
  {
    auto tmp_table_factory = base_cf_opt->table_factory;
    if (tmp_table_factory != nullptr) {
      base_cf_opt->table_factory.reset();
      // set table_factory to nullptr and expect non-ok status
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // set the value back and expect ok status
      base_cf_opt->table_factory = tmp_table_factory;
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }

  // test by setting memtable_factory to nullptr
  {
    auto tmp_memtable_factory = base_cf_opt->memtable_factory;
    if (tmp_memtable_factory != nullptr) {
      base_cf_opt->memtable_factory.reset();
      // set memtable_factory to nullptr and expect non-ok status
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // set the value back and expect ok status
      base_cf_opt->memtable_factory = tmp_memtable_factory;
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }
}
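
// Checks that the parser issues roughly one sequential read per
// readahead_size chunk, with 8KB reads for tiny readahead values and a 512KB
// fallback when readahead is disabled.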
TEST_F(OptionsParserTest, Readahead) {
  DBOptions base_db_opt;
  std::vector<ColumnFamilyOptions> base_cf_opts;
  base_cf_opts.emplace_back();
  base_cf_opts.emplace_back();

  std::string one_mb_string = std::string(1024 * 1024, 'x');
  std::vector<std::string> cf_names = {"default", one_mb_string};
  const std::string kOptionsFileName = "test-persisted-options.ini";

  ASSERT_OK(PersistRocksDBOptions(base_db_opt, cf_names, base_cf_opts,
                                  kOptionsFileName, fs_.get()));

  uint64_t file_size = 0;
  ASSERT_OK(
      fs_->GetFileSize(kOptionsFileName, IOOptions(), &file_size, nullptr));
  assert(file_size > 0);

  RocksDBOptionsParser parser;

  fs_->num_seq_file_read_ = 0;
  size_t readahead_size = 128 * 1024;

  ASSERT_OK(parser.Parse(kOptionsFileName, fs_.get(), false, readahead_size));
  ASSERT_EQ(fs_->num_seq_file_read_.load(),
            (file_size - 1) / readahead_size + 1);

  fs_->num_seq_file_read_.store(0);
  readahead_size = 1024 * 1024;
  ASSERT_OK(parser.Parse(kOptionsFileName, fs_.get(), false, readahead_size));
  ASSERT_EQ(fs_->num_seq_file_read_.load(),
            (file_size - 1) / readahead_size + 1);

  // Tiny readahead. 8 KB is read each time.
  fs_->num_seq_file_read_.store(0);
  ASSERT_OK(
      parser.Parse(kOptionsFileName, fs_.get(), false, 1 /* readahead_size */));
  ASSERT_GE(fs_->num_seq_file_read_.load(), file_size / (8 * 1024));
  ASSERT_LT(fs_->num_seq_file_read_.load(), file_size / (8 * 1024) * 2);

  // Disabling readahead (readahead_size == 0) falls back to 512KB readahead.
  fs_->num_seq_file_read_.store(0);
  ASSERT_OK(
      parser.Parse(kOptionsFileName, fs_.get(), false, 0 /* readahead_size */));
  ASSERT_GE(fs_->num_seq_file_read_.load(), (file_size - 1) / (512 * 1024) + 1);
}
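
// Round-trips randomized DBOptions and ColumnFamilyOptions (including CF names
// that need escaping) through PersistRocksDBOptions() and
// RocksDBOptionsParser::Parse().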
TEST_F(OptionsParserTest, DumpAndParse) {
  DBOptions base_db_opt;
  std::vector<ColumnFamilyOptions> base_cf_opts;
  std::vector<std::string> cf_names = {"default", "cf1", "cf2", "cf3",
                                       "c:f:4:4:4"
                                       "p\\i\\k\\a\\chu\\\\\\",
                                       "###rocksdb#1-testcf#2###"};
  const int num_cf = static_cast<int>(cf_names.size());
  Random rnd(302);
  test::RandomInitDBOptions(&base_db_opt, &rnd);
  base_db_opt.db_log_dir += "/#odd #but #could #happen #path #/\\\\#OMG";

  BlockBasedTableOptions special_bbto;
  special_bbto.cache_index_and_filter_blocks = true;
  special_bbto.block_size = 999999;

  for (int c = 0; c < num_cf; ++c) {
    ColumnFamilyOptions cf_opt;
    Random cf_rnd(0xFB + c);
    test::RandomInitCFOptions(&cf_opt, base_db_opt, &cf_rnd);
    if (c < 4) {
      cf_opt.prefix_extractor.reset(test::RandomSliceTransform(&rnd, c));
    }
    if (c < 3) {
      cf_opt.table_factory.reset(test::RandomTableFactory(&rnd, c));
    } else if (c == 4) {
      cf_opt.table_factory.reset(NewBlockBasedTableFactory(special_bbto));
    } else if (c == 5) {
      // A table factory that doesn't support deserialization should still be
      // supported.
      cf_opt.table_factory.reset(new UnregisteredTableFactory());
    }
    base_cf_opts.emplace_back(cf_opt);
  }

  const std::string kOptionsFileName = "test-persisted-options.ini";
  // Use default for escaped(true), unknown(false) and check (exact)
  ConfigOptions config_options;
  ASSERT_OK(PersistRocksDBOptions(base_db_opt, cf_names, base_cf_opts,
                                  kOptionsFileName, fs_.get()));

  RocksDBOptionsParser parser;
  ASSERT_OK(parser.Parse(config_options, kOptionsFileName, fs_.get()));

  // Make sure block-based table factory options were deserialized correctly
  std::shared_ptr<TableFactory> ttf = (*parser.cf_opts())[4].table_factory;
  ASSERT_EQ(TableFactory::kBlockBasedTableName(), std::string(ttf->Name()));
|
|
|
|
const auto parsed_bbto = ttf->GetOptions<BlockBasedTableOptions>();
|
|
|
|
ASSERT_NE(parsed_bbto, nullptr);
|
|
|
|
ASSERT_EQ(special_bbto.block_size, parsed_bbto->block_size);
|
|
|
|
ASSERT_EQ(special_bbto.cache_index_and_filter_blocks,
|
|
|
|
parsed_bbto->cache_index_and_filter_blocks);
|
|
|
|
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
9 years ago
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
|
|
|
|
config_options, base_db_opt, cf_names, base_cf_opts, kOptionsFileName,
|
|
|
|
fs_.get()));
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
9 years ago
|
|
|
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(
|
|
|
|
config_options, *parser.db_opt(), base_db_opt));
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
9 years ago
|
|
|
for (int c = 0; c < num_cf; ++c) {
|
|
|
|
const auto* cf_opt = parser.GetCFOptions(cf_names[c]);
|
|
|
|
ASSERT_NE(cf_opt, nullptr);
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
|
|
|
|
config_options, base_cf_opts[c], *cf_opt,
|
|
|
|
&(parser.cf_opt_maps()->at(c))));
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
9 years ago
|
|
|
}
|
|
|
|
|
|
|
|
// Further verify pointer-typed options
|
|
|
|
for (int c = 0; c < num_cf; ++c) {
|
|
|
|
const auto* cf_opt = parser.GetCFOptions(cf_names[c]);
|
|
|
|
ASSERT_NE(cf_opt, nullptr);
|
|
|
|
VerifyCFPointerTypedOptions(&base_cf_opts[c], cf_opt,
|
|
|
|
&(parser.cf_opt_maps()->at(c)));
|
|
|
|
}
|
|
|
|
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
9 years ago
|
|
|
ASSERT_EQ(parser.GetCFOptions("does not exist"), nullptr);
|
|
|
|
|
|
|
|
base_db_opt.max_open_files++;
|
|
|
|
ASSERT_NOK(RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
|
|
|
|
config_options, base_db_opt, cf_names, base_cf_opts, kOptionsFileName,
|
|
|
|
fs_.get()));
|
|
|
|
|
|
|
|
for (int c = 0; c < num_cf; ++c) {
|
|
|
|
if (base_cf_opts[c].compaction_filter) {
|
|
|
|
delete base_cf_opts[c].compaction_filter;
|
|
|
|
}
|
|
|
|
}
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
9 years ago
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(OptionsParserTest, DifferentDefault) {
  const std::string kOptionsFileName = "test-persisted-options.ini";

  ColumnFamilyOptions cf_level_opts;
  ASSERT_EQ(CompactionPri::kMinOverlappingRatio, cf_level_opts.compaction_pri);
  cf_level_opts.OptimizeLevelStyleCompaction();

  ColumnFamilyOptions cf_univ_opts;
  cf_univ_opts.OptimizeUniversalStyleCompaction();

  ASSERT_OK(PersistRocksDBOptions(DBOptions(), {"default", "universal"},
                                  {cf_level_opts, cf_univ_opts},
                                  kOptionsFileName, fs_.get()));

  RocksDBOptionsParser parser;
  ASSERT_OK(parser.Parse(kOptionsFileName, fs_.get(), false,
                         4096 /* readahead_size */));

  {
    Options old_default_opts;
    old_default_opts.OldDefaults();
    ASSERT_EQ(10 * 1048576, old_default_opts.max_bytes_for_level_base);
    ASSERT_EQ(5000, old_default_opts.max_open_files);
    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
    ASSERT_EQ(WALRecoveryMode::kTolerateCorruptedTailRecords,
              old_default_opts.wal_recovery_mode);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(4, 6);
    ASSERT_EQ(10 * 1048576, old_default_opts.max_bytes_for_level_base);
    ASSERT_EQ(5000, old_default_opts.max_open_files);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(4, 7);
    ASSERT_NE(10 * 1048576, old_default_opts.max_bytes_for_level_base);
    ASSERT_NE(4, old_default_opts.table_cache_numshardbits);
    ASSERT_EQ(5000, old_default_opts.max_open_files);
    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
  }
  {
    ColumnFamilyOptions old_default_cf_opts;
    old_default_cf_opts.OldDefaults();
    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
    ASSERT_EQ(4 << 20, old_default_cf_opts.write_buffer_size);
    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
    ASSERT_EQ(0, old_default_cf_opts.soft_pending_compaction_bytes_limit);
    ASSERT_EQ(0, old_default_cf_opts.hard_pending_compaction_bytes_limit);
    ASSERT_EQ(CompactionPri::kByCompensatedSize,
              old_default_cf_opts.compaction_pri);
  }
  {
    ColumnFamilyOptions old_default_cf_opts;
    old_default_cf_opts.OldDefaults(4, 6);
    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
    ASSERT_EQ(CompactionPri::kByCompensatedSize,
              old_default_cf_opts.compaction_pri);
  }
  {
    ColumnFamilyOptions old_default_cf_opts;
    old_default_cf_opts.OldDefaults(4, 7);
    ASSERT_NE(2 * 1048576, old_default_cf_opts.target_file_size_base);
    ASSERT_EQ(CompactionPri::kByCompensatedSize,
              old_default_cf_opts.compaction_pri);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(5, 1);
    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(5, 2);
    ASSERT_EQ(16 * 1024U * 1024U, old_default_opts.delayed_write_rate);
    ASSERT_TRUE(old_default_opts.compaction_pri ==
                CompactionPri::kByCompensatedSize);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(5, 18);
    ASSERT_TRUE(old_default_opts.compaction_pri ==
                CompactionPri::kByCompensatedSize);
  }

  Options small_opts;
  small_opts.OptimizeForSmallDb();
  ASSERT_EQ(2 << 20, small_opts.write_buffer_size);
  ASSERT_EQ(5000, small_opts.max_open_files);
}

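// Fixture that persists DBOptions / ColumnFamilyOptions to an OPTIONS file
// and verifies them back at a chosen ConfigOptions::SanityLevel. The bool
// test parameter is forwarded to config_options_.ignore_unsupported_options.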
class OptionsSanityCheckTest : public OptionsParserTest,
                               public ::testing::WithParamInterface<bool> {
 protected:
  ConfigOptions config_options_;

 public:
  OptionsSanityCheckTest() {
    config_options_.ignore_unknown_options = false;
    config_options_.ignore_unsupported_options = GetParam();
    config_options_.input_strings_escaped = true;
  }

 protected:
  Status SanityCheckOptions(const DBOptions& db_opts,
                            const ColumnFamilyOptions& cf_opts,
                            ConfigOptions::SanityLevel level) {
    config_options_.sanity_level = level;
    return RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
        config_options_, db_opts, {"default"}, {cf_opts}, kOptionsFileName,
        fs_.get());
  }

  Status SanityCheckCFOptions(const ColumnFamilyOptions& cf_opts,
                              ConfigOptions::SanityLevel level) {
    return SanityCheckOptions(DBOptions(), cf_opts, level);
  }

  void SanityCheckCFOptions(const ColumnFamilyOptions& opts, bool exact) {
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));
    if (exact) {
      ASSERT_OK(
          SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    } else {
      ASSERT_NOK(
          SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    }
  }

  Status SanityCheckDBOptions(const DBOptions& db_opts,
                              ConfigOptions::SanityLevel level) {
    return SanityCheckOptions(db_opts, ColumnFamilyOptions(), level);
  }

  void SanityCheckDBOptions(const DBOptions& opts, bool exact) {
    ASSERT_OK(SanityCheckDBOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelNone));
    if (exact) {
      ASSERT_OK(
          SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    } else {
      ASSERT_NOK(
          SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    }
  }

  Status PersistOptions(const DBOptions& db_opts,
                        const ColumnFamilyOptions& cf_opts) {
    Status s = fs_->DeleteFile(kOptionsFileName, IOOptions(), nullptr);
    if (!s.ok()) {
      return s;
    }
    return PersistRocksDBOptions(db_opts, {"default"}, {cf_opts},
                                 kOptionsFileName, fs_.get());
  }

  Status PersistCFOptions(const ColumnFamilyOptions& cf_opts) {
    return PersistOptions(DBOptions(), cf_opts);
  }

  Status PersistDBOptions(const DBOptions& db_opts) {
    return PersistOptions(db_opts, ColumnFamilyOptions());
  }

  const std::string kOptionsFileName = "OPTIONS";
};

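// As exercised below, the sanity levels go from least to most strict:
// kSanityLevelNone accepts any persisted value, kSanityLevelLooselyCompatible
// tolerates changes considered safe across restarts (e.g. dropping a prefix
// extractor), and kSanityLevelExactMatch requires the in-memory options to
// match the persisted file exactly.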
TEST_P(OptionsSanityCheckTest, CFOptionsSanityCheck) {
  ColumnFamilyOptions opts;
  Random rnd(301);

  // default ColumnFamilyOptions
  {
    ASSERT_OK(PersistCFOptions(opts));
    ASSERT_OK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
  }

  // prefix_extractor
  {
    // Okay to change prefix_extractor from nullptr to non-nullptr
    ASSERT_EQ(opts.prefix_extractor.get(), nullptr);
    opts.prefix_extractor.reset(NewCappedPrefixTransform(10));
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // persist the change
    ASSERT_OK(PersistCFOptions(opts));
    ASSERT_OK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));

    // use same prefix extractor but with different parameter
    opts.prefix_extractor.reset(NewCappedPrefixTransform(15));
    // expect pass only in
    // ConfigOptions::kSanityLevelLooselyCompatible
    ASSERT_NOK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // repeat the test with FixedPrefixTransform
    opts.prefix_extractor.reset(NewFixedPrefixTransform(10));
    ASSERT_NOK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // persist the change of prefix_extractor
    ASSERT_OK(PersistCFOptions(opts));
    ASSERT_OK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));

    // use same prefix extractor but with different parameter
    opts.prefix_extractor.reset(NewFixedPrefixTransform(15));
    // expect pass only in
    // ConfigOptions::kSanityLevelLooselyCompatible
    SanityCheckCFOptions(opts, false);

    // Change prefix extractor from non-nullptr to nullptr
    opts.prefix_extractor.reset();
    // expect pass as it's safe to change prefix_extractor
    // from non-null to null
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));
  }
  // persist the change
  ASSERT_OK(PersistCFOptions(opts));
  ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));

  // table_factory
  {
    for (int tb = 0; tb <= 2; ++tb) {
      // change the table factory
      opts.table_factory.reset(test::RandomTableFactory(&rnd, tb));
      ASSERT_NOK(SanityCheckCFOptions(
          opts, ConfigOptions::kSanityLevelLooselyCompatible));
      ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

      // persist the change
      ASSERT_OK(PersistCFOptions(opts));
      ASSERT_OK(
          SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    }
  }

  // merge_operator
  {
    // Test when going from nullptr -> merge operator
    opts.merge_operator.reset(test::RandomMergeOperator(&rnd));
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // persist the change
    ASSERT_OK(PersistCFOptions(opts));
    SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);

    for (int test = 0; test < 5; ++test) {
      // change the merge operator
      opts.merge_operator.reset(test::RandomMergeOperator(&rnd));
      ASSERT_NOK(SanityCheckCFOptions(
          opts, ConfigOptions::kSanityLevelLooselyCompatible));
      ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

      // persist the change
      ASSERT_OK(PersistCFOptions(opts));
      SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);
    }

    // Test when going from merge operator -> nullptr
    opts.merge_operator = nullptr;
    ASSERT_NOK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // persist the change
    ASSERT_OK(PersistCFOptions(opts));
    SanityCheckCFOptions(opts, true);
  }

  // compaction_filter
  {
    for (int test = 0; test < 5; ++test) {
      // change the compaction filter
      opts.compaction_filter = test::RandomCompactionFilter(&rnd);
      SanityCheckCFOptions(opts, false);

      // persist the change
      ASSERT_OK(PersistCFOptions(opts));
      SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);
      delete opts.compaction_filter;
      opts.compaction_filter = nullptr;
    }
  }

  // compaction_filter_factory
  {
    for (int test = 0; test < 5; ++test) {
      // change the compaction filter factory
      opts.compaction_filter_factory.reset(
          test::RandomCompactionFilterFactory(&rnd));
      SanityCheckCFOptions(opts, false);

      // persist the change
      ASSERT_OK(PersistCFOptions(opts));
      SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);
    }
  }
}

TEST_P(OptionsSanityCheckTest, DBOptionsSanityCheck) {
  DBOptions opts;
  Random rnd(301);

  // default DBOptions
  {
    ASSERT_OK(PersistDBOptions(opts));
    ASSERT_OK(
        SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelExactMatch));
  }

  // File checksum generator
  {
    class MockFileChecksumGenFactory : public FileChecksumGenFactory {
     public:
      static const char* kClassName() { return "Mock"; }
      const char* Name() const override { return kClassName(); }
      std::unique_ptr<FileChecksumGenerator> CreateFileChecksumGenerator(
          const FileChecksumGenContext& /*context*/) override {
        return nullptr;
      }
    };

    // Okay to change file_checksum_gen_factory from nullptr to non-nullptr
    ASSERT_EQ(opts.file_checksum_gen_factory.get(), nullptr);
    opts.file_checksum_gen_factory.reset(new MockFileChecksumGenFactory());

    // persist the change
    ASSERT_OK(PersistDBOptions(opts));
    SanityCheckDBOptions(opts, config_options_.ignore_unsupported_options);

    // Change file_checksum_gen_factory from non-nullptr to nullptr
    opts.file_checksum_gen_factory.reset();
    // expect pass as it's safe to change file_checksum_gen_factory
    // from non-null to null
    SanityCheckDBOptions(opts, false);
  }
  // persist the change
  ASSERT_OK(PersistDBOptions(opts));
  ASSERT_OK(SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelExactMatch));
}

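// The helper below checks that a string produced by EscapeOptionString is
// well formed: every special character is preceded by a backslash and every
// backslash starts a valid escape sequence.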
namespace {
bool IsEscapedString(const std::string& str) {
  for (size_t i = 0; i < str.size(); ++i) {
    if (str[i] == '\\') {
      // Since two consecutive '\'s are handled in the next if-then branch,
      // any '\' appearing at the end of an escaped string is not valid.
      if (i == str.size() - 1) {
        return false;
      }
      if (str[i + 1] == '\\') {
        // if there are two consecutive '\'s, skip the second one.
        i++;
        continue;
      }
      switch (str[i + 1]) {
        case ':':
        case '\\':
        case '#':
          continue;
        default:
          // if true, '\' together with str[i + 1] is not a valid escape.
          if (UnescapeChar(str[i + 1]) == str[i + 1]) {
            return false;
          }
      }
    } else if (isSpecialChar(str[i]) && (i == 0 || str[i - 1] != '\\')) {
      return false;
    }
  }
  return true;
}
}  // namespace

TEST_F(OptionsParserTest, IntegerParsing) {
  ASSERT_EQ(ParseUint64("18446744073709551615"), 18446744073709551615U);
  ASSERT_EQ(ParseUint32("4294967295"), 4294967295U);
  ASSERT_EQ(ParseSizeT("18446744073709551615"), 18446744073709551615U);
  ASSERT_EQ(ParseInt64("9223372036854775807"), 9223372036854775807);
  ASSERT_EQ(ParseInt64("-9223372036854775808"),
            std::numeric_limits<int64_t>::min());
  ASSERT_EQ(ParseInt32("2147483647"), 2147483647);
  ASSERT_EQ(ParseInt32("-2147483648"), std::numeric_limits<int32_t>::min());
  ASSERT_EQ(ParseInt("-32767"), -32767);
  ASSERT_EQ(ParseDouble("-1.234567"), -1.234567);
}

TEST_F(OptionsParserTest, EscapeOptionString) {
  ASSERT_EQ(UnescapeOptionString(
                "This is a test string with \\# \\: and \\\\ escape chars."),
            "This is a test string with # : and \\ escape chars.");

  ASSERT_EQ(
      EscapeOptionString("This is a test string with # : and \\ escape chars."),
      "This is a test string with \\# \\: and \\\\ escape chars.");

  std::string readible_chars =
      "A String like this \"1234567890-=_)(*&^%$#@!ertyuiop[]{POIU"
      "YTREWQasdfghjkl;':LKJHGFDSAzxcvbnm,.?>"
      "<MNBVCXZ\\\" should be okay to \\#\\\\\\:\\#\\#\\#\\ "
      "be serialized and deserialized";

  std::string escaped_string = EscapeOptionString(readible_chars);
  ASSERT_TRUE(IsEscapedString(escaped_string));
  // These two transformations should cancel out and yield
  // the original input.
  ASSERT_EQ(UnescapeOptionString(escaped_string), readible_chars);

  std::string all_chars;
  for (unsigned char c = 0;; ++c) {
    all_chars += c;
    if (c == 255) {
      break;
    }
  }
  escaped_string = EscapeOptionString(all_chars);
  ASSERT_TRUE(IsEscapedString(escaped_string));
  ASSERT_EQ(UnescapeOptionString(escaped_string), all_chars);

  ASSERT_EQ(RocksDBOptionsParser::TrimAndRemoveComment(
                " A simple statement with a comment. # like this :)"),
            "A simple statement with a comment.");

  ASSERT_EQ(RocksDBOptionsParser::TrimAndRemoveComment(
                "Escape \\# and # comment together ."),
            "Escape \\# and");
}

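// Serializes the option at base_ptr, parses the result back into comp_ptr,
// and asserts that the two values then compare equal.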
static void TestAndCompareOption(const ConfigOptions& config_options,
                                 const OptionTypeInfo& opt_info,
                                 const std::string& opt_name, void* base_ptr,
                                 void* comp_ptr, bool strip = false) {
  std::string result, mismatch;
  ASSERT_OK(opt_info.Serialize(config_options, opt_name, base_ptr, &result));
  if (strip) {
    ASSERT_EQ(result.at(0), '{');
    ASSERT_EQ(result.at(result.size() - 1), '}');
    result = result.substr(1, result.size() - 2);
  }
  ASSERT_OK(opt_info.Parse(config_options, opt_name, result, comp_ptr));
  ASSERT_TRUE(opt_info.AreEqual(config_options, opt_name, base_ptr, comp_ptr,
                                &mismatch));
}

static void TestParseAndCompareOption(const ConfigOptions& config_options,
                                      const OptionTypeInfo& opt_info,
                                      const std::string& opt_name,
                                      const std::string& opt_value,
                                      void* base_ptr, void* comp_ptr,
                                      bool strip = false) {
  ASSERT_OK(opt_info.Parse(config_options, opt_name, opt_value, base_ptr));
  TestAndCompareOption(config_options, opt_info, opt_name, base_ptr, comp_ptr,
                       strip);
}

template <typename T>
void TestOptInfo(const ConfigOptions& config_options, OptionType opt_type,
                 T* base, T* comp) {
  std::string result;
  OptionTypeInfo opt_info(0, opt_type);
  ASSERT_FALSE(opt_info.AreEqual(config_options, "base", base, comp, &result));
  ASSERT_EQ(result, "base");
  ASSERT_NE(*base, *comp);
  TestAndCompareOption(config_options, opt_info, "base", base, comp);
  ASSERT_EQ(*base, *comp);
}

class OptionTypeInfoTest : public testing::Test {};

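// Round-trips each primitive OptionType through serialize/parse via
// TestOptInfo, starting from two unequal values.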
TEST_F(OptionTypeInfoTest, BasicTypes) {
  ConfigOptions config_options;
  {
    bool a = true, b = false;
    TestOptInfo(config_options, OptionType::kBoolean, &a, &b);
  }
  {
    int a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kInt, &a, &b);
  }
  {
    int32_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kInt32T, &a, &b);
  }
  {
    int64_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kInt64T, &a, &b);
  }
  {
    unsigned int a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kUInt, &a, &b);
  }
  {
    uint32_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kUInt32T, &a, &b);
  }
  {
    uint64_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kUInt64T, &a, &b);
  }
  {
    size_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kSizeT, &a, &b);
  }
  {
    std::string a = "100", b = "200";
    TestOptInfo(config_options, OptionType::kString, &a, &b);
  }
  {
    double a = 1.0, b = 2.0;
    TestOptInfo(config_options, OptionType::kDouble, &a, &b);
  }
}

TEST_F(OptionTypeInfoTest, TestInvalidArgs) {
  ConfigOptions config_options;
  bool b;
  int i;
  int32_t i32;
  int64_t i64;
  unsigned int u;
  uint32_t u32;
  uint64_t u64;
  size_t sz;
  double d;

  ASSERT_NOK(OptionTypeInfo(0, OptionType::kBoolean)
                 .Parse(config_options, "b", "x", &b));
  ASSERT_NOK(
      OptionTypeInfo(0, OptionType::kInt).Parse(config_options, "b", "x", &i));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kInt32T)
                 .Parse(config_options, "b", "x", &i32));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kInt64T)
                 .Parse(config_options, "b", "x", &i64));
  ASSERT_NOK(
      OptionTypeInfo(0, OptionType::kUInt).Parse(config_options, "b", "x", &u));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kUInt32T)
                 .Parse(config_options, "b", "x", &u32));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kUInt64T)
                 .Parse(config_options, "b", "x", &u64));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kSizeT)
                 .Parse(config_options, "b", "x", &sz));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kDouble)
                 .Parse(config_options, "b", "x", &d));

  // Don't know how to convert Unknowns to anything else
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kUnknown)
                 .Parse(config_options, "b", "x", &d));

  // Verify that if the parse function throws an exception, it is also trapped
  OptionTypeInfo func_info(0, OptionType::kUnknown,
                           OptionVerificationType::kNormal,
                           OptionTypeFlags::kNone,
                           [](const ConfigOptions&, const std::string&,
                              const std::string& value, void* addr) {
                             auto ptr = static_cast<int*>(addr);
                             *ptr = ParseInt(value);
                             return Status::OK();
                           });
  ASSERT_OK(func_info.Parse(config_options, "b", "1", &i));
  ASSERT_NOK(func_info.Parse(config_options, "b", "x", &i));
}

TEST_F(OptionTypeInfoTest, TestParseFunc) {
  OptionTypeInfo opt_info(0, OptionType::kUnknown,
                          OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetParseFunc([](const ConfigOptions& /*opts*/,
                           const std::string& name, const std::string& value,
                           void* addr) {
    auto ptr = static_cast<std::string*>(addr);
    if (name == "Oops") {
      return Status::InvalidArgument(value);
    } else {
      *ptr = value + " " + name;
      return Status::OK();
    }
  });
  ConfigOptions config_options;
  std::string base;
  ASSERT_OK(opt_info.Parse(config_options, "World", "Hello", &base));
  ASSERT_EQ(base, "Hello World");
  ASSERT_NOK(opt_info.Parse(config_options, "Oops", "Hello", &base));
}

TEST_F(OptionTypeInfoTest, TestSerializeFunc) {
  OptionTypeInfo opt_info(0, OptionType::kString,
                          OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetSerializeFunc([](const ConfigOptions& /*opts*/,
                               const std::string& name, const void* /*addr*/,
                               std::string* value) {
    if (name == "Oops") {
      return Status::InvalidArgument(name);
    } else {
      *value = name;
      return Status::OK();
    }
  });
  ConfigOptions config_options;
  std::string base;
  std::string value;
  ASSERT_OK(opt_info.Serialize(config_options, "Hello", &base, &value));
  ASSERT_EQ(value, "Hello");
  ASSERT_NOK(opt_info.Serialize(config_options, "Oops", &base, &value));
}

TEST_F(OptionTypeInfoTest, TestEqualsFunc) {
  OptionTypeInfo opt_info(0, OptionType::kInt, OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetEqualsFunc([](const ConfigOptions& /*opts*/,
                            const std::string& name, const void* addr1,
                            const void* addr2, std::string* mismatch) {
    auto i1 = *(static_cast<const int*>(addr1));
    auto i2 = *(static_cast<const int*>(addr2));
    if (name == "LT") {
      return i1 < i2;
    } else if (name == "GT") {
      return i1 > i2;
    } else if (name == "EQ") {
      return i1 == i2;
    } else {
      *mismatch = name + "???";
      return false;
    }
  });

  ConfigOptions config_options;
  int int1 = 100;
  int int2 = 200;
  std::string mismatch;
  ASSERT_TRUE(opt_info.AreEqual(config_options, "LT", &int1, &int2, &mismatch));
  ASSERT_EQ(mismatch, "");
  ASSERT_FALSE(
      opt_info.AreEqual(config_options, "GT", &int1, &int2, &mismatch));
  ASSERT_EQ(mismatch, "GT");
  ASSERT_FALSE(
      opt_info.AreEqual(config_options, "NO", &int1, &int2, &mismatch));
  ASSERT_EQ(mismatch, "NO???");
}

TEST_F(OptionTypeInfoTest, TestPrepareFunc) {
  OptionTypeInfo opt_info(0, OptionType::kInt, OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetPrepareFunc(
      [](const ConfigOptions& /*opts*/, const std::string& name, void* addr) {
        auto i1 = static_cast<int*>(addr);
        if (name == "x2") {
          *i1 *= 2;
        } else if (name == "/2") {
          *i1 /= 2;
        } else {
          return Status::InvalidArgument("Bad Argument", name);
        }
        return Status::OK();
      });
  ConfigOptions config_options;
  int int1 = 100;
  ASSERT_OK(opt_info.Prepare(config_options, "x2", &int1));
  ASSERT_EQ(int1, 200);
  ASSERT_OK(opt_info.Prepare(config_options, "/2", &int1));
  ASSERT_EQ(int1, 100);
  ASSERT_NOK(opt_info.Prepare(config_options, "??", &int1));
  ASSERT_EQ(int1, 100);
}

TEST_F(OptionTypeInfoTest, TestValidateFunc) {
  OptionTypeInfo opt_info(0, OptionType::kSizeT,
                          OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetValidateFunc([](const DBOptions& db_opts,
                              const ColumnFamilyOptions& cf_opts,
                              const std::string& name, const void* addr) {
    const auto sz = static_cast<const size_t*>(addr);
    bool is_valid = false;
    if (name == "keep_log_file_num") {
      is_valid = (*sz == db_opts.keep_log_file_num);
    } else if (name == "write_buffer_size") {
      is_valid = (*sz == cf_opts.write_buffer_size);
    }
    if (is_valid) {
      return Status::OK();
    } else {
      return Status::InvalidArgument("Mismatched value", name);
    }
  });
  ConfigOptions config_options;
  DBOptions db_options;
  ColumnFamilyOptions cf_options;

  ASSERT_OK(opt_info.Validate(db_options, cf_options, "keep_log_file_num",
                              &db_options.keep_log_file_num));
  ASSERT_OK(opt_info.Validate(db_options, cf_options, "write_buffer_size",
                              &cf_options.write_buffer_size));
  ASSERT_NOK(opt_info.Validate(db_options, cf_options, "keep_log_file_num",
                               &cf_options.write_buffer_size));
  ASSERT_NOK(opt_info.Validate(db_options, cf_options, "write_buffer_size",
                               &db_options.keep_log_file_num));
}

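// Exercise the kDontSerialize and kCompareNever flags and the kAlias and
// kDeprecated verification types on Parse, Serialize, and AreEqual.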
TEST_F(OptionTypeInfoTest, TestOptionFlags) {
  OptionTypeInfo opt_none(0, OptionType::kString,
                          OptionVerificationType::kNormal,
                          OptionTypeFlags::kDontSerialize);
  OptionTypeInfo opt_never(0, OptionType::kString,
                           OptionVerificationType::kNormal,
                           OptionTypeFlags::kCompareNever);
  OptionTypeInfo opt_alias(0, OptionType::kString,
                           OptionVerificationType::kAlias,
                           OptionTypeFlags::kNone);
  OptionTypeInfo opt_deprecated(0, OptionType::kString,
                                OptionVerificationType::kDeprecated,
                                OptionTypeFlags::kNone);
  ConfigOptions config_options;
  std::string opts_str;
  std::string base = "base";
  std::string comp = "comp";

  // If marked kDontSerialize, the serialization returns NotSupported
  ASSERT_NOK(opt_none.Serialize(config_options, "None", &base, &opts_str));
  // If marked kCompareNever, the values match even when they do not
  ASSERT_TRUE(opt_never.AreEqual(config_options, "Never", &base, &comp, &base));
  ASSERT_FALSE(opt_none.AreEqual(config_options, "Never", &base, &comp, &base));

  // An alias can change the value via parse, but does nothing on serialize or
  // match
  std::string result;
  ASSERT_OK(opt_alias.Parse(config_options, "Alias", "Alias", &base));
  ASSERT_OK(opt_alias.Serialize(config_options, "Alias", &base, &result));
  ASSERT_TRUE(
      opt_alias.AreEqual(config_options, "Alias", &base, &comp, &result));
  ASSERT_EQ(base, "Alias");
  ASSERT_NE(base, comp);

  // Deprecated options do nothing on any of the commands
  ASSERT_OK(opt_deprecated.Parse(config_options, "Alias", "Deprecated", &base));
  ASSERT_OK(opt_deprecated.Serialize(config_options, "Alias", &base, &result));
  ASSERT_TRUE(
      opt_deprecated.AreEqual(config_options, "Alias", &base, &comp, &result));
  ASSERT_EQ(base, "Alias");
  ASSERT_NE(base, comp);
}

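// Verify parse/serialize/compare of a user-defined enum registered through
// OptionTypeInfo::Enum with an explicit string-to-value map.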
TEST_F(OptionTypeInfoTest, TestCustomEnum) {
  enum TestEnum { kA, kB, kC };
  std::unordered_map<std::string, TestEnum> enum_map = {
      {"A", TestEnum::kA},
      {"B", TestEnum::kB},
      {"C", TestEnum::kC},
  };
  OptionTypeInfo opt_info = OptionTypeInfo::Enum<TestEnum>(0, &enum_map);
  TestEnum e1, e2;
  ConfigOptions config_options;
  std::string result, mismatch;

  e2 = TestEnum::kA;

  ASSERT_OK(opt_info.Parse(config_options, "", "B", &e1));
  ASSERT_OK(opt_info.Serialize(config_options, "", &e1, &result));
  ASSERT_EQ(e1, TestEnum::kB);
  ASSERT_EQ(result, "B");

  ASSERT_FALSE(opt_info.AreEqual(config_options, "Enum", &e1, &e2, &mismatch));
  ASSERT_EQ(mismatch, "Enum");

  TestParseAndCompareOption(config_options, opt_info, "", "C", &e1, &e2);
  ASSERT_EQ(e2, TestEnum::kC);

  ASSERT_NOK(opt_info.Parse(config_options, "", "D", &e1));
  ASSERT_EQ(e1, TestEnum::kC);
}

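// Round-trip every entry of the built-in enum string maps through
// TestParseAndCompareOption.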
TEST_F(OptionTypeInfoTest, TestBuiltinEnum) {
  ConfigOptions config_options;
  for (auto iter : OptionsHelper::compaction_style_string_map) {
    CompactionStyle e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kCompactionStyle),
                              "CompactionStyle", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (auto iter : OptionsHelper::compaction_pri_string_map) {
    CompactionPri e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kCompactionPri),
                              "CompactionPri", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (auto iter : OptionsHelper::compression_type_string_map) {
    CompressionType e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kCompressionType),
                              "CompressionType", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (auto iter : OptionsHelper::compaction_stop_style_string_map) {
    CompactionStopStyle e1, e2;
    TestParseAndCompareOption(
        config_options, OptionTypeInfo(0, OptionType::kCompactionStopStyle),
        "CompactionStopStyle", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (auto iter : OptionsHelper::checksum_type_string_map) {
    ChecksumType e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kChecksumType),
                              "CheckSumType", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (auto iter : OptionsHelper::encoding_type_string_map) {
    EncodingType e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kEncodingType),
                              "EncodingType", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
}

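// Verify nested struct options built with OptionTypeInfo::Struct, including
// parsing whole structs, individual fields (e.g. "b.i"), and mismatch names.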
TEST_F(OptionTypeInfoTest, TestStruct) {
  struct Basic {
    int i = 42;
    std::string s = "Hello";
  };

  struct Extended {
    int j = 11;
    Basic b;
  };

  std::unordered_map<std::string, OptionTypeInfo> basic_type_map = {
      {"i", {offsetof(struct Basic, i), OptionType::kInt}},
      {"s", {offsetof(struct Basic, s), OptionType::kString}},
  };
  OptionTypeInfo basic_info = OptionTypeInfo::Struct(
      "b", &basic_type_map, 0, OptionVerificationType::kNormal,
      OptionTypeFlags::kMutable);

  std::unordered_map<std::string, OptionTypeInfo> extended_type_map = {
      {"j", {offsetof(struct Extended, j), OptionType::kInt}},
      {"b", OptionTypeInfo::Struct(
                "b", &basic_type_map, offsetof(struct Extended, b),
                OptionVerificationType::kNormal, OptionTypeFlags::kNone)},
      {"m", OptionTypeInfo::Struct(
                "m", &basic_type_map, offsetof(struct Extended, b),
                OptionVerificationType::kNormal, OptionTypeFlags::kMutable)},
  };
  OptionTypeInfo extended_info = OptionTypeInfo::Struct(
      "e", &extended_type_map, 0, OptionVerificationType::kNormal,
      OptionTypeFlags::kMutable);
  Extended e1, e2;
  ConfigOptions config_options;
  std::string mismatch;
  TestParseAndCompareOption(config_options, basic_info, "b", "{i=33;s=33}",
                            &e1.b, &e2.b);
  ASSERT_EQ(e1.b.i, 33);
  ASSERT_EQ(e1.b.s, "33");

  TestParseAndCompareOption(config_options, basic_info, "b.i", "44", &e1.b,
                            &e2.b);
  ASSERT_EQ(e1.b.i, 44);

  TestParseAndCompareOption(config_options, basic_info, "i", "55", &e1.b,
                            &e2.b);
  ASSERT_EQ(e1.b.i, 55);

  e1.b.i = 0;

  ASSERT_FALSE(
      basic_info.AreEqual(config_options, "b", &e1.b, &e2.b, &mismatch));
  ASSERT_EQ(mismatch, "b.i");
  mismatch.clear();
  ASSERT_FALSE(
      basic_info.AreEqual(config_options, "b.i", &e1.b, &e2.b, &mismatch));
  ASSERT_EQ(mismatch, "b.i");
  mismatch.clear();
  ASSERT_FALSE(
      basic_info.AreEqual(config_options, "i", &e1.b, &e2.b, &mismatch));
  ASSERT_EQ(mismatch, "b.i");
  mismatch.clear();

  e1 = e2;
  ASSERT_NOK(basic_info.Parse(config_options, "b", "{i=33;s=33;j=44}", &e1.b));
  ASSERT_NOK(basic_info.Parse(config_options, "b.j", "44", &e1.b));
  ASSERT_NOK(basic_info.Parse(config_options, "j", "44", &e1.b));

  TestParseAndCompareOption(config_options, extended_info, "e",
                            "b={i=55;s=55}; j=22;", &e1, &e2);
  ASSERT_EQ(e1.b.i, 55);
  ASSERT_EQ(e1.j, 22);
  ASSERT_EQ(e1.b.s, "55");
  TestParseAndCompareOption(config_options, extended_info, "e.b",
                            "{i=66;s=66;}", &e1, &e2);
  ASSERT_EQ(e1.b.i, 66);
  ASSERT_EQ(e1.j, 22);
  ASSERT_EQ(e1.b.s, "66");
  TestParseAndCompareOption(config_options, extended_info, "e.b.i", "77", &e1,
                            &e2);
  ASSERT_EQ(e1.b.i, 77);
  ASSERT_EQ(e1.j, 22);
  ASSERT_EQ(e1.b.s, "66");
}

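// Verify fixed-size std::array options: parsing with the default ':' and a
// custom '|' separator, nested braces, and size-mismatch errors.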
TEST_F(OptionTypeInfoTest, TestArrayType) {
  OptionTypeInfo array_info = OptionTypeInfo::Array<std::string, 4>(
      0, OptionVerificationType::kNormal, OptionTypeFlags::kNone,
      {0, OptionType::kString});
  std::array<std::string, 4> array1, array2;
  std::string mismatch;

  ConfigOptions config_options;
  TestParseAndCompareOption(config_options, array_info, "v", "a:b:c:d", &array1,
                            &array2);

  ASSERT_EQ(array1.size(), 4);
  ASSERT_EQ(array1[0], "a");
  ASSERT_EQ(array1[1], "b");
  ASSERT_EQ(array1[2], "c");
  ASSERT_EQ(array1[3], "d");
  array1[3] = "e";
  ASSERT_FALSE(
      array_info.AreEqual(config_options, "v", &array1, &array2, &mismatch));
  ASSERT_EQ(mismatch, "v");

  // Test arrays with inner brackets
  TestParseAndCompareOption(config_options, array_info, "v", "a:{b}:c:d",
                            &array1, &array2);
  ASSERT_EQ(array1.size(), 4);
  ASSERT_EQ(array1[0], "a");
  ASSERT_EQ(array1[1], "b");
  ASSERT_EQ(array1[2], "c");
  ASSERT_EQ(array1[3], "d");

  std::array<std::string, 3> array3, array4;
  OptionTypeInfo bar_info = OptionTypeInfo::Array<std::string, 3>(
      0, OptionVerificationType::kNormal, OptionTypeFlags::kNone,
      {0, OptionType::kString}, '|');
  TestParseAndCompareOption(config_options, bar_info, "v", "x|y|z", &array3,
                            &array4);

  // Test arrays with an inner array
  TestParseAndCompareOption(config_options, bar_info, "v",
                            "a|{b1|b2}|{c1|c2|{d1|d2}}", &array3, &array4,
                            false);
  ASSERT_EQ(array3.size(), 3);
  ASSERT_EQ(array3[0], "a");
  ASSERT_EQ(array3[1], "b1|b2");
  ASSERT_EQ(array3[2], "c1|c2|{d1|d2}");

  TestParseAndCompareOption(config_options, bar_info, "v",
                            "{a1|a2}|{b1|{c1|c2}}|d1", &array3, &array4, true);
  ASSERT_EQ(array3.size(), 3);
  ASSERT_EQ(array3[0], "a1|a2");
  ASSERT_EQ(array3[1], "b1|{c1|c2}");
  ASSERT_EQ(array3[2], "d1");

  // Test invalid input: fewer elements than requested
  auto s = bar_info.Parse(config_options, "opt_name1", "a1|a2", &array3);
  ASSERT_TRUE(s.IsInvalidArgument());

  // Test invalid input: more elements than requested
  s = bar_info.Parse(config_options, "opt_name2", "a1|b|c1|d3", &array3);
  ASSERT_TRUE(s.IsInvalidArgument());
}

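// Verify std::vector options: parsing with the default ':' and a custom '|'
// separator, including elements that themselves contain braces.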
TEST_F(OptionTypeInfoTest, TestVectorType) {
  OptionTypeInfo vec_info = OptionTypeInfo::Vector<std::string>(
      0, OptionVerificationType::kNormal, OptionTypeFlags::kNone,
      {0, OptionType::kString});
  std::vector<std::string> vec1, vec2;
  std::string mismatch;

  ConfigOptions config_options;
  TestParseAndCompareOption(config_options, vec_info, "v", "a:b:c:d", &vec1,
                            &vec2);
  ASSERT_EQ(vec1.size(), 4);
  ASSERT_EQ(vec1[0], "a");
  ASSERT_EQ(vec1[1], "b");
  ASSERT_EQ(vec1[2], "c");
  ASSERT_EQ(vec1[3], "d");
  vec1[3] = "e";
  ASSERT_FALSE(vec_info.AreEqual(config_options, "v", &vec1, &vec2, &mismatch));
  ASSERT_EQ(mismatch, "v");

  // Test vectors with inner brackets
  TestParseAndCompareOption(config_options, vec_info, "v", "a:{b}:c:d", &vec1,
                            &vec2);
  ASSERT_EQ(vec1.size(), 4);
  ASSERT_EQ(vec1[0], "a");
  ASSERT_EQ(vec1[1], "b");
  ASSERT_EQ(vec1[2], "c");
  ASSERT_EQ(vec1[3], "d");

  OptionTypeInfo bar_info = OptionTypeInfo::Vector<std::string>(
      0, OptionVerificationType::kNormal, OptionTypeFlags::kNone,
      {0, OptionType::kString}, '|');
  TestParseAndCompareOption(config_options, vec_info, "v", "x|y|z", &vec1,
                            &vec2);
  // Test vectors with inner vector
  TestParseAndCompareOption(config_options, bar_info, "v",
                            "a|{b1|b2}|{c1|c2|{d1|d2}}", &vec1, &vec2, false);
  ASSERT_EQ(vec1.size(), 3);
  ASSERT_EQ(vec1[0], "a");
  ASSERT_EQ(vec1[1], "b1|b2");
  ASSERT_EQ(vec1[2], "c1|c2|{d1|d2}");

  TestParseAndCompareOption(config_options, bar_info, "v",
                            "{a1|a2}|{b1|{c1|c2}}|d1", &vec1, &vec2, true);
  ASSERT_EQ(vec1.size(), 3);
  ASSERT_EQ(vec1[0], "a1|a2");
  ASSERT_EQ(vec1[1], "b1|{c1|c2}");
  ASSERT_EQ(vec1[2], "d1");

  TestParseAndCompareOption(config_options, bar_info, "v", "{a1}", &vec1, &vec2,
                            false);
  ASSERT_EQ(vec1.size(), 1);
  ASSERT_EQ(vec1[0], "a1");

  TestParseAndCompareOption(config_options, bar_info, "v", "{a1|a2}|{b1|b2}",
                            &vec1, &vec2, true);
  ASSERT_EQ(vec1.size(), 2);
  ASSERT_EQ(vec1[0], "a1|a2");
  ASSERT_EQ(vec1[1], "b1|b2");
}

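// Verify SerializeType/ParseType/TypesAreEqual against a static map of
// OptionTypeInfo entries for a simple struct.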
TEST_F(OptionTypeInfoTest, TestStaticType) {
  struct SimpleOptions {
    size_t size = 0;
    bool verify = true;
  };

  static std::unordered_map<std::string, OptionTypeInfo> type_map = {
      {"size", {offsetof(struct SimpleOptions, size), OptionType::kSizeT}},
      {"verify",
       {offsetof(struct SimpleOptions, verify), OptionType::kBoolean}},
  };

  ConfigOptions config_options;
  SimpleOptions opts, copy;
  opts.size = 12345;
  opts.verify = false;
  std::string str, mismatch;

  ASSERT_OK(
      OptionTypeInfo::SerializeType(config_options, type_map, &opts, &str));
  ASSERT_FALSE(OptionTypeInfo::TypesAreEqual(config_options, type_map, &opts,
                                             &copy, &mismatch));
  ASSERT_OK(OptionTypeInfo::ParseType(config_options, str, type_map, &copy));
  ASSERT_TRUE(OptionTypeInfo::TypesAreEqual(config_options, type_map, &opts,
                                            &copy, &mismatch));
}

class ConfigOptionsTest : public testing::Test {};

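// Verify that the Env named in an options string is resolved through the
// ConfigOptions registry and that an unknown Env name leaves the options
// unchanged.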
TEST_F(ConfigOptionsTest, EnvFromConfigOptions) {
  ConfigOptions config_options;
  DBOptions db_opts;
  Options opts;
  Env* mem_env = NewMemEnv(Env::Default());
  config_options.registry->AddLibrary("custom-env", RegisterCustomEnv,
                                      kCustomEnvName);

  config_options.env = mem_env;
  // First test that we can get the env as expected
  ASSERT_OK(GetDBOptionsFromString(config_options, DBOptions(), kCustomEnvProp,
                                   &db_opts));
  ASSERT_OK(
      GetOptionsFromString(config_options, Options(), kCustomEnvProp, &opts));
  ASSERT_NE(config_options.env, db_opts.env);
  ASSERT_EQ(opts.env, db_opts.env);
  Env* custom_env = db_opts.env;

  // Now try a "bad" env while ignoring unsupported options and check that
  // nothing changed
  config_options.ignore_unsupported_options = true;
  ASSERT_OK(
      GetDBOptionsFromString(config_options, db_opts, "env=unknown", &db_opts));
  ASSERT_OK(GetOptionsFromString(config_options, opts, "env=unknown", &opts));
  ASSERT_EQ(config_options.env, mem_env);
  ASSERT_EQ(db_opts.env, custom_env);
  ASSERT_EQ(opts.env, db_opts.env);

  // Now try a "bad" env without ignoring unsupported options; the call fails
  // and nothing changes
  config_options.ignore_unsupported_options = false;
  ASSERT_NOK(
      GetDBOptionsFromString(config_options, db_opts, "env=unknown", &db_opts));
  ASSERT_EQ(config_options.env, mem_env);
  ASSERT_EQ(db_opts.env, custom_env);
  ASSERT_EQ(opts.env, db_opts.env);

  delete mem_env;
}

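// Verify that built-in merge operators can be created from short names or an
// "id=...; ..." options string and that their options round-trip through
// ToString/CreateFromString.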
TEST_F(ConfigOptionsTest, MergeOperatorFromString) {
  ConfigOptions config_options;
  std::shared_ptr<MergeOperator> merge_op;

  ASSERT_OK(MergeOperator::CreateFromString(config_options, "put", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("put"));
  ASSERT_STREQ(merge_op->Name(), "PutOperator");

  ASSERT_OK(
      MergeOperator::CreateFromString(config_options, "put_v1", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("PutOperator"));

  ASSERT_OK(
      MergeOperator::CreateFromString(config_options, "uint64add", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("uint64add"));
  ASSERT_STREQ(merge_op->Name(), "UInt64AddOperator");

  ASSERT_OK(MergeOperator::CreateFromString(config_options, "max", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("max"));
  ASSERT_STREQ(merge_op->Name(), "MaxOperator");

  ASSERT_OK(
      MergeOperator::CreateFromString(config_options, "bytesxor", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("bytesxor"));
  ASSERT_STREQ(merge_op->Name(), BytesXOROperator::kClassName());

  ASSERT_OK(
      MergeOperator::CreateFromString(config_options, "sortlist", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("sortlist"));
  ASSERT_STREQ(merge_op->Name(), SortList::kClassName());

  ASSERT_OK(MergeOperator::CreateFromString(config_options, "stringappend",
                                            &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("stringappend"));
  ASSERT_STREQ(merge_op->Name(), StringAppendOperator::kClassName());
  auto delimiter = merge_op->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, ",");

  ASSERT_OK(MergeOperator::CreateFromString(config_options, "stringappendtest",
                                            &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("stringappendtest"));
  ASSERT_STREQ(merge_op->Name(), StringAppendTESTOperator::kClassName());
  delimiter = merge_op->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, ",");

  ASSERT_OK(MergeOperator::CreateFromString(
      config_options, "id=stringappend; delimiter=||", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("stringappend"));
  ASSERT_STREQ(merge_op->Name(), StringAppendOperator::kClassName());
  delimiter = merge_op->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, "||");

  ASSERT_OK(MergeOperator::CreateFromString(
      config_options, "id=stringappendtest; delimiter=&&", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("stringappendtest"));
  ASSERT_STREQ(merge_op->Name(), StringAppendTESTOperator::kClassName());
  delimiter = merge_op->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, "&&");

  std::shared_ptr<MergeOperator> copy;
  std::string mismatch;
  std::string opts_str = merge_op->ToString(config_options);

  ASSERT_OK(MergeOperator::CreateFromString(config_options, opts_str, &copy));
  ASSERT_TRUE(merge_op->AreEquivalent(config_options, copy.get(), &mismatch));
  ASSERT_NE(copy, nullptr);
  delimiter = copy->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, "&&");
}

TEST_F(ConfigOptionsTest, ConfiguringOptionsDoesNotRevertRateLimiterBandwidth) {
  // Regression test for bug where rate limiter's dynamically set bandwidth
  // could be silently reverted when configuring an options structure with an
  // existing `rate_limiter`.
  Options base_options;
  base_options.rate_limiter.reset(
      NewGenericRateLimiter(1 << 20 /* rate_bytes_per_sec */));
  Options copy_options(base_options);

  base_options.rate_limiter->SetBytesPerSecond(2 << 20);
  ASSERT_EQ(2 << 20, base_options.rate_limiter->GetBytesPerSecond());

  ASSERT_OK(GetOptionsFromString(base_options, "", &copy_options));
  ASSERT_EQ(2 << 20, base_options.rate_limiter->GetBytesPerSecond());
}

INSTANTIATE_TEST_CASE_P(OptionsSanityCheckTest, OptionsSanityCheckTest,
                        ::testing::Bool());

#endif  // !ROCKSDB_LITE

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
#ifdef GFLAGS
  ParseCommandLineFlags(&argc, &argv, true);
#endif  // GFLAGS
  return RUN_ALL_TESTS();
}