rocksdb/options/options_settable_test.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <cstring>
#include "options/options_helper.h"
#include "rocksdb/convenience.h"
#include "util/testharness.h"
#ifndef GFLAGS
bool FLAGS_enable_print = false;
#else
#include "util/gflags_compat.h"
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
DEFINE_bool(enable_print, false, "Print options generated to console.");
#endif // GFLAGS
namespace rocksdb {
// Verify options are settable from options strings.
// The approach relies on compiler behavior: copy-assigning an options struct
// is assumed not to touch implicit padding bytes, which makes the test
// fragile. As a result, we only verify that new option fields are settable
// through strings on a limited set of platforms, since the result depends on
// the behavior of the compiler.
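//
// For illustration only (PaddingExample is hypothetical and not used by the
// tests): a struct like the one below carries implicit padding between 'c'
// and 'i' on most ABIs; those alignment-gap bytes are exactly what this test
// counts. The static_assert is a trivially true sanity check of the idea,
// since padding can only add to a struct's size.
struct PaddingExample {
  char c;  // 1 byte, typically followed by 3 padding bytes
  int i;   // 4 bytes, usually aligned to a 4-byte boundary
};
static_assert(sizeof(PaddingExample) >= sizeof(char) + sizeof(int),
              "padding can only add to the size of a struct");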
#ifndef ROCKSDB_LITE
#if defined OS_LINUX || defined OS_WIN
#ifndef __clang__
class OptionsSettableTest : public testing::Test {
 public:
  OptionsSettableTest() {}
};

const char kSpecialChar = 'z';
typedef std::vector<std::pair<size_t, size_t>> OffsetGap;

void FillWithSpecialChar(char* start_ptr, size_t total_size,
                         const OffsetGap& blacklist) {
  size_t offset = 0;
  for (auto& pair : blacklist) {
    std::memset(start_ptr + offset, kSpecialChar, pair.first - offset);
    offset = pair.first + pair.second;
  }
  std::memset(start_ptr + offset, kSpecialChar, total_size - offset);
}
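
// A minimal worked example of the helper above (illustration only): with a
// hypothetical 16-byte region and blacklist {{4, 4}},
//
//   char buf[16];
//   FillWithSpecialChar(buf, sizeof(buf), {{4, 4}});
//
// writes kSpecialChar to bytes [0, 4) and [8, 16) and leaves the 4
// blacklisted bytes starting at offset 4 untouched.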
int NumUnsetBytes(char* start_ptr, size_t total_size,
                  const OffsetGap& blacklist) {
  int total_unset_bytes_base = 0;
  size_t offset = 0;
  for (auto& pair : blacklist) {
    for (char* ptr = start_ptr + offset; ptr < start_ptr + pair.first; ptr++) {
      if (*ptr == kSpecialChar) {
        total_unset_bytes_base++;
      }
    }
    offset = pair.first + pair.second;
  }
  for (char* ptr = start_ptr + offset; ptr < start_ptr + total_size; ptr++) {
    if (*ptr == kSpecialChar) {
      total_unset_bytes_base++;
    }
  }
  return total_unset_bytes_base;
}
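
// Continuing the sketch above: right after that FillWithSpecialChar() call,
// NumUnsetBytes(buf, sizeof(buf), {{4, 4}}) would return 12, since all 12
// non-blacklisted bytes still hold kSpecialChar. Copy-assigning a fully
// initialized struct over such a region then drops the count to the number
// of implicit padding bytes (plus any fields the assignment left unwritten).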

// If this test fails, a new option was likely added to BlockBasedTableOptions
// that cannot be set through GetBlockBasedTableOptionsFromString(), or the
// test was not updated accordingly.
// After adding an option, make sure it is settable by
// GetBlockBasedTableOptionsFromString() and add it to the input string passed
// to GetBlockBasedTableOptionsFromString() in this test.
// If it is a complicated type, also add the field to kBbtoBlacklist, and
// possibly add customized verification for it.
TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
  // Items are in the form of <offset, size>. They need to be in ascending
  // order and must not overlap. This list needs to be updated if a new
  // pointer option is added.
  const OffsetGap kBbtoBlacklist = {
      {offsetof(struct BlockBasedTableOptions, flush_block_policy_factory),
       sizeof(std::shared_ptr<FlushBlockPolicyFactory>)},
      {offsetof(struct BlockBasedTableOptions, block_cache),
       sizeof(std::shared_ptr<Cache>)},
      {offsetof(struct BlockBasedTableOptions, persistent_cache),
       sizeof(std::shared_ptr<PersistentCache>)},
      {offsetof(struct BlockBasedTableOptions, block_cache_compressed),
       sizeof(std::shared_ptr<Cache>)},
      {offsetof(struct BlockBasedTableOptions, filter_policy),
       sizeof(std::shared_ptr<const FilterPolicy>)},
  };

  // In this test, we catch a new option of BlockBasedTableOptions that is not
  // settable through GetBlockBasedTableOptionsFromString().
  // We count the padding bytes of the option struct and assert that the count
  // equals the number of unset bytes of an option struct initialized by
  // GetBlockBasedTableOptionsFromString().
  char* bbto_ptr = new char[sizeof(BlockBasedTableOptions)];

  // Count padding bytes by setting all bytes in the memory to a special char,
  // copying a well-constructed struct over this memory, and counting how many
  // special bytes are left.
  BlockBasedTableOptions* bbto = new (bbto_ptr) BlockBasedTableOptions();
  FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions),
                      kBbtoBlacklist);
  // This relies on the compiler behavior that padding bytes are not changed
  // when copying the struct, so it is prone to failure when that behavior
  // changes. We verify that there are unset bytes to detect that case.
  *bbto = BlockBasedTableOptions();
  int unset_bytes_base =
      NumUnsetBytes(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
  ASSERT_GT(unset_bytes_base, 0);
  bbto->~BlockBasedTableOptions();

  // Construct the base option passed into
  // GetBlockBasedTableOptionsFromString().
  bbto = new (bbto_ptr) BlockBasedTableOptions();
  FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions),
                      kBbtoBlacklist);
  // This option is not settable:
  bbto->use_delta_encoding = true;

  char* new_bbto_ptr = new char[sizeof(BlockBasedTableOptions)];
  BlockBasedTableOptions* new_bbto =
      new (new_bbto_ptr) BlockBasedTableOptions();
  FillWithSpecialChar(new_bbto_ptr, sizeof(BlockBasedTableOptions),
                      kBbtoBlacklist);

  // Need to update the option string if a new option is added.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      *bbto,
      "cache_index_and_filter_blocks=1;"
      "cache_index_and_filter_blocks_with_high_priority=true;"
      "pin_l0_filter_and_index_blocks_in_cache=1;"
      "pin_top_level_index_and_filter=1;"
      "index_type=kHashSearch;"
      "data_block_index_type=kDataBlockBinaryAndHash;"
      "data_block_hash_table_util_ratio=0.75;"
      "checksum=kxxHash;hash_index_allow_collision=1;no_block_cache=1;"
      "block_cache=1M;block_cache_compressed=1k;block_size=1024;"
      "block_size_deviation=8;block_restart_interval=4; "
      "metadata_block_size=1024;"
      "partition_filters=false;"
      "index_block_restart_interval=4;"
      "filter_policy=bloomfilter:4:true;whole_key_filtering=1;"
      "format_version=1;"
      "hash_index_allow_collision=false;"
      "verify_compression=true;read_amp_bytes_per_bit=0;"
      "enable_index_compression=false;"
      "block_align=true",
      new_bbto));

  ASSERT_EQ(unset_bytes_base,
            NumUnsetBytes(new_bbto_ptr, sizeof(BlockBasedTableOptions),
                          kBbtoBlacklist));

  ASSERT_TRUE(new_bbto->block_cache.get() != nullptr);
  ASSERT_TRUE(new_bbto->block_cache_compressed.get() != nullptr);
  ASSERT_TRUE(new_bbto->filter_policy.get() != nullptr);

  bbto->~BlockBasedTableOptions();
  new_bbto->~BlockBasedTableOptions();

  delete[] bbto_ptr;
  delete[] new_bbto_ptr;
}
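
// A usage sketch of the API exercised above (illustration only; the option
// values are arbitrary): applications can configure a block-based table
// factory from a string without code changes, e.g.
//
//   BlockBasedTableOptions base, parsed;
//   Status s = GetBlockBasedTableOptionsFromString(
//       base, "block_size=4096;cache_index_and_filter_blocks=true", &parsed);
//   assert(s.ok() && parsed.block_size == 4096);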

// If this test fails, a new option was likely added to DBOptions that cannot
// be set through GetDBOptionsFromString(), or the test was not updated
// accordingly.
// After adding an option, make sure it is settable by GetDBOptionsFromString()
// and add it to the input string passed to GetDBOptionsFromString() in this
// test.
// If it is a complicated type, also add the field to kDBOptionsBlacklist, and
// possibly add customized verification for it.
TEST_F(OptionsSettableTest, DBOptionsAllFieldsSettable) {
  const OffsetGap kDBOptionsBlacklist = {
      {offsetof(struct DBOptions, env), sizeof(Env*)},
      {offsetof(struct DBOptions, rate_limiter),
       sizeof(std::shared_ptr<RateLimiter>)},
      {offsetof(struct DBOptions, sst_file_manager),
       sizeof(std::shared_ptr<SstFileManager>)},
      {offsetof(struct DBOptions, info_log), sizeof(std::shared_ptr<Logger>)},
      {offsetof(struct DBOptions, statistics),
       sizeof(std::shared_ptr<Statistics>)},
      {offsetof(struct DBOptions, db_paths), sizeof(std::vector<DbPath>)},
      {offsetof(struct DBOptions, db_log_dir), sizeof(std::string)},
      {offsetof(struct DBOptions, wal_dir), sizeof(std::string)},
      {offsetof(struct DBOptions, write_buffer_manager),
       sizeof(std::shared_ptr<WriteBufferManager>)},
      {offsetof(struct DBOptions, listeners),
       sizeof(std::vector<std::shared_ptr<EventListener>>)},
      {offsetof(struct DBOptions, row_cache), sizeof(std::shared_ptr<Cache>)},
      {offsetof(struct DBOptions, wal_filter), sizeof(const WalFilter*)},
  };

  char* options_ptr = new char[sizeof(DBOptions)];

  // Count padding bytes by setting all bytes in the memory to a special char,
  // copying a well-constructed struct over this memory, and counting how many
  // special bytes are left.
  DBOptions* options = new (options_ptr) DBOptions();
  FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
  // This relies on the compiler behavior that padding bytes are not changed
  // when copying the struct, so it is prone to failure when that behavior
  // changes. We verify that there are unset bytes to detect that case.
  *options = DBOptions();
  int unset_bytes_base =
      NumUnsetBytes(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
  ASSERT_GT(unset_bytes_base, 0);
  options->~DBOptions();

  options = new (options_ptr) DBOptions();
  FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);

  char* new_options_ptr = new char[sizeof(DBOptions)];
  DBOptions* new_options = new (new_options_ptr) DBOptions();
  FillWithSpecialChar(new_options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);

  // Need to update the option string if a new option is added.
  ASSERT_OK(
      GetDBOptionsFromString(*options,
                             "wal_bytes_per_sync=4295048118;"
                             "delete_obsolete_files_period_micros=4294967758;"
                             "WAL_ttl_seconds=4295008036;"
                             "WAL_size_limit_MB=4295036161;"
                             "wal_dir=path/to/wal_dir;"
                             "db_write_buffer_size=2587;"
                             "max_subcompactions=64330;"
                             "table_cache_numshardbits=28;"
                             "max_open_files=72;"
                             "max_file_opening_threads=35;"
                             "max_background_jobs=8;"
                             "base_background_compactions=3;"
                             "max_background_compactions=33;"
                             "use_fsync=true;"
                             "use_adaptive_mutex=false;"
                             "max_total_wal_size=4295005604;"
                             "compaction_readahead_size=0;"
                             "new_table_reader_for_compaction_inputs=false;"
                             "keep_log_file_num=4890;"
                             "skip_stats_update_on_db_open=false;"
                             "max_manifest_file_size=4295009941;"
                             "db_log_dir=path/to/db_log_dir;"
                             "skip_log_error_on_recovery=true;"
                             "writable_file_max_buffer_size=1048576;"
                             "paranoid_checks=true;"
                             "is_fd_close_on_exec=false;"
                             "bytes_per_sync=4295013613;"
                             "enable_thread_tracking=false;"
                             "recycle_log_file_num=0;"
                             "create_missing_column_families=true;"
                             "log_file_time_to_roll=3097;"
                             "max_background_flushes=35;"
                             "create_if_missing=false;"
                             "error_if_exists=true;"
                             "delayed_write_rate=4294976214;"
                             "manifest_preallocation_size=1222;"
                             "allow_mmap_writes=false;"
                             "stats_dump_period_sec=70127;"
                             "stats_persist_period_sec=54321;"
                             "stats_history_buffer_size=14159;"
                             "allow_fallocate=true;"
                             "allow_mmap_reads=false;"
                             "use_direct_reads=false;"
                             "use_direct_io_for_flush_and_compaction=false;"
                             "max_log_file_size=4607;"
                             "random_access_max_buffer_size=1048576;"
                             "advise_random_on_open=true;"
                             "fail_if_options_file_error=false;"
                             "enable_pipelined_write=false;"
                             "allow_concurrent_memtable_write=true;"
                             "wal_recovery_mode=kPointInTimeRecovery;"
                             "enable_write_thread_adaptive_yield=true;"
                             "write_thread_slow_yield_usec=5;"
                             "write_thread_max_yield_usec=1000;"
                             "access_hint_on_compaction_start=NONE;"
                             "info_log_level=DEBUG_LEVEL;"
                             "dump_malloc_stats=false;"
                             "allow_2pc=false;"
                             "avoid_flush_during_recovery=false;"
                             "avoid_flush_during_shutdown=false;"
"allow_ingest_behind=false;"
"preserve_deletes=false;"
"concurrent_prepare=false;"
"two_write_queues=false;"
"manual_wal_flush=false;"
"seq_per_batch=false;"
"atomic_flush=false",
new_options));
ASSERT_EQ(unset_bytes_base, NumUnsetBytes(new_options_ptr, sizeof(DBOptions),
kDBOptionsBlacklist));
options->~DBOptions();
new_options->~DBOptions();
delete[] options_ptr;
delete[] new_options_ptr;
}
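
// A note on the helper below (our reading, not stated in the original):
// offsetof() is only guaranteed to be well-defined for standard-layout types,
// and ColumnFamilyOptions, with its inherited and non-trivial members, likely
// does not qualify. Computing a member's offset through a pointer-to-member
// applied to a static dummy object sidesteps that restriction.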
template <typename T1, typename T2>
inline int offset_of(T1 T2::*member) {
  static T2 obj;
  return int(size_t(&(obj.*member)) - size_t(&obj));
}

// If this test fails, a new option was likely added to ColumnFamilyOptions
// that cannot be set through GetColumnFamilyOptionsFromString(), or the test
// was not updated accordingly.
// After adding an option, make sure it is settable by
// GetColumnFamilyOptionsFromString() and add it to the input string passed to
// GetColumnFamilyOptionsFromString() in this test.
// If it is a complicated type, also add the field to
// kColumnFamilyOptionsBlacklist, and possibly add customized verification
// for it.
TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
  // Options in the blacklist need to appear in the same order as in
  // ColumnFamilyOptions.
  const OffsetGap kColumnFamilyOptionsBlacklist = {
      {offset_of(&ColumnFamilyOptions::inplace_callback),
       sizeof(UpdateStatus(*)(char*, uint32_t*, Slice, std::string*))},
      {offset_of(
           &ColumnFamilyOptions::memtable_insert_with_hint_prefix_extractor),
       sizeof(std::shared_ptr<const SliceTransform>)},
      {offset_of(&ColumnFamilyOptions::compression_per_level),
       sizeof(std::vector<CompressionType>)},
      {offset_of(
           &ColumnFamilyOptions::max_bytes_for_level_multiplier_additional),
       sizeof(std::vector<int>)},
      {offset_of(&ColumnFamilyOptions::memtable_factory),
       sizeof(std::shared_ptr<MemTableRepFactory>)},
      {offset_of(&ColumnFamilyOptions::table_properties_collector_factories),
       sizeof(ColumnFamilyOptions::TablePropertiesCollectorFactories)},
      {offset_of(&ColumnFamilyOptions::comparator), sizeof(Comparator*)},
      {offset_of(&ColumnFamilyOptions::merge_operator),
       sizeof(std::shared_ptr<MergeOperator>)},
      {offset_of(&ColumnFamilyOptions::compaction_filter),
       sizeof(const CompactionFilter*)},
      {offset_of(&ColumnFamilyOptions::compaction_filter_factory),
       sizeof(std::shared_ptr<CompactionFilterFactory>)},
      {offset_of(&ColumnFamilyOptions::prefix_extractor),
       sizeof(std::shared_ptr<const SliceTransform>)},
      {offset_of(&ColumnFamilyOptions::table_factory),
       sizeof(std::shared_ptr<TableFactory>)},
      {offset_of(&ColumnFamilyOptions::cf_paths),
       sizeof(std::vector<DbPath>)},
      {offset_of(&ColumnFamilyOptions::compaction_thread_limiter),
       sizeof(std::shared_ptr<ConcurrentTaskLimiter>)},
  };

  char* options_ptr = new char[sizeof(ColumnFamilyOptions)];

  // Count padding bytes by setting all bytes in the memory to a special char,
  // copying a well-constructed struct over this memory, and counting how many
  // special bytes are left.
  ColumnFamilyOptions* options = new (options_ptr) ColumnFamilyOptions();
  FillWithSpecialChar(options_ptr, sizeof(ColumnFamilyOptions),
                      kColumnFamilyOptionsBlacklist);
  // This relies on the compiler behavior that padding bytes are not changed
  // when copying the struct, so it is prone to failure when that behavior
  // changes. We verify that there are unset bytes to detect that case.
  *options = ColumnFamilyOptions();
  // Deprecated option which is not initialized. Need to set it to avoid a
  // Valgrind error.
  options->max_mem_compaction_level = 0;
  int unset_bytes_base = NumUnsetBytes(
      options_ptr, sizeof(ColumnFamilyOptions), kColumnFamilyOptionsBlacklist);
  ASSERT_GT(unset_bytes_base, 0);
  options->~ColumnFamilyOptions();

  options = new (options_ptr) ColumnFamilyOptions();
  FillWithSpecialChar(options_ptr, sizeof(ColumnFamilyOptions),
                      kColumnFamilyOptionsBlacklist);

  // The following options are not settable through
  // GetColumnFamilyOptionsFromString():
  options->rate_limit_delay_max_milliseconds = 33;
  options->compaction_options_universal = CompactionOptionsUniversal();
  options->compression_opts = CompressionOptions();
  options->bottommost_compression_opts = CompressionOptions();
  options->hard_rate_limit = 0;
  options->soft_rate_limit = 0;
  options->purge_redundant_kvs_while_flush = false;
  options->max_mem_compaction_level = 0;
  options->compaction_filter = nullptr;

  char* new_options_ptr = new char[sizeof(ColumnFamilyOptions)];
  ColumnFamilyOptions* new_options =
      new (new_options_ptr) ColumnFamilyOptions();
  FillWithSpecialChar(new_options_ptr, sizeof(ColumnFamilyOptions),
                      kColumnFamilyOptionsBlacklist);

  // Need to update the option string if a new option is added.
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      *options,
      "compaction_filter_factory=mpudlojcujCompactionFilterFactory;"
      "table_factory=PlainTable;"
      "prefix_extractor=rocksdb.CappedPrefix.13;"
      "comparator=leveldb.BytewiseComparator;"
      "compression_per_level=kBZip2Compression:kBZip2Compression:"
      "kBZip2Compression:kNoCompression:kZlibCompression:kBZip2Compression:"
      "kSnappyCompression;"
      "max_bytes_for_level_base=986;"
      "bloom_locality=8016;"
      "target_file_size_base=4294976376;"
      "memtable_huge_page_size=2557;"
      "max_successive_merges=5497;"
      "max_sequential_skip_in_iterations=4294971408;"
      "arena_block_size=1893;"
      "target_file_size_multiplier=35;"
      "min_write_buffer_number_to_merge=9;"
      "max_write_buffer_number=84;"
      "write_buffer_size=1653;"
      "max_compaction_bytes=64;"
      "max_bytes_for_level_multiplier=60;"
      "memtable_factory=SkipListFactory;"
      "compression=kNoCompression;"
      "bottommost_compression=kDisableCompressionOption;"
      "level0_stop_writes_trigger=33;"
      "num_levels=99;"
      "level0_slowdown_writes_trigger=22;"
      "level0_file_num_compaction_trigger=14;"
      "compaction_filter=urxcqstuwnCompactionFilter;"
      "soft_rate_limit=530.615385;"
      "soft_pending_compaction_bytes_limit=0;"
      "max_write_buffer_number_to_maintain=84;"
      "merge_operator=aabcxehazrMergeOperator;"
      "memtable_prefix_bloom_size_ratio=0.4642;"
      "memtable_whole_key_filtering=true;"
      "memtable_insert_with_hint_prefix_extractor=rocksdb.CappedPrefix.13;"
      "paranoid_file_checks=true;"
      "force_consistency_checks=true;"
      "inplace_update_num_locks=7429;"
      "optimize_filters_for_hits=false;"
      "level_compaction_dynamic_level_bytes=false;"
      "inplace_update_support=false;"
      "compaction_style=kCompactionStyleFIFO;"
      "compaction_pri=kMinOverlappingRatio;"
      "hard_pending_compaction_bytes_limit=0;"
      "disable_auto_compactions=false;"
      "report_bg_io_stats=true;"
      "ttl=60;"
      "compaction_options_fifo={max_table_files_size=3;allow_"
      "compaction=false;};",
      new_options));

  ASSERT_EQ(unset_bytes_base,
            NumUnsetBytes(new_options_ptr, sizeof(ColumnFamilyOptions),
                          kColumnFamilyOptionsBlacklist));

  options->~ColumnFamilyOptions();
  new_options->~ColumnFamilyOptions();

  delete[] options_ptr;
  delete[] new_options_ptr;
}
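
// A usage sketch for struct-valued options (illustration only; the values are
// arbitrary): the string syntax parsed above also accepts nested option
// structs in braces, e.g.
//
//   ColumnFamilyOptions base, parsed;
//   Status s = GetColumnFamilyOptionsFromString(
//       base,
//       "write_buffer_size=1048576;"
//       "compaction_options_fifo={max_table_files_size=1024;};",
//       &parsed);
//   assert(s.ok() && parsed.write_buffer_size == 1048576);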
#endif // !__clang__
#endif // OS_LINUX || OS_WIN
#endif // !ROCKSDB_LITE
} // namespace rocksdb
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
#ifdef GFLAGS
  ParseCommandLineFlags(&argc, &argv, true);
#endif  // GFLAGS
  return RUN_ALL_TESTS();
}