rocksdb/utilities/options/options_util_test.cc


// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
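
// Tests for the options file utilities: persisting DB and column family
// options to an OPTIONS file, loading them back, and checking their
// compatibility against the options persisted by an existing database.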
#ifndef ROCKSDB_LITE
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <cctype>
#include <unordered_map>
#include "options/options_parser.h"
#include "rocksdb/db.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/options_util.h"
#include "util/random.h"
#include "util/testharness.h"
#include "util/testutil.h"
#ifndef GFLAGS
bool FLAGS_enable_print = false;
#else
#include <gflags/gflags.h>
using GFLAGS::ParseCommandLineFlags;
DEFINE_bool(enable_print, false, "Print options generated to console.");
#endif // GFLAGS
namespace rocksdb {
class OptionsUtilTest : public testing::Test {
 public:
  OptionsUtilTest() : rnd_(0xFB) {
    env_.reset(new test::StringEnv(Env::Default()));
    dbname_ = test::TmpDir() + "/options_util_test";
  }

 protected:
  std::unique_ptr<test::StringEnv> env_;
  std::string dbname_;
  Random rnd_;
};
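
// Returns true iff `tf` is a BlockBasedTableFactory, judged by comparing its
// reported Name() against that of a freshly constructed
// BlockBasedTableFactory.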
bool IsBlockBasedTableFactory(TableFactory* tf) {
  return tf->Name() == BlockBasedTableFactory().Name();
}
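
// Round-trips a randomly initialized set of DBOptions and ColumnFamilyOptions
// through an OPTIONS file: the options are written with
// PersistRocksDBOptions(), read back with LoadOptionsFromFile(), and verified
// to match. Re-randomizing the in-memory options afterwards must make the
// verification fail, which guards against the verifiers passing trivially.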
TEST_F(OptionsUtilTest, SaveAndLoad) {
  const size_t kCFCount = 5;
  DBOptions db_opt;
  std::vector<std::string> cf_names;
  std::vector<ColumnFamilyOptions> cf_opts;
  test::RandomInitDBOptions(&db_opt, &rnd_);
  for (size_t i = 0; i < kCFCount; ++i) {
    cf_names.push_back(i == 0 ? kDefaultColumnFamilyName
                              : test::RandomName(&rnd_, 10));
    cf_opts.emplace_back();
    test::RandomInitCFOptions(&cf_opts.back(), &rnd_);
  }

  const std::string kFileName = "OPTIONS-123456";
  PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, env_.get());

  DBOptions loaded_db_opt;
  std::vector<ColumnFamilyDescriptor> loaded_cf_descs;
  ASSERT_OK(LoadOptionsFromFile(kFileName, env_.get(), &loaded_db_opt,
                                &loaded_cf_descs));
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(db_opt, loaded_db_opt));
  test::RandomInitDBOptions(&db_opt, &rnd_);
  ASSERT_NOK(RocksDBOptionsParser::VerifyDBOptions(db_opt, loaded_db_opt));

  for (size_t i = 0; i < kCFCount; ++i) {
    ASSERT_EQ(cf_names[i], loaded_cf_descs[i].name);
    ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
        cf_opts[i], loaded_cf_descs[i].options));
    if (IsBlockBasedTableFactory(cf_opts[i].table_factory.get())) {
      ASSERT_OK(RocksDBOptionsParser::VerifyTableFactory(
          cf_opts[i].table_factory.get(),
          loaded_cf_descs[i].options.table_factory.get()));
    }
    test::RandomInitCFOptions(&cf_opts[i], &rnd_);
    ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
        cf_opts[i], loaded_cf_descs[i].options));
  }

  for (size_t i = 0; i < kCFCount; ++i) {
    if (cf_opts[i].compaction_filter) {
      delete cf_opts[i].compaction_filter;
    }
  }
}
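
// For reference, a minimal sketch (kept as a comment, not compiled) of how an
// application would typically reload the most recently persisted options
// before reopening a DB, using LoadLatestOptions() from
// rocksdb/utilities/options_util.h. Here `dbname` stands for the
// application's DB path:
//
//   DBOptions db_options;
//   std::vector<ColumnFamilyDescriptor> cf_descs;
//   Status s = LoadLatestOptions(dbname, Env::Default(), &db_options,
//                                &cf_descs);
//   assert(s.ok());
//   // Components that cannot be fully serialized (e.g. merge operators,
//   // compaction filters) must still be re-attached by the application
//   // before calling DB::Open().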
namespace {
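// Stub implementations used by the SanityCheck test below. Each stub reports
// a Name() that differs from whatever was persisted in the OPTIONS file, so
// substituting one for the original component is expected to make
// CheckOptionsCompatibility() fail wherever the component's identity matters.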
class DummyTableFactory : public TableFactory {
 public:
  DummyTableFactory() {}
  virtual ~DummyTableFactory() {}

  virtual const char* Name() const { return "DummyTableFactory"; }

  virtual Status NewTableReader(
      const TableReaderOptions& /*table_reader_options*/,
      unique_ptr<RandomAccessFileReader>&& /*file*/, uint64_t /*file_size*/,
      unique_ptr<TableReader>* /*table_reader*/,
      bool /*prefetch_index_and_filter_in_cache*/) const {
    return Status::NotSupported();
  }

  virtual TableBuilder* NewTableBuilder(
      const TableBuilderOptions& /*table_builder_options*/,
      uint32_t /*column_family_id*/, WritableFileWriter* /*file*/) const {
    return nullptr;
  }

  virtual Status SanitizeOptions(const DBOptions& /*db_opts*/,
                                 const ColumnFamilyOptions& /*cf_opts*/) const {
    return Status::NotSupported();
  }

  virtual std::string GetPrintableTableOptions() const { return ""; }
};

class DummyMergeOperator : public MergeOperator {
 public:
  DummyMergeOperator() {}
  virtual ~DummyMergeOperator() {}

  virtual bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
                           MergeOperationOutput* /*merge_out*/) const override {
    return false;
  }

  virtual bool PartialMergeMulti(const Slice& /*key*/,
                                 const std::deque<Slice>& /*operand_list*/,
                                 std::string* /*new_value*/,
                                 Logger* /*logger*/) const override {
    return false;
  }

  virtual const char* Name() const override { return "DummyMergeOperator"; }
};

class DummySliceTransform : public SliceTransform {
 public:
  DummySliceTransform() {}
  virtual ~DummySliceTransform() {}

  // Return the name of this transformation.
  virtual const char* Name() const { return "DummySliceTransform"; }

  // Transform a src in the domain to a dst in the range.
  virtual Slice Transform(const Slice& src) const { return src; }

  // Determine whether this is a valid src upon which the function applies.
  virtual bool InDomain(const Slice& /*src*/) const { return false; }

  // Determine whether dst=Transform(src) for some src.
  virtual bool InRange(const Slice& /*dst*/) const { return false; }
};

} // namespace
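
// Opens a DB (which persists an OPTIONS file), then verifies that
// CheckOptionsCompatibility() accepts the original options and reacts as
// expected when individual components are swapped out: replacing the merge
// operator, comparator, or table factory is rejected, while changing or
// clearing the prefix extractor is accepted.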
TEST_F(OptionsUtilTest, SanityCheck) {
  DBOptions db_opt;
  std::vector<ColumnFamilyDescriptor> cf_descs;
  const size_t kCFCount = 5;
  for (size_t i = 0; i < kCFCount; ++i) {
    cf_descs.emplace_back();
    cf_descs.back().name =
        (i == 0) ? kDefaultColumnFamilyName : test::RandomName(&rnd_, 10);
    cf_descs.back().options.table_factory.reset(NewBlockBasedTableFactory());
    // Assign non-null values to prefix_extractors except the first cf.
    cf_descs.back().options.prefix_extractor.reset(
        i != 0 ? test::RandomSliceTransform(&rnd_) : nullptr);
    cf_descs.back().options.merge_operator.reset(
        test::RandomMergeOperator(&rnd_));
  }

  db_opt.create_missing_column_families = true;
  db_opt.create_if_missing = true;

  DestroyDB(dbname_, Options(db_opt, cf_descs[0].options));
  DB* db;
  std::vector<ColumnFamilyHandle*> handles;
  // open and persist the options
  ASSERT_OK(DB::Open(db_opt, dbname_, cf_descs, &handles, &db));

  // close the db
  for (auto* handle : handles) {
    delete handle;
  }
  delete db;

  // perform sanity check
  ASSERT_OK(
      CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));

  ASSERT_GE(kCFCount, 5);
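
  // The blocks below mutate cf_descs[0] through cf_descs[3], hence the check
  // above that enough column families were created.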
  // merge operator
  {
    std::shared_ptr<MergeOperator> merge_op =
        cf_descs[0].options.merge_operator;
    ASSERT_NE(merge_op.get(), nullptr);
    cf_descs[0].options.merge_operator.reset();
    ASSERT_NOK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
    cf_descs[0].options.merge_operator.reset(new DummyMergeOperator());
    ASSERT_NOK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
    cf_descs[0].options.merge_operator = merge_op;
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
  }

  // prefix extractor
  {
    std::shared_ptr<const SliceTransform> prefix_extractor =
        cf_descs[1].options.prefix_extractor;
    // It's okay to set prefix_extractor to nullptr.
    ASSERT_NE(prefix_extractor, nullptr);
    cf_descs[1].options.prefix_extractor.reset();
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
    cf_descs[1].options.prefix_extractor.reset(new DummySliceTransform());
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
    cf_descs[1].options.prefix_extractor = prefix_extractor;
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
  }

  // prefix extractor nullptr case
  {
    std::shared_ptr<const SliceTransform> prefix_extractor =
        cf_descs[0].options.prefix_extractor;
    // It's okay to set prefix_extractor to nullptr.
    ASSERT_EQ(prefix_extractor, nullptr);
    cf_descs[0].options.prefix_extractor.reset();
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
    // It's okay to change prefix_extractor from nullptr to non-nullptr
    cf_descs[0].options.prefix_extractor.reset(new DummySliceTransform());
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
    cf_descs[0].options.prefix_extractor = prefix_extractor;
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
  }

  // comparator
  {
    test::SimpleSuffixReverseComparator comparator;
    auto* prev_comparator = cf_descs[2].options.comparator;
    cf_descs[2].options.comparator = &comparator;
    ASSERT_NOK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
    cf_descs[2].options.comparator = prev_comparator;
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
  }

  // table factory
  {
    std::shared_ptr<TableFactory> table_factory =
        cf_descs[3].options.table_factory;
    ASSERT_NE(table_factory, nullptr);
    cf_descs[3].options.table_factory.reset(new DummyTableFactory());
    ASSERT_NOK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
    cf_descs[3].options.table_factory = table_factory;
    ASSERT_OK(
        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
  }
}

} // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
#ifdef GFLAGS
  ParseCommandLineFlags(&argc, &argv, true);
#endif  // GFLAGS
  return RUN_ALL_TESTS();
}

#else
#include <cstdio>
int main(int argc, char** argv) {
  printf("Skipped in RocksDBLite as utilities are not supported.\n");
  return 0;
}
#endif // !ROCKSDB_LITE