// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef ROCKSDB_LITE

#include <stdint.h>
#include "rocksdb/sst_dump_tool.h"

#include "rocksdb/filter_policy.h"
#include "table/block_based_table_factory.h"
#include "table/table_builder.h"
#include "util/file_reader_writer.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {

const uint32_t optLength = 100;

namespace {
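// Helpers that generate the keys ("k_0000", "k_0001", ...) and values
// ("v_0000", ...) written into the test SST file; both are returned in
// InternalKey-encoded form.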
static std::string MakeKey(int i) {
  char buf[100];
  snprintf(buf, sizeof(buf), "k_%04d", i);
  InternalKey key(std::string(buf), 0, ValueType::kTypeValue);
  return key.Encode().ToString();
}

static std::string MakeValue(int i) {
  char buf[100];
  snprintf(buf, sizeof(buf), "v_%04d", i);
  InternalKey key(std::string(buf), 0, ValueType::kTypeValue);
  return key.Encode().ToString();
}
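
// Writes an SST file named |file_name| containing roughly 1K sequential
// key/value pairs, built with the given block-based table options.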
void createSST(const std::string& file_name,
               const BlockBasedTableOptions& table_options) {
  std::shared_ptr<rocksdb::TableFactory> tf;
  tf.reset(new rocksdb::BlockBasedTableFactory(table_options));

  unique_ptr<WritableFile> file;
  Env* env = Env::Default();
  EnvOptions env_options;
  ReadOptions read_options;
  Options opts;
  const ImmutableCFOptions imoptions(opts);
  rocksdb::InternalKeyComparator ikc(opts.comparator);
  unique_ptr<TableBuilder> tb;

  env->NewWritableFile(file_name, &file, env_options);
  opts.table_factory = tf;
  std::vector<std::unique_ptr<IntTblPropCollectorFactory> >
      int_tbl_prop_collector_factories;
  unique_ptr<WritableFileWriter> file_writer(
      new WritableFileWriter(std::move(file), EnvOptions()));
  std::string column_family_name;
  int unknown_level = -1;
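  // Build the table contents through the factory's TableBuilder, writing
  // blocks out via the WritableFileWriter wrapper.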
  tb.reset(opts.table_factory->NewTableBuilder(
      TableBuilderOptions(imoptions, ikc, &int_tbl_prop_collector_factories,
                          CompressionType::kNoCompression, CompressionOptions(),
                          nullptr /* compression_dict */,
                          false /* skip_filters */, column_family_name,
                          unknown_level),
      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
      file_writer.get()));

  // Populate slightly more than 1K keys
  uint32_t num_keys = 1024;
  for (uint32_t i = 0; i < num_keys; i++) {
    tb->Add(MakeKey(i), MakeValue(i));
  }
  tb->Finish();
  file_writer->Close();
}
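
// Deletes the generated SST file and the matching "<name>_dump.txt" output
// file, if one was produced.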
void cleanup(const std::string& file_name) {
  Env* env = Env::Default();
  env->DeleteFile(file_name);
  std::string outfile_name = file_name.substr(0, file_name.length() - 4);
  outfile_name.append("_dump.txt");
  env->DeleteFile(outfile_name);
}
}  // namespace

// Test for sst dump tool "raw" mode
class SSTDumpToolTest : public testing::Test {
 public:
  BlockBasedTableOptions table_options_;

  SSTDumpToolTest() {}
  ~SSTDumpToolTest() {}
};
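
// Each test below creates an SST file, invokes the dump tool on it with a
// particular set of command-line arguments, and cleans up the generated
// files afterwards.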
TEST_F(SSTDumpToolTest, EmptyFilter) {
  std::string file_name = "rocksdb_sst_test.sst";
  createSST(file_name, table_options_);

  char* usage[3];
  for (int i = 0; i < 3; i++) {
    usage[i] = new char[optLength];
  }
  snprintf(usage[0], optLength, "./sst_dump");
  snprintf(usage[1], optLength, "--command=raw");
  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");

  rocksdb::SSTDumpTool tool;
  ASSERT_TRUE(!tool.Run(3, usage));

  cleanup(file_name);
  for (int i = 0; i < 3; i++) {
    delete[] usage[i];
  }
}

TEST_F(SSTDumpToolTest, FilterBlock) {
  table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
  std::string file_name = "rocksdb_sst_test.sst";
  createSST(file_name, table_options_);

  char* usage[3];
  for (int i = 0; i < 3; i++) {
    usage[i] = new char[optLength];
  }
  snprintf(usage[0], optLength, "./sst_dump");
  snprintf(usage[1], optLength, "--command=raw");
  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");

  rocksdb::SSTDumpTool tool;
  ASSERT_TRUE(!tool.Run(3, usage));

  cleanup(file_name);
  for (int i = 0; i < 3; i++) {
    delete[] usage[i];
  }
}

TEST_F(SSTDumpToolTest, FullFilterBlock) {
  table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
  std::string file_name = "rocksdb_sst_test.sst";
  createSST(file_name, table_options_);

  char* usage[3];
  for (int i = 0; i < 3; i++) {
    usage[i] = new char[optLength];
  }
  snprintf(usage[0], optLength, "./sst_dump");
  snprintf(usage[1], optLength, "--command=raw");
  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");

  rocksdb::SSTDumpTool tool;
  ASSERT_TRUE(!tool.Run(3, usage));

  cleanup(file_name);
  for (int i = 0; i < 3; i++) {
    delete[] usage[i];
  }
}

TEST_F(SSTDumpToolTest, GetProperties) {
  table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
  std::string file_name = "rocksdb_sst_test.sst";
  createSST(file_name, table_options_);

  char* usage[3];
  for (int i = 0; i < 3; i++) {
    usage[i] = new char[optLength];
  }
  snprintf(usage[0], optLength, "./sst_dump");
  snprintf(usage[1], optLength, "--show_properties");
  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");

  rocksdb::SSTDumpTool tool;
  ASSERT_TRUE(!tool.Run(3, usage));

  cleanup(file_name);
  for (int i = 0; i < 3; i++) {
    delete[] usage[i];
  }
}

TEST_F(SSTDumpToolTest, CompressedSizes) {
  table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
  std::string file_name = "rocksdb_sst_test.sst";
  createSST(file_name, table_options_);

  char* usage[3];
  for (int i = 0; i < 3; i++) {
    usage[i] = new char[optLength];
  }
  snprintf(usage[0], optLength, "./sst_dump");
  snprintf(usage[1], optLength, "--show_compression_sizes");
  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");

  rocksdb::SSTDumpTool tool;
  ASSERT_TRUE(!tool.Run(3, usage));

  cleanup(file_name);
  for (int i = 0; i < 3; i++) {
    delete[] usage[i];
  }
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

#else
#include <stdio.h>

int main(int argc, char** argv) {
  fprintf(stderr, "SKIPPED as SSTDumpTool is not supported in ROCKSDB_LITE\n");
  return 0;
}

#endif  // !ROCKSDB_LITE