|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
|
|
|
|
#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "file/random_access_file_reader.h"
#include "file/writable_file_writer.h"
#include "table/cuckoo/cuckoo_table_builder.h"
#include "table/meta_blocks.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
|
|
|
|
|
|
|
|
namespace ROCKSDB_NAMESPACE {
|
|
|
|
extern const uint64_t kCuckooTableMagicNumber;
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hash_map;
|
|
|
|
|
|
|
|
uint64_t GetSliceHash(const Slice& s, uint32_t index,
|
|
|
|
uint64_t /*max_num_buckets*/) {
|
|
|
|
return hash_map[s.ToString()][index];
|
|
|
|
}
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
class CuckooBuilderTest : public testing::Test {
|
|
|
|
public:
|
|
|
|
CuckooBuilderTest() {
|
|
|
|
env_ = Env::Default();
|
|
|
|
Options options;
|
|
|
|
options.allow_mmap_reads = true;
|
|
|
|
env_options_ = EnvOptions(options);
|
|
|
|
}
|
|
|
|
|
|
|
|
void CheckFileContents(const std::vector<std::string>& keys,
|
|
|
|
const std::vector<std::string>& values,
|
|
|
|
const std::vector<uint64_t>& expected_locations,
|
|
|
|
std::string expected_unused_bucket, uint64_t expected_table_size,
|
|
|
|
uint32_t expected_num_hash_func, bool expected_is_last_level,
|
|
|
|
uint32_t expected_cuckoo_block_size = 1) {
|
|
|
|
uint64_t num_deletions = 0;
|
|
|
|
for (const auto& key : keys) {
|
|
|
|
ParsedInternalKey parsed;
|
|
|
|
if (ParseInternalKey(key, &parsed) && parsed.type == kTypeDeletion) {
|
|
|
|
num_deletions++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Read file
|
|
|
|
std::unique_ptr<RandomAccessFile> read_file;
|
|
|
|
ASSERT_OK(env_->NewRandomAccessFile(fname, &read_file, env_options_));
|
|
|
|
uint64_t read_file_size;
|
|
|
|
ASSERT_OK(env_->GetFileSize(fname, &read_file_size));
|
|
|
|
|
|
|
|
Options options;
|
|
|
|
options.allow_mmap_reads = true;
|
|
|
|
ImmutableCFOptions ioptions(options);
|
|
|
|
|
|
|
|
// Assert Table Properties.
|
|
|
|
TableProperties* props = nullptr;
|
|
|
|
std::unique_ptr<RandomAccessFileReader> file_reader(
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
new RandomAccessFileReader(NewLegacyRandomAccessFileWrapper(read_file),
|
|
|
|
fname));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(ReadTableProperties(file_reader.get(), read_file_size,
|
|
|
|
kCuckooTableMagicNumber, ioptions,
|
|
|
|
&props, true /* compression_type_missing */));
|
|
|
|
// Check unused bucket.
|
|
|
|
std::string unused_key = props->user_collected_properties[
|
|
|
|
CuckooTablePropertyNames::kEmptyKey];
|
|
|
|
ASSERT_EQ(expected_unused_bucket.substr(0,
|
|
|
|
props->fixed_key_len), unused_key);
|
|
|
|
|
|
|
|
uint64_t value_len_found =
|
|
|
|
*reinterpret_cast<const uint64_t*>(props->user_collected_properties[
|
|
|
|
CuckooTablePropertyNames::kValueLength].data());
|
|
|
|
ASSERT_EQ(values.empty() ? 0 : values[0].size(), value_len_found);
|
|
|
|
ASSERT_EQ(props->raw_value_size, values.size()*value_len_found);
|
|
|
|
const uint64_t table_size =
|
|
|
|
*reinterpret_cast<const uint64_t*>(props->user_collected_properties[
|
|
|
|
CuckooTablePropertyNames::kHashTableSize].data());
|
|
|
|
ASSERT_EQ(expected_table_size, table_size);
|
|
|
|
const uint32_t num_hash_func_found =
|
|
|
|
*reinterpret_cast<const uint32_t*>(props->user_collected_properties[
|
|
|
|
CuckooTablePropertyNames::kNumHashFunc].data());
|
|
|
|
ASSERT_EQ(expected_num_hash_func, num_hash_func_found);
|
|
|
|
const uint32_t cuckoo_block_size =
|
|
|
|
*reinterpret_cast<const uint32_t*>(props->user_collected_properties[
|
|
|
|
CuckooTablePropertyNames::kCuckooBlockSize].data());
|
|
|
|
ASSERT_EQ(expected_cuckoo_block_size, cuckoo_block_size);
|
|
|
|
const bool is_last_level_found =
|
|
|
|
*reinterpret_cast<const bool*>(props->user_collected_properties[
|
|
|
|
CuckooTablePropertyNames::kIsLastLevel].data());
|
|
|
|
ASSERT_EQ(expected_is_last_level, is_last_level_found);
|
|
|
|
|
|
|
|
ASSERT_EQ(props->num_entries, keys.size());
|
|
|
|
ASSERT_EQ(props->num_deletions, num_deletions);
|
|
|
|
ASSERT_EQ(props->fixed_key_len, keys.empty() ? 0 : keys[0].size());
|
|
|
|
ASSERT_EQ(props->data_size, expected_unused_bucket.size() *
|
|
|
|
(expected_table_size + expected_cuckoo_block_size - 1));
|
|
|
|
ASSERT_EQ(props->raw_key_size, keys.size()*props->fixed_key_len);
|
|
|
|
ASSERT_EQ(props->column_family_id, 0);
|
|
|
|
ASSERT_EQ(props->column_family_name, kDefaultColumnFamilyName);
|
|
|
|
delete props;
|
|
|
|
|
|
|
|
// Check contents of the bucket.
|
|
|
|
std::vector<bool> keys_found(keys.size(), false);
|
|
|
|
size_t bucket_size = expected_unused_bucket.size();
|
|
|
|
for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) {
|
|
|
|
Slice read_slice;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_reader->Read(i * bucket_size, bucket_size, &read_slice,
|
|
|
|
nullptr, nullptr));
|
|
|
|
size_t key_idx =
|
|
|
|
std::find(expected_locations.begin(), expected_locations.end(), i) -
|
|
|
|
expected_locations.begin();
|
|
|
|
if (key_idx == keys.size()) {
|
|
|
|
// i is not one of the expected locations. Empty bucket.
|
|
|
|
if (read_slice.data() == nullptr) {
|
|
|
|
ASSERT_EQ(0, expected_unused_bucket.size());
|
|
|
|
} else {
|
|
|
|
ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
keys_found[key_idx] = true;
|
|
|
|
ASSERT_EQ(read_slice.compare(keys[key_idx] + values[key_idx]), 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (auto key_found : keys_found) {
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
// Check that all keys wereReader found.
|
|
|
|
ASSERT_TRUE(key_found);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string GetInternalKey(Slice user_key, bool zero_seqno,
|
|
|
|
ValueType type = kTypeValue) {
|
|
|
|
IterKey ikey;
|
|
|
|
ikey.SetInternalKey(user_key, zero_seqno ? 0 : 1000, type);
|
|
|
|
return ikey.GetInternalKey().ToString();
|
|
|
|
}
|
|
|
|
|
Improve Cuckoo Table Reader performance. Inlined hash function and number of buckets a power of two.
Summary:
Use inlined hash functions instead of function pointer. Make number of buckets a power of two and use bitwise and instead of mod.
After these changes, we get almost 50% improvement in performance.
Results:
With 120000000 items, utilization is 89.41%, number of hash functions: 2.
Time taken per op is 0.231us (4.3 Mqps) with batch size of 0
Time taken per op is 0.229us (4.4 Mqps) with batch size of 0
Time taken per op is 0.185us (5.4 Mqps) with batch size of 0
With 120000000 items, utilization is 89.41%, number of hash functions: 2.
Time taken per op is 0.108us (9.3 Mqps) with batch size of 10
Time taken per op is 0.100us (10.0 Mqps) with batch size of 10
Time taken per op is 0.103us (9.7 Mqps) with batch size of 10
With 120000000 items, utilization is 89.41%, number of hash functions: 2.
Time taken per op is 0.101us (9.9 Mqps) with batch size of 25
Time taken per op is 0.098us (10.2 Mqps) with batch size of 25
Time taken per op is 0.097us (10.3 Mqps) with batch size of 25
With 120000000 items, utilization is 89.41%, number of hash functions: 2.
Time taken per op is 0.100us (10.0 Mqps) with batch size of 50
Time taken per op is 0.097us (10.3 Mqps) with batch size of 50
Time taken per op is 0.097us (10.3 Mqps) with batch size of 50
With 120000000 items, utilization is 89.41%, number of hash functions: 2.
Time taken per op is 0.102us (9.8 Mqps) with batch size of 100
Time taken per op is 0.098us (10.2 Mqps) with batch size of 100
Time taken per op is 0.115us (8.7 Mqps) with batch size of 100
With 100000000 items, utilization is 74.51%, number of hash functions: 2.
Time taken per op is 0.201us (5.0 Mqps) with batch size of 0
Time taken per op is 0.155us (6.5 Mqps) with batch size of 0
Time taken per op is 0.152us (6.6 Mqps) with batch size of 0
With 100000000 items, utilization is 74.51%, number of hash functions: 2.
Time taken per op is 0.089us (11.3 Mqps) with batch size of 10
Time taken per op is 0.084us (11.9 Mqps) with batch size of 10
Time taken per op is 0.086us (11.6 Mqps) with batch size of 10
With 100000000 items, utilization is 74.51%, number of hash functions: 2.
Time taken per op is 0.087us (11.5 Mqps) with batch size of 25
Time taken per op is 0.085us (11.7 Mqps) with batch size of 25
Time taken per op is 0.093us (10.8 Mqps) with batch size of 25
With 100000000 items, utilization is 74.51%, number of hash functions: 2.
Time taken per op is 0.094us (10.6 Mqps) with batch size of 50
Time taken per op is 0.094us (10.7 Mqps) with batch size of 50
Time taken per op is 0.093us (10.8 Mqps) with batch size of 50
With 100000000 items, utilization is 74.51%, number of hash functions: 2.
Time taken per op is 0.092us (10.9 Mqps) with batch size of 100
Time taken per op is 0.089us (11.2 Mqps) with batch size of 100
Time taken per op is 0.088us (11.3 Mqps) with batch size of 100
With 80000000 items, utilization is 59.60%, number of hash functions: 2.
Time taken per op is 0.154us (6.5 Mqps) with batch size of 0
Time taken per op is 0.168us (6.0 Mqps) with batch size of 0
Time taken per op is 0.190us (5.3 Mqps) with batch size of 0
With 80000000 items, utilization is 59.60%, number of hash functions: 2.
Time taken per op is 0.081us (12.4 Mqps) with batch size of 10
Time taken per op is 0.077us (13.0 Mqps) with batch size of 10
Time taken per op is 0.083us (12.1 Mqps) with batch size of 10
With 80000000 items, utilization is 59.60%, number of hash functions: 2.
Time taken per op is 0.077us (13.0 Mqps) with batch size of 25
Time taken per op is 0.073us (13.7 Mqps) with batch size of 25
Time taken per op is 0.073us (13.7 Mqps) with batch size of 25
With 80000000 items, utilization is 59.60%, number of hash functions: 2.
Time taken per op is 0.076us (13.1 Mqps) with batch size of 50
Time taken per op is 0.072us (13.8 Mqps) with batch size of 50
Time taken per op is 0.072us (13.8 Mqps) with batch size of 50
With 80000000 items, utilization is 59.60%, number of hash functions: 2.
Time taken per op is 0.077us (13.0 Mqps) with batch size of 100
Time taken per op is 0.074us (13.6 Mqps) with batch size of 100
Time taken per op is 0.073us (13.6 Mqps) with batch size of 100
With 70000000 items, utilization is 52.15%, number of hash functions: 2.
Time taken per op is 0.190us (5.3 Mqps) with batch size of 0
Time taken per op is 0.186us (5.4 Mqps) with batch size of 0
Time taken per op is 0.184us (5.4 Mqps) with batch size of 0
With 70000000 items, utilization is 52.15%, number of hash functions: 2.
Time taken per op is 0.079us (12.7 Mqps) with batch size of 10
Time taken per op is 0.070us (14.2 Mqps) with batch size of 10
Time taken per op is 0.072us (14.0 Mqps) with batch size of 10
With 70000000 items, utilization is 52.15%, number of hash functions: 2.
Time taken per op is 0.080us (12.5 Mqps) with batch size of 25
Time taken per op is 0.072us (14.0 Mqps) with batch size of 25
Time taken per op is 0.071us (14.1 Mqps) with batch size of 25
With 70000000 items, utilization is 52.15%, number of hash functions: 2.
Time taken per op is 0.082us (12.1 Mqps) with batch size of 50
Time taken per op is 0.071us (14.1 Mqps) with batch size of 50
Time taken per op is 0.073us (13.6 Mqps) with batch size of 50
With 70000000 items, utilization is 52.15%, number of hash functions: 2.
Time taken per op is 0.080us (12.5 Mqps) with batch size of 100
Time taken per op is 0.077us (13.0 Mqps) with batch size of 100
Time taken per op is 0.078us (12.8 Mqps) with batch size of 100
Test Plan:
make check all
make valgrind_check
make asan_check
Reviewers: sdong, ljin
Reviewed By: ljin
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D22539
10 years ago
|
|
|
uint64_t NextPowOf2(uint64_t num) {
|
|
|
|
uint64_t n = 2;
|
|
|
|
while (n <= num) {
|
|
|
|
n *= 2;
|
|
|
|
}
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t GetExpectedTableSize(uint64_t num) {
|
|
|
|
return NextPowOf2(static_cast<uint64_t>(num / kHashTableRatio));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
Env* env_;
|
|
|
|
EnvOptions env_options_;
|
|
|
|
std::string fname;
|
|
|
|
const double kHashTableRatio = 0.9;
|
|
|
|
};
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, SuccessWithEmptyFile) {
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("EmptyFile");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, 4, 100,
|
|
|
|
BytewiseComparator(), 1, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
ASSERT_EQ(0UL, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
CheckFileContents({}, {}, {}, "", 2, 2, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
|
|
|
|
for (auto type : {kTypeValue, kTypeDeletion}) {
|
|
|
|
uint32_t num_hash_fun = 4;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
|
|
|
std::vector<std::string> values;
|
|
|
|
if (type == kTypeValue) {
|
|
|
|
values = {"v01", "v02", "v03", "v04"};
|
|
|
|
} else {
|
|
|
|
values = {"", "", "", ""};
|
|
|
|
}
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1, 2, 3}},
|
|
|
|
{user_keys[1], {1, 2, 3, 4}},
|
|
|
|
{user_keys[2], {2, 3, 4, 5}},
|
|
|
|
{user_keys[3], {3, 4, 5, 6}}};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
for (auto& user_key : user_keys) {
|
|
|
|
keys.push_back(GetInternalKey(user_key, false, type));
|
|
|
|
}
|
|
|
|
uint64_t expected_table_size = GetExpectedTableSize(keys.size());
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("NoCollisionFullKey");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
|
|
|
|
100, BytewiseComparator(), 1, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(keys[i]), Slice(values[i]));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
size_t bucket_size = keys[0].size() + values[0].size();
|
|
|
|
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
|
|
|
|
|
|
|
|
std::string expected_unused_bucket = GetInternalKey("key00", true);
|
|
|
|
expected_unused_bucket += std::string(values[0].size(), 'a');
|
|
|
|
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
|
|
|
|
expected_table_size, 2, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
|
|
|
|
uint32_t num_hash_fun = 4;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
|
|
|
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1, 2, 3}},
|
|
|
|
{user_keys[1], {0, 1, 2, 3}},
|
|
|
|
{user_keys[2], {0, 1, 2, 3}},
|
|
|
|
{user_keys[3], {0, 1, 2, 3}},
|
|
|
|
};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
for (auto& user_key : user_keys) {
|
|
|
|
keys.push_back(GetInternalKey(user_key, false));
|
|
|
|
}
|
|
|
|
uint64_t expected_table_size = GetExpectedTableSize(keys.size());
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("WithCollisionFullKey");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
|
|
|
|
100, BytewiseComparator(), 1, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(keys[i]), Slice(values[i]));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
size_t bucket_size = keys[0].size() + values[0].size();
|
|
|
|
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
|
|
|
|
|
|
|
|
std::string expected_unused_bucket = GetInternalKey("key00", true);
|
|
|
|
expected_unused_bucket += std::string(values[0].size(), 'a');
|
|
|
|
CheckFileContents(keys, values, expected_locations,
|
|
|
|
expected_unused_bucket, expected_table_size, 4, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
|
|
|
|
uint32_t num_hash_fun = 4;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
|
|
|
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1, 2, 3}},
|
|
|
|
{user_keys[1], {0, 1, 2, 3}},
|
|
|
|
{user_keys[2], {0, 1, 2, 3}},
|
|
|
|
{user_keys[3], {0, 1, 2, 3}},
|
|
|
|
};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
for (auto& user_key : user_keys) {
|
|
|
|
keys.push_back(GetInternalKey(user_key, false));
|
|
|
|
}
|
|
|
|
uint64_t expected_table_size = GetExpectedTableSize(keys.size());
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
uint32_t cuckoo_block_size = 2;
|
|
|
|
fname = test::PerThreadDBPath("WithCollisionFullKey2");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
|
|
|
CuckooTableBuilder builder(
|
|
|
|
file_writer.get(), kHashTableRatio, num_hash_fun, 100,
|
|
|
|
BytewiseComparator(), cuckoo_block_size, false, false, GetSliceHash,
|
|
|
|
0 /* column_family_id */, kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(keys[i]), Slice(values[i]));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
size_t bucket_size = keys[0].size() + values[0].size();
|
|
|
|
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
|
|
|
|
|
|
|
|
std::string expected_unused_bucket = GetInternalKey("key00", true);
|
|
|
|
expected_unused_bucket += std::string(values[0].size(), 'a');
|
|
|
|
CheckFileContents(keys, values, expected_locations,
|
|
|
|
expected_unused_bucket, expected_table_size, 3, false, cuckoo_block_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
|
|
|
|
// Have two hash functions. Insert elements with overlapping hashes.
|
|
|
|
// Finally insert an element with hash value somewhere in the middle
|
|
|
|
// so that it displaces all the elements after that.
|
|
|
|
uint32_t num_hash_fun = 2;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03",
|
|
|
|
"key04", "key05"};
|
|
|
|
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1}},
|
|
|
|
{user_keys[1], {1, 2}},
|
|
|
|
{user_keys[2], {2, 3}},
|
|
|
|
{user_keys[3], {3, 4}},
|
|
|
|
{user_keys[4], {0, 2}},
|
|
|
|
};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
for (auto& user_key : user_keys) {
|
|
|
|
keys.push_back(GetInternalKey(user_key, false));
|
|
|
|
}
|
|
|
|
uint64_t expected_table_size = GetExpectedTableSize(keys.size());
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("WithCollisionPathFullKey");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
|
|
|
|
100, BytewiseComparator(), 1, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(keys[i]), Slice(values[i]));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
size_t bucket_size = keys[0].size() + values[0].size();
|
|
|
|
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
|
|
|
|
|
|
|
|
std::string expected_unused_bucket = GetInternalKey("key00", true);
|
|
|
|
expected_unused_bucket += std::string(values[0].size(), 'a');
|
|
|
|
CheckFileContents(keys, values, expected_locations,
|
|
|
|
expected_unused_bucket, expected_table_size, 2, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
|
|
|
|
uint32_t num_hash_fun = 2;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03",
|
|
|
|
"key04", "key05"};
|
|
|
|
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1}},
|
|
|
|
{user_keys[1], {1, 2}},
|
|
|
|
{user_keys[2], {3, 4}},
|
|
|
|
{user_keys[3], {4, 5}},
|
|
|
|
{user_keys[4], {0, 3}},
|
|
|
|
};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::vector<uint64_t> expected_locations = {2, 1, 3, 4, 0};
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
for (auto& user_key : user_keys) {
|
|
|
|
keys.push_back(GetInternalKey(user_key, false));
|
|
|
|
}
|
|
|
|
uint64_t expected_table_size = GetExpectedTableSize(keys.size());
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("WithCollisionPathFullKeyAndCuckooBlock");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
|
|
|
|
100, BytewiseComparator(), 2, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(keys[i]), Slice(values[i]));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
size_t bucket_size = keys[0].size() + values[0].size();
|
|
|
|
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
|
|
|
|
|
|
|
|
std::string expected_unused_bucket = GetInternalKey("key00", true);
|
|
|
|
expected_unused_bucket += std::string(values[0].size(), 'a');
|
|
|
|
CheckFileContents(keys, values, expected_locations,
|
|
|
|
expected_unused_bucket, expected_table_size, 2, false, 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
|
|
|
|
uint32_t num_hash_fun = 4;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
|
|
|
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1, 2, 3}},
|
|
|
|
{user_keys[1], {1, 2, 3, 4}},
|
|
|
|
{user_keys[2], {2, 3, 4, 5}},
|
|
|
|
{user_keys[3], {3, 4, 5, 6}}};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
|
|
|
|
uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("NoCollisionUserKey");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
|
|
|
|
100, BytewiseComparator(), 1, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
size_t bucket_size = user_keys[0].size() + values[0].size();
|
|
|
|
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
|
|
|
|
|
|
|
|
std::string expected_unused_bucket = "key00";
|
|
|
|
expected_unused_bucket += std::string(values[0].size(), 'a');
|
|
|
|
CheckFileContents(user_keys, values, expected_locations,
|
|
|
|
expected_unused_bucket, expected_table_size, 2, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
|
|
|
|
uint32_t num_hash_fun = 4;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
|
|
|
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1, 2, 3}},
|
|
|
|
{user_keys[1], {0, 1, 2, 3}},
|
|
|
|
{user_keys[2], {0, 1, 2, 3}},
|
|
|
|
{user_keys[3], {0, 1, 2, 3}},
|
|
|
|
};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
|
|
|
|
uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("WithCollisionUserKey");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
|
|
|
|
100, BytewiseComparator(), 1, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
size_t bucket_size = user_keys[0].size() + values[0].size();
|
|
|
|
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
|
|
|
|
|
|
|
|
std::string expected_unused_bucket = "key00";
|
|
|
|
expected_unused_bucket += std::string(values[0].size(), 'a');
|
|
|
|
CheckFileContents(user_keys, values, expected_locations,
|
|
|
|
expected_unused_bucket, expected_table_size, 4, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
|
|
|
|
uint32_t num_hash_fun = 2;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03",
|
|
|
|
"key04", "key05"};
|
|
|
|
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1}},
|
|
|
|
{user_keys[1], {1, 2}},
|
|
|
|
{user_keys[2], {2, 3}},
|
|
|
|
{user_keys[3], {3, 4}},
|
|
|
|
{user_keys[4], {0, 2}},
|
|
|
|
};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
|
|
|
|
uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("WithCollisionPathUserKey");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
|
|
|
|
2, BytewiseComparator(), 1, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
size_t bucket_size = user_keys[0].size() + values[0].size();
|
|
|
|
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
|
|
|
|
ASSERT_OK(builder.Finish());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
|
|
|
|
|
|
|
|
std::string expected_unused_bucket = "key00";
|
|
|
|
expected_unused_bucket += std::string(values[0].size(), 'a');
|
|
|
|
CheckFileContents(user_keys, values, expected_locations,
|
|
|
|
expected_unused_bucket, expected_table_size, 2, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
|
|
|
|
// Have two hash functions. Insert elements with overlapping hashes.
|
|
|
|
// Finally try inserting an element with hash value somewhere in the middle
|
|
|
|
// and it should fail because the no. of elements to displace is too high.
|
|
|
|
uint32_t num_hash_fun = 2;
|
|
|
|
std::vector<std::string> user_keys = {"key01", "key02", "key03",
|
|
|
|
"key04", "key05"};
|
|
|
|
// Need to have a temporary variable here as VS compiler does not currently
|
|
|
|
// support operator= with initializer_list as a parameter
|
|
|
|
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
|
|
|
{user_keys[0], {0, 1}},
|
|
|
|
{user_keys[1], {1, 2}},
|
|
|
|
{user_keys[2], {2, 3}},
|
|
|
|
{user_keys[3], {3, 4}},
|
|
|
|
{user_keys[4], {0, 1}},
|
|
|
|
};
|
|
|
|
hash_map = std::move(hm);
|
|
|
|
|
|
|
|
std::unique_ptr<WritableFile> writable_file;
|
|
|
|
fname = test::PerThreadDBPath("WithCollisionPathUserKey");
|
|
|
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
5 years ago
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
|
|
|
|
EnvOptions()));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
|
|
|
|
2, BytewiseComparator(), 1, false, false,
|
|
|
|
GetSliceHash, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
for (uint32_t i = 0; i < user_keys.size(); i++) {
|
|
|
|
builder.Add(Slice(GetInternalKey(user_keys[i], false)), Slice("value"));
|
|
|
|
ASSERT_EQ(builder.NumEntries(), i + 1);
|
|
|
|
ASSERT_OK(builder.status());
|
|
|
|
}
|
|
|
|
ASSERT_TRUE(builder.Finish().IsNotSupported());
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
10 years ago
|
|
|
ASSERT_OK(file_writer->Close());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adds the same user key twice (once as a non-last-level internal key, once
// as a last-level key). Each Add is accepted, but Finish() must fail with
// NotSupported because cuckoo tables cannot hold duplicate user keys.
TEST_F(CuckooBuilderTest, FailWhenSameKeyInserted) {
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {"repeatedkey", {0, 1, 2, 3}}};
  hash_map = std::move(hm);
  uint32_t num_hash_fun = 4;
  std::string user_key = "repeatedkey";

  std::unique_ptr<WritableFile> writable_file;
  fname = test::PerThreadDBPath("FailWhenSameKeyInserted");
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
      NewLegacyWritableFileWrapper(std::move(writable_file)), fname,
      EnvOptions()));
  // NOTE(review): the literal 100 after num_hash_fun is presumably a generous
  // max search depth so depth limits cannot mask the duplicate-key failure —
  // confirm against CuckooTableBuilder's constructor.
  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             100, BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());

  // Same user key in two internal-key forms; both Adds are counted.
  builder.Add(Slice(GetInternalKey(user_key, false)), Slice("value1"));
  ASSERT_EQ(builder.NumEntries(), 1u);
  ASSERT_OK(builder.status());
  builder.Add(Slice(GetInternalKey(user_key, true)), Slice("value2"));
  ASSERT_EQ(builder.NumEntries(), 2u);
  ASSERT_OK(builder.status());

  // The duplicate is only detected when the table is built.
  ASSERT_TRUE(builder.Finish().IsNotSupported());
  ASSERT_OK(file_writer->Close());
}
|
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
|
|
|
|
|
|
|
// Standard GoogleTest entry point: runs every TEST_F in this file.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
#include <stdio.h>
|
|
|
|
|
|
|
|
// ROCKSDB_LITE builds exclude the cuckoo table; report the skip and succeed.
int main(int /*argc*/, char** /*argv*/) {
  fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n");
  return 0;
}
|
|
|
|
|
|
|
|
#endif // ROCKSDB_LITE
|