Implementation of CuckooTableReader

Summary:
Contains:
- Implementation of TableReader based on Cuckoo Hashing
- Unittests for CuckooTableReader
- Performance test for TableReader

Test Plan:
make cuckoo_table_reader_test
./cuckoo_table_reader_test
make valgrind_check
make asan_check

Reviewers: yhchiang, sdong, igor, ljin

Reviewed By: ljin

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D20511
main
Radheshyam Balasundaram 11 years ago
parent d650612c4c
commit 62f9b071ff
  1. 8
      Makefile
  2. 1
      include/rocksdb/table.h
  3. 24
      table/cuckoo_table_builder.cc
  4. 41
      table/cuckoo_table_builder_test.cc
  5. 112
      table/cuckoo_table_reader.cc
  6. 71
      table/cuckoo_table_reader.h
  7. 353
      table/cuckoo_table_reader_test.cc

@ -116,9 +116,10 @@ TESTS = \
table_test \
thread_local_test \
geodb_test \
rate_limiter_test \
rate_limiter_test \
cuckoo_table_builder_test \
options_test
options_test \
cuckoo_table_reader_test
TOOLS = \
sst_dump \
@ -420,6 +421,9 @@ geodb_test: utilities/geodb/geodb_test.o $(LIBOBJECTS) $(TESTHARNESS)
cuckoo_table_builder_test: table/cuckoo_table_builder_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(CXX) table/cuckoo_table_builder_test.o $(LIBOBJECTS) $(TESTHARNESS) $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
cuckoo_table_reader_test: table/cuckoo_table_reader_test.o $(LIBOBJECTS) $(TESTHARNESS) $(BENCHHARNESS)
$(CXX) table/cuckoo_table_reader_test.o $(LIBOBJECTS) $(TESTHARNESS) $(BENCHHARNESS) $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
options_test: util/options_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(CXX) util/options_test.o $(LIBOBJECTS) $(TESTHARNESS) $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)

@ -189,6 +189,7 @@ struct CuckooTablePropertyNames {
static const std::string kValueLength;
static const std::string kNumHashTable;
static const std::string kMaxNumBuckets;
static const std::string kIsLastLevel;
};
#endif // ROCKSDB_LITE

@ -29,6 +29,8 @@ const std::string CuckooTablePropertyNames::kMaxNumBuckets =
"rocksdb.cuckoo.bucket.maxnum";
const std::string CuckooTablePropertyNames::kValueLength =
"rocksdb.cuckoo.value.length";
const std::string CuckooTablePropertyNames::kIsLastLevel =
"rocksdb.cuckoo.file.islastlevel";
// Obtained by running echo rocksdb.table.cuckoo | sha1sum
extern const uint64_t kCuckooTableMagicNumber = 0x926789d0c5f17873ull;
@ -170,11 +172,13 @@ Status CuckooTableBuilder::Finish() {
unused_bucket.resize(bucket_size_, 'a');
// Write the table.
uint32_t num_added = 0;
for (auto& bucket : buckets_) {
Status s;
if (bucket.is_empty) {
s = file_->Append(Slice(unused_bucket));
} else {
++num_added;
if (is_last_level_file_) {
Slice user_key = ExtractUserKey(bucket.key);
s = file_->Append(user_key);
@ -192,6 +196,7 @@ Status CuckooTableBuilder::Finish() {
return s;
}
}
assert(num_added == NumEntries());
uint64_t offset = buckets_.size() * bucket_size_;
unused_bucket.resize(properties_.fixed_key_len);
@ -204,6 +209,10 @@ Status CuckooTableBuilder::Finish() {
CuckooTablePropertyNames::kMaxNumBuckets].assign(
reinterpret_cast<const char*>(&max_num_buckets_),
sizeof(max_num_buckets_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kIsLastLevel].assign(
reinterpret_cast<const char*>(&is_last_level_file_),
sizeof(is_last_level_file_));
// Write meta blocks.
MetaIndexBuilder meta_index_builder;
@ -266,7 +275,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(const Slice& key,
struct CuckooNode {
uint64_t bucket_id;
uint32_t depth;
int parent_pos;
uint32_t parent_pos;
CuckooNode(uint64_t bucket_id, uint32_t depth, int parent_pos)
: bucket_id(bucket_id), depth(depth), parent_pos(parent_pos) {}
};
@ -286,7 +295,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(const Slice& key,
uint64_t bucket_id = hash_vals[hash_cnt];
buckets_[bucket_id].make_space_for_key_call_id =
make_space_for_key_call_id_;
tree.push_back(CuckooNode(bucket_id, 0, -1));
tree.push_back(CuckooNode(bucket_id, 0, 0));
}
bool null_found = false;
uint32_t curr_pos = 0;
@ -299,9 +308,6 @@ bool CuckooTableBuilder::MakeSpaceForKey(const Slice& key,
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_table_; ++hash_cnt) {
uint64_t child_bucket_id = GetSliceHash(
ExtractUserKey(curr_bucket.key), hash_cnt, max_num_buckets_);
if (child_bucket_id == curr_node.bucket_id) {
continue;
}
if (buckets_[child_bucket_id].make_space_for_key_call_id ==
make_space_for_key_call_id_) {
continue;
@ -319,17 +325,19 @@ bool CuckooTableBuilder::MakeSpaceForKey(const Slice& key,
}
if (null_found) {
int bucket_to_replace_pos = tree.size()-1;
uint32_t bucket_to_replace_pos = tree.size()-1;
while (bucket_to_replace_pos >= 0) {
CuckooNode& curr_node = tree[bucket_to_replace_pos];
if (curr_node.parent_pos != -1) {
buckets_[curr_node.bucket_id] = buckets_[curr_node.parent_pos];
if (bucket_to_replace_pos >= num_hash_table_) {
buckets_[curr_node.bucket_id] =
buckets_[tree[curr_node.parent_pos].bucket_id];
bucket_to_replace_pos = curr_node.parent_pos;
} else {
*bucket_id = curr_node.bucket_id;
return true;
}
}
assert(false);
return true;
} else {
return false;

@ -44,7 +44,7 @@ class CuckooBuilderTest {
void CheckFileContents(const std::string& expected_data,
std::string expected_unused_bucket, uint64_t expected_max_buckets,
uint32_t expected_num_hash_fun) {
uint32_t expected_num_hash_fun, bool expected_is_last_level) {
// Read file
unique_ptr<RandomAccessFile> read_file;
ASSERT_OK(env_->NewRandomAccessFile(fname, &read_file, env_options_));
@ -75,6 +75,10 @@ class CuckooBuilderTest {
*reinterpret_cast<const uint32_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kNumHashTable].data());
ASSERT_EQ(expected_num_hash_fun, num_hash_fun_found);
const bool is_last_level_found =
*reinterpret_cast<const bool*>(props->user_collected_properties[
CuckooTablePropertyNames::kIsLastLevel].data());
ASSERT_EQ(expected_is_last_level, is_last_level_found);
delete props;
// Check contents of the bucket.
std::string read_data;
@ -149,7 +153,7 @@ TEST(CuckooBuilderTest, NoCollision) {
ASSERT_OK(cuckoo_builder.Finish());
writable_file->Close();
CheckFileContents(expected_file_data, expected_unused_bucket,
expected_max_buckets, expected_num_hash_fun);
expected_max_buckets, expected_num_hash_fun, false);
}
TEST(CuckooBuilderTest, NoCollisionLastLevel) {
@ -200,7 +204,7 @@ TEST(CuckooBuilderTest, NoCollisionLastLevel) {
ASSERT_OK(cuckoo_builder.Finish());
writable_file->Close();
CheckFileContents(expected_file_data, expected_unused_bucket,
expected_max_buckets, expected_num_hash_fun);
expected_max_buckets, expected_num_hash_fun, true);
}
TEST(CuckooBuilderTest, WithCollision) {
@ -253,7 +257,7 @@ TEST(CuckooBuilderTest, WithCollision) {
ASSERT_OK(cuckoo_builder.Finish());
writable_file->Close();
CheckFileContents(expected_file_data, expected_unused_bucket,
expected_max_buckets, expected_num_hash_fun);
expected_max_buckets, expected_num_hash_fun, false);
}
TEST(CuckooBuilderTest, FailWithTooManyCollisions) {
@ -326,11 +330,12 @@ TEST(CuckooBuilderTest, FailWhenSameKeyInserted) {
TEST(CuckooBuilderTest, WithACollisionPath) {
hash_map.clear();
// Have two hash functions. Insert elements with overlapping hashes.
// Finally insert an element which will displace all the current elements.
// Finally insert an element with hash value somewhere in the middle
// so that it displaces all the elements after that.
num_hash_fun = 2;
uint32_t expected_num_hash_fun = num_hash_fun;
uint32_t max_search_depth = 100;
num_items = max_search_depth + 2;
num_items = 2*max_search_depth + 2;
std::vector<std::string> user_keys(num_items);
std::vector<std::string> keys(num_items);
std::vector<std::string> values(num_items);
@ -342,16 +347,20 @@ TEST(CuckooBuilderTest, WithACollisionPath) {
values[i] = "value" + std::to_string(i+100);
// Make all hash values collide with the next element.
AddHashLookups(user_keys[i], i, num_hash_fun);
expected_bucket_id[i] = i+1;
if (i <= max_search_depth) {
expected_bucket_id[i] = i;
} else {
expected_bucket_id[i] = i+1;
}
}
expected_bucket_id[0] = 0;
user_keys.back() = "keys" + std::to_string(num_items + 99);
ParsedInternalKey ikey(user_keys.back(), num_items + 1000, kTypeValue);
AppendInternalKey(&keys.back(), ikey);
values.back() = "value" + std::to_string(num_items+100);
// Make both hash values collide with first element.
AddHashLookups(user_keys.back(), 0, num_hash_fun);
expected_bucket_id.back() = 1;
// Make hash values collide with first and middle elements.
// Inserting at 0 will fail after exceeding search depth limit.
hash_map[user_keys.back()] = {0, max_search_depth + 1};
expected_bucket_id.back() = max_search_depth + 1;
ikey_length = keys[0].size();
value_length = values[0].size();
@ -387,7 +396,7 @@ TEST(CuckooBuilderTest, WithACollisionPath) {
ASSERT_OK(cuckoo_builder.Finish());
writable_file->Close();
CheckFileContents(expected_file_data, expected_unused_bucket,
expected_max_buckets, expected_num_hash_fun);
expected_max_buckets, expected_num_hash_fun, false);
}
TEST(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
@ -397,7 +406,7 @@ TEST(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
num_hash_fun = 2;
uint32_t max_search_depth = 100;
num_items = max_search_depth + 3;
num_items = 2*max_search_depth + 3;
std::vector<std::string> user_keys(num_items);
std::vector<std::string> keys(num_items);
std::vector<std::string> values(num_items);
@ -412,9 +421,9 @@ TEST(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
user_keys.back() = "keys" + std::to_string(num_items + 99);
ParsedInternalKey ikey(user_keys.back(), num_items + 1000, kTypeValue);
AppendInternalKey(&keys.back(), ikey);
Slice(values.back()) = "value" + std::to_string(num_items+100);
// Make both hash values collide with first element.
AddHashLookups(user_keys.back(), 0, num_hash_fun);
values.back() = "value" + std::to_string(num_items+100);
// Make hash values collide with middle element.
hash_map[user_keys.back()] = {0, max_search_depth + 1};
ikey_length = keys[0].size();
value_length = values[0].size();

@ -0,0 +1,112 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef ROCKSDB_LITE
#include "table/cuckoo_table_reader.h"
#include <string>
#include "table/meta_blocks.h"
#include "util/coding.h"
namespace rocksdb {
extern const uint64_t kCuckooTableMagicNumber;
// Opens a cuckoo table: reads the table properties footer and caches all
// cuckoo-hashing parameters (hash-function count, key/value/bucket lengths,
// bucket count, empty-bucket marker, last-level flag). On any failure the
// error is recorded in status_; callers must check status() afterwards.
CuckooTableReader::CuckooTableReader(
    const Options& options,
    std::unique_ptr<RandomAccessFile>&& file,
    uint64_t file_size,
    uint64_t (*GetSliceHashPtr)(const Slice&, uint32_t, uint64_t))
    : file_(std::move(file)),
      file_size_(file_size),
      GetSliceHash(GetSliceHashPtr) {
  // Lookups are served straight out of file_data_, so mmap reads are
  // required.
  if (!options.allow_mmap_reads) {
    status_ = Status::InvalidArgument("File is not mmaped");
    // Bug fix: return immediately. Previously control fell through and the
    // ReadTableProperties() call below silently overwrote this error status.
    return;
  }
  TableProperties* props = nullptr;
  status_ = ReadTableProperties(file_.get(), file_size, kCuckooTableMagicNumber,
      options.env, options.info_log.get(), &props);
  if (!status_.ok()) {
    return;
  }
  table_props_.reset(props);
  // Every cuckoo-specific parameter below is mandatory; missing entries make
  // the file unreadable, so each absence is an InvalidArgument.
  auto& user_props = props->user_collected_properties;
  auto hash_funs = user_props.find(CuckooTablePropertyNames::kNumHashTable);
  if (hash_funs == user_props.end()) {
    status_ = Status::InvalidArgument("Number of hash functions not found");
    return;
  }
  num_hash_fun_ = *reinterpret_cast<const uint32_t*>(hash_funs->second.data());
  auto unused_key = user_props.find(CuckooTablePropertyNames::kEmptyKey);
  if (unused_key == user_props.end()) {
    status_ = Status::InvalidArgument("Empty bucket value not found");
    return;
  }
  unused_key_ = unused_key->second;
  key_length_ = props->fixed_key_len;
  auto value_length = user_props.find(CuckooTablePropertyNames::kValueLength);
  if (value_length == user_props.end()) {
    status_ = Status::InvalidArgument("Value length not found");
    return;
  }
  value_length_ = *reinterpret_cast<const uint32_t*>(
      value_length->second.data());
  bucket_length_ = key_length_ + value_length_;
  auto num_buckets = user_props.find(CuckooTablePropertyNames::kMaxNumBuckets);
  if (num_buckets == user_props.end()) {
    status_ = Status::InvalidArgument("Num buckets not found");
    return;
  }
  num_buckets_ = *reinterpret_cast<const uint64_t*>(num_buckets->second.data());
  auto is_last_level = user_props.find(CuckooTablePropertyNames::kIsLastLevel);
  if (is_last_level == user_props.end()) {
    status_ = Status::InvalidArgument("Is last level not found");
    return;
  }
  is_last_level_ = *reinterpret_cast<const bool*>(is_last_level->second.data());
  // With mmap reads enabled this presumably just exposes the mapped region
  // through file_data_ (no copy) — hence the allow_mmap_reads check above.
  status_ = file_->Read(0, file_size, &file_data_, nullptr);
}
// Looks up `key` (a full internal key). Probes each candidate bucket in
// hash-function order; invokes result_handler exactly once on a match.
// An empty bucket terminates the probe early (the key is absent).
// mark_key_may_exist_handler is accepted for interface compatibility but
// never invoked. Returns non-OK only when `key` cannot be parsed.
Status CuckooTableReader::Get(
    const ReadOptions& readOptions, const Slice& key, void* handle_context,
    bool (*result_handler)(void* arg, const ParsedInternalKey& k,
                           const Slice& v),
    void (*mark_key_may_exist_handler)(void* handle_context)) {
  ParsedInternalKey ikey;
  if (!ParseInternalKey(key, &ikey)) {
    // Bug fix: corrected typo in the error message ("inernal").
    return Status::Corruption("Unable to parse key into internal key.");
  }
  for (uint32_t hash_cnt = 0; hash_cnt < num_hash_fun_; ++hash_cnt) {
    uint64_t hash_val = GetSliceHash(ikey.user_key, hash_cnt, num_buckets_);
    assert(hash_val < num_buckets_);
    uint64_t offset = hash_val * bucket_length_;
    const char* bucket = &file_data_.data()[offset];
    if (unused_key_.compare(0, key_length_, bucket, key_length_) == 0) {
      // Empty bucket: cuckoo hashing guarantees the key is not in the table.
      return Status::OK();
    }
    // Here, we compare only the user key part as we support only one entry
    // per user key and we don't support snapshot.
    if (ikey.user_key.compare(Slice(bucket, ikey.user_key.size())) == 0) {
      Slice value = Slice(&bucket[key_length_], value_length_);
      result_handler(handle_context, ikey, value);
      // We don't support merge operations. So, we return here.
      return Status::OK();
    }
  }
  // All candidate buckets held other keys: not found.
  return Status::OK();
}
// Iteration is not supported yet; always returns nullptr, so callers must
// check for it until this is implemented.
Iterator* CuckooTableReader::NewIterator(const ReadOptions&, Arena* arena) {
  // TODO(rbs): Implement this as this will be used in compaction.
  return nullptr;
}
} // namespace rocksdb
#endif

@ -0,0 +1,71 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
#include <memory>
#include "db/dbformat.h"
#include "rocksdb/env.h"
#include "table/table_reader.h"
namespace rocksdb {
class Arena;
class TableReader;
// TableReader for cuckoo-hash based SST files. Serves point lookups directly
// out of an mmap-ed view of the file; iteration is not implemented yet.
class CuckooTableReader: public TableReader {
 public:
  // Reads the table-properties footer of `file` (of size `file_size`) and
  // caches all cuckoo parameters. Requires options.allow_mmap_reads.
  // Construction errors are reported through status(), not exceptions.
  CuckooTableReader(
      const Options& options,
      std::unique_ptr<RandomAccessFile>&& file,
      uint64_t file_size,
      uint64_t (*GetSliceHash)(const Slice&, uint32_t, uint64_t));
  ~CuckooTableReader() {}

  std::shared_ptr<const TableProperties> GetTableProperties() const {
    return table_props_;
  }

  // Status of construction / file reads; check after constructing.
  Status status() const { return status_; }

  // Probes the candidate buckets for `key` (a full internal key) and calls
  // result_handler on a match. mark_key_may_exist_handler is never invoked.
  Status Get(
      const ReadOptions& readOptions, const Slice& key, void* handle_context,
      bool (*result_handler)(void* arg, const ParsedInternalKey& k,
                             const Slice& v),
      void (*mark_key_may_exist_handler)(void* handle_context) = nullptr);

  // Currently always returns nullptr (not implemented).
  Iterator* NewIterator(const ReadOptions&, Arena* arena = nullptr);

  // Following methods are not implemented for Cuckoo Table Reader
  uint64_t ApproximateOffsetOf(const Slice& key) { return 0; }
  void SetupForCompaction() {}
  void Prepare(const Slice& target) {}
  // End of methods not implemented.

 private:
  std::unique_ptr<RandomAccessFile> file_;
  Slice file_data_;        // view over the whole (mmap-ed) file contents
  const uint64_t file_size_;
  bool is_last_level_;     // true => buckets store user keys without seqno
  std::shared_ptr<const TableProperties> table_props_;
  Status status_;
  uint32_t num_hash_fun_;  // number of hash functions used by the builder
  std::string unused_key_; // marker value stored in empty buckets
  uint32_t key_length_;
  uint32_t value_length_;
  uint32_t bucket_length_; // key_length_ + value_length_
  uint64_t num_buckets_;
  // Pluggable hash function (injectable for tests).
  uint64_t (*GetSliceHash)(const Slice& s, uint32_t index,
      uint64_t max_num_buckets);
};
} // namespace rocksdb
#endif // ROCKSDB_LITE

@ -0,0 +1,353 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#ifndef GFLAGS
#include <cstdio>
int main() {
fprintf(stderr, "Please install gflags to run this test\n");
return 1;
}
#else
#include <gflags/gflags.h>
#include <vector>
#include <string>
#include <map>
#include "table/meta_blocks.h"
#include "table/cuckoo_table_builder.h"
#include "table/cuckoo_table_reader.h"
#include "table/cuckoo_table_factory.h"
#include "util/random.h"
#include "util/testharness.h"
#include "util/testutil.h"
using GFLAGS::ParseCommandLineFlags;
using GFLAGS::SetUsageMessage;
DEFINE_string(file_dir, "", "Directory where the files will be created"
" for benchmark. Added for using tmpfs.");
DEFINE_bool(enable_perf, false, "Run Benchmark Tests too.");
namespace rocksdb {
extern const uint64_t kCuckooTableMagicNumber;
namespace {
const uint32_t kNumHashFunc = 10;
// Methods, variables related to Hash functions.
std::unordered_map<std::string, std::vector<uint64_t>> hash_map;
// Registers fake hash values for key `s`: hash function i maps `s` to
// bucket_id + i, giving each function a consecutive bucket.
void AddHashLookups(const std::string& s, uint64_t bucket_id,
                    uint32_t num_hash_fun) {
  std::vector<uint64_t> bucket_ids;
  bucket_ids.reserve(num_hash_fun);
  for (uint32_t fn = 0; fn < num_hash_fun; ++fn) {
    bucket_ids.push_back(bucket_id + fn);
  }
  hash_map[s] = bucket_ids;
}
// Test hash stub: returns the value pre-registered via AddHashLookups for
// key `s` under hash function `index`. `max_num_buckets` is ignored.
uint64_t GetSliceHash(const Slice& s, uint32_t index,
                      uint64_t max_num_buckets) {
  const std::vector<uint64_t>& bucket_ids = hash_map[s.ToString()];
  return bucket_ids[index];
}
// Methods, variables for checking key and values read.
// Expected key/value for a single Get() lookup, plus a count of how many
// times the result handler fired (0 means the key was not found).
struct ValuesToAssert {
  ValuesToAssert(const std::string& key, const Slice& value)
      : expected_user_key(key),
        expected_value(value),
        call_count(0) {}
  std::string expected_user_key;
  // NOTE(review): Slice borrows the caller's buffer — the source string must
  // outlive this struct.
  Slice expected_value;
  int call_count;
};
// Result handler passed to CuckooTableReader::Get(): checks the returned
// key/value against the expectations in `assert_obj` and bumps call_count.
// Returning false stops the lookup after the first match.
bool AssertValues(void* assert_obj,
                  const ParsedInternalKey& k, const Slice& v) {
  // static_cast is the correct (and sufficient) cast back from void*.
  ValuesToAssert* ptr = static_cast<ValuesToAssert*>(assert_obj);
  ASSERT_EQ(ptr->expected_value.ToString(), v.ToString());
  ASSERT_EQ(ptr->expected_user_key, k.user_key.ToString());
  ++ptr->call_count;
  return false;
}
} // namespace
// Fixture: builds a cuckoo table file from `keys`/`values` with the fake
// GetSliceHash above, then verifies it through CuckooTableReader.
class CuckooReaderTest {
 public:
  CuckooReaderTest() {
    // mmap reads are mandatory for CuckooTableReader.
    options.allow_mmap_reads = true;
    env = options.env;
    env_options = EnvOptions(options);
  }

  // Resets per-test state (including the global hash_map) for num_items
  // entries. The parameter shadows the member and is copied into it.
  void SetUp(int num_items) {
    this->num_items = num_items;
    hash_map.clear();
    keys.clear();
    keys.resize(num_items);
    user_keys.clear();
    user_keys.resize(num_items);
    values.clear();
    values.resize(num_items);
  }

  // Writes all keys/values into a fresh cuckoo table at `fname` and records
  // its size. When is_last_level is true the builder stores only user keys.
  void CreateCuckooFile(bool is_last_level) {
    unique_ptr<WritableFile> writable_file;
    ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options));
    CuckooTableBuilder builder(
        writable_file.get(), keys[0].size(), values[0].size(), 0.9,
        10000, kNumHashFunc, 100, is_last_level, GetSliceHash);
    ASSERT_OK(builder.status());
    for (uint32_t key_idx = 0; key_idx < num_items; ++key_idx) {
      builder.Add(Slice(keys[key_idx]), Slice(values[key_idx]));
      ASSERT_EQ(builder.NumEntries(), key_idx + 1);
      ASSERT_OK(builder.status());
    }
    ASSERT_OK(builder.Finish());
    ASSERT_EQ(num_items, builder.NumEntries());
    file_size = builder.FileSize();
    ASSERT_OK(writable_file->Close());
  }

  // Reopens `fname` and asserts every key yields exactly one handler call
  // with the expected user key and value.
  void CheckReader() {
    unique_ptr<RandomAccessFile> read_file;
    ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
    CuckooTableReader reader(
        options,
        std::move(read_file),
        file_size,
        GetSliceHash);
    ASSERT_OK(reader.status());
    for (uint32_t i = 0; i < num_items; ++i) {
      ValuesToAssert v(user_keys[i], values[i]);
      ASSERT_OK(reader.Get(
          ReadOptions(), Slice(keys[i]), &v, AssertValues, nullptr));
      ASSERT_EQ(1, v.call_count);
    }
  }

  std::vector<std::string> keys;       // full internal keys
  std::vector<std::string> user_keys;  // user-key portion of `keys`
  std::vector<std::string> values;
  uint32_t num_items;
  std::string fname;                   // path of the table under test
  uint64_t file_size;                  // size recorded after Finish()
  Options options;
  Env* env;
  EnvOptions env_options;
};
// Every inserted key must be found exactly once, in both regular and
// last-level formats, with and without hash collisions.
TEST(CuckooReaderTest, WhenKeyExists) {
  SetUp(10);
  fname = test::TmpDir() + "/CuckooReader_WhenKeyExists";
  for (uint32_t i = 0; i < num_items; i++) {
    user_keys[i] = "keys" + std::to_string(i+100);
    ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue);
    AppendInternalKey(&keys[i], ikey);
    values[i] = "value" + std::to_string(i+100);
    // Disjoint hash ranges per key: no collisions in this first pass.
    AddHashLookups(user_keys[i], i * kNumHashFunc, kNumHashFunc);
  }
  CreateCuckooFile(false);
  CheckReader();
  // Last level file.
  CreateCuckooFile(true);
  CheckReader();
  // Test with collision. Make all hash values collide.
  hash_map.clear();
  for (uint32_t i = 0; i < num_items; i++) {
    AddHashLookups(user_keys[i], 0, kNumHashFunc);
  }
  CreateCuckooFile(false);
  CheckReader();
  // Last level file.
  CreateCuckooFile(true);
  CheckReader();
}
// Missing keys must never trigger the result handler: covers colliding
// hashes, independent hashes, a corrupted (unparsable) key, and hashes that
// land only on empty buckets.
TEST(CuckooReaderTest, WhenKeyNotFound) {
  // Add keys with colliding hash values.
  SetUp(kNumHashFunc / 2);
  fname = test::TmpDir() + "/CuckooReader_WhenKeyNotFound";
  for (uint32_t i = 0; i < num_items; i++) {
    user_keys[i] = "keys" + std::to_string(i+100);
    ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue);
    AppendInternalKey(&keys[i], ikey);
    values[i] = "value" + std::to_string(i+100);
    // Make all hash values collide.
    AddHashLookups(user_keys[i], 0, kNumHashFunc);
  }
  CreateCuckooFile(false);
  CheckReader();
  unique_ptr<RandomAccessFile> read_file;
  ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
  CuckooTableReader reader(
      options,
      std::move(read_file),
      file_size,
      GetSliceHash);
  ASSERT_OK(reader.status());
  // Search for a key with colliding hash values.
  std::string not_found_user_key = "keys" + std::to_string(num_items + 100);
  std::string not_found_key;
  AddHashLookups(not_found_user_key, 0, kNumHashFunc);
  ParsedInternalKey ikey(not_found_user_key, 1000, kTypeValue);
  AppendInternalKey(&not_found_key, ikey);
  ValuesToAssert v("", "");
  ASSERT_OK(reader.Get(
      ReadOptions(), Slice(not_found_key), &v, AssertValues, nullptr));
  // call_count stays 0: the handler must not have fired.
  ASSERT_EQ(0, v.call_count);
  ASSERT_OK(reader.status());
  // Search for a key with an independent hash value.
  std::string not_found_user_key2 = "keys" + std::to_string(num_items + 101);
  std::string not_found_key2;
  AddHashLookups(not_found_user_key2, kNumHashFunc, kNumHashFunc);
  ParsedInternalKey ikey2(not_found_user_key2, 1000, kTypeValue);
  AppendInternalKey(&not_found_key2, ikey2);
  ASSERT_OK(reader.Get(
      ReadOptions(), Slice(not_found_key2), &v, AssertValues, nullptr));
  ASSERT_EQ(0, v.call_count);
  ASSERT_OK(reader.status());
  // Test read with corrupted key.
  not_found_key2.pop_back();
  ASSERT_TRUE(!ParseInternalKey(not_found_key2, &ikey));
  // A truncated internal key must surface as a Corruption status.
  ASSERT_TRUE(reader.Get(
      ReadOptions(), Slice(not_found_key2), &v,
      AssertValues, nullptr).IsCorruption());
  ASSERT_EQ(0, v.call_count);
  ASSERT_OK(reader.status());
  // Test read when key is unused key.
  std::string unused_user_key = "keys10:";
  // Add hash values that map to empty buckets.
  AddHashLookups(unused_user_key, kNumHashFunc, kNumHashFunc);
  std::string unused_key;
  ParsedInternalKey ikey3(unused_user_key, 1000, kTypeValue);
  AppendInternalKey(&unused_key, ikey3);
  ASSERT_OK(reader.Get(
      ReadOptions(), Slice(unused_key), &v, AssertValues, nullptr));
  ASSERT_EQ(0, v.call_count);
  ASSERT_OK(reader.status());
}
// Performance tests
namespace {
// Appends `num` internal keys to *keys. Each user key is the raw bytes of
// the loop counter (host byte order — fine for a benchmark), left-padded
// with 'k' up to user_key_length bytes, then wrapped as an internal key.
void GenerateKeys(uint64_t num, std::vector<std::string>* keys,
                  uint32_t user_key_length) {
  for (uint64_t idx = 0; idx < num; ++idx) {
    std::string raw_bytes(reinterpret_cast<char*>(&idx), sizeof(idx));
    std::string user_key =
        std::string(user_key_length - raw_bytes.size(), 'k') + raw_bytes;
    ParsedInternalKey ikey(user_key, num, kTypeValue);
    std::string internal_key;
    AppendInternalKey(&internal_key, ikey);
    keys->push_back(internal_key);
  }
}
// Handler for the timed benchmark loop: ignores the match entirely so the
// handler's cost does not pollute the measurement. false = stop the lookup.
bool DoNothing(void* arg, const ParsedInternalKey& k, const Slice& v) {
  return false;
}
// Handler for the benchmark's verification pass: bumps the counter at
// cnt_ptr and checks the value equals the leading bytes of the internal key
// (the builder stored value = prefix of the key). false = stop the lookup.
bool CheckValue(void* cnt_ptr, const ParsedInternalKey& k, const Slice& v) {
  // static_cast is the correct (and sufficient) cast back from void*.
  ++*static_cast<int*>(cnt_ptr);
  std::string expected_value;
  AppendInternalKey(&expected_value, k);
  ASSERT_EQ(0, v.compare(Slice(&expected_value[0], v.size())));
  return false;
}
// Create last level file as we are interested in measuring performance of
// last level file only.
// Builds a last-level cuckoo table with `num` entries (value = first
// value_length bytes of each key), verifies every key, then times num_reads
// random Get() calls and prints the per-op latency.
void BM_CuckooRead(uint64_t num, uint32_t key_length,
    uint32_t value_length, uint64_t num_reads, double hash_ratio) {
  // Values are carved out of the keys, so they cannot be longer.
  assert(value_length <= key_length);

  std::vector<std::string> keys;
  Options options;
  options.allow_mmap_reads = true;
  Env* env = options.env;
  EnvOptions env_options = EnvOptions(options);
  uint64_t file_size;
  if (FLAGS_file_dir.empty()) {
    FLAGS_file_dir = test::TmpDir();
  }
  std::string fname = FLAGS_file_dir + "/cuckoo_read_benchmark";

  GenerateKeys(num, &keys, key_length);
  uint64_t predicted_file_size =
      num * (key_length + value_length) / hash_ratio + 1024;

  unique_ptr<WritableFile> writable_file;
  ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options));
  CuckooTableBuilder builder(
      writable_file.get(), keys[0].size(), value_length, hash_ratio,
      predicted_file_size, kMaxNumHashTable, 1000, true, GetSliceMurmurHash);
  ASSERT_OK(builder.status());
  // Bug fix: the loop counter must be 64-bit; a uint32_t counter would wrap
  // (infinite loop / missing entries) once num exceeds UINT32_MAX.
  for (uint64_t key_idx = 0; key_idx < num; ++key_idx) {
    // Value is just a part of key.
    builder.Add(Slice(keys[key_idx]), Slice(&keys[key_idx][0], value_length));
    ASSERT_EQ(builder.NumEntries(), key_idx + 1);
    ASSERT_OK(builder.status());
  }
  ASSERT_OK(builder.Finish());
  ASSERT_EQ(num, builder.NumEntries());
  file_size = builder.FileSize();
  ASSERT_OK(writable_file->Close());

  unique_ptr<RandomAccessFile> read_file;
  ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));

  CuckooTableReader reader(
      options,
      std::move(read_file),
      file_size,
      GetSliceMurmurHash);
  ASSERT_OK(reader.status());
  // Keep the shared_ptr alive while we reference into the properties map;
  // this also avoids copying the whole map as the old code did.
  auto table_props = reader.GetTableProperties();
  const UserCollectedProperties& user_props =
      table_props->user_collected_properties;
  const uint32_t num_hash_fun = *reinterpret_cast<const uint32_t*>(
      user_props.at(CuckooTablePropertyNames::kNumHashTable).data());
  // Bug fix: %lu is not a portable format for uint64_t; cast explicitly.
  fprintf(stderr, "With %llu items and hash table ratio %f, number of hash"
      " functions used: %u.\n",
      static_cast<unsigned long long>(num), hash_ratio, num_hash_fun);
  ReadOptions r_options;
  // Warm-up + correctness pass: every key must be found exactly once.
  for (auto& key : keys) {
    int cnt = 0;
    ASSERT_OK(reader.Get(r_options, Slice(key), &cnt, CheckValue, nullptr));
    ASSERT_EQ(1, cnt);
  }
  // Shuffle Keys so the timed reads hit buckets in random order.
  std::random_shuffle(keys.begin(), keys.end());

  uint64_t time_now = env->NowMicros();
  for (uint64_t i = 0; i < num_reads; ++i) {
    reader.Get(r_options, Slice(keys[i % num]), nullptr, DoNothing, nullptr);
  }
  fprintf(stderr, "Time taken per op is %.3fus\n",
      (env->NowMicros() - time_now)*1.0/num_reads);
}
} // namespace.
// Benchmark suite: runs only when --enable_perf is passed on the command
// line; otherwise it is a no-op so normal test runs stay fast.
TEST(CuckooReaderTest, Performance) {
  // In all these tests, num_reads = 10*num_items.
  if (!FLAGS_enable_perf) {
    return;
  }
  BM_CuckooRead(100000, 8, 4, 1000000, 0.9);
  BM_CuckooRead(1000000, 8, 4, 10000000, 0.9);
  BM_CuckooRead(1000000, 8, 4, 10000000, 0.7);
  BM_CuckooRead(10000000, 8, 4, 100000000, 0.9);
  BM_CuckooRead(10000000, 8, 4, 100000000, 0.7);
}
} // namespace rocksdb
int main(int argc, char** argv) {
  // Consume gflags options (--file_dir, --enable_perf) before the harness
  // sees argv.
  ParseCommandLineFlags(&argc, &argv, true);
  rocksdb::test::RunAllTests();
  return 0;
}
#endif // GFLAGS.
Loading…
Cancel
Save