rocksdb/table/cuckoo_table_reader.h


// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
#include <memory>
#include <utility>
#include <vector>
#include "db/dbformat.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "table/table_reader.h"
namespace rocksdb {
class Arena;
class TableReader;
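// CuckooTableReader reads SST files written in the cuckoo table format,
// where the file body is a single open-addressed cuckoo hash table over
// fixed-length keys. A point lookup probes at most num_hash_func_ candidate
// buckets (each cuckoo_block_size_ slots wide), so Get() needs no index
// block or binary search.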
class CuckooTableReader: public TableReader {
 public:
  CuckooTableReader(
      const ImmutableCFOptions& ioptions,
      std::unique_ptr<RandomAccessFile>&& file,
      uint64_t file_size,
      const Comparator* user_comparator,
      uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t));
  ~CuckooTableReader() {}

  std::shared_ptr<const TableProperties> GetTableProperties() const override {
    return table_props_;
  }

  Status status() const { return status_; }

  Status Get(const ReadOptions& read_options, const Slice& key,
             GetContext* get_context) override;

  Iterator* NewIterator(const ReadOptions&, Arena* arena = nullptr) override;
  // Prefetches the bucket(s) that `target` hashes to, so that a following
  // Get() for the same key hits warm cache; intended for batched lookups.
  void Prepare(const Slice& target) override;
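  // A typical batched-lookup pattern (a sketch only; GetContext construction
  // and status checks elided, `keys` is a hypothetical batch):
  //
  //   for (const Slice& k : keys) reader->Prepare(k);  // prefetch buckets
  //   for (const Slice& k : keys) reader->Get(read_options, k, &get_context);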
  // Report an approximation of how much memory has been used.
  size_t ApproximateMemoryUsage() const override;

  // The following methods are not implemented for CuckooTableReader.
  uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; }
  void SetupForCompaction() override {}
  // End of methods not implemented.

 private:
  friend class CuckooTableIterator;
  void LoadAllKeys(std::vector<std::pair<Slice, uint32_t>>* key_to_bucket_id);

  std::unique_ptr<RandomAccessFile> file_;
  Slice file_data_;
  bool is_last_level_;
  // If true, the first hash function is the identity function rather than
  // MurmurHash, which is noticeably cheaper on the point-lookup hot path.
  bool identity_as_first_hash_;
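  // (A sketch of the idea, not the exact implementation: with fixed 8-byte
  // keys, the first probe could use
  //   *reinterpret_cast<const uint64_t*>(key.data()) % table_size_
  // as the bucket index, skipping MurmurHash entirely.)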
  bool use_module_hash_;
  std::shared_ptr<const TableProperties> table_props_;
  Status status_;
  uint32_t num_hash_func_;
  std::string unused_key_;
  uint32_t key_length_;
  uint32_t user_key_length_;
  uint32_t value_length_;
  uint32_t bucket_length_;
  uint32_t cuckoo_block_size_;
  uint32_t cuckoo_block_bytes_minus_one_;
  uint64_t table_size_;
  const Comparator* ucomp_;
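  // User-supplied hash function: maps (key, hash function index) to a bucket
  // id in [0, max_num_buckets). It must match the function used by the table
  // builder, or lookups will probe the wrong buckets.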
  uint64_t (*get_slice_hash_)(const Slice& s, uint32_t index,
                              uint64_t max_num_buckets);
};
} // namespace rocksdb
#endif // ROCKSDB_LITE