Format files under table/ by clang-format (#10852)

Summary:
Run clang-format on files under the `table` directory.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10852

Reviewed By: ajkr

Differential Revision: D40650732

Pulled By: anand1976

fbshipit-source-id: 2023a958e37fd6274040c5181130284600c9e0ef
Branch: main
Author: anand76 (committed by Facebook GitHub Bot)
Parent: 7a95938899
Commit: 727bad78b8
55 changed files (number of changed lines in parentheses):

1. table/adaptive/adaptive_table_factory.cc (7)
2. table/adaptive/adaptive_table_factory.h (1)
3. table/block_based/block_based_table_builder.cc (1)
4. table/block_based/block_based_table_factory.cc (3)
5. table/block_based/block_based_table_iterator.h (1)
6. table/block_based/block_based_table_reader.cc (4)
7. table/block_based/block_based_table_reader_impl.h (1)
8. table/block_based/block_builder.cc (2)
9. table/block_based/block_builder.h (3)
10. table/block_based/cachable_entry.h (20)
11. table/block_based/data_block_hash_index.cc (3)
12. table/block_based/filter_block_reader_common.cc (1)
13. table/block_based/filter_policy.cc (5)
14. table/block_based/flush_block_policy.cc (4)
15. table/block_based/full_filter_block.cc (3)
16. table/block_based/full_filter_block.h (1)
17. table/block_based/full_filter_block_test.cc (1)
18. table/block_based/index_builder.h (2)
19. table/block_based/index_reader_common.h (1)
20. table/block_based/partitioned_filter_block_test.cc (7)
21. table/block_based/partitioned_index_iterator.h (1)
22. table/block_based/uncompression_dict_reader.h (1)
23. table/block_fetcher.cc (11)
24. table/block_fetcher.h (4)
25. table/cuckoo/cuckoo_table_builder.cc (128)
26. table/cuckoo/cuckoo_table_builder.h (7)
27. table/cuckoo/cuckoo_table_builder_test.cc (118)
28. table/cuckoo/cuckoo_table_factory.h (3)
29. table/cuckoo/cuckoo_table_reader.cc (89)
30. table/cuckoo/cuckoo_table_reader.h (4)
31. table/cuckoo/cuckoo_table_reader_test.cc (49)
32. table/iter_heap.h (2)
33. table/iterator.cc (2)
34. table/iterator_wrapper.h (4)
35. table/meta_blocks.cc (10)
36. table/multiget_context.h (11)
37. table/persistent_cache_helper.cc (1)
38. table/plain/plain_table_bloom.cc (2)
39. table/plain/plain_table_builder.cc (17)
40. table/plain/plain_table_builder.h (4)
41. table/plain/plain_table_factory.h (4)
42. table/plain/plain_table_index.cc (5)
43. table/plain/plain_table_index.h (4)
44. table/plain/plain_table_key_coding.cc (1)
45. table/plain/plain_table_reader.cc (29)
46. table/plain/plain_table_reader.h (24)
47. table/scoped_arena_iterator.h (7)
48. table/sst_file_dumper.cc (5)
49. table/sst_file_writer.cc (7)
50. table/table_properties.cc (45)
51. table/table_reader.h (5)
52. table/table_reader_bench.cc (17)
53. table/table_test.cc (67)
54. table/two_level_iterator.cc (1)
55. table/two_level_iterator.h (2)

@@ -6,9 +6,9 @@
#ifndef ROCKSDB_LITE
#include "table/adaptive/adaptive_table_factory.h"
#include "table/table_builder.h"
#include "table/format.h"
#include "port/port.h"
#include "table/format.h"
#include "table/table_builder.h"
namespace ROCKSDB_NAMESPACE {
@@ -118,7 +118,8 @@ extern TableFactory* NewAdaptiveTableFactory(
std::shared_ptr<TableFactory> plain_table_factory,
std::shared_ptr<TableFactory> cuckoo_table_factory) {
return new AdaptiveTableFactory(table_factory_to_write,
block_based_table_factory, plain_table_factory, cuckoo_table_factory);
block_based_table_factory,
plain_table_factory, cuckoo_table_factory);
}
} // namespace ROCKSDB_NAMESPACE

@@ -8,6 +8,7 @@
#ifndef ROCKSDB_LITE
#include <string>
#include "rocksdb/options.h"
#include "rocksdb/table.h"

@@ -59,7 +59,6 @@ namespace ROCKSDB_NAMESPACE {
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;
// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
namespace {

@@ -474,7 +474,8 @@ void BlockBasedTableFactory::InitializeOptions() {
}
if (table_options_.index_type == BlockBasedTableOptions::kHashSearch &&
table_options_.index_block_restart_interval != 1) {
// Currently kHashSearch is incompatible with index_block_restart_interval > 1
// Currently kHashSearch is incompatible with
// index_block_restart_interval > 1
table_options_.index_block_restart_interval = 1;
}
if (table_options_.partition_filters &&

@@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/block_based_table_reader_impl.h"
#include "table/block_based/block_prefetcher.h"
#include "table/block_based/reader_common.h"

@@ -104,9 +104,7 @@ extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;
BlockBasedTable::~BlockBasedTable() {
delete rep_;
}
BlockBasedTable::~BlockBasedTable() { delete rep_; }
namespace {
// Read the block identified by "handle" from "file".

@@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/reader_common.h"
// The file contains some member functions of BlockBasedTable that

@@ -34,7 +34,9 @@
#include "table/block_based/block_builder.h"
#include <assert.h>
#include <algorithm>
#include "db/dbformat.h"
#include "rocksdb/comparator.h"
#include "table/block_based/data_block_footer.h"

@@ -8,9 +8,10 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include <stdint.h>
#include <vector>
#include <stdint.h>
#include "rocksdb/slice.h"
#include "rocksdb/table.h"
#include "table/block_based/data_block_hash_index.h"

@@ -10,6 +10,7 @@
#pragma once
#include <cassert>
#include "port/likely.h"
#include "rocksdb/cache.h"
#include "rocksdb/cleanable.h"
@@ -40,16 +41,15 @@ namespace ROCKSDB_NAMESPACE {
template <class T>
class CachableEntry {
public:
public:
CachableEntry() = default;
CachableEntry(T* value, Cache* cache, Cache::Handle* cache_handle,
bool own_value)
: value_(value)
, cache_(cache)
, cache_handle_(cache_handle)
, own_value_(own_value)
{
: value_(value),
cache_(cache),
cache_handle_(cache_handle),
own_value_(own_value) {
assert(value_ != nullptr ||
(cache_ == nullptr && cache_handle_ == nullptr && !own_value_));
assert(!!cache_ == !!cache_handle_);
@@ -94,9 +94,7 @@ public:
return *this;
}
~CachableEntry() {
ReleaseResource();
}
~CachableEntry() { ReleaseResource(); }
bool IsEmpty() const {
return value_ == nullptr && cache_ == nullptr && cache_handle_ == nullptr &&
@@ -193,7 +191,7 @@ public:
return true;
}
private:
private:
void ReleaseResource() noexcept {
if (LIKELY(cache_handle_ != nullptr)) {
assert(cache_ != nullptr);
@@ -224,7 +222,7 @@ private:
delete static_cast<T*>(arg1);
}
private:
private:
T* value_ = nullptr;
Cache* cache_ = nullptr;
Cache::Handle* cache_handle_ = nullptr;
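
Note: the cachable_entry.h hunks above are formatting-only, but the class they touch is a compact RAII idiom: a CachableEntry either pins a cache handle or owns a heap value, never both, and releases whichever it holds when destroyed or moved over. Below is a minimal standalone sketch of that ownership rule; MiniCache and PinnedOrOwned are made-up stand-ins, not RocksDB's API.

  #include <cassert>
  #include <utility>

  // Hypothetical stand-in for a block cache; Release() unpins a handle.
  struct MiniCache {
    struct Handle {};
    void Release(Handle* h) { delete h; }
  };

  template <class T>
  class PinnedOrOwned {
   public:
    PinnedOrOwned() = default;
    PinnedOrOwned(T* value, MiniCache* cache, MiniCache::Handle* handle,
                  bool own_value)
        : value_(value), cache_(cache), handle_(handle), own_value_(own_value) {
      assert(!!cache_ == !!handle_);     // cache and handle come as a pair
      assert(!(own_value_ && handle_));  // owned and pinned are exclusive
    }
    PinnedOrOwned(const PinnedOrOwned&) = delete;
    PinnedOrOwned& operator=(const PinnedOrOwned&) = delete;
    PinnedOrOwned(PinnedOrOwned&& rhs) noexcept { *this = std::move(rhs); }
    PinnedOrOwned& operator=(PinnedOrOwned&& rhs) noexcept {
      if (this != &rhs) {
        Release();  // drop whatever we held, then steal rhs's resources
        value_ = rhs.value_;
        cache_ = rhs.cache_;
        handle_ = rhs.handle_;
        own_value_ = rhs.own_value_;
        rhs.value_ = nullptr;
        rhs.cache_ = nullptr;
        rhs.handle_ = nullptr;
        rhs.own_value_ = false;
      }
      return *this;
    }
    ~PinnedOrOwned() { Release(); }
    T* get() const { return value_; }

   private:
    void Release() {
      if (handle_ != nullptr) {
        cache_->Release(handle_);  // unpin the cached value
      } else if (own_value_) {
        delete value_;             // or free the owned value
      }
      value_ = nullptr;
      cache_ = nullptr;
      handle_ = nullptr;
      own_value_ = false;
    }

    T* value_ = nullptr;
    MiniCache* cache_ = nullptr;
    MiniCache::Handle* handle_ = nullptr;
    bool own_value_ = false;
  };

The design point mirrored from the real class is that moves transfer the release obligation exactly once, so a cached block is never unpinned twice.
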

@@ -2,11 +2,12 @@
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "table/block_based/data_block_hash_index.h"
#include <string>
#include <vector>
#include "rocksdb/slice.h"
#include "table/block_based/data_block_hash_index.h"
#include "util/coding.h"
#include "util/hash.h"

@@ -5,6 +5,7 @@
//
#include "table/block_based/filter_block_reader_common.h"
#include "monitoring/perf_context_imp.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/parsed_full_filter_block.h"

@@ -1424,8 +1424,7 @@ FilterBitsBuilder* BloomLikeFilterPolicy::GetFastLocalBloomBuilderWithContext(
}
return new FastLocalBloomBitsBuilder(
millibits_per_key_, offm ? &aggregate_rounding_balance_ : nullptr,
cache_res_mgr,
context.table_options.detect_filter_construct_corruption);
cache_res_mgr, context.table_options.detect_filter_construct_corruption);
}
FilterBitsBuilder* BloomLikeFilterPolicy::GetLegacyBloomBuilderWithContext(
@@ -1788,7 +1787,7 @@ FilterBuildingContext::FilterBuildingContext(
const BlockBasedTableOptions& _table_options)
: table_options(_table_options) {}
FilterPolicy::~FilterPolicy() { }
FilterPolicy::~FilterPolicy() {}
std::shared_ptr<const FilterPolicy> BloomLikeFilterPolicy::Create(
const std::string& name, double bits_per_key) {

@@ -16,7 +16,6 @@
#include "table/block_based/flush_block_policy.h"
#include "table/format.h"
namespace ROCKSDB_NAMESPACE {
// Flush block by size
@@ -27,8 +26,7 @@ class FlushBlockBySizePolicy : public FlushBlockPolicy {
// @params block_size_deviation: This is used to close a block before it
// reaches the configured
FlushBlockBySizePolicy(const uint64_t block_size,
const uint64_t block_size_deviation,
const bool align,
const uint64_t block_size_deviation, const bool align,
const BlockBuilder& data_block_builder)
: block_size_(block_size),
block_size_deviation_limit_(

@@ -121,8 +121,7 @@ Slice FullFilterBlockBuilder::Finish(
FullFilterBlockReader::FullFilterBlockReader(
const BlockBasedTable* t,
CachableEntry<ParsedFullFilterBlock>&& filter_block)
: FilterBlockReaderCommon(t, std::move(filter_block)) {
}
: FilterBlockReaderCommon(t, std::move(filter_block)) {}
bool FullFilterBlockReader::KeyMayMatch(const Slice& key, const bool no_io,
const Slice* const /*const_ikey_ptr*/,

@@ -133,6 +133,7 @@ class FullFilterBlockReader
BlockCacheLookupContext* lookup_context,
Env::IOPriority rate_limiter_priority) override;
size_t ApproximateMemoryUsage() const override;
private:
bool MayMatch(const Slice& entry, bool no_io, GetContext* get_context,
BlockCacheLookupContext* lookup_context,

@@ -80,7 +80,6 @@ class TestFilterBitsReader : public FilterBitsReader {
uint32_t len_;
};
class TestHashFilter : public FilterPolicy {
public:
const char* Name() const override { return "TestHashFilter"; }

@@ -10,8 +10,8 @@
#pragma once
#include <assert.h>
#include <cinttypes>
#include <cinttypes>
#include <list>
#include <string>
#include <unordered_map>

@@ -9,7 +9,6 @@
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/reader_common.h"
namespace ROCKSDB_NAMESPACE {

@@ -86,7 +86,8 @@ class PartitionedFilterBlockTest
int num_keys = sizeof(keys) / sizeof(*keys);
uint64_t max_key_size = 0;
for (int i = 1; i < num_keys; i++) {
max_key_size = std::max(max_key_size, static_cast<uint64_t>(keys[i].size()));
max_key_size =
std::max(max_key_size, static_cast<uint64_t>(keys[i].size()));
}
uint64_t max_index_size = num_keys * (max_key_size + 8 /*handle*/);
return max_index_size;
@@ -116,8 +117,8 @@ class PartitionedFilterBlockTest
PartitionedIndexBuilder* const p_index_builder,
const SliceTransform* prefix_extractor = nullptr) {
assert(table_options_.block_size_deviation <= 100);
auto partition_size = static_cast<uint32_t>(
((table_options_.metadata_block_size *
auto partition_size =
static_cast<uint32_t>(((table_options_.metadata_block_size *
(100 - table_options_.block_size_deviation)) +
99) /
100);
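
Note: the re-wrapped partition_size expression is a ceiling division, partition_size = ceil(metadata_block_size * (100 - block_size_deviation) / 100); adding 99 before the integer divide by 100 rounds up. A tiny standalone check with made-up sample values:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t metadata_block_size = 4096, block_size_deviation = 10;
    // (4096 * 90 + 99) / 100 == 3687 == ceil(3686.4)
    assert((metadata_block_size * (100 - block_size_deviation) + 99) / 100 ==
           3687);
    return 0;
  }
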

@@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/block_based_table_reader_impl.h"
#include "table/block_based/block_prefetcher.h"
#include "table/block_based/reader_common.h"

@@ -7,6 +7,7 @@
#pragma once
#include <cassert>
#include "table/block_based/cachable_entry.h"
#include "table/format.h"

@@ -147,12 +147,11 @@ inline void BlockFetcher::PrepareBufferForBlockFromFile() {
// file reader that does not implement mmap reads properly.
used_buf_ = &stack_buf_[0];
} else if (maybe_compressed_ && !do_uncompress_) {
compressed_buf_ = AllocateBlock(block_size_with_trailer_,
memory_allocator_compressed_);
compressed_buf_ =
AllocateBlock(block_size_with_trailer_, memory_allocator_compressed_);
used_buf_ = compressed_buf_.get();
} else {
heap_buf_ =
AllocateBlock(block_size_with_trailer_, memory_allocator_);
heap_buf_ = AllocateBlock(block_size_with_trailer_, memory_allocator_);
used_buf_ = heap_buf_.get();
}
}
@@ -187,8 +186,8 @@ inline void BlockFetcher::CopyBufferToHeapBuf() {
inline void BlockFetcher::CopyBufferToCompressedBuf() {
assert(used_buf_ != compressed_buf_.get());
compressed_buf_ = AllocateBlock(block_size_with_trailer_,
memory_allocator_compressed_);
compressed_buf_ =
AllocateBlock(block_size_with_trailer_, memory_allocator_compressed_);
memcpy(compressed_buf_.get(), used_buf_, block_size_with_trailer_);
#ifndef NDEBUG
num_compressed_buf_memcpy_++;
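
Note: the two allocation sites reflowed above come from BlockFetcher's three-way buffer choice: a fixed stack buffer for small blocks, a separate heap buffer when the block should stay compressed, and a plain heap buffer otherwise. A simplified standalone sketch of that decision follows; the names are illustrative, and the real method also consults the file reader's mmap capability.

  #include <cstddef>
  #include <memory>

  static constexpr size_t kStackBufSize = 256;  // analogous to stack_buf_

  // Pick a destination buffer for a block read, mirroring the spirit of
  // BlockFetcher::PrepareBufferForBlockFromFile.
  char* PrepareBuffer(size_t block_size_with_trailer, bool maybe_compressed,
                      bool do_uncompress, char* stack_buf,
                      std::unique_ptr<char[]>* heap_buf,
                      std::unique_ptr<char[]>* compressed_buf) {
    if (block_size_with_trailer <= kStackBufSize) {
      return stack_buf;  // small block: avoid any allocation
    } else if (maybe_compressed && !do_uncompress) {
      compressed_buf->reset(new char[block_size_with_trailer]);
      return compressed_buf->get();  // caller wants the block left compressed
    } else {
      heap_buf->reset(new char[block_size_with_trailer]);
      return heap_buf->get();  // general case
    }
  }
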

@@ -19,8 +19,8 @@ namespace ROCKSDB_NAMESPACE {
// Retrieves a single block of a given file. Utilizes the prefetch buffer and/or
// persistent cache provided (if any) to try to avoid reading from the file
// directly. Note that both the prefetch buffer and the persistent cache are
// optional; also, note that the persistent cache may be configured to store either
// compressed or uncompressed blocks.
// optional; also, note that the persistent cache may be configured to store
// either compressed or uncompressed blocks.
//
// If the retrieved block is compressed and the do_uncompress flag is set,
// BlockFetcher uncompresses the block (using the uncompression dictionary,

@@ -7,6 +7,7 @@
#include "table/cuckoo/cuckoo_table_builder.h"
#include <assert.h>
#include <algorithm>
#include <limits>
#include <string>
@@ -174,9 +175,12 @@ bool CuckooTableBuilder::IsDeletedKey(uint64_t idx) const {
Slice CuckooTableBuilder::GetKey(uint64_t idx) const {
assert(closed_);
if (IsDeletedKey(idx)) {
return Slice(&deleted_keys_[static_cast<size_t>((idx - num_values_) * key_size_)], static_cast<size_t>(key_size_));
return Slice(
&deleted_keys_[static_cast<size_t>((idx - num_values_) * key_size_)],
static_cast<size_t>(key_size_));
}
return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_))], static_cast<size_t>(key_size_));
return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_))],
static_cast<size_t>(key_size_));
}
Slice CuckooTableBuilder::GetUserKey(uint64_t idx) const {
@@ -190,11 +194,14 @@ Slice CuckooTableBuilder::GetValue(uint64_t idx) const {
static std::string empty_value(static_cast<unsigned int>(value_size_), 'a');
return Slice(empty_value);
}
return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_) + key_size_)], static_cast<size_t>(value_size_));
return Slice(
&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_) + key_size_)],
static_cast<size_t>(value_size_));
}
Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
buckets->resize(static_cast<size_t>(hash_table_size_ + cuckoo_block_size_ - 1));
buckets->resize(
static_cast<size_t>(hash_table_size_ + cuckoo_block_size_ - 1));
uint32_t make_space_for_key_call_id = 0;
for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) {
uint64_t bucket_id = 0;
@@ -203,28 +210,32 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
Slice user_key = GetUserKey(vector_idx);
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_ && !bucket_found;
++hash_cnt) {
uint64_t hash_val = CuckooHash(user_key, hash_cnt, use_module_hash_,
hash_table_size_, identity_as_first_hash_, get_slice_hash_);
uint64_t hash_val =
CuckooHash(user_key, hash_cnt, use_module_hash_, hash_table_size_,
identity_as_first_hash_, get_slice_hash_);
// If there is a collision, check next cuckoo_block_size_ locations for
// empty locations. While checking, if we reach end of the hash table,
// stop searching and proceed for next hash function.
for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
++block_idx, ++hash_val) {
if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx == kMaxVectorIdx) {
if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx ==
kMaxVectorIdx) {
bucket_id = hash_val;
bucket_found = true;
break;
} else {
if (ucomp_->Compare(user_key,
GetUserKey((*buckets)[static_cast<size_t>(hash_val)].vector_idx)) == 0) {
if (ucomp_->Compare(
user_key, GetUserKey((*buckets)[static_cast<size_t>(hash_val)]
.vector_idx)) == 0) {
return Status::NotSupported("Same key is being inserted again.");
}
hash_vals.push_back(hash_val);
}
}
}
while (!bucket_found && !MakeSpaceForKey(hash_vals,
++make_space_for_key_call_id, buckets, &bucket_id)) {
while (!bucket_found &&
!MakeSpaceForKey(hash_vals, ++make_space_for_key_call_id, buckets,
&bucket_id)) {
// Rehash by increasing the number of hash functions.
if (num_hash_func_ >= max_num_hash_func_) {
return Status::NotSupported("Too many collisions. Unable to hash.");
@@ -232,11 +243,13 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
// We don't really need to rehash the entire table because old hashes are
// still valid and we only increased the number of hash functions.
uint64_t hash_val = CuckooHash(user_key, num_hash_func_, use_module_hash_,
hash_table_size_, identity_as_first_hash_, get_slice_hash_);
hash_table_size_, identity_as_first_hash_,
get_slice_hash_);
++num_hash_func_;
for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
++block_idx, ++hash_val) {
if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx == kMaxVectorIdx) {
if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx ==
kMaxVectorIdx) {
bucket_found = true;
bucket_id = hash_val;
break;
@@ -300,9 +313,8 @@ Status CuckooTableBuilder::Finish() {
properties_.num_entries = num_entries_;
properties_.num_deletions = num_entries_ - num_values_;
properties_.fixed_key_len = key_size_;
properties_.user_collected_properties[
CuckooTablePropertyNames::kValueLength].assign(
reinterpret_cast<const char*>(&value_size_), sizeof(value_size_));
properties_.user_collected_properties[CuckooTablePropertyNames::kValueLength]
.assign(reinterpret_cast<const char*>(&value_size_), sizeof(value_size_));
uint64_t bucket_size = key_size_ + value_size_;
unused_bucket.resize(static_cast<size_t>(bucket_size), 'a');
@@ -332,36 +344,34 @@ Status CuckooTableBuilder::Finish() {
uint64_t offset = buckets.size() * bucket_size;
properties_.data_size = offset;
unused_bucket.resize(static_cast<size_t>(properties_.fixed_key_len));
properties_.user_collected_properties[
CuckooTablePropertyNames::kEmptyKey] = unused_bucket;
properties_.user_collected_properties[
CuckooTablePropertyNames::kNumHashFunc].assign(
reinterpret_cast<char*>(&num_hash_func_), sizeof(num_hash_func_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kHashTableSize].assign(
reinterpret_cast<const char*>(&hash_table_size_),
properties_.user_collected_properties[CuckooTablePropertyNames::kEmptyKey] =
unused_bucket;
properties_.user_collected_properties[CuckooTablePropertyNames::kNumHashFunc]
.assign(reinterpret_cast<char*>(&num_hash_func_), sizeof(num_hash_func_));
properties_
.user_collected_properties[CuckooTablePropertyNames::kHashTableSize]
.assign(reinterpret_cast<const char*>(&hash_table_size_),
sizeof(hash_table_size_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kIsLastLevel].assign(
reinterpret_cast<const char*>(&is_last_level_file_),
properties_.user_collected_properties[CuckooTablePropertyNames::kIsLastLevel]
.assign(reinterpret_cast<const char*>(&is_last_level_file_),
sizeof(is_last_level_file_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kCuckooBlockSize].assign(
reinterpret_cast<const char*>(&cuckoo_block_size_),
properties_
.user_collected_properties[CuckooTablePropertyNames::kCuckooBlockSize]
.assign(reinterpret_cast<const char*>(&cuckoo_block_size_),
sizeof(cuckoo_block_size_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kIdentityAsFirstHash].assign(
reinterpret_cast<const char*>(&identity_as_first_hash_),
properties_
.user_collected_properties[CuckooTablePropertyNames::kIdentityAsFirstHash]
.assign(reinterpret_cast<const char*>(&identity_as_first_hash_),
sizeof(identity_as_first_hash_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kUseModuleHash].assign(
reinterpret_cast<const char*>(&use_module_hash_),
properties_
.user_collected_properties[CuckooTablePropertyNames::kUseModuleHash]
.assign(reinterpret_cast<const char*>(&use_module_hash_),
sizeof(use_module_hash_));
uint32_t user_key_len = static_cast<uint32_t>(smallest_user_key_.size());
properties_.user_collected_properties[
CuckooTablePropertyNames::kUserKeyLength].assign(
reinterpret_cast<const char*>(&user_key_len),
properties_
.user_collected_properties[CuckooTablePropertyNames::kUserKeyLength]
.assign(reinterpret_cast<const char*>(&user_key_len),
sizeof(user_key_len));
// Write meta blocks.
@@ -406,9 +416,7 @@ void CuckooTableBuilder::Abandon() {
closed_ = true;
}
uint64_t CuckooTableBuilder::NumEntries() const {
return num_entries_;
}
uint64_t CuckooTableBuilder::NumEntries() const { return num_entries_; }
uint64_t CuckooTableBuilder::FileSize() const {
if (closed_) {
@@ -418,8 +426,8 @@ uint64_t CuckooTableBuilder::FileSize() const {
}
if (use_module_hash_) {
return static_cast<uint64_t>((key_size_ + value_size_) *
num_entries_ / max_hash_table_ratio_);
return static_cast<uint64_t>((key_size_ + value_size_) * num_entries_ /
max_hash_table_ratio_);
} else {
// Account for buckets being a power of two.
// As elements are added, file size remains constant for a while and
@@ -468,7 +476,8 @@ bool CuckooTableBuilder::MakeSpaceForKey(
// no. of times this will be called is <= max_num_hash_func_ + num_entries_.
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
uint64_t bid = hash_vals[hash_cnt];
(*buckets)[static_cast<size_t>(bid)].make_space_for_key_call_id = make_space_for_key_call_id;
(*buckets)[static_cast<size_t>(bid)].make_space_for_key_call_id =
make_space_for_key_call_id;
tree.push_back(CuckooNode(bid, 0, 0));
}
bool null_found = false;
@@ -479,24 +488,25 @@ bool CuckooTableBuilder::MakeSpaceForKey(
if (curr_depth >= max_search_depth_) {
break;
}
CuckooBucket& curr_bucket = (*buckets)[static_cast<size_t>(curr_node.bucket_id)];
for (uint32_t hash_cnt = 0;
hash_cnt < num_hash_func_ && !null_found; ++hash_cnt) {
uint64_t child_bucket_id = CuckooHash(GetUserKey(curr_bucket.vector_idx),
hash_cnt, use_module_hash_, hash_table_size_, identity_as_first_hash_,
get_slice_hash_);
CuckooBucket& curr_bucket =
(*buckets)[static_cast<size_t>(curr_node.bucket_id)];
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_ && !null_found;
++hash_cnt) {
uint64_t child_bucket_id = CuckooHash(
GetUserKey(curr_bucket.vector_idx), hash_cnt, use_module_hash_,
hash_table_size_, identity_as_first_hash_, get_slice_hash_);
// Iterate inside Cuckoo Block.
for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
++block_idx, ++child_bucket_id) {
if ((*buckets)[static_cast<size_t>(child_bucket_id)].make_space_for_key_call_id ==
make_space_for_key_call_id) {
if ((*buckets)[static_cast<size_t>(child_bucket_id)]
.make_space_for_key_call_id == make_space_for_key_call_id) {
continue;
}
(*buckets)[static_cast<size_t>(child_bucket_id)].make_space_for_key_call_id =
make_space_for_key_call_id;
tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1,
curr_pos));
if ((*buckets)[static_cast<size_t>(child_bucket_id)].vector_idx == kMaxVectorIdx) {
(*buckets)[static_cast<size_t>(child_bucket_id)]
.make_space_for_key_call_id = make_space_for_key_call_id;
tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1, curr_pos));
if ((*buckets)[static_cast<size_t>(child_bucket_id)].vector_idx ==
kMaxVectorIdx) {
null_found = true;
break;
}
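
Note: for readers skimming the heavily re-wrapped MakeHashTable/MakeSpaceForKey code above, the underlying scheme is: try each hash function in turn, scan one "cuckoo block" of consecutive buckets per hash, and only then fall back to BFS displacement (MakeSpaceForKey) or to adding another hash function. A toy standalone version of the probing step follows; the hash and all names are illustrative, and displacement is elided.

  #include <cstdint>
  #include <functional>
  #include <string>
  #include <vector>

  constexpr uint32_t kEmpty = UINT32_MAX;  // like kMaxVectorIdx

  // buckets is sized table_size + cuckoo_block_size - 1, matching the
  // resize() call in MakeHashTable above.
  bool TryInsert(std::vector<uint32_t>& buckets, const std::string& key,
                 uint32_t value_idx, uint32_t num_hash_func,
                 uint32_t cuckoo_block_size) {
    const uint64_t table_size = buckets.size() - (cuckoo_block_size - 1);
    for (uint32_t h = 0; h < num_hash_func; ++h) {
      // Illustrative hash; RocksDB uses CuckooHash().
      uint64_t slot =
          (std::hash<std::string>{}(key) + h * 0x9e3779b9u) % table_size;
      // Scan the cuckoo block: cuckoo_block_size consecutive buckets.
      for (uint32_t b = 0; b < cuckoo_block_size; ++b, ++slot) {
        if (buckets[static_cast<size_t>(slot)] == kEmpty) {
          buckets[static_cast<size_t>(slot)] = value_idx;
          return true;
        }
      }
    }
    return false;  // caller would displace entries or add a hash function
  }
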

@@ -6,10 +6,12 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <stdint.h>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "db/version_edit.h"
#include "port/port.h"
#include "rocksdb/status.h"
@@ -20,7 +22,7 @@
namespace ROCKSDB_NAMESPACE {
class CuckooTableBuilder: public TableBuilder {
class CuckooTableBuilder : public TableBuilder {
public:
CuckooTableBuilder(
WritableFileWriter* file, double max_hash_table_ratio,
@@ -78,8 +80,7 @@ class CuckooTableBuilder: public TableBuilder {
private:
struct CuckooBucket {
CuckooBucket()
: vector_idx(kMaxVectorIdx), make_space_for_key_call_id(0) {}
CuckooBucket() : vector_idx(kMaxVectorIdx), make_space_for_key_call_id(0) {}
uint32_t vector_idx;
// This number will not exceed kvs_.size() + max_num_hash_func_.
// We assume number of items is <= 2^32.

@@ -44,8 +44,10 @@ class CuckooBuilderTest : public testing::Test {
void CheckFileContents(const std::vector<std::string>& keys,
const std::vector<std::string>& values,
const std::vector<uint64_t>& expected_locations,
std::string expected_unused_bucket, uint64_t expected_table_size,
uint32_t expected_num_hash_func, bool expected_is_last_level,
std::string expected_unused_bucket,
uint64_t expected_table_size,
uint32_t expected_num_hash_func,
bool expected_is_last_level,
uint32_t expected_cuckoo_block_size = 1) {
uint64_t num_deletions = 0;
for (const auto& key : keys) {
@@ -72,39 +74,44 @@ class CuckooBuilderTest : public testing::Test {
ASSERT_OK(ReadTableProperties(file_reader.get(), read_file_size,
kCuckooTableMagicNumber, ioptions, &props));
// Check unused bucket.
std::string unused_key = props->user_collected_properties[
CuckooTablePropertyNames::kEmptyKey];
ASSERT_EQ(expected_unused_bucket.substr(0,
props->fixed_key_len), unused_key);
uint64_t value_len_found =
*reinterpret_cast<const uint64_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kValueLength].data());
std::string unused_key =
props->user_collected_properties[CuckooTablePropertyNames::kEmptyKey];
ASSERT_EQ(expected_unused_bucket.substr(0, props->fixed_key_len),
unused_key);
uint64_t value_len_found = *reinterpret_cast<const uint64_t*>(
props->user_collected_properties[CuckooTablePropertyNames::kValueLength]
.data());
ASSERT_EQ(values.empty() ? 0 : values[0].size(), value_len_found);
ASSERT_EQ(props->raw_value_size, values.size()*value_len_found);
const uint64_t table_size =
*reinterpret_cast<const uint64_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kHashTableSize].data());
ASSERT_EQ(props->raw_value_size, values.size() * value_len_found);
const uint64_t table_size = *reinterpret_cast<const uint64_t*>(
props
->user_collected_properties
[CuckooTablePropertyNames::kHashTableSize]
.data());
ASSERT_EQ(expected_table_size, table_size);
const uint32_t num_hash_func_found =
*reinterpret_cast<const uint32_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kNumHashFunc].data());
const uint32_t num_hash_func_found = *reinterpret_cast<const uint32_t*>(
props->user_collected_properties[CuckooTablePropertyNames::kNumHashFunc]
.data());
ASSERT_EQ(expected_num_hash_func, num_hash_func_found);
const uint32_t cuckoo_block_size =
*reinterpret_cast<const uint32_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kCuckooBlockSize].data());
const uint32_t cuckoo_block_size = *reinterpret_cast<const uint32_t*>(
props
->user_collected_properties
[CuckooTablePropertyNames::kCuckooBlockSize]
.data());
ASSERT_EQ(expected_cuckoo_block_size, cuckoo_block_size);
const bool is_last_level_found =
*reinterpret_cast<const bool*>(props->user_collected_properties[
CuckooTablePropertyNames::kIsLastLevel].data());
const bool is_last_level_found = *reinterpret_cast<const bool*>(
props->user_collected_properties[CuckooTablePropertyNames::kIsLastLevel]
.data());
ASSERT_EQ(expected_is_last_level, is_last_level_found);
ASSERT_EQ(props->num_entries, keys.size());
ASSERT_EQ(props->num_deletions, num_deletions);
ASSERT_EQ(props->fixed_key_len, keys.empty() ? 0 : keys[0].size());
ASSERT_EQ(props->data_size, expected_unused_bucket.size() *
ASSERT_EQ(props->data_size,
expected_unused_bucket.size() *
(expected_table_size + expected_cuckoo_block_size - 1));
ASSERT_EQ(props->raw_key_size, keys.size()*props->fixed_key_len);
ASSERT_EQ(props->raw_key_size, keys.size() * props->fixed_key_len);
ASSERT_EQ(props->column_family_id, 0);
ASSERT_EQ(props->column_family_name, kDefaultColumnFamilyName);
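
Note: the property reads above, and the matching writes in cuckoo_table_builder.cc, share one pattern: a fixed-width integer or bool is stored byte-for-byte in a string property and later reinterpreted from .data(). A minimal standalone round-trip of that encoding follows; the property key name is made up, and memcpy is used on the decode side as the alias-safe spelling.

  #include <cassert>
  #include <cstdint>
  #include <cstring>
  #include <map>
  #include <string>

  int main() {
    std::map<std::string, std::string> props;  // like user_collected_properties
    uint32_t num_hash_func = 4;
    // Encode: copy the raw bytes of the integer into the property string.
    props["illustrative.num.hash.func"].assign(
        reinterpret_cast<const char*>(&num_hash_func), sizeof(num_hash_func));
    // Decode: copy the stored bytes back out.
    uint32_t decoded;
    std::memcpy(&decoded, props["illustrative.num.hash.func"].data(),
                sizeof(decoded));
    assert(decoded == num_hash_func);
    return 0;
  }
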
@@ -156,7 +163,6 @@ class CuckooBuilderTest : public testing::Test {
return NextPowOf2(static_cast<uint64_t>(num / kHashTableRatio));
}
Env* env_;
FileOptions file_options_;
std::string fname;
@@ -276,8 +282,8 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
std::string expected_unused_bucket = GetInternalKey("key00", true);
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 4, false);
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
expected_table_size, 4, false);
}
TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
@@ -324,8 +330,8 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
std::string expected_unused_bucket = GetInternalKey("key00", true);
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 3, false, cuckoo_block_size);
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
expected_table_size, 3, false, cuckoo_block_size);
}
TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
@@ -333,17 +339,14 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
// Finally insert an element with hash value somewhere in the middle
// so that it displaces all the elements after that.
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
"key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 2}},
{user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
{user_keys[3], {3, 4}}, {user_keys[4], {0, 2}},
};
hash_map = std::move(hm);
@@ -376,23 +379,20 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
std::string expected_unused_bucket = GetInternalKey("key00", true);
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 2, false);
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
expected_table_size, 2, false);
}
TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
"key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {3, 4}},
{user_keys[3], {4, 5}},
{user_keys[4], {0, 3}},
{user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {3, 4}},
{user_keys[3], {4, 5}}, {user_keys[4], {0, 3}},
};
hash_map = std::move(hm);
@@ -425,8 +425,8 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
std::string expected_unused_bucket = GetInternalKey("key00", true);
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 2, false, 2);
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
expected_table_size, 2, false, 2);
}
TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
@@ -518,17 +518,14 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
"key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 2}},
{user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
{user_keys[3], {3, 4}}, {user_keys[4], {0, 2}},
};
hash_map = std::move(hm);
@@ -567,16 +564,13 @@ TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
// Finally try inserting an element with hash value somewhere in the middle
// and it should fail because the no. of elements to displace is too high.
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
"key05"};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 1}},
{user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
{user_keys[3], {3, 4}}, {user_keys[4], {0, 1}},
};
hash_map = std::move(hm);

@@ -7,9 +7,10 @@
#ifndef ROCKSDB_LITE
#include <string>
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "util/murmurhash.h"
#include "rocksdb/options.h"
namespace ROCKSDB_NAMESPACE {

@@ -30,7 +30,7 @@ namespace ROCKSDB_NAMESPACE {
namespace {
const uint64_t CACHE_LINE_MASK = ~((uint64_t)CACHE_LINE_SIZE - 1);
const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max();
}
} // namespace
extern const uint64_t kCuckooTableMagicNumber;
@@ -87,26 +87,26 @@ CuckooTableReader::CuckooTableReader(
status_ = Status::Corruption("User key length not found");
return;
}
user_key_length_ = *reinterpret_cast<const uint32_t*>(
user_key_len->second.data());
user_key_length_ =
*reinterpret_cast<const uint32_t*>(user_key_len->second.data());
auto value_length = user_props.find(CuckooTablePropertyNames::kValueLength);
if (value_length == user_props.end()) {
status_ = Status::Corruption("Value length not found");
return;
}
value_length_ = *reinterpret_cast<const uint32_t*>(
value_length->second.data());
value_length_ =
*reinterpret_cast<const uint32_t*>(value_length->second.data());
bucket_length_ = key_length_ + value_length_;
auto hash_table_size = user_props.find(
CuckooTablePropertyNames::kHashTableSize);
auto hash_table_size =
user_props.find(CuckooTablePropertyNames::kHashTableSize);
if (hash_table_size == user_props.end()) {
status_ = Status::Corruption("Hash table size not found");
return;
}
table_size_ = *reinterpret_cast<const uint64_t*>(
hash_table_size->second.data());
table_size_ =
*reinterpret_cast<const uint64_t*>(hash_table_size->second.data());
auto is_last_level = user_props.find(CuckooTablePropertyNames::kIsLastLevel);
if (is_last_level == user_props.end()) {
@@ -115,31 +115,31 @@ CuckooTableReader::CuckooTableReader(
}
is_last_level_ = *reinterpret_cast<const bool*>(is_last_level->second.data());
auto identity_as_first_hash = user_props.find(
CuckooTablePropertyNames::kIdentityAsFirstHash);
auto identity_as_first_hash =
user_props.find(CuckooTablePropertyNames::kIdentityAsFirstHash);
if (identity_as_first_hash == user_props.end()) {
status_ = Status::Corruption("identity as first hash not found");
return;
}
identity_as_first_hash_ = *reinterpret_cast<const bool*>(
identity_as_first_hash->second.data());
identity_as_first_hash_ =
*reinterpret_cast<const bool*>(identity_as_first_hash->second.data());
auto use_module_hash = user_props.find(
CuckooTablePropertyNames::kUseModuleHash);
auto use_module_hash =
user_props.find(CuckooTablePropertyNames::kUseModuleHash);
if (use_module_hash == user_props.end()) {
status_ = Status::Corruption("hash type is not found");
return;
}
use_module_hash_ = *reinterpret_cast<const bool*>(
use_module_hash->second.data());
auto cuckoo_block_size = user_props.find(
CuckooTablePropertyNames::kCuckooBlockSize);
use_module_hash_ =
*reinterpret_cast<const bool*>(use_module_hash->second.data());
auto cuckoo_block_size =
user_props.find(CuckooTablePropertyNames::kCuckooBlockSize);
if (cuckoo_block_size == user_props.end()) {
status_ = Status::Corruption("Cuckoo block size not found");
return;
}
cuckoo_block_size_ = *reinterpret_cast<const uint32_t*>(
cuckoo_block_size->second.data());
cuckoo_block_size_ =
*reinterpret_cast<const uint32_t*>(cuckoo_block_size->second.data());
cuckoo_block_bytes_minus_one_ = cuckoo_block_size_ * bucket_length_ - 1;
// TODO: rate limit reads of whole cuckoo tables.
status_ =
@@ -154,9 +154,10 @@ Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/,
assert(key.size() == key_length_ + (is_last_level_ ? 8 : 0));
Slice user_key = ExtractUserKey(key);
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
uint64_t offset = bucket_length_ * CuckooHash(
user_key, hash_cnt, use_module_hash_, table_size_,
identity_as_first_hash_, get_slice_hash_);
uint64_t offset =
bucket_length_ * CuckooHash(user_key, hash_cnt, use_module_hash_,
table_size_, identity_as_first_hash_,
get_slice_hash_);
const char* bucket = &file_data_.data()[offset];
for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
++block_idx, bucket += bucket_length_) {
@@ -195,7 +196,8 @@ Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/,
void CuckooTableReader::Prepare(const Slice& key) {
// Prefetch the first Cuckoo Block.
Slice user_key = ExtractUserKey(key);
uint64_t addr = reinterpret_cast<uint64_t>(file_data_.data()) +
uint64_t addr =
reinterpret_cast<uint64_t>(file_data_.data()) +
bucket_length_ * CuckooHash(user_key, 0, use_module_hash_, table_size_,
identity_as_first_hash_, nullptr);
uint64_t end_addr = addr + cuckoo_block_bytes_minus_one_;
@@ -234,15 +236,16 @@ class CuckooTableIterator : public InternalIterator {
user_key_len_(user_key_len),
target_(target) {}
bool operator()(const uint32_t first, const uint32_t second) const {
const char* first_bucket =
(first == kInvalidIndex) ? target_.data() :
&file_data_.data()[first * bucket_len_];
const char* first_bucket = (first == kInvalidIndex)
? target_.data()
: &file_data_.data()[first * bucket_len_];
const char* second_bucket =
(second == kInvalidIndex) ? target_.data() :
&file_data_.data()[second * bucket_len_];
(second == kInvalidIndex) ? target_.data()
: &file_data_.data()[second * bucket_len_];
return ucomp_->Compare(Slice(first_bucket, user_key_len_),
Slice(second_bucket, user_key_len_)) < 0;
}
private:
const Slice file_data_;
const Comparator* ucomp_;
@@ -278,7 +281,8 @@ void CuckooTableIterator::InitIfNeeded() {
if (initialized_) {
return;
}
sorted_bucket_ids_.reserve(static_cast<size_t>(reader_->GetTableProperties()->num_entries));
sorted_bucket_ids_.reserve(
static_cast<size_t>(reader_->GetTableProperties()->num_entries));
uint64_t num_buckets = reader_->table_size_ + reader_->cuckoo_block_size_ - 1;
assert(num_buckets < kInvalidIndex);
const char* bucket = reader_->file_data_.data();
@@ -311,13 +315,11 @@ void CuckooTableIterator::SeekToLast() {
void CuckooTableIterator::Seek(const Slice& target) {
InitIfNeeded();
const BucketComparator seek_comparator(
reader_->file_data_, reader_->ucomp_,
reader_->bucket_length_, reader_->user_key_length_,
ExtractUserKey(target));
auto seek_it = std::lower_bound(sorted_bucket_ids_.begin(),
sorted_bucket_ids_.end(),
kInvalidIndex,
seek_comparator);
reader_->file_data_, reader_->ucomp_, reader_->bucket_length_,
reader_->user_key_length_, ExtractUserKey(target));
auto seek_it =
std::lower_bound(sorted_bucket_ids_.begin(), sorted_bucket_ids_.end(),
kInvalidIndex, seek_comparator);
curr_key_idx_ =
static_cast<uint32_t>(std::distance(sorted_bucket_ids_.begin(), seek_it));
PrepareKVAtCurrIdx();
@@ -339,12 +341,12 @@ void CuckooTableIterator::PrepareKVAtCurrIdx() {
return;
}
uint32_t id = sorted_bucket_ids_[curr_key_idx_];
const char* offset = reader_->file_data_.data() +
id * reader_->bucket_length_;
const char* offset =
reader_->file_data_.data() + id * reader_->bucket_length_;
if (reader_->is_last_level_) {
// Always return internal key.
curr_key_.SetInternalKey(Slice(offset, reader_->user_key_length_),
0, kTypeValue);
curr_key_.SetInternalKey(Slice(offset, reader_->user_key_length_), 0,
kTypeValue);
} else {
curr_key_.SetInternalKey(Slice(offset, reader_->key_length_));
}
@@ -388,8 +390,7 @@ InternalIterator* CuckooTableReader::NewIterator(
const ReadOptions& /*read_options*/,
const SliceTransform* /* prefix_extractor */, Arena* arena,
bool /*skip_filters*/, TableReaderCaller /*caller*/,
size_t /*compaction_readahead_size*/,
bool /* allow_unprepared_value */) {
size_t /*compaction_readahead_size*/, bool /* allow_unprepared_value */) {
if (!status().ok()) {
return NewErrorInternalIterator<Slice>(
Status::Corruption("CuckooTableReader status is not okay."), arena);
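
Note: the reader-side counterpart of the builder's probing is CuckooTableReader::Get, reformatted earlier in this file: each hash function maps the user key to a bucket offset, and one cuckoo block of fixed-size buckets is scanned in the mmapped file data. A toy standalone sketch follows; the hash is illustrative, and handling of empty buckets and last-level key formats is simplified away.

  #include <cstdint>
  #include <cstring>
  #include <functional>
  #include <string>

  // Returns a pointer to the value bytes, or nullptr if every probe misses.
  // Assumes user_key.size() == key_length and buckets store key then value.
  const char* Lookup(const char* file_data, uint64_t table_size,
                     uint32_t bucket_length, uint32_t key_length,
                     uint32_t num_hash_func, uint32_t cuckoo_block_size,
                     const std::string& user_key) {
    for (uint32_t h = 0; h < num_hash_func; ++h) {
      uint64_t slot =
          (std::hash<std::string>{}(user_key) + h * 0x9e3779b9u) % table_size;
      const char* bucket = file_data + slot * bucket_length;
      for (uint32_t b = 0; b < cuckoo_block_size;
           ++b, bucket += bucket_length) {
        if (std::memcmp(bucket, user_key.data(), key_length) == 0) {
          return bucket + key_length;  // value bytes follow the key
        }
      }
    }
    return nullptr;
  }

This is also why Prepare() above can prefetch: the first probe's whole cuckoo block is a single contiguous byte range.
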

@@ -9,8 +9,8 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
#include <memory>
#include <string>
#include <utility>
#include <vector>
@@ -25,7 +25,7 @@ class Arena;
class TableReader;
struct ImmutableOptions;
class CuckooTableReader: public TableReader {
class CuckooTableReader : public TableReader {
public:
CuckooTableReader(const ImmutableOptions& ioptions,
std::unique_ptr<RandomAccessFileReader>&& file,

@@ -33,7 +33,8 @@ int main() {
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
DEFINE_string(file_dir, "", "Directory where the files will be created"
DEFINE_string(file_dir, "",
"Directory where the files will be created"
" for benchmark. Added for using tmpfs.");
DEFINE_bool(enable_perf, false, "Run Benchmark Tests too.");
DEFINE_bool(write, false,
@@ -45,7 +46,7 @@ namespace ROCKSDB_NAMESPACE {
namespace {
const uint32_t kNumHashFunc = 10;
// Methods, variables related to Hash functions.
std::unordered_map<std::string, std::vector<uint64_t>> hash_map;
std::unordered_map<std::string, std::vector<uint64_t> > hash_map;
void AddHashLookups(const std::string& s, uint64_t bucket_id,
uint32_t num_hash_fun) {
@@ -128,8 +129,8 @@ class CuckooReaderTest : public testing::Test {
}
void UpdateKeys(bool with_zero_seqno) {
for (uint32_t i = 0; i < num_items; i++) {
ParsedInternalKey ikey(user_keys[i],
with_zero_seqno ? 0 : i + 1000, kTypeValue);
ParsedInternalKey ikey(user_keys[i], with_zero_seqno ? 0 : i + 1000,
kTypeValue);
keys[i].clear();
AppendInternalKey(&keys[i], ikey);
}
@@ -189,11 +190,11 @@ class CuckooReaderTest : public testing::Test {
TableReaderCaller::kUncategorized);
ASSERT_OK(it->status());
ASSERT_TRUE(!it->Valid());
it->Seek(keys[num_items/2]);
it->Seek(keys[num_items / 2]);
ASSERT_TRUE(it->Valid());
ASSERT_OK(it->status());
ASSERT_TRUE(keys[num_items/2] == it->key());
ASSERT_TRUE(values[num_items/2] == it->value());
ASSERT_TRUE(keys[num_items / 2] == it->key());
ASSERT_TRUE(values[num_items / 2] == it->value());
ASSERT_OK(it->status());
it->~InternalIterator();
}
@@ -273,7 +274,7 @@ TEST_F(CuckooReaderTest, WhenKeyExistsWithUint64Comparator) {
}
TEST_F(CuckooReaderTest, CheckIterator) {
SetUp(2*kNumHashFunc);
SetUp(2 * kNumHashFunc);
fname = test::PerThreadDBPath("CuckooReader_CheckIterator");
for (uint64_t i = 0; i < num_items; i++) {
user_keys[i] = "key" + NumToStr(i);
@@ -281,7 +282,7 @@ TEST_F(CuckooReaderTest, CheckIterator) {
AppendInternalKey(&keys[i], ikey);
values[i] = "value" + NumToStr(i);
// Give disjoint hash values, in reverse order.
AddHashLookups(user_keys[i], num_items-i-1, kNumHashFunc);
AddHashLookups(user_keys[i], num_items - i - 1, kNumHashFunc);
}
CreateCuckooFileAndCheckReader();
CheckIterator();
@@ -292,7 +293,7 @@ TEST_F(CuckooReaderTest, CheckIterator) {
}
TEST_F(CuckooReaderTest, CheckIteratorUint64) {
SetUp(2*kNumHashFunc);
SetUp(2 * kNumHashFunc);
fname = test::PerThreadDBPath("CuckooReader_CheckIterator");
for (uint64_t i = 0; i < num_items; i++) {
user_keys[i].resize(8);
@@ -301,7 +302,7 @@ TEST_F(CuckooReaderTest, CheckIteratorUint64) {
AppendInternalKey(&keys[i], ikey);
values[i] = "value" + NumToStr(i);
// Give disjoint hash values, in reverse order.
AddHashLookups(user_keys[i], num_items-i-1, kNumHashFunc);
AddHashLookups(user_keys[i], num_items - i - 1, kNumHashFunc);
}
CreateCuckooFileAndCheckReader(test::Uint64Comparator());
CheckIterator(test::Uint64Comparator());
@@ -369,8 +370,8 @@ TEST_F(CuckooReaderTest, WhenKeyNotFound) {
reader.GetTableProperties()->user_collected_properties.at(
CuckooTablePropertyNames::kEmptyKey);
// Add hash values that map to empty buckets.
AddHashLookups(ExtractUserKey(unused_key).ToString(),
kNumHashFunc, kNumHashFunc);
AddHashLookups(ExtractUserKey(unused_key).ToString(), kNumHashFunc,
kNumHashFunc);
value.Reset();
GetContext get_context3(
ucmp, nullptr, nullptr, nullptr, GetContext::kNotFound, Slice(unused_key),
@@ -407,8 +408,8 @@ std::string GetFileName(uint64_t num) {
// Create last level file as we are interested in measuring performance of
// last level file only.
void WriteFile(const std::vector<std::string>& keys,
const uint64_t num, double hash_ratio) {
void WriteFile(const std::vector<std::string>& keys, const uint64_t num,
double hash_ratio) {
Options options;
options.allow_mmap_reads = true;
const auto& fs = options.env->GetFileSystem();
@@ -483,8 +484,11 @@ void ReadKeys(uint64_t num, uint32_t batch_size) {
user_props.at(CuckooTablePropertyNames::kNumHashFunc).data());
const uint64_t table_size = *reinterpret_cast<const uint64_t*>(
user_props.at(CuckooTablePropertyNames::kHashTableSize).data());
fprintf(stderr, "With %" PRIu64 " items, utilization is %.2f%%, number of"
" hash functions: %u.\n", num, num * 100.0 / (table_size), num_hash_fun);
fprintf(stderr,
"With %" PRIu64
" items, utilization is %.2f%%, number of"
" hash functions: %u.\n",
num, num * 100.0 / (table_size), num_hash_fun);
ReadOptions r_options;
std::vector<uint64_t> keys;
@@ -502,10 +506,10 @@ void ReadKeys(uint64_t num, uint32_t batch_size) {
uint64_t start_time = env->NowMicros();
if (batch_size > 0) {
for (uint64_t i = 0; i < num; i += batch_size) {
for (uint64_t j = i; j < i+batch_size && j < num; ++j) {
for (uint64_t j = i; j < i + batch_size && j < num; ++j) {
reader.Prepare(Slice(reinterpret_cast<char*>(&keys[j]), 16));
}
for (uint64_t j = i; j < i+batch_size && j < num; ++j) {
for (uint64_t j = i; j < i + batch_size && j < num; ++j) {
reader.Get(r_options, Slice(reinterpret_cast<char*>(&keys[j]), 16),
&get_context, nullptr);
}
@@ -531,10 +535,11 @@ TEST_F(CuckooReaderTest, TestReadPerformance) {
// These numbers are chosen to have a hash utilization % close to
// 0.9, 0.75, 0.6 and 0.5 respectively.
// They all create 128 M buckets.
std::vector<uint64_t> nums = {120*1024*1024, 100*1024*1024, 80*1024*1024,
70*1024*1024};
std::vector<uint64_t> nums = {120 * 1024 * 1024, 100 * 1024 * 1024,
80 * 1024 * 1024, 70 * 1024 * 1024};
#ifndef NDEBUG
fprintf(stdout,
fprintf(
stdout,
"WARNING: Not compiled with DNDEBUG. Performance tests may be slow.\n");
#endif
for (uint64_t num : nums) {

@@ -21,6 +21,7 @@ class MaxIteratorComparator {
bool operator()(IteratorWrapper* a, IteratorWrapper* b) const {
return comparator_->Compare(a->key(), b->key()) < 0;
}
private:
const InternalKeyComparator* comparator_;
};
@@ -35,6 +36,7 @@ class MinIteratorComparator {
bool operator()(IteratorWrapper* a, IteratorWrapper* b) const {
return comparator_->Compare(a->key(), b->key()) > 0;
}
private:
const InternalKeyComparator* comparator_;
};
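
Note: the two comparators above differ only in comparison direction because std::priority_queue pops the largest element under its comparator; a "greater" comparator therefore yields a min-heap for forward (ascending) merges across child iterators, and a "less" comparator yields a max-heap for reverse merges. A reduced standalone illustration, with Cursor as a made-up stand-in for IteratorWrapper:

  #include <queue>
  #include <string>
  #include <vector>

  struct Cursor {
    std::string key;  // current key of one child iterator
  };

  struct GreaterByKey {
    bool operator()(const Cursor* a, const Cursor* b) const {
      return a->key > b->key;  // "greater" => smallest key ends up on top
    }
  };

  // Forward merge: repeatedly pop the child with the smallest current key.
  using MinHeap =
      std::priority_queue<Cursor*, std::vector<Cursor*>, GreaterByKey>;
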

@@ -29,7 +29,7 @@ Status Iterator::GetProperty(std::string prop_name, std::string* prop) {
namespace {
class EmptyIterator : public Iterator {
public:
explicit EmptyIterator(const Status& s) : status_(s) { }
explicit EmptyIterator(const Status& s) : status_(s) {}
bool Valid() const override { return false; }
void Seek(const Slice& /*target*/) override {}
void SeekForPrev(const Slice& /*target*/) override {}

@@ -145,9 +145,7 @@ class IteratorWrapperBase {
return iter_->IsValuePinned();
}
bool IsValuePrepared() const {
return result_.value_prepared;
}
bool IsValuePrepared() const { return result_.value_prepared; }
Slice user_key() const {
assert(Valid());

@@ -35,8 +35,7 @@ const std::string kRangeDelBlockName = "rocksdb.range_del";
MetaIndexBuilder::MetaIndexBuilder()
: meta_index_block_(new BlockBuilder(1 /* restart interval */)) {}
void MetaIndexBuilder::Add(const std::string& key,
const BlockHandle& handle) {
void MetaIndexBuilder::Add(const std::string& key, const BlockHandle& handle) {
std::string handle_encoding;
handle.EncodeTo(&handle_encoding);
meta_block_handles_.insert({key, handle_encoding});
@@ -173,8 +172,8 @@ void LogPropertiesCollectionError(Logger* info_log, const std::string& method,
assert(method == "Add" || method == "Finish");
std::string msg =
"Encountered error when calling TablePropertiesCollector::" +
method + "() with collector name: " + name;
"Encountered error when calling TablePropertiesCollector::" + method +
"() with collector name: " + name;
ROCKS_LOG_ERROR(info_log, "%s", msg.c_str());
}
@@ -347,7 +346,8 @@ Status ReadTablePropertiesHelper(
// skip malformed value
auto error_msg =
"Detect malformed value in properties meta-block:"
"\tkey: " + key + "\tval: " + raw_val.ToString();
"\tkey: " +
key + "\tval: " + raw_val.ToString();
ROCKS_LOG_ERROR(ioptions.logger, "%s", error_msg.c_str());
continue;
}

@@ -123,8 +123,7 @@ class MultiGetContext {
assert(num_keys <= MAX_BATCH_SIZE);
if (num_keys > MAX_LOOKUP_KEYS_ON_STACK) {
lookup_key_heap_buf.reset(new char[sizeof(LookupKey) * num_keys]);
lookup_key_ptr_ = reinterpret_cast<LookupKey*>(
lookup_key_heap_buf.get());
lookup_key_ptr_ = reinterpret_cast<LookupKey*>(lookup_key_heap_buf.get());
}
for (size_t iter = 0; iter != num_keys_; ++iter) {
@@ -157,8 +156,9 @@ class MultiGetContext {
private:
static const int MAX_LOOKUP_KEYS_ON_STACK = 16;
alignas(alignof(LookupKey))
char lookup_key_stack_buf[sizeof(LookupKey) * MAX_LOOKUP_KEYS_ON_STACK];
alignas(
alignof(LookupKey)) char lookup_key_stack_buf[sizeof(LookupKey) *
MAX_LOOKUP_KEYS_ON_STACK];
std::array<KeyContext*, MAX_BATCH_SIZE> sorted_keys_;
size_t num_keys_;
Mask value_mask_;
@@ -250,8 +250,7 @@ class MultiGetContext {
size_t index_;
};
Range(const Range& mget_range,
const Iterator& first,
Range(const Range& mget_range, const Iterator& first,
const Iterator& last) {
ctx_ = mget_range.ctx_;
if (first == last) {
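
Note: the realigned alignas declaration above is a small-batch optimization: LookupKey objects are placement-constructed into a properly aligned stack array and only spill to a heap allocation when the batch exceeds MAX_LOOKUP_KEYS_ON_STACK. A reduced standalone sketch of the idiom follows; Key and KeyBatch are made-up stand-ins, not the MultiGetContext API.

  #include <cstddef>
  #include <memory>
  #include <new>
  #include <string>

  struct Key {
    std::string rep;
  };

  constexpr size_t kMaxOnStack = 16;

  class KeyBatch {
   public:
    explicit KeyBatch(size_t n) : n_(n) {
      if (n_ > kMaxOnStack) {  // spill to the heap for large batches
        heap_.reset(new char[sizeof(Key) * n_]);
        keys_ = reinterpret_cast<Key*>(heap_.get());
      }
      for (size_t i = 0; i < n_; ++i) {
        new (&keys_[i]) Key();  // placement-construct into raw storage
      }
    }
    ~KeyBatch() {
      for (size_t i = 0; i < n_; ++i) {
        keys_[i].~Key();  // raw storage means manual destruction
      }
    }
    Key& operator[](size_t i) { return keys_[i]; }

   private:
    size_t n_;
    alignas(alignof(Key)) char stack_[sizeof(Key) * kMaxOnStack];
    std::unique_ptr<char[]> heap_;
    Key* keys_ = reinterpret_cast<Key*>(stack_);
  };
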

@@ -4,6 +4,7 @@
// (found in the LICENSE.Apache file in the root directory).
#include "table/persistent_cache_helper.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/format.h"

@@ -7,9 +7,9 @@
#include <algorithm>
#include <string>
#include "util/dynamic_bloom.h"
#include "memory/allocator.h"
#include "util/dynamic_bloom.h"
namespace ROCKSDB_NAMESPACE {

@@ -82,8 +82,9 @@ PlainTableBuilder::PlainTableBuilder(
index_builder_.reset(new PlainTableIndexBuilder(
&arena_, ioptions, moptions.prefix_extractor.get(), index_sparseness,
hash_table_ratio, huge_page_tlb_size_));
properties_.user_collected_properties
[PlainTablePropertyNames::kBloomVersion] = "1"; // For future use
properties_
.user_collected_properties[PlainTablePropertyNames::kBloomVersion] =
"1"; // For future use
}
properties_.fixed_key_len = user_key_len;
@@ -112,8 +113,8 @@ PlainTableBuilder::PlainTableBuilder(
std::string val;
PutFixed32(&val, static_cast<uint32_t>(encoder_.GetEncodingType()));
properties_.user_collected_properties
[PlainTablePropertyNames::kEncodingType] = val;
properties_
.user_collected_properties[PlainTablePropertyNames::kEncodingType] = val;
assert(int_tbl_prop_collector_factories);
for (auto& factory : *int_tbl_prop_collector_factories) {
@@ -303,17 +304,13 @@ Status PlainTableBuilder::Finish() {
return status_;
}
void PlainTableBuilder::Abandon() {
closed_ = true;
}
void PlainTableBuilder::Abandon() { closed_ = true; }
uint64_t PlainTableBuilder::NumEntries() const {
return properties_.num_entries;
}
uint64_t PlainTableBuilder::FileSize() const {
return offset_;
}
uint64_t PlainTableBuilder::FileSize() const { return offset_; }
std::string PlainTableBuilder::GetFileChecksum() const {
if (file_ != nullptr) {

@@ -7,8 +7,10 @@
#ifndef ROCKSDB_LITE
#include <stdint.h>
#include <string>
#include <vector>
#include "db/version_edit.h"
#include "rocksdb/options.h"
#include "rocksdb/status.h"
@@ -29,7 +31,7 @@ class TableBuilder;
// The builder class of PlainTable. For description of PlainTable format
// See comments of class PlainTableFactory, where instances of
// PlainTableReader are created.
class PlainTableBuilder: public TableBuilder {
class PlainTableBuilder : public TableBuilder {
public:
// Create a builder that will store the contents of the table it is
// building in *file. Does not close the file. It is up to the

@@ -6,9 +6,10 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <stdint.h>
#include <memory>
#include <string>
#include <stdint.h>
#include "rocksdb/table.h"
@@ -177,6 +178,5 @@ class PlainTableFactory : public TableFactory {
PlainTableOptions table_options_;
};
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE

@@ -19,7 +19,7 @@ inline uint32_t GetBucketIdFromHash(uint32_t hash, uint32_t num_buckets) {
assert(num_buckets > 0);
return hash % num_buckets;
}
}
} // namespace
Status PlainTableIndex::InitFromRawData(Slice data) {
if (!GetVarint32(&data, &index_size_)) {
@@ -180,7 +180,8 @@ Slice PlainTableIndexBuilder::FillIndexes(
break;
default:
// point to second level indexes.
PutUnaligned(index + i, sub_index_offset | PlainTableIndex::kSubIndexMask);
PutUnaligned(index + i,
sub_index_offset | PlainTableIndex::kSubIndexMask);
char* prev_ptr = &sub_index[sub_index_offset];
char* cur_ptr = EncodeVarint32(prev_ptr, num_keys_for_bucket);
sub_index_offset += static_cast<uint32_t>(cur_ptr - prev_ptr);

@@ -188,8 +188,8 @@ class PlainTableIndexBuilder {
num_records_in_current_group_;
}
IndexRecord* At(size_t index) {
return &(groups_[index / kNumRecordsPerGroup]
[index % kNumRecordsPerGroup]);
return &(
groups_[index / kNumRecordsPerGroup][index % kNumRecordsPerGroup]);
}
private:

@@ -8,6 +8,7 @@
#include <algorithm>
#include <string>
#include "db/dbformat.h"
#include "file/writable_file_writer.h"
#include "table/plain/plain_table_factory.h"

@@ -11,14 +11,15 @@
#include <vector>
#include "db/dbformat.h"
#include "memory/arena.h"
#include "monitoring/histogram.h"
#include "monitoring/perf_context_imp.h"
#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "table/block_based/block.h"
#include "table/block_based/filter_block.h"
#include "table/format.h"
@@ -29,10 +30,6 @@
#include "table/plain/plain_table_factory.h"
#include "table/plain/plain_table_key_coding.h"
#include "table/two_level_iterator.h"
#include "memory/arena.h"
#include "monitoring/histogram.h"
#include "monitoring/perf_context_imp.h"
#include "util/coding.h"
#include "util/dynamic_bloom.h"
#include "util/hash.h"
@@ -194,14 +191,12 @@ Status PlainTableReader::Open(
return s;
}
void PlainTableReader::SetupForCompaction() {
}
void PlainTableReader::SetupForCompaction() {}
InternalIterator* PlainTableReader::NewIterator(
const ReadOptions& options, const SliceTransform* /* prefix_extractor */,
Arena* arena, bool /*skip_filters*/, TableReaderCaller /*caller*/,
size_t /*compaction_readahead_size*/,
bool /* allow_unprepared_value */) {
size_t /*compaction_readahead_size*/, bool /* allow_unprepared_value */) {
// Not necessarily used here, but make sure this has been initialized
assert(table_properties_);
@@ -640,8 +635,7 @@ PlainTableIterator::PlainTableIterator(PlainTableReader* table,
next_offset_ = offset_ = table_->file_info_.data_end_offset;
}
PlainTableIterator::~PlainTableIterator() {
}
PlainTableIterator::~PlainTableIterator() {}
bool PlainTableIterator::Valid() const {
return offset_ < table_->file_info_.data_end_offset &&
@@ -671,8 +665,7 @@ void PlainTableIterator::Seek(const Slice& target) {
// it. This is needed for compaction: it creates iterator with
// total_order_seek = true but usually never does Seek() on it,
// only SeekToFirst().
status_ =
Status::InvalidArgument(
status_ = Status::InvalidArgument(
"total_order_seek not implemented for PlainTable.");
offset_ = next_offset_ = table_->file_info_.data_end_offset;
return;
@@ -754,9 +747,7 @@ void PlainTableIterator::Next() {
}
}
void PlainTableIterator::Prev() {
assert(false);
}
void PlainTableIterator::Prev() { assert(false); }
Slice PlainTableIterator::key() const {
assert(Valid());
@ -768,9 +759,7 @@ Slice PlainTableIterator::value() const {
return value_;
}
Status PlainTableIterator::status() const {
return status_;
}
Status PlainTableIterator::status() const { return status_; }
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE

@ -6,11 +6,12 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <unordered_map>
#include <stdint.h>
#include <memory>
#include <vector>
#include <string>
#include <stdint.h>
#include <unordered_map>
#include <vector>
#include "file/random_access_file_reader.h"
#include "memory/arena.h"
@ -58,14 +59,14 @@ struct PlainTableReaderFileInfo {
// The reader class of PlainTable. For description of PlainTable format
// See comments of class PlainTableFactory, where instances of
// PlainTableReader are created.
class PlainTableReader: public TableReader {
class PlainTableReader : public TableReader {
public:
// Based on following output file format shown in plain_table_factory.h
// When opening the output file, PlainTableReader creates a hash table
// from key prefixes to offset of the output file. PlainTable will decide
// whether it points to the data offset of the first key with the key prefix
// or the offset of it. If there are too many keys share this prefix, it will
// create a binary search-able index from the suffix to offset on disk.
// Based on the following output file format shown in plain_table_factory.h
// When opening the output file, PlainTableReader creates a hash table
// from key prefixes to offsets in the output file. PlainTable will decide
// whether it points to the data offset of the first key with the key prefix
// or the offset of it. If too many keys share this prefix, it will
// create a binary-searchable index from the suffix to offset on disk.
static Status Open(const ImmutableOptions& ioptions,
const EnvOptions& env_options,
const InternalKeyComparator& internal_comparator,
@ -165,10 +166,11 @@ class PlainTableReader: public TableReader {
const ImmutableOptions& ioptions_;
std::unique_ptr<Cleanable> dummy_cleanable_;
uint64_t file_size_;
protected: // for testing
std::shared_ptr<const TableProperties> table_properties_;
private:
private:
bool IsFixedLength() const {
return user_key_len_ != kPlainTableVariableLength;
}

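As the class comment above explains, PlainTable's index hashes a key prefix to a bucket that holds either a direct data offset or, when many keys share the prefix, a pointer into a binary-searchable sub-index. The following is a rough, illustrative sketch of that two-level lookup; the bucket encoding, sentinel value, and all names here are simplifications, not the real on-disk format:

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

// Illustrative encoding: the high bit of a bucket entry marks "go to the
// sub-index"; the remaining bits carry an offset or a sub-index position.
const uint32_t kSubIndexMask = 0x80000000u;
const uint32_t kEmptyBucket = 0xFFFFFFFFu;

struct SubIndexEntry {
  std::string key_suffix;  // entries are kept sorted within a bucket
  uint32_t data_offset;
};

struct PrefixIndex {
  std::vector<uint32_t> buckets;
  std::vector<std::vector<SubIndexEntry>> sub_indexes;

  // On success, *data_offset is where a scan for the key should start.
  bool Lookup(uint32_t prefix_hash, const std::string& suffix,
              uint32_t* data_offset) const {
    if (buckets.empty()) {
      return false;
    }
    uint32_t entry = buckets[prefix_hash % buckets.size()];
    if (entry == kEmptyBucket) {
      return false;
    }
    if ((entry & kSubIndexMask) == 0) {
      *data_offset = entry;  // few keys: bucket points straight at the data
      return true;
    }
    // Many keys share this prefix: binary-search the sub-index by suffix.
    const std::vector<SubIndexEntry>& sub =
        sub_indexes[entry & ~kSubIndexMask];
    auto it = std::lower_bound(
        sub.begin(), sub.end(), suffix,
        [](const SubIndexEntry& e, const std::string& s) {
          return e.key_suffix < s;
        });
    if (it == sub.end()) {
      return false;
    }
    *data_offset = it->data_offset;
    return true;
  }
};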
@ -7,8 +7,8 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/internal_iterator.h"
#include "port/port.h"
#include "table/internal_iterator.h"
namespace ROCKSDB_NAMESPACE {
class ScopedArenaIterator {
@ -20,7 +20,6 @@ class ScopedArenaIterator {
}
public:
explicit ScopedArenaIterator(InternalIterator* iter = nullptr)
: iter_(iter) {}
@ -50,9 +49,7 @@ class ScopedArenaIterator {
return res;
}
~ScopedArenaIterator() {
reset(nullptr);
}
~ScopedArenaIterator() { reset(nullptr); }
private:
InternalIterator* iter_;

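ScopedArenaIterator above exists because an iterator allocated on an arena must have its destructor run explicitly; deleting it would free memory the arena owns. A stripped-down sketch of that RAII shape (simplified types; the real class also offers release()):

#include <cstdio>
#include <new>

struct InternalIterator {
  virtual ~InternalIterator() { std::printf("iterator destroyed\n"); }
};

class ScopedArenaIterator {
 public:
  explicit ScopedArenaIterator(InternalIterator* iter = nullptr)
      : iter_(iter) {}

  void reset(InternalIterator* iter) {
    // The arena reclaims the memory in bulk later; only the destructor
    // runs here.
    if (iter_ != nullptr) {
      iter_->~InternalIterator();
    }
    iter_ = iter;
  }

  ~ScopedArenaIterator() { reset(nullptr); }

 private:
  InternalIterator* iter_;
};

int main() {
  // Stand-in for an arena slot: placement-new the iterator into raw storage.
  alignas(InternalIterator) char slot[sizeof(InternalIterator)];
  ScopedArenaIterator guard(new (slot) InternalIterator());
  return 0;  // guard runs ~InternalIterator() without freeing `slot`
}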
@ -223,9 +223,8 @@ Status SstFileDumper::CalculateCompressedTableSize(
table_options.block_size = block_size;
BlockBasedTableFactory block_based_tf(table_options);
std::unique_ptr<TableBuilder> table_builder;
table_builder.reset(block_based_tf.NewTableBuilder(
tb_options,
dest_writer.get()));
table_builder.reset(
block_based_tf.NewTableBuilder(tb_options, dest_writer.get()));
std::unique_ptr<InternalIterator> iter(table_reader_->NewIterator(
read_options_, moptions_.prefix_extractor.get(), /*arena=*/nullptr,
/*skip_filters=*/false, TableReaderCaller::kSSTDumpTool));

@ -210,8 +210,7 @@ struct SstFileWriter::Rep {
// Fadvise disabled
return s;
}
uint64_t bytes_since_last_fadvise =
builder->FileSize() - last_fadvise_size;
uint64_t bytes_since_last_fadvise = builder->FileSize() - last_fadvise_size;
if (bytes_since_last_fadvise > kFadviseTrigger || closing) {
TEST_SYNC_POINT_CALLBACK("SstFileWriter::Rep::InvalidatePageCache",
&(bytes_since_last_fadvise));
@ -422,9 +421,7 @@ Status SstFileWriter::Finish(ExternalSstFileInfo* file_info) {
return s;
}
uint64_t SstFileWriter::FileSize() {
return rep_->file_info.file_size;
}
uint64_t SstFileWriter::FileSize() { return rep_->file_info.file_size; }
#endif // !ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

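The SstFileWriter hunk above applies a periodic page-cache hint: it tracks bytes written since the last fadvise and invalidates the cache once a trigger is crossed, or unconditionally when the file is closing. A simplified sketch of that pattern, with a stand-in writer interface and an assumed 1 MB trigger:

#include <cstdint>

// Stand-in for a file abstraction that can drop written pages from the
// OS page cache (RocksDB ultimately issues POSIX_FADV_DONTNEED).
class WriterStub {
 public:
  uint64_t FileSize() const { return size_; }
  void Append(uint64_t n) { size_ += n; }
  void InvalidateCache(uint64_t /*offset*/, uint64_t /*length*/) {}

 private:
  uint64_t size_ = 0;
};

class PageCacheLimiter {
 public:
  explicit PageCacheLimiter(WriterStub* w) : writer_(w) {}

  // Call after each append; pass closing = true when finishing the file.
  void MaybeInvalidate(bool closing) {
    const uint64_t kFadviseTrigger = 1024 * 1024;  // assumed threshold
    uint64_t bytes_since_last_fadvise =
        writer_->FileSize() - last_fadvise_size_;
    if (bytes_since_last_fadvise > kFadviseTrigger || closing) {
      // Hint away only the fully written range; restart the counter.
      writer_->InvalidateCache(last_fadvise_size_, bytes_since_last_fadvise);
      last_fadvise_size_ = writer_->FileSize();
    }
  }

 private:
  WriterStub* writer_;
  uint64_t last_fadvise_size_ = 0;
};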
@ -21,31 +21,24 @@ const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
std::numeric_limits<int32_t>::max();
namespace {
void AppendProperty(
std::string& props,
const std::string& key,
const std::string& value,
const std::string& prop_delim,
void AppendProperty(std::string& props, const std::string& key,
const std::string& value, const std::string& prop_delim,
const std::string& kv_delim) {
props.append(key);
props.append(kv_delim);
props.append(value);
props.append(prop_delim);
}
}
template <class TValue>
void AppendProperty(
std::string& props,
const std::string& key,
const TValue& value,
const std::string& prop_delim,
template <class TValue>
void AppendProperty(std::string& props, const std::string& key,
const TValue& value, const std::string& prop_delim,
const std::string& kv_delim) {
AppendProperty(props, key, std::to_string(value), prop_delim, kv_delim);
}
}
} // namespace
std::string TableProperties::ToString(
const std::string& prop_delim,
std::string TableProperties::ToString(const std::string& prop_delim,
const std::string& kv_delim) const {
std::string result;
result.reserve(1024);
@ -81,8 +74,8 @@ std::string TableProperties::ToString(
if (index_partitions != 0) {
AppendProperty(result, "# index partitions", index_partitions, prop_delim,
kv_delim);
AppendProperty(result, "top-level index size", top_level_index_size, prop_delim,
kv_delim);
AppendProperty(result, "top-level index size", top_level_index_size,
prop_delim, kv_delim);
}
AppendProperty(result, "filter block size", filter_size, prop_delim,
kv_delim);
@ -256,10 +249,8 @@ const std::string TablePropertiesNames::kDbHostId =
"rocksdb.creating.host.identity";
const std::string TablePropertiesNames::kOriginalFileNumber =
"rocksdb.original.file.number";
const std::string TablePropertiesNames::kDataSize =
"rocksdb.data.size";
const std::string TablePropertiesNames::kIndexSize =
"rocksdb.index.size";
const std::string TablePropertiesNames::kDataSize = "rocksdb.data.size";
const std::string TablePropertiesNames::kIndexSize = "rocksdb.index.size";
const std::string TablePropertiesNames::kIndexPartitions =
"rocksdb.index.partitions";
const std::string TablePropertiesNames::kTopLevelIndexSize =
@ -268,16 +259,13 @@ const std::string TablePropertiesNames::kIndexKeyIsUserKey =
"rocksdb.index.key.is.user.key";
const std::string TablePropertiesNames::kIndexValueIsDeltaEncoded =
"rocksdb.index.value.is.delta.encoded";
const std::string TablePropertiesNames::kFilterSize =
"rocksdb.filter.size";
const std::string TablePropertiesNames::kRawKeySize =
"rocksdb.raw.key.size";
const std::string TablePropertiesNames::kFilterSize = "rocksdb.filter.size";
const std::string TablePropertiesNames::kRawKeySize = "rocksdb.raw.key.size";
const std::string TablePropertiesNames::kRawValueSize =
"rocksdb.raw.value.size";
const std::string TablePropertiesNames::kNumDataBlocks =
"rocksdb.num.data.blocks";
const std::string TablePropertiesNames::kNumEntries =
"rocksdb.num.entries";
const std::string TablePropertiesNames::kNumEntries = "rocksdb.num.entries";
const std::string TablePropertiesNames::kNumFilterEntries =
"rocksdb.num.filter_entries";
const std::string TablePropertiesNames::kDeletedKeys = "rocksdb.deleted.keys";
@ -285,8 +273,7 @@ const std::string TablePropertiesNames::kMergeOperands =
"rocksdb.merge.operands";
const std::string TablePropertiesNames::kNumRangeDeletions =
"rocksdb.num.range-deletions";
const std::string TablePropertiesNames::kFilterPolicy =
"rocksdb.filter.policy";
const std::string TablePropertiesNames::kFilterPolicy = "rocksdb.filter.policy";
const std::string TablePropertiesNames::kFormatVersion =
"rocksdb.format.version";
const std::string TablePropertiesNames::kFixedKeyLen =

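The AppendProperty helpers reformatted above are what TableProperties::ToString uses to join each name/value pair with a key-value delimiter and a property delimiter. A compact usage sketch; the delimiter values are assumed to match ToString's defaults ("; " and "="), and the property names are arbitrary:

#include <iostream>
#include <string>

namespace {
void AppendProperty(std::string& props, const std::string& key,
                    const std::string& value, const std::string& prop_delim,
                    const std::string& kv_delim) {
  props.append(key);
  props.append(kv_delim);
  props.append(value);
  props.append(prop_delim);
}

template <class TValue>
void AppendProperty(std::string& props, const std::string& key,
                    const TValue& value, const std::string& prop_delim,
                    const std::string& kv_delim) {
  AppendProperty(props, key, std::to_string(value), prop_delim, kv_delim);
}
}  // namespace

int main() {
  std::string result;
  result.reserve(1024);
  AppendProperty(result, "data block size", 4096, "; ", "=");
  AppendProperty(result, "filter policy name", std::string("bloomfilter"),
                 "; ", "=");
  // Prints: data block size=4096; filter policy name=bloomfilter;
  std::cout << result << "\n";
  return 0;
}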
@ -9,6 +9,7 @@
#pragma once
#include <memory>
#include "db/range_tombstone_fragmenter.h"
#if USE_COROUTINES
#include "folly/experimental/coro/Coroutine.h"
@ -161,8 +162,8 @@ class TableReader {
// persists the data on a non volatile storage medium like disk/SSD
virtual Status Prefetch(const Slice* begin = nullptr,
const Slice* end = nullptr) {
(void) begin;
(void) end;
(void)begin;
(void)end;
// Default implementation is NOOP.
// The child class should implement functionality when applicable
return Status::OK();

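The (void)begin; (void)end; casts above are the conventional way to keep named parameters in a default no-op virtual while silencing unused-parameter warnings; overrides then inherit meaningful names. A minimal illustration of the idiom (the class and method here are invented for the example):

#include <cstdio>

class ReaderBase {
 public:
  virtual ~ReaderBase() = default;

  // Default implementation is a no-op; the casts document that begin/end
  // are deliberately unused without deleting their names.
  virtual void Prefetch(const char* begin, const char* end) {
    (void)begin;
    (void)end;
  }
};

class EagerReader : public ReaderBase {
 public:
  void Prefetch(const char* begin, const char* end) override {
    std::printf("prefetching %p..%p\n", static_cast<const void*>(begin),
                static_cast<const void*>(end));
  }
};

int main() {
  const char buf[8] = {};
  EagerReader r;
  r.Prefetch(buf, buf + sizeof(buf));
  return 0;
}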
@ -224,9 +224,10 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
}
}
if (count != r2_len) {
fprintf(
stderr, "Iterator cannot iterate expected number of entries. "
"Expected %d but got %d\n", r2_len, count);
fprintf(stderr,
"Iterator cannot iterate expected number of entries. "
"Expected %d but got %d\n",
r2_len, count);
assert(false);
}
delete iter;
@ -261,16 +262,16 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
} // namespace
} // namespace ROCKSDB_NAMESPACE
DEFINE_bool(query_empty, false, "query non-existing keys instead of existing "
"ones.");
DEFINE_bool(query_empty, false,
"query non-existing keys instead of existing ones.");
DEFINE_int32(num_keys1, 4096, "number of distinct key prefixes");
DEFINE_int32(num_keys2, 512, "number of distinct keys for each prefix");
DEFINE_int32(iter, 3, "number of times to repeat each query");
DEFINE_int32(prefix_len, 16, "Prefix length used for iterators and indexes");
DEFINE_bool(iterator, false, "For test iterator");
DEFINE_bool(through_db, false, "If enable, a DB instance will be created and "
"the query will be against DB. Otherwise, will be directly against "
"a table reader.");
DEFINE_bool(through_db, false,
            "If enabled, a DB instance will be created and the query will "
            "run against the DB. Otherwise, it will run directly against a "
            "table reader.");
DEFINE_bool(mmap_read, true, "Whether to use mmap reads");
DEFINE_string(table_factory, "block_based",
"Table factory to use: `block_based` (default), `plain_table` or "

@ -186,7 +186,7 @@ class Constructor {
public:
explicit Constructor(const Comparator* cmp)
: data_(stl_wrappers::LessOfComparator(cmp)) {}
virtual ~Constructor() { }
virtual ~Constructor() {}
void Add(const std::string& key, const Slice& value) {
data_[key] = value.ToString();
@ -492,7 +492,7 @@ class TableConstructor : public Constructor {
};
uint64_t TableConstructor::cur_file_num_ = 1;
class MemTableConstructor: public Constructor {
class MemTableConstructor : public Constructor {
public:
explicit MemTableConstructor(const Comparator* cmp, WriteBufferManager* wb)
: Constructor(cmp),
@ -566,11 +566,10 @@ class InternalIteratorFromIterator : public InternalIterator {
std::unique_ptr<Iterator> it_;
};
class DBConstructor: public Constructor {
class DBConstructor : public Constructor {
public:
explicit DBConstructor(const Comparator* cmp)
: Constructor(cmp),
comparator_(cmp) {
: Constructor(cmp), comparator_(cmp) {
db_ = nullptr;
NewDB();
}
@ -654,15 +653,15 @@ std::ostream& operator<<(std::ostream& os, const TestArgs& args) {
static std::vector<TestArgs> GenerateArgList() {
std::vector<TestArgs> test_args;
std::vector<TestType> test_types = {
BLOCK_BASED_TABLE_TEST,
std::vector<TestType> test_types = {BLOCK_BASED_TABLE_TEST,
#ifndef ROCKSDB_LITE
PLAIN_TABLE_SEMI_FIXED_PREFIX,
PLAIN_TABLE_FULL_STR_PREFIX,
PLAIN_TABLE_TOTAL_ORDER,
#endif // !ROCKSDB_LITE
BLOCK_TEST,
MEMTABLE_TEST, DB_TEST};
MEMTABLE_TEST,
DB_TEST};
std::vector<bool> reverse_compare_types = {false, true};
std::vector<int> restart_intervals = {16, 1, 1024};
std::vector<uint32_t> compression_parallel_threads = {1, 4};
@ -747,9 +746,8 @@ class FixedOrLessPrefixTransform : public SliceTransform {
const size_t prefix_len_;
public:
explicit FixedOrLessPrefixTransform(size_t prefix_len) :
prefix_len_(prefix_len) {
}
explicit FixedOrLessPrefixTransform(size_t prefix_len)
: prefix_len_(prefix_len) {}
const char* Name() const override { return "rocksdb.FixedPrefix"; }
@ -964,8 +962,8 @@ class HarnessTest : public testing::Test {
case 2: {
std::string key = PickRandomKey(rnd, keys);
model_iter = data.lower_bound(key);
if (kVerbose) fprintf(stderr, "Seek '%s'\n",
EscapeString(key).c_str());
if (kVerbose)
fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
iter->Seek(Slice(key));
ASSERT_OK(iter->status());
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
@ -1047,10 +1045,10 @@ class HarnessTest : public testing::Test {
break;
case 1: {
// Attempt to return something smaller than an existing key
if (result.size() > 0 && result[result.size() - 1] > '\0'
&& (!only_support_prefix_seek_
|| options_.prefix_extractor->Transform(result).size()
< result.size())) {
if (result.size() > 0 && result[result.size() - 1] > '\0' &&
(!only_support_prefix_seek_ ||
options_.prefix_extractor->Transform(result).size() <
result.size())) {
result[result.size() - 1]--;
}
break;
@ -1103,8 +1101,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
(unsigned long long)(val),
(unsigned long long)(low),
(unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
@ -1183,8 +1180,8 @@ class BlockBasedTableTest
{
std::unique_ptr<TraceReader> trace_reader;
Status s =
NewFileTraceReader(env_, EnvOptions(), trace_file_path_, &trace_reader);
Status s = NewFileTraceReader(env_, EnvOptions(), trace_file_path_,
&trace_reader);
EXPECT_OK(s);
BlockCacheTraceReader reader(std::move(trace_reader));
BlockCacheTraceHeader header;
@ -1249,8 +1246,7 @@ class BBTTailPrefetchTest : public TableTest {};
class FileChecksumTestHelper {
public:
FileChecksumTestHelper(bool convert_to_internal_key = false)
: convert_to_internal_key_(convert_to_internal_key) {
}
: convert_to_internal_key_(convert_to_internal_key) {}
~FileChecksumTestHelper() {}
void CreateWritableFile() {
@ -1368,15 +1364,11 @@ INSTANTIATE_TEST_CASE_P(FormatVersions, BlockBasedTableTest,
// This test serves as the living tutorial for the prefix scan of user collected
// properties.
TEST_F(TablePropertyTest, PrefixScanTest) {
UserCollectedProperties props{{"num.111.1", "1"},
{"num.111.2", "2"},
{"num.111.3", "3"},
{"num.333.1", "1"},
{"num.333.2", "2"},
{"num.333.3", "3"},
{"num.555.1", "1"},
{"num.555.2", "2"},
{"num.555.3", "3"}, };
UserCollectedProperties props{
{"num.111.1", "1"}, {"num.111.2", "2"}, {"num.111.3", "3"},
{"num.333.1", "1"}, {"num.333.2", "2"}, {"num.333.3", "3"},
{"num.555.1", "1"}, {"num.555.2", "2"}, {"num.555.3", "3"},
};
// prefixes that exist
for (const std::string prefix : {"num.111", "num.333", "num.555"}) {
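The PrefixScanTest hunk above is, as its own comment says, a living tutorial for prefix scans over user-collected properties, which are an ordered string map (UserCollectedProperties is a std::map<std::string, std::string> in RocksDB). A standalone version of the same scan using std::map::lower_bound, with the property names copied from the test data:

#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> props{
      {"num.111.1", "1"}, {"num.111.2", "2"}, {"num.111.3", "3"},
      {"num.333.1", "1"}, {"num.333.2", "2"}, {"num.333.3", "3"},
      {"num.555.1", "1"}, {"num.555.2", "2"}, {"num.555.3", "3"},
  };
  const std::string prefix = "num.333";
  // Seek to the first key >= prefix, then walk while keys still carry it.
  for (auto it = props.lower_bound(prefix);
       it != props.end() && it->first.compare(0, prefix.size(), prefix) == 0;
       ++it) {
    std::cout << it->first << " = " << it->second << "\n";
  }
  return 0;
}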
@ -2031,7 +2023,6 @@ TEST_P(BlockBasedTableTest, PrefetchTest) {
// [ k05 ] k05
// [ k06 k07 ] k07
// Simple
PrefetchRange(&c, &opt, &table_options,
/*key_range=*/"k01", "k05",
@ -2452,7 +2443,12 @@ void TableTest::IndexTest(BlockBasedTableOptions table_options) {
}
// find the upper bound of prefixes
std::vector<std::string> upper_bound = {keys[1], keys[2], keys[7], keys[9], };
std::vector<std::string> upper_bound = {
keys[1],
keys[2],
keys[7],
keys[9],
};
// find existing keys
for (const auto& item : kvmap) {
@ -4045,8 +4041,7 @@ TEST_F(GeneralTableTest, ApproximateOffsetOfCompressed) {
if (!XPRESS_Supported()) {
fprintf(stderr, "skipping xpress and xpress compression tests\n");
}
else {
} else {
compression_state.push_back(kXpressCompression);
}

@ -8,6 +8,7 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/two_level_iterator.h"
#include "db/pinned_iterators_manager.h"
#include "memory/arena.h"
#include "rocksdb/options.h"

@ -8,8 +8,8 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "rocksdb/iterator.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "table/iterator_wrapper.h"
namespace ROCKSDB_NAMESPACE {
