De-template block based table iterator (#6531)
Summary: Right now the block based table iterator is used both for iterating over the data of a block based table and as the index iterator for the partitioned index. This was initially convenient for introducing a new iterator and block type for the new index format while reducing code change, but the two usages don't fit together very well. For example, Prev() is never called on the partitioned index iterator, and other complexity maintained in the block based iterator is not needed for the index iterator, yet maintainers always have to reason about it. Furthermore, the template usage does not follow the Google C++ Style Guide, which we follow, and it tangles a large chunk of code together. This commit separates the two iterators. Here is what is done:

1. Copy the block based iterator code into the partitioned index iterator, and de-template it.
2. Remove code not needed for the partitioned index. The upper bound checks and tricks are removed. We never tested the performance of those tricks with partitioned index enabled in the first place, and removing them is unlikely to cause a performance regression, since creating a new partitioned index block iterator is much rarer than creating data block iterators.
3. Separate the prefetch logic out into a helper class, and have both iterator classes call it.

This commit enables future follow-ups. One direction is to separate the iterator interfaces for data blocks and index blocks, as they are quite different.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/6531

Test Plan: Build using make and cmake, and build release.

Differential Revision: D20473108

fbshipit-source-id: e48011783b339a4257c204cc07507b171b834b0f
parent 402da454cb
commit d66908091d
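To make "de-template" concrete, here is a minimal sketch of the shape of the change; the names and bodies below are illustrative stand-ins, not the actual declarations (those follow in the diff):

// Before: one template served both data iteration and partitioned-index
// iteration, so every instantiation carried members and logic (Prev(),
// upper-bound tricks) that only one of the two uses needed.
template <class TBlockIter, typename TValue>
class BlockBasedTableIteratorTemplate : public InternalIteratorBase<TValue> {
  TBlockIter block_iter_;  // DataBlockIter or IndexBlockIter
  // ... shared seek / prefetch / upper-bound logic ...
};

// After: two concrete classes, each only as complex as its own use case,
// sharing the readahead logic through the new BlockPrefetcher helper.
class BlockBasedTableIterator : public InternalIteratorBase<Slice> {
  DataBlockIter block_iter_;
  BlockPrefetcher block_prefetcher_;
  // keeps Prev(), upper-bound checks, and first-key-from-index deferral
};
class ParititionedIndexIterator : public InternalIteratorBase<IndexValue> {
  IndexBlockIter block_iter_;
  BlockPrefetcher block_prefetcher_;
  // upper-bound tricks dropped for simplicity
};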
@@ -0,0 +1,377 @@ table/block_based/block_based_table_iterator.cc (new file)
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/block_based_table_iterator.h"

namespace ROCKSDB_NAMESPACE {
void BlockBasedTableIterator::Seek(const Slice& target) { SeekImpl(&target); }

void BlockBasedTableIterator::SeekToFirst() { SeekImpl(nullptr); }

void BlockBasedTableIterator::SeekImpl(const Slice* target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  if (target && !CheckPrefixMayMatch(*target, IterDirection::kForward)) {
    ResetDataIter();
    return;
  }

  bool need_seek_index = true;
  if (block_iter_points_to_real_block_ && block_iter_.Valid()) {
    // Reseek.
    prev_block_offset_ = index_iter_->value().handle.offset();

    if (target) {
      // We can avoid an index seek if:
      // 1. The new seek key is larger than the current key
      // 2. The new seek key is within the upper bound of the block
      // Since we don't necessarily know the internal key for either
      // the current key or the upper bound, we check user keys and
      // exclude the equality case. Considering internal keys can
      // improve for the boundary cases, but it would complicate the
      // code.
      if (user_comparator_.Compare(ExtractUserKey(*target),
                                   block_iter_.user_key()) > 0 &&
          user_comparator_.Compare(ExtractUserKey(*target),
                                   index_iter_->user_key()) < 0) {
        need_seek_index = false;
      }
    }
  }

  if (need_seek_index) {
    if (target) {
      index_iter_->Seek(*target);
    } else {
      index_iter_->SeekToFirst();
    }

    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  IndexValue v = index_iter_->value();
  const bool same_block = block_iter_points_to_real_block_ &&
                          v.handle.offset() == prev_block_offset_;

  // TODO(kolmike): Remove the != kBlockCacheTier condition.
  if (!v.first_internal_key.empty() && !same_block &&
      (!target || icomp_.Compare(*target, v.first_internal_key) <= 0) &&
      read_options_.read_tier != kBlockCacheTier) {
    // Index contains the first key of the block, and it's >= target.
    // We can defer reading the block.
    is_at_first_key_from_index_ = true;
    // ResetDataIter() will invalidate block_iter_. Thus, there is no need to
    // call CheckDataBlockWithinUpperBound() to check for iterate_upper_bound
    // as that will be done later when the data block is actually read.
    ResetDataIter();
  } else {
    // Need to use the data block.
    if (!same_block) {
      InitDataBlock();
    } else {
      // When the user does a reseek, the iterate_upper_bound might have
      // changed. CheckDataBlockWithinUpperBound() needs to be called
      // explicitly if the reseek ends up in the same data block.
      // If the reseek ends up in a different block, InitDataBlock() will do
      // the iterator upper bound check.
      CheckDataBlockWithinUpperBound();
    }

    if (target) {
      block_iter_.Seek(*target);
    } else {
      block_iter_.SeekToFirst();
    }
    FindKeyForward();
  }

  CheckOutOfBound();

  if (target) {
    assert(!Valid() || icomp_.Compare(*target, key()) <= 0);
  }
}

void BlockBasedTableIterator::SeekForPrev(const Slice& target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  // For now totally disable prefix seek in auto prefix mode because we don't
  // have the logic for it yet.
  if (!CheckPrefixMayMatch(target, IterDirection::kBackward)) {
    ResetDataIter();
    return;
  }

  SavePrevIndexValue();

  // Call Seek() rather than SeekForPrev() on the index block, because the
  // target data block will likely contain the position for `target`, the
  // same as for Seek(), rather than the block before it.
  // For example, if we have three data blocks, each containing two keys:
  //   [2, 4]  [6, 8]  [10, 12]
  // (the keys in the index block would be [4, 8, 12])
  // and the user calls SeekForPrev(7), we need to go to the second block,
  // just like if they called Seek(7).
  // The only case where the block is different is when they seek to a
  // position on the boundary. For example, if they SeekForPrev(5), we should
  // go to the first block, rather than the second. However, we don't have
  // the information to distinguish the two cases unless we read the second
  // block. In that case, we'll end up reading two blocks.
  index_iter_->Seek(target);

  if (!index_iter_->Valid()) {
    auto seek_status = index_iter_->status();
    // Check for IO error
    if (!seek_status.IsNotFound() && !seek_status.ok()) {
      ResetDataIter();
      return;
    }

    // With prefix index, Seek() returns NotFound if the prefix doesn't exist
    if (seek_status.IsNotFound()) {
      // Any key less than the target is fine for prefix seek
      ResetDataIter();
      return;
    } else {
      index_iter_->SeekToLast();
    }
    // Check for IO error
    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  InitDataBlock();

  block_iter_.SeekForPrev(target);

  FindKeyBackward();
  CheckDataBlockWithinUpperBound();
  assert(!block_iter_.Valid() ||
         icomp_.Compare(target, block_iter_.key()) >= 0);
}

void BlockBasedTableIterator::SeekToLast() {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  SavePrevIndexValue();
  index_iter_->SeekToLast();
  if (!index_iter_->Valid()) {
    ResetDataIter();
    return;
  }
  InitDataBlock();
  block_iter_.SeekToLast();
  FindKeyBackward();
  CheckDataBlockWithinUpperBound();
}

void BlockBasedTableIterator::Next() {
  if (is_at_first_key_from_index_ && !MaterializeCurrentBlock()) {
    return;
  }
  assert(block_iter_points_to_real_block_);
  block_iter_.Next();
  FindKeyForward();
  CheckOutOfBound();
}

bool BlockBasedTableIterator::NextAndGetResult(IterateResult* result) {
  Next();
  bool is_valid = Valid();
  if (is_valid) {
    result->key = key();
    result->may_be_out_of_upper_bound = MayBeOutOfUpperBound();
  }
  return is_valid;
}

void BlockBasedTableIterator::Prev() {
  if (is_at_first_key_from_index_) {
    is_at_first_key_from_index_ = false;

    index_iter_->Prev();
    if (!index_iter_->Valid()) {
      return;
    }

    InitDataBlock();
    block_iter_.SeekToLast();
  } else {
    assert(block_iter_points_to_real_block_);
    block_iter_.Prev();
  }

  FindKeyBackward();
}

void BlockBasedTableIterator::InitDataBlock() {
  BlockHandle data_block_handle = index_iter_->value().handle;
  if (!block_iter_points_to_real_block_ ||
      data_block_handle.offset() != prev_block_offset_ ||
      // if the previous attempt to read the block missed the cache, try again
      block_iter_.status().IsIncomplete()) {
    if (block_iter_points_to_real_block_) {
      ResetDataIter();
    }
    auto* rep = table_->get_rep();

    bool is_for_compaction =
        lookup_context_.caller == TableReaderCaller::kCompaction;
    // Prefetch additional data for range scans (iterators).
    // Implicit auto readahead:
    //   Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
    // Explicit user-requested readahead:
    //   Enabled from the very first IO when ReadOptions.readahead_size is set.
    block_prefetcher_.PrefetchIfNeeded(rep, data_block_handle,
                                       read_options_.readahead_size,
                                       is_for_compaction);

    Status s;
    table_->NewDataBlockIterator<DataBlockIter>(
        read_options_, data_block_handle, &block_iter_, BlockType::kData,
        /*get_context=*/nullptr, &lookup_context_, s,
        block_prefetcher_.prefetch_buffer(),
        /*for_compaction=*/is_for_compaction);
    block_iter_points_to_real_block_ = true;
    CheckDataBlockWithinUpperBound();
  }
}

bool BlockBasedTableIterator::MaterializeCurrentBlock() {
  assert(is_at_first_key_from_index_);
  assert(!block_iter_points_to_real_block_);
  assert(index_iter_->Valid());

  is_at_first_key_from_index_ = false;
  InitDataBlock();
  assert(block_iter_points_to_real_block_);
  block_iter_.SeekToFirst();

  if (!block_iter_.Valid() ||
      icomp_.Compare(block_iter_.key(),
                     index_iter_->value().first_internal_key) != 0) {
    // Uh oh.
    block_iter_.Invalidate(Status::Corruption(
        "first key in index doesn't match first key in block"));
    return false;
  }

  return true;
}

void BlockBasedTableIterator::FindKeyForward() {
  // This method's code is kept short to make it likely to be inlined.

  assert(!is_out_of_bound_);
  assert(block_iter_points_to_real_block_);

  if (!block_iter_.Valid()) {
    // This is the only call site of FindBlockForward(), but it's extracted
    // into a separate method to keep FindKeyForward() short and likely to be
    // inlined. When transitioning to a different block, we call
    // FindBlockForward(), which is much longer and is probably not inlined.
    FindBlockForward();
  } else {
    // This is the fast path that avoids a function call.
  }
}

void BlockBasedTableIterator::FindBlockForward() {
  // TODO: the while loop is inherited from the two-level iterator. We don't
  // know whether a block can be empty, so it cannot simply be replaced by an
  // "if".
  do {
    if (!block_iter_.status().ok()) {
      return;
    }
    // Whether the next data block is out of the upper bound, if there is one.
    const bool next_block_is_out_of_bound =
        read_options_.iterate_upper_bound != nullptr &&
        block_iter_points_to_real_block_ && !data_block_within_upper_bound_;
    assert(!next_block_is_out_of_bound ||
           user_comparator_.CompareWithoutTimestamp(
               *read_options_.iterate_upper_bound, /*a_has_ts=*/false,
               index_iter_->user_key(), /*b_has_ts=*/true) <= 0);
    ResetDataIter();
    index_iter_->Next();
    if (next_block_is_out_of_bound) {
      // The next block is out of bound. No need to read it.
      TEST_SYNC_POINT_CALLBACK("BlockBasedTableIterator:out_of_bound",
                               nullptr);
      // We need to make sure this is not the last data block before setting
      // is_out_of_bound_, since the index key for the last data block can be
      // larger than the smallest key of the next file on the same level.
      if (index_iter_->Valid()) {
        is_out_of_bound_ = true;
      }
      return;
    }

    if (!index_iter_->Valid()) {
      return;
    }

    IndexValue v = index_iter_->value();

    // TODO(kolmike): Remove the != kBlockCacheTier condition.
    if (!v.first_internal_key.empty() &&
        read_options_.read_tier != kBlockCacheTier) {
      // Index contains the first key of the block. Defer reading the block.
      is_at_first_key_from_index_ = true;
      return;
    }

    InitDataBlock();
    block_iter_.SeekToFirst();
  } while (!block_iter_.Valid());
}

void BlockBasedTableIterator::FindKeyBackward() {
  while (!block_iter_.Valid()) {
    if (!block_iter_.status().ok()) {
      return;
    }

    ResetDataIter();
    index_iter_->Prev();

    if (index_iter_->Valid()) {
      InitDataBlock();
      block_iter_.SeekToLast();
    } else {
      return;
    }
  }

  // We could have checked the lower bound here too, but we opt not to for
  // code simplicity.
}

void BlockBasedTableIterator::CheckOutOfBound() {
  if (read_options_.iterate_upper_bound != nullptr && Valid()) {
    is_out_of_bound_ =
        user_comparator_.CompareWithoutTimestamp(
            *read_options_.iterate_upper_bound, /*a_has_ts=*/false,
            user_key(), /*b_has_ts=*/true) <= 0;
  }
}

void BlockBasedTableIterator::CheckDataBlockWithinUpperBound() {
  if (read_options_.iterate_upper_bound != nullptr &&
      block_iter_points_to_real_block_) {
    data_block_within_upper_bound_ =
        (user_comparator_.CompareWithoutTimestamp(
             *read_options_.iterate_upper_bound, /*a_has_ts=*/false,
             index_iter_->user_key(),
             /*b_has_ts=*/true) > 0);
  }
}
}  // namespace ROCKSDB_NAMESPACE
@@ -0,0 +1,56 @@ table/block_based/block_prefetcher.cc (new file)
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/block_prefetcher.h"

namespace ROCKSDB_NAMESPACE {
void BlockPrefetcher::PrefetchIfNeeded(const BlockBasedTable::Rep* rep,
                                       const BlockHandle& handle,
                                       size_t readahead_size,
                                       bool is_for_compaction) {
  if (!is_for_compaction) {
    if (readahead_size == 0) {
      // Implicit auto readahead
      num_file_reads_++;
      if (num_file_reads_ >
          BlockBasedTable::kMinNumFileReadsToStartAutoReadahead) {
        if (!rep->file->use_direct_io() &&
            (handle.offset() + static_cast<size_t>(block_size(handle)) >
             readahead_limit_)) {
          // Buffered I/O
          // The return status of Prefetch calls is intentionally discarded,
          // as we can fall back to reading from disk if Prefetch fails.
          rep->file->Prefetch(handle.offset(), readahead_size_);
          readahead_limit_ =
              static_cast<size_t>(handle.offset() + readahead_size_);
          // Keep exponentially increasing the readahead size until
          // kMaxAutoReadaheadSize.
          readahead_size_ = std::min(BlockBasedTable::kMaxAutoReadaheadSize,
                                     readahead_size_ * 2);
        } else if (rep->file->use_direct_io() && !prefetch_buffer_) {
          // Direct I/O
          // Let FilePrefetchBuffer take care of the readahead.
          rep->CreateFilePrefetchBuffer(
              BlockBasedTable::kInitAutoReadaheadSize,
              BlockBasedTable::kMaxAutoReadaheadSize, &prefetch_buffer_);
        }
      }
    } else if (!prefetch_buffer_) {
      // Explicit user-requested readahead.
      // The actual condition is:
      //   if (readahead_size != 0 && !prefetch_buffer_)
      rep->CreateFilePrefetchBuffer(readahead_size, readahead_size,
                                    &prefetch_buffer_);
    }
  } else if (!prefetch_buffer_) {
    rep->CreateFilePrefetchBuffer(compaction_readahead_size_,
                                  compaction_readahead_size_,
                                  &prefetch_buffer_);
  }
}
}  // namespace ROCKSDB_NAMESPACE
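For intuition on the implicit auto readahead path above: once more than kMinNumFileReadsToStartAutoReadahead sequential reads have been seen, each buffered-I/O prefetch issues readahead_size_ bytes and then doubles it, capped at kMaxAutoReadaheadSize. A minimal standalone sketch of that growth, assuming 8 KiB and 256 KiB for kInitAutoReadaheadSize and kMaxAutoReadaheadSize (assumed values; the constants are defined in block_based_table_reader.h, outside this diff):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  std::size_t readahead_size = 8 * 1024;  // assumed kInitAutoReadaheadSize
  const std::size_t kMax = 256 * 1024;    // assumed kMaxAutoReadaheadSize
  // Mirrors: readahead_size_ = std::min(kMax, readahead_size_ * 2)
  for (int prefetch = 1; prefetch <= 7; ++prefetch) {
    std::printf("prefetch #%d issues %zu KiB\n", prefetch,
                readahead_size / 1024);
    readahead_size = std::min(kMax, readahead_size * 2);
  }
  return 0;
}

The sizes go 8, 16, 32, 64, 128, 256, 256 KiB: exponential growth until the cap, then flat.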
@@ -0,0 +1,32 @@ table/block_based/block_prefetcher.h (new file)
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"

namespace ROCKSDB_NAMESPACE {
class BlockPrefetcher {
 public:
  explicit BlockPrefetcher(size_t compaction_readahead_size)
      : compaction_readahead_size_(compaction_readahead_size) {}
  void PrefetchIfNeeded(const BlockBasedTable::Rep* rep,
                        const BlockHandle& handle, size_t readahead_size,
                        bool is_for_compaction);
  FilePrefetchBuffer* prefetch_buffer() { return prefetch_buffer_.get(); }

 private:
  // Readahead size used in compaction; its value is used only when
  // lookup_context_.caller == kCompaction.
  size_t compaction_readahead_size_;

  size_t readahead_size_ = BlockBasedTable::kInitAutoReadaheadSize;
  size_t readahead_limit_ = 0;
  int64_t num_file_reads_ = 0;
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer_;
};
}  // namespace ROCKSDB_NAMESPACE
@@ -0,0 +1,167 @@ table/block_based/partitioned_index_iterator.cc (new file)
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/partitioned_index_iterator.h"

namespace ROCKSDB_NAMESPACE {
void ParititionedIndexIterator::Seek(const Slice& target) {
  SeekImpl(&target);
}

void ParititionedIndexIterator::SeekToFirst() { SeekImpl(nullptr); }

void ParititionedIndexIterator::SeekImpl(const Slice* target) {
  if (target) {
    index_iter_->Seek(*target);
  } else {
    index_iter_->SeekToFirst();
  }

  if (!index_iter_->Valid()) {
    ResetPartitionedIndexIter();
    return;
  }

  IndexValue v = index_iter_->value();
  const bool same_block = block_iter_points_to_real_block_ &&
                          v.handle.offset() == prev_block_offset_;

  if (!same_block) {
    InitPartitionedIndexBlock();
  }

  if (target) {
    block_iter_.Seek(*target);
  } else {
    block_iter_.SeekToFirst();
  }
  FindKeyForward();

  // We could check the upper bound here, but that would be too complicated,
  // and checking the upper bound is less useful for index blocks than for
  // data blocks.

  if (target) {
    assert(!Valid() ||
           (table_->get_rep()->index_key_includes_seq
                ? (icomp_.Compare(*target, key()) <= 0)
                : (user_comparator_.Compare(ExtractUserKey(*target),
                                            key()) <= 0)));
  }
}

void ParititionedIndexIterator::SeekToLast() {
  SavePrevIndexValue();
  index_iter_->SeekToLast();
  if (!index_iter_->Valid()) {
    ResetPartitionedIndexIter();
    return;
  }
  InitPartitionedIndexBlock();
  block_iter_.SeekToLast();
  FindKeyBackward();
}

void ParititionedIndexIterator::Next() {
  assert(block_iter_points_to_real_block_);
  block_iter_.Next();
  FindKeyForward();
}

void ParititionedIndexIterator::Prev() {
  assert(block_iter_points_to_real_block_);
  block_iter_.Prev();

  FindKeyBackward();
}

void ParititionedIndexIterator::InitPartitionedIndexBlock() {
  BlockHandle partitioned_index_handle = index_iter_->value().handle;
  if (!block_iter_points_to_real_block_ ||
      partitioned_index_handle.offset() != prev_block_offset_ ||
      // if the previous attempt to read the block missed the cache, try again
      block_iter_.status().IsIncomplete()) {
    if (block_iter_points_to_real_block_) {
      ResetPartitionedIndexIter();
    }
    auto* rep = table_->get_rep();
    bool is_for_compaction =
        lookup_context_.caller == TableReaderCaller::kCompaction;
    // Prefetch additional data for range scans (iterators).
    // Implicit auto readahead:
    //   Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
    // Explicit user-requested readahead:
    //   Enabled from the very first IO when ReadOptions.readahead_size is set.
    block_prefetcher_.PrefetchIfNeeded(rep, partitioned_index_handle,
                                       read_options_.readahead_size,
                                       is_for_compaction);

    Status s;
    table_->NewDataBlockIterator<IndexBlockIter>(
        read_options_, partitioned_index_handle, &block_iter_,
        BlockType::kIndex,
        /*get_context=*/nullptr, &lookup_context_, s,
        block_prefetcher_.prefetch_buffer(),
        /*for_compaction=*/is_for_compaction);
    block_iter_points_to_real_block_ = true;
    // We could check the upper bound here, but it is complicated to reason
    // about upper bounds in an index iterator. On the other hand, in large
    // scans, index iterators are moved much less frequently than data block
    // iterators, so the upper bound check is skipped for simplicity.
  }
}

void ParititionedIndexIterator::FindKeyForward() {
  // This method's code is kept short to make it likely to be inlined.

  assert(block_iter_points_to_real_block_);

  if (!block_iter_.Valid()) {
    // This is the only call site of FindBlockForward(), but it's extracted
    // into a separate method to keep FindKeyForward() short and likely to be
    // inlined. When transitioning to a different block, we call
    // FindBlockForward(), which is much longer and is probably not inlined.
    FindBlockForward();
  } else {
    // This is the fast path that avoids a function call.
  }
}

void ParititionedIndexIterator::FindBlockForward() {
  // TODO: the while loop is inherited from the two-level iterator. We don't
  // know whether a block can be empty, so it cannot simply be replaced by an
  // "if".
  do {
    if (!block_iter_.status().ok()) {
      return;
    }
    ResetPartitionedIndexIter();
    index_iter_->Next();

    if (!index_iter_->Valid()) {
      return;
    }

    InitPartitionedIndexBlock();
    block_iter_.SeekToFirst();
  } while (!block_iter_.Valid());
}

void ParititionedIndexIterator::FindKeyBackward() {
  while (!block_iter_.Valid()) {
    if (!block_iter_.status().ok()) {
      return;
    }

    ResetPartitionedIndexIter();
    index_iter_->Prev();

    if (index_iter_->Valid()) {
      InitPartitionedIndexBlock();
      block_iter_.SeekToLast();
    } else {
      return;
    }
  }
}
}  // namespace ROCKSDB_NAMESPACE
@@ -0,0 +1,145 @@ table/block_based/partitioned_index_iterator.h (new file)
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"

#include "table/block_based/block_based_table_reader_impl.h"
#include "table/block_based/block_prefetcher.h"
#include "table/block_based/reader_common.h"

namespace ROCKSDB_NAMESPACE {
// Iterator that iterates over a partitioned index.
// Some of the upper and lower bound tricks played in block based table
// iterators could be played here, but it's too complicated to reason about
// index keys with upper or lower bounds, so we skip them for simplicity.
class ParititionedIndexIterator : public InternalIteratorBase<IndexValue> {
  // compaction_readahead_size: its value will only be used if
  // for_compaction = true
 public:
  ParititionedIndexIterator(
      const BlockBasedTable* table, const ReadOptions& read_options,
      const InternalKeyComparator& icomp,
      std::unique_ptr<InternalIteratorBase<IndexValue>>&& index_iter,
      TableReaderCaller caller, size_t compaction_readahead_size = 0)
      : table_(table),
        read_options_(read_options),
        icomp_(icomp),
        user_comparator_(icomp.user_comparator()),
        index_iter_(std::move(index_iter)),
        block_iter_points_to_real_block_(false),
        lookup_context_(caller),
        block_prefetcher_(compaction_readahead_size) {}

  ~ParititionedIndexIterator() {}

  void Seek(const Slice& target) override;
  void SeekForPrev(const Slice&) override {
    // Shouldn't be called.
    assert(false);
  }
  void SeekToFirst() override;
  void SeekToLast() override;
  void Next() final override;
  bool NextAndGetResult(IterateResult*) override {
    assert(false);
    return false;
  }
  void Prev() override;
  bool Valid() const override {
    return block_iter_points_to_real_block_ && block_iter_.Valid();
  }
  Slice key() const override {
    assert(Valid());
    return block_iter_.key();
  }
  Slice user_key() const override {
    assert(Valid());
    return block_iter_.user_key();
  }
  IndexValue value() const override {
    assert(Valid());
    return block_iter_.value();
  }
  Status status() const override {
    // The prefix index sets the status to NotFound when the prefix does not
    // exist.
    if (!index_iter_->status().ok() && !index_iter_->status().IsNotFound()) {
      return index_iter_->status();
    } else if (block_iter_points_to_real_block_) {
      return block_iter_.status();
    } else {
      return Status::OK();
    }
  }

  // Whether the iterator was invalidated for being out of bound.
  bool IsOutOfBound() override {
    // Shouldn't be called.
    assert(false);
    return false;
  }

  inline bool MayBeOutOfUpperBound() override {
    // Shouldn't be called.
    assert(false);
    return true;
  }
  void SetPinnedItersMgr(PinnedIteratorsManager*) override {
    // Shouldn't be called.
    assert(false);
  }
  bool IsKeyPinned() const override {
    // Shouldn't be called.
    assert(false);
    return false;
  }
  bool IsValuePinned() const override {
    // Shouldn't be called.
    assert(false);
    return false;
  }

  void ResetPartitionedIndexIter() {
    if (block_iter_points_to_real_block_) {
      block_iter_.Invalidate(Status::OK());
      block_iter_points_to_real_block_ = false;
    }
  }

  void SavePrevIndexValue() {
    if (block_iter_points_to_real_block_) {
      // Reseek. If we end up in the same block, we shouldn't re-fetch it.
      prev_block_offset_ = index_iter_->value().handle.offset();
    }
  }

 private:
  const BlockBasedTable* table_;
  const ReadOptions read_options_;
  const InternalKeyComparator& icomp_;
  UserComparatorWrapper user_comparator_;
  std::unique_ptr<InternalIteratorBase<IndexValue>> index_iter_;
  IndexBlockIter block_iter_;

  // True if block_iter_ is initialized and points to the same block
  // as the index iterator.
  bool block_iter_points_to_real_block_;
  uint64_t prev_block_offset_ = std::numeric_limits<uint64_t>::max();
  BlockCacheLookupContext lookup_context_;
  BlockPrefetcher block_prefetcher_;

  // If `target` is null, seek to first.
  void SeekImpl(const Slice* target);

  void InitPartitionedIndexBlock();
  void FindKeyForward();
  void FindBlockForward();
  void FindKeyBackward();
};
}  // namespace ROCKSDB_NAMESPACE
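As a usage sketch of the new class (a hypothetical call site; in the real code the iterator is constructed inside the table reader's partitioned index reader, which is not part of this diff):

// Assumed already available: table (a BlockBasedTable*), read_options,
// and icomp. NewTopLevelIndexIterator() is a hypothetical helper, not a
// real RocksDB API; it stands in for obtaining an iterator over the
// top-level index block.
std::unique_ptr<InternalIteratorBase<IndexValue>> top_level_iter =
    NewTopLevelIndexIterator();
ParititionedIndexIterator index_iter(table, read_options, icomp,
                                     std::move(top_level_iter),
                                     TableReaderCaller::kUserIterator);
for (index_iter.SeekToFirst(); index_iter.Valid(); index_iter.Next()) {
  // Each entry's value carries the handle of one data block in the table.
  BlockHandle data_block_handle = index_iter.value().handle;
  (void)data_block_handle;
}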