Separate InternalIterator from Iterator

Summary:
Separate out a new class, InternalIterator, from class Iterator. InternalIterator is used when the look-up is done internally, which means it operates on internal keys that carry a sequence ID and value type.

This change will enable potential future optimizations; for now, InternalIterator's functions are still the same as Iterator's.
At the same time, move the cleanup machinery into a separate class (Cleanable) and let both InternalIterator and Iterator inherit from it.
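
For orientation, here is a minimal sketch of the resulting hierarchy, assuming InternalIterator's interface mirrors Iterator's for now (as stated above). Cleanable, Iterator and InternalIterator are the names used in the diff; "Key" is a stand-in for rocksdb::Slice so the sketch stays self-contained, and the member lists are heavily simplified (the real declarations live in include/rocksdb/iterator.h and the new table/internal_iterator.h).

#include <string>

using Key = std::string;  // stand-in for rocksdb::Slice in this sketch

// Cleanup registry shared by both iterator flavors; see the
// include/rocksdb/iterator.h hunk below for the real declaration.
class Cleanable {
 public:
  typedef void (*CleanupFunction)(void* arg1, void* arg2);
  // Registers a function/arg1/arg2 triple to be invoked on destruction.
  // Body elided in this sketch.
  void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
};

// Public-facing iterator: positioned on user keys.
class Iterator : public Cleanable {
 public:
  virtual ~Iterator() {}
  virtual bool Valid() const = 0;
  virtual void SeekToFirst() = 0;
  virtual void Seek(const Key& target) = 0;
  virtual void Next() = 0;
  virtual Key key() const = 0;
  virtual Key value() const = 0;
};

// Internally used iterator: the same interface for now, but positioned on
// internal keys, i.e. user key + sequence number + value type.
class InternalIterator : public Cleanable {
 public:
  virtual ~InternalIterator() {}
  virtual bool Valid() const = 0;
  virtual void SeekToFirst() = 0;
  virtual void Seek(const Key& target) = 0;
  virtual void Next() = 0;
  virtual Key key() const = 0;  // internal key, not user key
  virtual Key value() const = 0;
};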

Test Plan: Run all existing tests.

Reviewers: igor, yhchiang, anthony, kradhakrishnan, IslamAbdelRahman, rven

Reviewed By: rven

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D48549
Branch: main
Author: sdong
Parent: 198ed58980
Commit: 35ad531be3
Changed files (lines changed per file):
  1. db/builder.cc (8)
  2. db/builder.h (6)
  3. db/compaction_iterator.cc (3)
  4. db/compaction_iterator.h (4)
  5. db/compaction_job.cc (4)
  6. db/compaction_job.h (2)
  7. db/compaction_job_stats_test.cc (2)
  8. db/db_impl.cc (23)
  9. db/db_impl.h (12)
  10. db/db_iter.cc (19)
  11. db/db_iter.h (19)
  12. db/db_iter_test.cc (7)
  13. db/db_test.cc (2)
  14. db/db_test_util.cc (83)
  15. db/db_test_util.h (52)
  16. db/flush_job.cc (2)
  17. db/flush_job.h (2)
  18. db/forward_iterator.cc (8)
  19. db/forward_iterator.h (18)
  20. db/memtable.cc (6)
  21. db/memtable.h (3)
  22. db/memtable_list.cc (6)
  23. db/memtable_list.h (3)
  24. db/merge_helper.cc (4)
  25. db/merge_helper.h (4)
  26. db/repair.cc (4)
  27. db/table_cache.cc (19)
  28. db/table_cache.h (13)
  29. db/version_set.cc (15)
  30. db/version_set.h (7)
  31. db/write_batch_test.cc (2)
  32. include/rocksdb/iterator.h (44)
  33. java/rocksjni/write_batch.cc (2)
  34. java/rocksjni/write_batch_test.cc (2)
  35. table/block.cc (8)
  36. table/block.h (8)
  37. table/block_based_table_reader.cc (76)
  38. table/block_based_table_reader.h (25)
  39. table/block_hash_index.cc (6)
  40. table/block_hash_index.h (7)
  41. table/block_hash_index_test.cc (3)
  42. table/block_test.cc (6)
  43. table/cuckoo_table_reader.cc (12)
  44. table/cuckoo_table_reader.h (4)
  45. table/cuckoo_table_reader_test.cc (4)
  46. table/internal_iterator.h (75)
  47. table/iterator.cc (54)
  48. table/iterator_wrapper.h (19)
  49. table/merger.cc (20)
  50. table/merger.h (14)
  51. table/merger_test.cc (6)
  52. table/meta_blocks.cc (11)
  53. table/meta_blocks.h (3)
  54. table/mock_table.cc (3)
  55. table/mock_table.h (5)
  56. table/plain_table_reader.cc (9)
  57. table/plain_table_reader.h (4)
  58. table/scoped_arena_iterator.h (15)
  59. table/table_properties.cc (3)
  60. table/table_properties_internal.h (4)
  61. table/table_reader.h (4)
  62. table/table_reader_bench.cc (10)
  63. table/table_test.cc (67)
  64. table/two_level_iterator.cc (19)
  65. table/two_level_iterator.h (9)
  66. util/ldb_cmd.cc (2)
  67. util/sst_dump_tool.cc (6)
  68. util/testutil.h (3)

@ -26,6 +26,7 @@
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "table/block_based_table_builder.h"
#include "table/internal_iterator.h"
#include "util/file_reader_writer.h"
#include "util/iostats_context_imp.h"
#include "util/stop_watch.h"
@ -52,8 +53,9 @@ TableBuilder* NewTableBuilder(
Status BuildTable(
const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions,
const EnvOptions& env_options, TableCache* table_cache, Iterator* iter,
FileMetaData* meta, const InternalKeyComparator& internal_comparator,
const EnvOptions& env_options, TableCache* table_cache,
InternalIterator* iter, FileMetaData* meta,
const InternalKeyComparator& internal_comparator,
const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
int_tbl_prop_collector_factories,
uint32_t column_family_id, std::vector<SequenceNumber> snapshots,
@ -141,7 +143,7 @@ Status BuildTable(
if (s.ok() && !empty) {
// Verify that the table is usable
std::unique_ptr<Iterator> it(table_cache->NewIterator(
std::unique_ptr<InternalIterator> it(table_cache->NewIterator(
ReadOptions(), env_options, internal_comparator, meta->fd, nullptr,
(internal_stats == nullptr) ? nullptr
: internal_stats->GetFileReadHist(0),

@ -31,6 +31,7 @@ class VersionEdit;
class TableBuilder;
class WritableFileWriter;
class InternalStats;
class InternalIterator;
TableBuilder* NewTableBuilder(
const ImmutableCFOptions& options,
@ -49,8 +50,9 @@ TableBuilder* NewTableBuilder(
// zero, and no Table file will be produced.
extern Status BuildTable(
const std::string& dbname, Env* env, const ImmutableCFOptions& options,
const EnvOptions& env_options, TableCache* table_cache, Iterator* iter,
FileMetaData* meta, const InternalKeyComparator& internal_comparator,
const EnvOptions& env_options, TableCache* table_cache,
InternalIterator* iter, FileMetaData* meta,
const InternalKeyComparator& internal_comparator,
const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
int_tbl_prop_collector_factories,
uint32_t column_family_id, std::vector<SequenceNumber> snapshots,

@ -6,11 +6,12 @@
// of patent rights can be found in the PATENTS file in the same directory.
#include "db/compaction_iterator.h"
#include "table/internal_iterator.h"
namespace rocksdb {
CompactionIterator::CompactionIterator(
Iterator* input, const Comparator* cmp, MergeHelper* merge_helper,
InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
Env* env, bool expect_valid_internal_key, Compaction* compaction,
const CompactionFilter* compaction_filter, LogBuffer* log_buffer)

@ -37,7 +37,7 @@ struct CompactionIteratorStats {
class CompactionIterator {
public:
CompactionIterator(Iterator* input, const Comparator* cmp,
CompactionIterator(InternalIterator* input, const Comparator* cmp,
MergeHelper* merge_helper, SequenceNumber last_sequence,
std::vector<SequenceNumber>* snapshots, Env* env,
bool expect_valid_internal_key,
@ -84,7 +84,7 @@ class CompactionIterator {
inline SequenceNumber findEarliestVisibleSnapshot(
SequenceNumber in, SequenceNumber* prev_snapshot);
Iterator* input_;
InternalIterator* input_;
const Comparator* cmp_;
MergeHelper* merge_helper_;
const std::vector<SequenceNumber>* snapshots_;

@ -585,7 +585,7 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options,
void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
assert(sub_compact != nullptr);
std::unique_ptr<Iterator> input(
std::unique_ptr<InternalIterator> input(
versions_->MakeInputIterator(sub_compact->compaction));
AutoThreadOperationStageUpdater stage_updater(
@ -811,7 +811,7 @@ Status CompactionJob::FinishCompactionOutputFile(
if (s.ok() && current_entries > 0) {
// Verify that the table is usable
ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
Iterator* iter = cfd->table_cache()->NewIterator(
InternalIterator* iter = cfd->table_cache()->NewIterator(
ReadOptions(), env_options_, cfd->internal_comparator(), meta->fd,
nullptr, cfd->internal_stats()->GetFileReadHist(
compact_->compaction->output_level()),

@ -35,9 +35,9 @@
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/transaction_log.h"
#include "table/scoped_arena_iterator.h"
#include "util/autovector.h"
#include "util/event_logger.h"
#include "util/scoped_arena_iterator.h"
#include "util/stop_watch.h"
#include "util/thread_local.h"

@ -47,6 +47,7 @@
#include "table/block_based_table_factory.h"
#include "table/mock_table.h"
#include "table/plain_table_factory.h"
#include "table/scoped_arena_iterator.h"
#include "util/compression.h"
#include "util/hash.h"
#include "util/hash_linklist_rep.h"
@ -54,7 +55,6 @@
#include "util/mock_env.h"
#include "util/mutexlock.h"
#include "util/rate_limiter.h"
#include "util/scoped_arena_iterator.h"
#include "util/statistics.h"
#include "util/string_util.h"
#include "util/sync_point.h"

@ -2845,11 +2845,11 @@ static void CleanupIteratorState(void* arg1, void* arg2) {
}
} // namespace
Iterator* DBImpl::NewInternalIterator(const ReadOptions& read_options,
ColumnFamilyData* cfd,
SuperVersion* super_version,
Arena* arena) {
Iterator* internal_iter;
InternalIterator* DBImpl::NewInternalIterator(const ReadOptions& read_options,
ColumnFamilyData* cfd,
SuperVersion* super_version,
Arena* arena) {
InternalIterator* internal_iter;
assert(arena != nullptr);
// Need to create internal iterator from the arena.
MergeIteratorBuilder merge_iter_builder(&cfd->internal_comparator(), arena);
@ -3148,7 +3148,8 @@ Status DBImpl::AddFile(ColumnFamilyHandle* column_family,
file_info.num_entries = table_reader->GetTableProperties()->num_entries;
ParsedInternalKey key;
std::unique_ptr<Iterator> iter(table_reader->NewIterator(ReadOptions()));
std::unique_ptr<InternalIterator> iter(
table_reader->NewIterator(ReadOptions()));
// Get first (smallest) key from file
iter->SeekToFirst();
@ -3307,8 +3308,8 @@ Status DBImpl::AddFile(ColumnFamilyHandle* column_family,
return status;
}
Iterator* DBImpl::NewInternalIterator(Arena* arena,
ColumnFamilyHandle* column_family) {
InternalIterator* DBImpl::NewInternalIterator(
Arena* arena, ColumnFamilyHandle* column_family) {
ColumnFamilyData* cfd;
if (column_family == nullptr) {
cfd = default_cf_handle_->cfd();
@ -3565,7 +3566,7 @@ Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
snapshot, sv->mutable_cf_options.max_sequential_skip_in_iterations,
read_options.iterate_upper_bound);
Iterator* internal_iter =
InternalIterator* internal_iter =
NewInternalIterator(read_options, cfd, sv, db_iter->GetArena());
db_iter->SetIterUnderDBIter(internal_iter);
@ -3632,8 +3633,8 @@ Status DBImpl::NewIterators(
ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator(
env_, *cfd->ioptions(), cfd->user_comparator(), snapshot,
sv->mutable_cf_options.max_sequential_skip_in_iterations);
Iterator* internal_iter = NewInternalIterator(
read_options, cfd, sv, db_iter->GetArena());
InternalIterator* internal_iter =
NewInternalIterator(read_options, cfd, sv, db_iter->GetArena());
db_iter->SetIterUnderDBIter(internal_iter);
iterators->push_back(db_iter);
}

@ -36,11 +36,11 @@
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/transaction_log.h"
#include "table/scoped_arena_iterator.h"
#include "util/autovector.h"
#include "util/event_logger.h"
#include "util/hash.h"
#include "util/instrumented_mutex.h"
#include "util/scoped_arena_iterator.h"
#include "util/stop_watch.h"
#include "util/thread_local.h"
@ -251,8 +251,8 @@ class DBImpl : public DB {
// Return an internal iterator over the current state of the database.
// The keys of this iterator are internal keys (see format.h).
// The returned iterator should be deleted when no longer needed.
Iterator* NewInternalIterator(Arena* arena,
ColumnFamilyHandle* column_family = nullptr);
InternalIterator* NewInternalIterator(
Arena* arena, ColumnFamilyHandle* column_family = nullptr);
#ifndef NDEBUG
// Extra methods (for testing) that are not in the public DB interface
@ -370,8 +370,10 @@ class DBImpl : public DB {
const DBOptions db_options_;
Statistics* stats_;
Iterator* NewInternalIterator(const ReadOptions&, ColumnFamilyData* cfd,
SuperVersion* super_version, Arena* arena);
InternalIterator* NewInternalIterator(const ReadOptions&,
ColumnFamilyData* cfd,
SuperVersion* super_version,
Arena* arena);
void NotifyOnFlushCompleted(ColumnFamilyData* cfd, FileMetaData* file_meta,
const MutableCFOptions& mutable_cf_options,

@ -15,11 +15,12 @@
#include "db/filename.h"
#include "db/dbformat.h"
#include "port/port.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "rocksdb/iterator.h"
#include "rocksdb/merge_operator.h"
#include "port/port.h"
#include "table/internal_iterator.h"
#include "util/arena.h"
#include "util/logging.h"
#include "util/mutexlock.h"
@ -58,9 +59,9 @@ class DBIter: public Iterator {
kReverse
};
DBIter(Env* env, const ImmutableCFOptions& ioptions,
const Comparator* cmp, Iterator* iter, SequenceNumber s,
bool arena_mode, uint64_t max_sequential_skip_in_iterations,
DBIter(Env* env, const ImmutableCFOptions& ioptions, const Comparator* cmp,
InternalIterator* iter, SequenceNumber s, bool arena_mode,
uint64_t max_sequential_skip_in_iterations,
const Slice* iterate_upper_bound = nullptr)
: arena_mode_(arena_mode),
env_(env),
@ -83,10 +84,10 @@ class DBIter: public Iterator {
if (!arena_mode_) {
delete iter_;
} else {
iter_->~Iterator();
iter_->~InternalIterator();
}
}
virtual void SetIter(Iterator* iter) {
virtual void SetIter(InternalIterator* iter) {
assert(iter_ == nullptr);
iter_ = iter;
}
@ -142,7 +143,7 @@ class DBIter: public Iterator {
Logger* logger_;
const Comparator* const user_comparator_;
const MergeOperator* const user_merge_operator_;
Iterator* iter_;
InternalIterator* iter_;
SequenceNumber const sequence_;
Status status_;
@ -744,7 +745,7 @@ void DBIter::SeekToLast() {
Iterator* NewDBIterator(Env* env, const ImmutableCFOptions& ioptions,
const Comparator* user_key_comparator,
Iterator* internal_iter,
InternalIterator* internal_iter,
const SequenceNumber& sequence,
uint64_t max_sequential_skip_in_iterations,
const Slice* iterate_upper_bound) {
@ -757,7 +758,7 @@ ArenaWrappedDBIter::~ArenaWrappedDBIter() { db_iter_->~DBIter(); }
void ArenaWrappedDBIter::SetDBIter(DBIter* iter) { db_iter_ = iter; }
void ArenaWrappedDBIter::SetIterUnderDBIter(Iterator* iter) {
void ArenaWrappedDBIter::SetIterUnderDBIter(InternalIterator* iter) {
static_cast<DBIter*>(db_iter_)->SetIter(iter);
}
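
The DBIter destructor in the hunk above deletes iter_ when it owns it, but in arena mode it only invokes iter_->~InternalIterator(): the iterator was placement-new'ed into arena memory that is released in bulk, so the destructor must run without a matching delete. A standalone sketch of that pattern follows; FakeIter and the stack buffer are stand-ins for a real iterator and RocksDB's Arena, not code from this diff.

#include <cstdio>
#include <new>

struct FakeIter {
  ~FakeIter() { std::printf("iterator state torn down\n"); }
};

int main() {
  // Arena-style allocation: placement-new into memory owned elsewhere.
  alignas(FakeIter) unsigned char arena_block[sizeof(FakeIter)];
  FakeIter* it = new (arena_block) FakeIter();
  // Run the destructor only; calling delete here would be wrong because
  // the memory was never obtained from operator new.
  it->~FakeIter();
  return 0;
}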

@ -10,6 +10,7 @@
#pragma once
#include <stdint.h>
#include "rocksdb/db.h"
#include "rocksdb/iterator.h"
#include "db/dbformat.h"
#include "util/arena.h"
#include "util/autovector.h"
@ -18,18 +19,17 @@ namespace rocksdb {
class Arena;
class DBIter;
class InternalIterator;
// Return a new iterator that converts internal keys (yielded by
// "*internal_iter") that were live at the specified "sequence" number
// into appropriate user keys.
extern Iterator* NewDBIterator(
Env* env,
const ImmutableCFOptions& options,
const Comparator *user_key_comparator,
Iterator* internal_iter,
const SequenceNumber& sequence,
uint64_t max_sequential_skip_in_iterations,
const Slice* iterate_upper_bound = nullptr);
extern Iterator* NewDBIterator(Env* env, const ImmutableCFOptions& options,
const Comparator* user_key_comparator,
InternalIterator* internal_iter,
const SequenceNumber& sequence,
uint64_t max_sequential_skip_in_iterations,
const Slice* iterate_upper_bound = nullptr);
// A wrapper iterator which wraps DB Iterator and the arena, with which the DB
// iterator is supposed be allocated. This class is used as an entry point of
@ -50,7 +50,7 @@ class ArenaWrappedDBIter : public Iterator {
// Set the internal iterator wrapped inside the DB Iterator. Usually it is
// a merging iterator.
virtual void SetIterUnderDBIter(Iterator* iter);
virtual void SetIterUnderDBIter(InternalIterator* iter);
virtual bool Valid() const override;
virtual void SeekToFirst() override;
virtual void SeekToLast() override;
@ -60,6 +60,7 @@ class ArenaWrappedDBIter : public Iterator {
virtual Slice key() const override;
virtual Slice value() const override;
virtual Status status() const override;
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
private:

@ -29,7 +29,7 @@ static uint64_t TestGetTickerCount(const Options& options,
return options.statistics->getTickerCount(ticker_type);
}
class TestIterator : public Iterator {
class TestIterator : public InternalIterator {
public:
explicit TestIterator(const Comparator* comparator)
: initialized_(false),
@ -1864,11 +1864,12 @@ class DBIterWithMergeIterTest : public testing::Test {
internal_iter2_->Add("d", kTypeValue, "7", 3u);
internal_iter2_->Finish();
std::vector<Iterator*> child_iters;
std::vector<InternalIterator*> child_iters;
child_iters.push_back(internal_iter1_);
child_iters.push_back(internal_iter2_);
InternalKeyComparator icomp(BytewiseComparator());
Iterator* merge_iter = NewMergingIterator(&icomp_, &child_iters[0], 2u);
InternalIterator* merge_iter =
NewMergingIterator(&icomp_, &child_iters[0], 2u);
db_iter_.reset(NewDBIterator(env_, ImmutableCFOptions(options_),
BytewiseComparator(), merge_iter,

@ -54,6 +54,7 @@
#include "table/block_based_table_factory.h"
#include "table/mock_table.h"
#include "table/plain_table_factory.h"
#include "table/scoped_arena_iterator.h"
#include "util/file_reader_writer.h"
#include "util/hash.h"
#include "util/hash_linklist_rep.h"
@ -64,7 +65,6 @@
#include "util/rate_limiter.h"
#include "util/statistics.h"
#include "util/testharness.h"
#include "util/scoped_arena_iterator.h"
#include "util/sync_point.h"
#include "util/testutil.h"
#include "util/mock_env.h"

@ -38,11 +38,10 @@ SpecialEnv::SpecialEnv(Env* base)
table_write_callback_ = nullptr;
}
DBTestBase::DBTestBase(const std::string path) : option_config_(kDefault),
mem_env_(!getenv("MEM_ENV") ? nullptr :
new MockEnv(Env::Default())),
env_(new SpecialEnv(mem_env_ ? mem_env_ : Env::Default())) {
DBTestBase::DBTestBase(const std::string path)
: option_config_(kDefault),
mem_env_(!getenv("MEM_ENV") ? nullptr : new MockEnv(Env::Default())),
env_(new SpecialEnv(mem_env_ ? mem_env_ : Env::Default())) {
env_->SetBackgroundThreads(1, Env::LOW);
env_->SetBackgroundThreads(1, Env::HIGH);
dbname_ = test::TmpDir(env_) + path;
@ -92,8 +91,7 @@ bool DBTestBase::ChangeOptions(int skip_mask) {
continue;
}
if ((skip_mask & kSkipNoSeekToLast) &&
(option_config_ == kHashLinkList ||
option_config_ == kHashSkipList)) {;
(option_config_ == kHashLinkList || option_config_ == kHashSkipList)) {
continue;
}
if ((skip_mask & kSkipPlainTable) &&
@ -115,8 +113,7 @@ bool DBTestBase::ChangeOptions(int skip_mask) {
option_config_ == kFIFOCompaction) {
continue;
}
if ((skip_mask & kSkipMmapReads) &&
option_config_ == kWalDirAndMmapReads) {
if ((skip_mask & kSkipMmapReads) && option_config_ == kWalDirAndMmapReads) {
continue;
}
break;
@ -207,8 +204,7 @@ Options DBTestBase::CurrentOptions(
switch (option_config_) {
case kHashSkipList:
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
options.memtable_factory.reset(
NewHashSkipListRepFactory(16));
options.memtable_factory.reset(NewHashSkipListRepFactory(16));
break;
case kPlainTableFirstBytePrefix:
options.table_factory.reset(new PlainTableFactory());
@ -296,7 +292,7 @@ Options DBTestBase::CurrentOptions(
break;
case kCompressedBlockCache:
options.allow_mmap_writes = true;
table_options.block_cache_compressed = NewLRUCache(8*1024*1024);
table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
break;
case kInfiniteMaxOpenFiles:
options.max_open_files = -1;
@ -355,7 +351,7 @@ Options DBTestBase::CurrentOptions(
}
void DBTestBase::CreateColumnFamilies(const std::vector<std::string>& cfs,
const Options& options) {
const Options& options) {
ColumnFamilyOptions cf_opts(options);
size_t cfi = handles_.size();
handles_.resize(cfi + cfs.size());
@ -365,7 +361,7 @@ void DBTestBase::CreateColumnFamilies(const std::vector<std::string>& cfs,
}
void DBTestBase::CreateAndReopenWithCF(const std::vector<std::string>& cfs,
const Options& options) {
const Options& options) {
CreateColumnFamilies(cfs, options);
std::vector<std::string> cfs_plus_default = cfs;
cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
@ -373,18 +369,17 @@ void DBTestBase::CreateAndReopenWithCF(const std::vector<std::string>& cfs,
}
void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
const std::vector<Options>& options) {
const std::vector<Options>& options) {
ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
}
void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
const Options& options) {
const Options& options) {
ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
}
Status DBTestBase::TryReopenWithColumnFamilies(
const std::vector<std::string>& cfs,
const std::vector<Options>& options) {
const std::vector<std::string>& cfs, const std::vector<Options>& options) {
Close();
EXPECT_EQ(cfs.size(), options.size());
std::vector<ColumnFamilyDescriptor> column_families;
@ -396,8 +391,7 @@ Status DBTestBase::TryReopenWithColumnFamilies(
}
Status DBTestBase::TryReopenWithColumnFamilies(
const std::vector<std::string>& cfs,
const Options& options) {
const std::vector<std::string>& cfs, const Options& options) {
Close();
std::vector<Options> v_opts(cfs.size(), options);
return TryReopenWithColumnFamilies(cfs, v_opts);
@ -454,7 +448,7 @@ Status DBTestBase::Put(const Slice& k, const Slice& v, WriteOptions wo) {
}
Status DBTestBase::Put(int cf, const Slice& k, const Slice& v,
WriteOptions wo) {
WriteOptions wo) {
if (kMergePut == option_config_) {
return db_->Merge(wo, handles_[cf], k, v);
} else {
@ -493,7 +487,7 @@ std::string DBTestBase::Get(const std::string& k, const Snapshot* snapshot) {
}
std::string DBTestBase::Get(int cf, const std::string& k,
const Snapshot* snapshot) {
const Snapshot* snapshot) {
ReadOptions options;
options.verify_checksums = true;
options.snapshot = snapshot;
@ -731,7 +725,7 @@ uint64_t DBTestBase::Size(const Slice& start, const Slice& limit, int cf) {
}
void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit,
uint32_t target_path_id) {
uint32_t target_path_id) {
CompactRangeOptions compact_options;
compact_options.target_path_id = target_path_id;
ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit));
@ -748,9 +742,8 @@ void DBTestBase::Compact(const Slice& start, const Slice& limit) {
// Do n memtable compactions, each of which produces an sstable
// covering the range [small,large].
void DBTestBase::MakeTables(
int n, const std::string& small,
const std::string& large, int cf) {
void DBTestBase::MakeTables(int n, const std::string& small,
const std::string& large, int cf) {
for (int i = 0; i < n; i++) {
ASSERT_OK(Put(cf, small, "begin"));
ASSERT_OK(Put(cf, large, "end"));
@ -761,8 +754,8 @@ void DBTestBase::MakeTables(
// Prevent pushing of new sstables into deeper levels by adding
// tables that cover a specified range to all levels.
void DBTestBase::FillLevels(
const std::string& smallest, const std::string& largest, int cf) {
void DBTestBase::FillLevels(const std::string& smallest,
const std::string& largest, int cf) {
MakeTables(db_->NumberLevels(handles_[cf]), smallest, largest, cf);
}
@ -779,7 +772,7 @@ void DBTestBase::MoveFilesToLevel(int level, int cf) {
void DBTestBase::DumpFileCounts(const char* label) {
fprintf(stderr, "---\n%s:\n", label);
fprintf(stderr, "maxoverlap: %" PRIu64 "\n",
dbfull()->TEST_MaxNextLevelOverlappingBytes());
dbfull()->TEST_MaxNextLevelOverlappingBytes());
for (int level = 0; level < db_->NumberLevels(); level++) {
int num = NumTableFilesAtLevel(level);
if (num > 0) {
@ -888,9 +881,10 @@ void DBTestBase::VerifyIterLast(std::string expected_key, int cf) {
// sets newValue with delta
// If previous value is not empty,
// updates previous value with 'b' string of previous value size - 1.
UpdateStatus DBTestBase::updateInPlaceSmallerSize(
char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue) {
UpdateStatus DBTestBase::updateInPlaceSmallerSize(char* prevValue,
uint32_t* prevSize,
Slice delta,
std::string* newValue) {
if (prevValue == nullptr) {
*newValue = std::string(delta.size(), 'c');
return UpdateStatus::UPDATED;
@ -902,9 +896,10 @@ UpdateStatus DBTestBase::updateInPlaceSmallerSize(
}
}
UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(
char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue) {
UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(char* prevValue,
uint32_t* prevSize,
Slice delta,
std::string* newValue) {
if (prevValue == nullptr) {
*newValue = std::string(delta.size(), 'c');
return UpdateStatus::UPDATED;
@ -916,16 +911,17 @@ UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(
}
}
UpdateStatus DBTestBase::updateInPlaceLargerSize(
char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue) {
UpdateStatus DBTestBase::updateInPlaceLargerSize(char* prevValue,
uint32_t* prevSize,
Slice delta,
std::string* newValue) {
*newValue = std::string(delta.size(), 'c');
return UpdateStatus::UPDATED;
}
UpdateStatus DBTestBase::updateInPlaceNoAction(
char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue) {
UpdateStatus DBTestBase::updateInPlaceNoAction(char* prevValue,
uint32_t* prevSize, Slice delta,
std::string* newValue) {
return UpdateStatus::UPDATE_FAILED;
}
@ -953,9 +949,8 @@ void DBTestBase::validateNumberOfEntries(int numValues, int cf) {
ASSERT_EQ(0, seq);
}
void DBTestBase::CopyFile(
const std::string& source, const std::string& destination,
uint64_t size) {
void DBTestBase::CopyFile(const std::string& source,
const std::string& destination, uint64_t size) {
const EnvOptions soptions;
unique_ptr<SequentialFile> srcfile;
ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));

@ -43,11 +43,12 @@
#include "table/block_based_table_factory.h"
#include "table/mock_table.h"
#include "table/plain_table_factory.h"
#include "table/scoped_arena_iterator.h"
#include "util/compression.h"
#include "util/hash_linklist_rep.h"
#include "util/mock_env.h"
#include "util/mutexlock.h"
#include "util/scoped_arena_iterator.h"
#include "util/string_util.h"
// SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
@ -131,9 +132,7 @@ class SpecialEnv : public EnvWrapper {
public:
SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
: env_(env),
base_(std::move(base)) {
}
: env_(env), base_(std::move(base)) {}
Status Append(const Slice& data) override {
if (env_->table_write_callback_) {
(*env_->table_write_callback_)();
@ -148,9 +147,7 @@ class SpecialEnv : public EnvWrapper {
return base_->Append(data);
}
}
Status Truncate(uint64_t size) override {
return base_->Truncate(size);
}
Status Truncate(uint64_t size) override { return base_->Truncate(size); }
Status Close() override {
// SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
@ -180,7 +177,7 @@ class SpecialEnv : public EnvWrapper {
class ManifestFile : public WritableFile {
public:
ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
: env_(env), base_(std::move(b)) { }
: env_(env), base_(std::move(b)) {}
Status Append(const Slice& data) override {
if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated writer error");
@ -283,8 +280,7 @@ class SpecialEnv : public EnvWrapper {
public:
CountingFile(unique_ptr<RandomAccessFile>&& target,
anon::AtomicCounter* counter)
: target_(std::move(target)), counter_(counter) {
}
: target_(std::move(target)), counter_(counter) {}
virtual Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const override {
counter_->Increment();
@ -329,7 +325,6 @@ class SpecialEnv : public EnvWrapper {
return s;
}
virtual void SleepForMicroseconds(int micros) override {
sleep_counter_.Increment();
if (no_sleep_) {
@ -406,7 +401,7 @@ class SpecialEnv : public EnvWrapper {
std::atomic<int64_t> addon_time_;
bool no_sleep_;
std::atomic<bool> is_wal_sync_thread_safe_ {true};
std::atomic<bool> is_wal_sync_thread_safe_{true};
};
class DBTestBase : public testing::Test {
@ -509,9 +504,7 @@ class DBTestBase : public testing::Test {
const Options& defaultOptions,
const anon::OptionsOverride& options_override = anon::OptionsOverride());
DBImpl* dbfull() {
return reinterpret_cast<DBImpl*>(db_);
}
DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
void CreateColumnFamilies(const std::vector<std::string>& cfs,
const Options& options);
@ -525,9 +518,8 @@ class DBTestBase : public testing::Test {
void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
const Options& options);
Status TryReopenWithColumnFamilies(
const std::vector<std::string>& cfs,
const std::vector<Options>& options);
Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
const std::vector<Options>& options);
Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
const Options& options);
@ -643,21 +635,21 @@ class DBTestBase : public testing::Test {
// sets newValue with delta
// If previous value is not empty,
// updates previous value with 'b' string of previous value size - 1.
static UpdateStatus updateInPlaceSmallerSize(
char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue);
static UpdateStatus updateInPlaceSmallerSize(char* prevValue,
uint32_t* prevSize, Slice delta,
std::string* newValue);
static UpdateStatus updateInPlaceSmallerVarintSize(
char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue);
static UpdateStatus updateInPlaceSmallerVarintSize(char* prevValue,
uint32_t* prevSize,
Slice delta,
std::string* newValue);
static UpdateStatus updateInPlaceLargerSize(
char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue);
static UpdateStatus updateInPlaceLargerSize(char* prevValue,
uint32_t* prevSize, Slice delta,
std::string* newValue);
static UpdateStatus updateInPlaceNoAction(
char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue);
static UpdateStatus updateInPlaceNoAction(char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue);
// Utility method to test InplaceUpdate
void validateNumberOfEntries(int numValues, int cf = 0);

@ -197,7 +197,7 @@ Status FlushJob::WriteLevel0Table(const autovector<MemTable*>& mems,
if (log_buffer_) {
log_buffer_->FlushBufferToLog();
}
std::vector<Iterator*> memtables;
std::vector<InternalIterator*> memtables;
ReadOptions ro;
ro.total_order_seek = true;
Arena arena;

@ -27,12 +27,12 @@
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/transaction_log.h"
#include "table/scoped_arena_iterator.h"
#include "util/autovector.h"
#include "util/event_logger.h"
#include "util/instrumented_mutex.h"
#include "util/stop_watch.h"
#include "util/thread_local.h"
#include "util/scoped_arena_iterator.h"
#include "db/internal_stats.h"
#include "db/write_controller.h"
#include "db/flush_scheduler.h"

@ -28,7 +28,7 @@ namespace rocksdb {
// iter.SetFileIndex(file_index);
// iter.Seek(target);
// iter.Next()
class LevelIterator : public Iterator {
class LevelIterator : public InternalIterator {
public:
LevelIterator(const ColumnFamilyData* const cfd,
const ReadOptions& read_options,
@ -113,7 +113,7 @@ class LevelIterator : public Iterator {
bool valid_;
uint32_t file_index_;
Status status_;
std::unique_ptr<Iterator> file_iter_;
std::unique_ptr<InternalIterator> file_iter_;
};
ForwardIterator::ForwardIterator(DBImpl* db, const ReadOptions& read_options,
@ -146,10 +146,10 @@ ForwardIterator::~ForwardIterator() {
void ForwardIterator::Cleanup(bool release_sv) {
if (mutable_iter_ != nullptr) {
mutable_iter_->~Iterator();
mutable_iter_->~InternalIterator();
}
for (auto* m : imm_iters_) {
m->~Iterator();
m->~InternalIterator();
}
imm_iters_.clear();
for (auto* f : l0_iters_) {

@ -14,6 +14,7 @@
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "db/dbformat.h"
#include "table/internal_iterator.h"
#include "util/arena.h"
namespace rocksdb {
@ -30,16 +31,15 @@ class MinIterComparator {
explicit MinIterComparator(const Comparator* comparator) :
comparator_(comparator) {}
bool operator()(Iterator* a, Iterator* b) {
bool operator()(InternalIterator* a, InternalIterator* b) {
return comparator_->Compare(a->key(), b->key()) > 0;
}
private:
const Comparator* comparator_;
};
typedef std::priority_queue<Iterator*,
std::vector<Iterator*>,
MinIterComparator> MinIterHeap;
typedef std::priority_queue<InternalIterator*, std::vector<InternalIterator*>,
MinIterComparator> MinIterHeap;
/**
* ForwardIterator is a special type of iterator that only supports Seek()
@ -48,7 +48,7 @@ typedef std::priority_queue<Iterator*,
* the iterator. At the current implementation, snapshot is taken at the
* time Seek() is called. The Next() followed do not see new values after.
*/
class ForwardIterator : public Iterator {
class ForwardIterator : public InternalIterator {
public:
ForwardIterator(DBImpl* db, const ReadOptions& read_options,
ColumnFamilyData* cfd, SuperVersion* current_sv = nullptr);
@ -94,11 +94,11 @@ class ForwardIterator : public Iterator {
MinIterHeap immutable_min_heap_;
SuperVersion* sv_;
Iterator* mutable_iter_;
std::vector<Iterator*> imm_iters_;
std::vector<Iterator*> l0_iters_;
InternalIterator* mutable_iter_;
std::vector<InternalIterator*> imm_iters_;
std::vector<InternalIterator*> l0_iters_;
std::vector<LevelIterator*> level_iters_;
Iterator* current_;
InternalIterator* current_;
bool valid_;
// Internal iterator status; set only by one of the unsupported methods.

@ -21,6 +21,7 @@
#include "rocksdb/iterator.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/slice_transform.h"
#include "table/internal_iterator.h"
#include "table/merger.h"
#include "util/arena.h"
#include "util/coding.h"
@ -202,7 +203,7 @@ const char* EncodeKey(std::string* scratch, const Slice& target) {
return scratch->data();
}
class MemTableIterator: public Iterator {
class MemTableIterator : public InternalIterator {
public:
MemTableIterator(
const MemTable& mem, const ReadOptions& read_options, Arena* arena)
@ -285,7 +286,8 @@ class MemTableIterator: public Iterator {
void operator=(const MemTableIterator&);
};
Iterator* MemTable::NewIterator(const ReadOptions& read_options, Arena* arena) {
InternalIterator* MemTable::NewIterator(const ReadOptions& read_options,
Arena* arena) {
assert(arena != nullptr);
auto mem = arena->AllocateAligned(sizeof(MemTableIterator));
return new (mem) MemTableIterator(*this, read_options, arena);

@ -31,6 +31,7 @@ class Mutex;
class MemTableIterator;
class MergeContext;
class WriteBuffer;
class InternalIterator;
struct MemTableOptions {
explicit MemTableOptions(
@ -140,7 +141,7 @@ class MemTable {
// arena: If not null, the arena needs to be used to allocate the Iterator.
// Calling ~Iterator of the iterator will destroy all the states but
// those allocated in arena.
Iterator* NewIterator(const ReadOptions& read_options, Arena* arena);
InternalIterator* NewIterator(const ReadOptions& read_options, Arena* arena);
// Add an entry into memtable that maps key to value at the
// specified sequence number and with the specified type.

@ -138,9 +138,9 @@ bool MemTableListVersion::GetFromList(std::list<MemTable*>* list,
return false;
}
void MemTableListVersion::AddIterators(const ReadOptions& options,
std::vector<Iterator*>* iterator_list,
Arena* arena) {
void MemTableListVersion::AddIterators(
const ReadOptions& options, std::vector<InternalIterator*>* iterator_list,
Arena* arena) {
for (auto& m : memlist_) {
iterator_list->push_back(m->NewIterator(options, arena));
}

@ -74,7 +74,8 @@ class MemTableListVersion {
}
void AddIterators(const ReadOptions& options,
std::vector<Iterator*>* iterator_list, Arena* arena);
std::vector<InternalIterator*>* iterator_list,
Arena* arena);
void AddIterators(const ReadOptions& options,
MergeIteratorBuilder* merge_iter_builder);

@ -12,6 +12,7 @@
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/merge_operator.h"
#include "table/internal_iterator.h"
#include "util/perf_context_imp.h"
#include "util/statistics.h"
@ -56,7 +57,8 @@ Status MergeHelper::TimedFullMerge(const Slice& key, const Slice* value,
// keys_ stores the list of keys encountered while merging.
// operands_ stores the list of merge operands encountered while merging.
// keys_[i] corresponds to operands_[i] for each i.
Status MergeHelper::MergeUntil(Iterator* iter, const SequenceNumber stop_before,
Status MergeHelper::MergeUntil(InternalIterator* iter,
const SequenceNumber stop_before,
const bool at_bottom) {
// Get a copy of the internal key, before it's invalidated by iter->Next()
// Also maintain the list of merge operands seen.

@ -22,6 +22,7 @@ class Iterator;
class Logger;
class MergeOperator;
class Statistics;
class InternalIterator;
class MergeHelper {
public:
@ -82,7 +83,8 @@ class MergeHelper {
// with asserts removed).
//
// REQUIRED: The first key in the input is not corrupted.
Status MergeUntil(Iterator* iter, const SequenceNumber stop_before = 0,
Status MergeUntil(InternalIterator* iter,
const SequenceNumber stop_before = 0,
const bool at_bottom = false);
// Filters a merge operand using the compaction filter specified

@ -81,8 +81,8 @@
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "rocksdb/immutable_options.h"
#include "table/scoped_arena_iterator.h"
#include "util/file_reader_writer.h"
#include "util/scoped_arena_iterator.h"
namespace rocksdb {
@ -340,7 +340,7 @@ class Repairer {
t->meta.fd = FileDescriptor(t->meta.fd.GetNumber(), t->meta.fd.GetPathId(),
file_size);
if (status.ok()) {
Iterator* iter = table_cache_->NewIterator(
InternalIterator* iter = table_cache_->NewIterator(
ReadOptions(), env_options_, icmp_, t->meta.fd);
bool empty = true;
ParsedInternalKey parsed;

@ -14,6 +14,7 @@
#include "db/version_edit.h"
#include "rocksdb/statistics.h"
#include "table/internal_iterator.h"
#include "table/iterator_wrapper.h"
#include "table/table_builder.h"
#include "table/table_reader.h"
@ -148,13 +149,11 @@ Status TableCache::FindTable(const EnvOptions& env_options,
return s;
}
Iterator* TableCache::NewIterator(const ReadOptions& options,
const EnvOptions& env_options,
const InternalKeyComparator& icomparator,
const FileDescriptor& fd,
TableReader** table_reader_ptr,
HistogramImpl* file_read_hist,
bool for_compaction, Arena* arena) {
InternalIterator* TableCache::NewIterator(
const ReadOptions& options, const EnvOptions& env_options,
const InternalKeyComparator& icomparator, const FileDescriptor& fd,
TableReader** table_reader_ptr, HistogramImpl* file_read_hist,
bool for_compaction, Arena* arena) {
PERF_TIMER_GUARD(new_table_iterator_nanos);
if (table_reader_ptr != nullptr) {
@ -171,7 +170,7 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
env_options, icomparator, fd, /* sequential mode */ true,
/* record stats */ false, nullptr, &table_reader_unique_ptr);
if (!s.ok()) {
return NewErrorIterator(s, arena);
return NewErrorInternalIterator(s, arena);
}
table_reader = table_reader_unique_ptr.release();
} else {
@ -182,13 +181,13 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
options.read_tier == kBlockCacheTier /* no_io */,
!for_compaction /* record read_stats */, file_read_hist);
if (!s.ok()) {
return NewErrorIterator(s, arena);
return NewErrorInternalIterator(s, arena);
}
table_reader = GetTableReaderFromHandle(handle);
}
}
Iterator* result = table_reader->NewIterator(options, arena);
InternalIterator* result = table_reader->NewIterator(options, arena);
if (create_new_table_reader) {
assert(handle == nullptr);

@ -29,6 +29,7 @@ class Arena;
struct FileDescriptor;
class GetContext;
class HistogramImpl;
class InternalIterator;
class TableCache {
public:
@ -43,12 +44,12 @@ class TableCache {
// the returned iterator. The returned "*tableptr" object is owned by
// the cache and should not be deleted, and is valid for as long as the
// returned iterator is live.
Iterator* NewIterator(const ReadOptions& options, const EnvOptions& toptions,
const InternalKeyComparator& internal_comparator,
const FileDescriptor& file_fd,
TableReader** table_reader_ptr = nullptr,
HistogramImpl* file_read_hist = nullptr,
bool for_compaction = false, Arena* arena = nullptr);
InternalIterator* NewIterator(
const ReadOptions& options, const EnvOptions& toptions,
const InternalKeyComparator& internal_comparator,
const FileDescriptor& file_fd, TableReader** table_reader_ptr = nullptr,
HistogramImpl* file_read_hist = nullptr, bool for_compaction = false,
Arena* arena = nullptr);
// If a seek to internal key "k" in specified file finds an entry,
// call (*handle_result)(arg, found_key, found_value) repeatedly until

@ -35,6 +35,7 @@
#include "db/writebuffer.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "table/internal_iterator.h"
#include "table/table_reader.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
@ -420,7 +421,7 @@ namespace {
// is the largest key that occurs in the file, and value() is an
// 16-byte value containing the file number and file size, both
// encoded using EncodeFixed64.
class LevelFileNumIterator : public Iterator {
class LevelFileNumIterator : public InternalIterator {
public:
LevelFileNumIterator(const InternalKeyComparator& icmp,
const LevelFilesBrief* flevel)
@ -488,9 +489,9 @@ class LevelFileIteratorState : public TwoLevelIteratorState {
file_read_hist_(file_read_hist),
for_compaction_(for_compaction) {}
Iterator* NewSecondaryIterator(const Slice& meta_handle) override {
InternalIterator* NewSecondaryIterator(const Slice& meta_handle) override {
if (meta_handle.size() != sizeof(FileDescriptor)) {
return NewErrorIterator(
return NewErrorInternalIterator(
Status::Corruption("FileReader invoked with unexpected value"));
} else {
const FileDescriptor* fd =
@ -3119,7 +3120,7 @@ uint64_t VersionSet::ApproximateSize(Version* v, const FdWithKeyRange& f,
// "key" falls in the range for this table. Add the
// approximate offset of "key" within the table.
TableReader* table_reader_ptr;
Iterator* iter = v->cfd_->table_cache()->NewIterator(
InternalIterator* iter = v->cfd_->table_cache()->NewIterator(
ReadOptions(), env_options_, v->cfd_->internal_comparator(), f.fd,
&table_reader_ptr);
if (table_reader_ptr != nullptr) {
@ -3166,7 +3167,7 @@ void VersionSet::AddLiveFiles(std::vector<FileDescriptor>* live_list) {
}
}
Iterator* VersionSet::MakeInputIterator(Compaction* c) {
InternalIterator* VersionSet::MakeInputIterator(Compaction* c) {
auto cfd = c->column_family_data();
ReadOptions read_options;
read_options.verify_checksums =
@ -3182,7 +3183,7 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
const size_t space = (c->level() == 0 ? c->input_levels(0)->num_files +
c->num_input_levels() - 1
: c->num_input_levels());
Iterator** list = new Iterator* [space];
InternalIterator** list = new InternalIterator* [space];
size_t num = 0;
for (size_t which = 0; which < c->num_input_levels(); which++) {
if (c->input_levels(which)->num_files != 0) {
@ -3209,7 +3210,7 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
}
}
assert(num <= space);
Iterator* result =
InternalIterator* result =
NewMergingIterator(&c->column_family_data()->internal_comparator(), list,
static_cast<int>(num));
delete[] list;

@ -48,7 +48,7 @@ class Writer;
}
class Compaction;
class Iterator;
class InternalIterator;
class LogBuffer;
class LookupKey;
class MemTable;
@ -502,7 +502,8 @@ class Version {
return storage_info_.user_comparator_;
}
bool PrefixMayMatch(const ReadOptions& read_options, Iterator* level_iter,
bool PrefixMayMatch(const ReadOptions& read_options,
InternalIterator* level_iter,
const Slice& internal_prefix) const;
// The helper function of UpdateAccumulatedStats, which may fill the missing
@ -643,7 +644,7 @@ class VersionSet {
// Create an iterator that reads over the compaction inputs for "*c".
// The caller should delete the iterator when no longer needed.
Iterator* MakeInputIterator(Compaction* c);
InternalIterator* MakeInputIterator(Compaction* c);
// Add all files listed in any live version to *live.
void AddLiveFiles(std::vector<FileDescriptor>* live_list);

@ -17,10 +17,10 @@
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/utilities/write_batch_with_index.h"
#include "table/scoped_arena_iterator.h"
#include "util/logging.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/scoped_arena_iterator.h"
namespace rocksdb {

@ -24,10 +24,32 @@
namespace rocksdb {
class Iterator {
class Cleanable {
public:
Iterator();
virtual ~Iterator();
Cleanable();
~Cleanable();
// Clients are allowed to register function/arg1/arg2 triples that
// will be invoked when this iterator is destroyed.
//
// Note that unlike all of the preceding methods, this method is
// not abstract and therefore clients should not override it.
typedef void (*CleanupFunction)(void* arg1, void* arg2);
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
protected:
struct Cleanup {
CleanupFunction function;
void* arg1;
void* arg2;
Cleanup* next;
};
Cleanup cleanup_;
};
class Iterator : public Cleanable {
public:
Iterator() {}
virtual ~Iterator() {}
// An iterator is either positioned at a key/value pair, or
// not valid. This method returns true iff the iterator is valid.
@ -73,23 +95,7 @@ class Iterator {
// satisfied without doing some IO, then this returns Status::Incomplete().
virtual Status status() const = 0;
// Clients are allowed to register function/arg1/arg2 triples that
// will be invoked when this iterator is destroyed.
//
// Note that unlike all of the preceding methods, this method is
// not abstract and therefore clients should not override it.
typedef void (*CleanupFunction)(void* arg1, void* arg2);
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
private:
struct Cleanup {
CleanupFunction function;
void* arg1;
void* arg2;
Cleanup* next;
};
Cleanup cleanup_;
// No copying allowed
Iterator(const Iterator&);
void operator=(const Iterator&);
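
The Cleanable declaration is fully visible above, but its method bodies live outside this hunk (presumably in table/iterator.cc, which this diff also touches). The following is a self-contained sketch of how such a class can behave, assuming the first registration uses the inline cleanup_ slot and later ones are chained behind it; the list handling and the ReleaseBuffer example are illustrative assumptions, not copies of the RocksDB implementation.

#include <cstdio>

class Cleanable {
 public:
  typedef void (*CleanupFunction)(void* arg1, void* arg2);

  Cleanable() {
    cleanup_.function = nullptr;
    cleanup_.next = nullptr;
  }

  ~Cleanable() {
    // Run every registered cleanup when the object is destroyed.
    if (cleanup_.function != nullptr) {
      (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
      for (Cleanup* c = cleanup_.next; c != nullptr;) {
        (*c->function)(c->arg1, c->arg2);
        Cleanup* next = c->next;
        delete c;
        c = next;
      }
    }
  }

  void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2) {
    Cleanup* c;
    if (cleanup_.function == nullptr) {
      c = &cleanup_;  // first registration uses the inline slot
    } else {
      c = new Cleanup;  // later registrations are chained behind the head
      c->next = cleanup_.next;
      cleanup_.next = c;
    }
    c->function = function;
    c->arg1 = arg1;
    c->arg2 = arg2;
  }

 protected:
  struct Cleanup {
    CleanupFunction function;
    void* arg1;
    void* arg2;
    Cleanup* next;
  };
  Cleanup cleanup_;
};

static void ReleaseBuffer(void* arg1, void* /*arg2*/) {
  delete[] static_cast<char*>(arg1);
  std::printf("buffer released\n");
}

int main() {
  {
    Cleanable holder;
    holder.RegisterCleanup(ReleaseBuffer, new char[16], nullptr);
  }  // ReleaseBuffer runs here, when holder is destroyed
  return 0;
}

This is the same mechanism that hooks such as CleanupIteratorState in the db_impl.cc hunk above rely on: resources tied to an iterator are registered once and released automatically when the iterator is destroyed, regardless of whether it is an Iterator or an InternalIterator.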

@ -20,8 +20,8 @@
#include "db/writebuffer.h"
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "table/scoped_arena_iterator.h"
#include "util/logging.h"
#include "util/scoped_arena_iterator.h"
#include "util/testharness.h"
/*

@ -21,8 +21,8 @@
#include "rocksdb/status.h"
#include "rocksdb/write_batch.h"
#include "rocksjni/portal.h"
#include "table/scoped_arena_iterator.h"
#include "util/logging.h"
#include "util/scoped_arena_iterator.h"
#include "util/testharness.h"
/*

@ -316,14 +316,14 @@ Block::Block(BlockContents&& contents)
}
}
Iterator* Block::NewIterator(
const Comparator* cmp, BlockIter* iter, bool total_order_seek) {
InternalIterator* Block::NewIterator(const Comparator* cmp, BlockIter* iter,
bool total_order_seek) {
if (size_ < 2*sizeof(uint32_t)) {
if (iter != nullptr) {
iter->SetStatus(Status::Corruption("bad block contents"));
return iter;
} else {
return NewErrorIterator(Status::Corruption("bad block contents"));
return NewErrorInternalIterator(Status::Corruption("bad block contents"));
}
}
const uint32_t num_restarts = NumRestarts();
@ -332,7 +332,7 @@ Iterator* Block::NewIterator(
iter->SetStatus(Status::OK());
return iter;
} else {
return NewEmptyIterator();
return NewEmptyInternalIterator();
}
} else {
BlockHashIndex* hash_index_ptr =

@ -19,6 +19,7 @@
#include "db/dbformat.h"
#include "table/block_prefix_index.h"
#include "table/block_hash_index.h"
#include "table/internal_iterator.h"
#include "format.h"
@ -66,8 +67,9 @@ class Block {
// If total_order_seek is true, hash_index_ and prefix_index_ are ignored.
// This option only applies for index block. For data block, hash_index_
// and prefix_index_ are null, so this option does not matter.
Iterator* NewIterator(const Comparator* comparator,
BlockIter* iter = nullptr, bool total_order_seek = true);
InternalIterator* NewIterator(const Comparator* comparator,
BlockIter* iter = nullptr,
bool total_order_seek = true);
void SetBlockHashIndex(BlockHashIndex* hash_index);
void SetBlockPrefixIndex(BlockPrefixIndex* prefix_index);
@ -87,7 +89,7 @@ class Block {
void operator=(const Block&);
};
class BlockIter : public Iterator {
class BlockIter : public InternalIterator {
public:
BlockIter()
: comparator_(nullptr),

@ -32,6 +32,7 @@
#include "table/block_hash_index.h"
#include "table/block_prefix_index.h"
#include "table/format.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/two_level_iterator.h"
#include "table/get_context.h"
@ -146,8 +147,8 @@ class BlockBasedTable::IndexReader {
// Create an iterator for index access.
// An iter is passed in, if it is not null, update this one and return it
// If it is null, create a new Iterator
virtual Iterator* NewIterator(
BlockIter* iter = nullptr, bool total_order_seek = true) = 0;
virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
bool total_order_seek = true) = 0;
// The size of the index.
virtual size_t size() const = 0;
@ -187,8 +188,8 @@ class BinarySearchIndexReader : public IndexReader {
return s;
}
virtual Iterator* NewIterator(
BlockIter* iter = nullptr, bool dont_care = true) override {
virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
bool dont_care = true) override {
return index_block_->NewIterator(comparator_, iter, true);
}
@ -219,7 +220,8 @@ class HashIndexReader : public IndexReader {
const Footer& footer, RandomAccessFileReader* file,
Env* env, const Comparator* comparator,
const BlockHandle& index_handle,
Iterator* meta_index_iter, IndexReader** index_reader,
InternalIterator* meta_index_iter,
IndexReader** index_reader,
bool hash_index_allow_collision) {
std::unique_ptr<Block> index_block;
auto s = ReadBlockFromFile(file, footer, ReadOptions(), index_handle,
@ -298,8 +300,8 @@ class HashIndexReader : public IndexReader {
return Status::OK();
}
virtual Iterator* NewIterator(
BlockIter* iter = nullptr, bool total_order_seek = true) override {
virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
bool total_order_seek = true) override {
return index_block_->NewIterator(comparator_, iter, total_order_seek);
}
@ -512,7 +514,7 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
// Read meta index
std::unique_ptr<Block> meta;
std::unique_ptr<Iterator> meta_iter;
std::unique_ptr<InternalIterator> meta_iter;
s = ReadMetaBlock(rep, &meta, &meta_iter);
if (!s.ok()) {
return s;
@ -580,7 +582,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
assert(table_options.block_cache != nullptr);
// Hack: Call NewIndexIterator() to implicitly add index to the
// block_cache
unique_ptr<Iterator> iter(new_table->NewIndexIterator(ReadOptions()));
unique_ptr<InternalIterator> iter(
new_table->NewIndexIterator(ReadOptions()));
s = iter->status();
if (s.ok()) {
@ -652,10 +655,9 @@ size_t BlockBasedTable::ApproximateMemoryUsage() const {
// Load the meta-block from the file. On success, return the loaded meta block
// and its iterator.
Status BlockBasedTable::ReadMetaBlock(
Rep* rep,
std::unique_ptr<Block>* meta_block,
std::unique_ptr<Iterator>* iter) {
Status BlockBasedTable::ReadMetaBlock(Rep* rep,
std::unique_ptr<Block>* meta_block,
std::unique_ptr<InternalIterator>* iter) {
// TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
// it is an empty block.
// TODO: we never really verify check sum for meta index block
@ -898,8 +900,8 @@ BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
return { filter, cache_handle };
}
Iterator* BlockBasedTable::NewIndexIterator(const ReadOptions& read_options,
BlockIter* input_iter) {
InternalIterator* BlockBasedTable::NewIndexIterator(
const ReadOptions& read_options, BlockIter* input_iter) {
// index reader has already been pre-populated.
if (rep_->index_reader) {
return rep_->index_reader->NewIterator(
@ -922,7 +924,7 @@ Iterator* BlockBasedTable::NewIndexIterator(const ReadOptions& read_options,
input_iter->SetStatus(Status::Incomplete("no blocking io"));
return input_iter;
} else {
return NewErrorIterator(Status::Incomplete("no blocking io"));
return NewErrorInternalIterator(Status::Incomplete("no blocking io"));
}
}
@ -942,7 +944,7 @@ Iterator* BlockBasedTable::NewIndexIterator(const ReadOptions& read_options,
input_iter->SetStatus(s);
return input_iter;
} else {
return NewErrorIterator(s);
return NewErrorInternalIterator(s);
}
}
@ -965,8 +967,8 @@ Iterator* BlockBasedTable::NewIndexIterator(const ReadOptions& read_options,
// into an iterator over the contents of the corresponding block.
// If input_iter is null, new a iterator
// If input_iter is not null, update this iter and return it
Iterator* BlockBasedTable::NewDataBlockIterator(Rep* rep,
const ReadOptions& ro, const Slice& index_value,
InternalIterator* BlockBasedTable::NewDataBlockIterator(
Rep* rep, const ReadOptions& ro, const Slice& index_value,
BlockIter* input_iter) {
PERF_TIMER_GUARD(new_table_block_iter_nanos);
@ -987,7 +989,7 @@ Iterator* BlockBasedTable::NewDataBlockIterator(Rep* rep,
input_iter->SetStatus(s);
return input_iter;
} else {
return NewErrorIterator(s);
return NewErrorInternalIterator(s);
}
}
@ -1040,7 +1042,7 @@ Iterator* BlockBasedTable::NewDataBlockIterator(Rep* rep,
input_iter->SetStatus(Status::Incomplete("no blocking io"));
return input_iter;
} else {
return NewErrorIterator(Status::Incomplete("no blocking io"));
return NewErrorInternalIterator(Status::Incomplete("no blocking io"));
}
}
std::unique_ptr<Block> block_value;
@ -1051,7 +1053,7 @@ Iterator* BlockBasedTable::NewDataBlockIterator(Rep* rep,
}
}
Iterator* iter;
InternalIterator* iter;
if (block.value != nullptr) {
iter = block.value->NewIterator(&rep->internal_comparator, input_iter);
if (block.cache_handle != nullptr) {
@ -1065,7 +1067,7 @@ Iterator* BlockBasedTable::NewDataBlockIterator(Rep* rep,
input_iter->SetStatus(s);
iter = input_iter;
} else {
iter = NewErrorIterator(s);
iter = NewErrorInternalIterator(s);
}
}
return iter;
@ -1080,7 +1082,7 @@ class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState {
table_(table),
read_options_(read_options) {}
Iterator* NewSecondaryIterator(const Slice& index_value) override {
InternalIterator* NewSecondaryIterator(const Slice& index_value) override {
return NewDataBlockIterator(table_->rep_, read_options_, index_value);
}
@ -1138,7 +1140,7 @@ bool BlockBasedTable::PrefixMayMatch(const Slice& internal_key) {
// Then, try find it within each block
if (may_match) {
unique_ptr<Iterator> iiter(NewIndexIterator(no_io_read_options));
unique_ptr<InternalIterator> iiter(NewIndexIterator(no_io_read_options));
iiter->Seek(internal_prefix);
if (!iiter->Valid()) {
@ -1184,8 +1186,8 @@ bool BlockBasedTable::PrefixMayMatch(const Slice& internal_key) {
return may_match;
}
Iterator* BlockBasedTable::NewIterator(const ReadOptions& read_options,
Arena* arena) {
InternalIterator* BlockBasedTable::NewIterator(const ReadOptions& read_options,
Arena* arena) {
return NewTwoLevelIterator(new BlockEntryIteratorState(this, read_options),
NewIndexIterator(read_options), arena);
}
@ -1326,7 +1328,7 @@ Status BlockBasedTable::Prefetch(const Slice* const begin,
bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
const Slice& key) {
std::unique_ptr<Iterator> iiter(NewIndexIterator(options));
std::unique_ptr<InternalIterator> iiter(NewIndexIterator(options));
iiter->Seek(key);
assert(iiter->Valid());
CachableEntry<Block> block;
@ -1361,8 +1363,8 @@ bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
// 3. options
// 4. internal_comparator
// 5. index_type
Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader,
Iterator* preloaded_meta_index_iter) {
Status BlockBasedTable::CreateIndexReader(
IndexReader** index_reader, InternalIterator* preloaded_meta_index_iter) {
// Some old version of block-based tables don't have index type present in
// table properties. If that's the case we can safely use the kBinarySearch.
auto index_type_on_file = BlockBasedTableOptions::kBinarySearch;
@ -1396,7 +1398,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader,
}
case BlockBasedTableOptions::kHashSearch: {
std::unique_ptr<Block> meta_guard;
std::unique_ptr<Iterator> meta_iter_guard;
std::unique_ptr<InternalIterator> meta_iter_guard;
auto meta_index_iter = preloaded_meta_index_iter;
if (meta_index_iter == nullptr) {
auto s = ReadMetaBlock(rep_, &meta_guard, &meta_iter_guard);
@ -1430,7 +1432,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader,
}
uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key) {
unique_ptr<Iterator> index_iter(NewIndexIterator(ReadOptions()));
unique_ptr<InternalIterator> index_iter(NewIndexIterator(ReadOptions()));
index_iter->Seek(key);
uint64_t result;
@ -1484,7 +1486,7 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) {
"Metaindex Details:\n"
"--------------------------------------\n");
std::unique_ptr<Block> meta;
std::unique_ptr<Iterator> meta_iter;
std::unique_ptr<InternalIterator> meta_iter;
Status s = ReadMetaBlock(rep_, &meta, &meta_iter);
if (s.ok()) {
for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) {
@ -1567,7 +1569,8 @@ Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
"Index Details:\n"
"--------------------------------------\n");
std::unique_ptr<Iterator> blockhandles_iter(NewIndexIterator(ReadOptions()));
std::unique_ptr<InternalIterator> blockhandles_iter(
NewIndexIterator(ReadOptions()));
Status s = blockhandles_iter->status();
if (!s.ok()) {
out_file->Append("Can not read Index Block \n\n");
@ -1608,7 +1611,8 @@ Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
}
Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
std::unique_ptr<Iterator> blockhandles_iter(NewIndexIterator(ReadOptions()));
std::unique_ptr<InternalIterator> blockhandles_iter(
NewIndexIterator(ReadOptions()));
Status s = blockhandles_iter->status();
if (!s.ok()) {
out_file->Append("Can not read Index Block \n\n");
@ -1630,7 +1634,7 @@ Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
out_file->Append("\n");
out_file->Append("--------------------------------------\n");
std::unique_ptr<Iterator> datablock_iter;
std::unique_ptr<InternalIterator> datablock_iter;
datablock_iter.reset(
NewDataBlockIterator(rep_, ReadOptions(), blockhandles_iter->value()));
s = datablock_iter->status();

@ -43,6 +43,7 @@ struct BlockBasedTableOptions;
struct EnvOptions;
struct ReadOptions;
class GetContext;
class InternalIterator;
using std::unique_ptr;
@ -79,7 +80,8 @@ class BlockBasedTable : public TableReader {
// Returns a new iterator over the table contents.
// The result of NewIterator() is initially invalid (caller must
// call one of the Seek methods on the iterator before using it).
Iterator* NewIterator(const ReadOptions&, Arena* arena = nullptr) override;
InternalIterator* NewIterator(const ReadOptions&,
Arena* arena = nullptr) override;
Status Get(const ReadOptions& readOptions, const Slice& key,
GetContext* get_context) override;
@ -129,9 +131,9 @@ class BlockBasedTable : public TableReader {
class BlockEntryIteratorState;
  // input_iter: if it is not null, update this one and return it as InternalIterator
static Iterator* NewDataBlockIterator(Rep* rep, const ReadOptions& ro,
const Slice& index_value,
BlockIter* input_iter = nullptr);
static InternalIterator* NewDataBlockIterator(
Rep* rep, const ReadOptions& ro, const Slice& index_value,
BlockIter* input_iter = nullptr);
// For the following two functions:
// if `no_io == true`, we will not try to read filter/index from sst file
@ -148,8 +150,8 @@ class BlockBasedTable : public TableReader {
// 2. index is not present in block cache.
// 3. We disallowed any io to be performed, that is, read_options ==
// kBlockCacheTier
Iterator* NewIndexIterator(const ReadOptions& read_options,
BlockIter* input_iter = nullptr);
InternalIterator* NewIndexIterator(const ReadOptions& read_options,
BlockIter* input_iter = nullptr);
// Read block cache from block caches (if set): block_cache and
// block_cache_compressed.
@ -186,17 +188,16 @@ class BlockBasedTable : public TableReader {
// Optionally, user can pass a preloaded meta_index_iter for the index that
  // needs to access extra meta blocks for index construction. This parameter
// helps avoid re-reading meta index block if caller already created one.
Status CreateIndexReader(IndexReader** index_reader,
Iterator* preloaded_meta_index_iter = nullptr);
Status CreateIndexReader(
IndexReader** index_reader,
InternalIterator* preloaded_meta_index_iter = nullptr);
bool FullFilterKeyMayMatch(FilterBlockReader* filter,
const Slice& user_key) const;
// Read the meta block from sst.
static Status ReadMetaBlock(
Rep* rep,
std::unique_ptr<Block>* meta_block,
std::unique_ptr<Iterator>* iter);
static Status ReadMetaBlock(Rep* rep, std::unique_ptr<Block>* meta_block,
std::unique_ptr<InternalIterator>* iter);
// Create the filter from the filter block.
static FilterBlockReader* ReadFilter(Rep* rep, size_t* filter_size = nullptr);

@ -10,6 +10,7 @@
#include "rocksdb/comparator.h"
#include "rocksdb/iterator.h"
#include "rocksdb/slice_transform.h"
#include "table/internal_iterator.h"
#include "util/coding.h"
namespace rocksdb {
@ -53,8 +54,9 @@ Status CreateBlockHashIndex(const SliceTransform* hash_key_extractor,
}
BlockHashIndex* CreateBlockHashIndexOnTheFly(
Iterator* index_iter, Iterator* data_iter, const uint32_t num_restarts,
const Comparator* comparator, const SliceTransform* hash_key_extractor) {
InternalIterator* index_iter, InternalIterator* data_iter,
const uint32_t num_restarts, const Comparator* comparator,
const SliceTransform* hash_key_extractor) {
assert(hash_key_extractor);
auto hash_index = new BlockHashIndex(
hash_key_extractor,

@ -14,7 +14,7 @@
namespace rocksdb {
class Comparator;
class Iterator;
class InternalIterator;
class Slice;
class SliceTransform;
@ -79,7 +79,8 @@ Status CreateBlockHashIndex(const SliceTransform* hash_key_extractor,
// @params hash_key_extractor: extract the hashable part of a given key.
// On error, nullptr will be returned.
BlockHashIndex* CreateBlockHashIndexOnTheFly(
Iterator* index_iter, Iterator* data_iter, const uint32_t num_restarts,
const Comparator* comparator, const SliceTransform* hash_key_extractor);
InternalIterator* index_iter, InternalIterator* data_iter,
const uint32_t num_restarts, const Comparator* comparator,
const SliceTransform* hash_key_extractor);
} // namespace rocksdb

@ -11,6 +11,7 @@
#include "rocksdb/iterator.h"
#include "rocksdb/slice_transform.h"
#include "table/block_hash_index.h"
#include "table/internal_iterator.h"
#include "util/testharness.h"
#include "util/testutil.h"
@ -18,7 +19,7 @@ namespace rocksdb {
typedef std::map<std::string, std::string> Data;
class MapIterator : public Iterator {
class MapIterator : public InternalIterator {
public:
explicit MapIterator(const Data& data) : data_(data), pos_(data_.end()) {}

@ -96,7 +96,7 @@ TEST_F(BlockTest, SimpleTest) {
// read contents of block sequentially
int count = 0;
Iterator* iter = reader.NewIterator(options.comparator);
InternalIterator *iter = reader.NewIterator(options.comparator);
for (iter->SeekToFirst();iter->Valid(); count++, iter->Next()) {
// read kv from block
@ -170,10 +170,10 @@ void CheckBlockContents(BlockContents contents, const int max_key,
delete iter2;
}
std::unique_ptr<Iterator> hash_iter(
std::unique_ptr<InternalIterator> hash_iter(
reader1.NewIterator(BytewiseComparator(), nullptr, false));
std::unique_ptr<Iterator> regular_iter(
std::unique_ptr<InternalIterator> regular_iter(
reader2.NewIterator(BytewiseComparator()));
// Seek existent keys

@ -17,6 +17,7 @@
#include <vector>
#include "rocksdb/iterator.h"
#include "rocksdb/table.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/cuckoo_table_factory.h"
#include "table/get_context.h"
@ -173,7 +174,7 @@ void CuckooTableReader::Prepare(const Slice& key) {
}
}
class CuckooTableIterator : public Iterator {
class CuckooTableIterator : public InternalIterator {
public:
explicit CuckooTableIterator(CuckooTableReader* reader);
~CuckooTableIterator() {}
@ -348,16 +349,17 @@ Slice CuckooTableIterator::value() const {
return curr_value_;
}
extern Iterator* NewErrorIterator(const Status& status, Arena* arena);
extern InternalIterator* NewErrorInternalIterator(const Status& status,
Arena* arena);
Iterator* CuckooTableReader::NewIterator(
InternalIterator* CuckooTableReader::NewIterator(
const ReadOptions& read_options, Arena* arena) {
if (!status().ok()) {
return NewErrorIterator(
return NewErrorInternalIterator(
Status::Corruption("CuckooTableReader status is not okay."), arena);
}
if (read_options.total_order_seek) {
return NewErrorIterator(
return NewErrorInternalIterator(
Status::InvalidArgument("total_order_seek is not supported."), arena);
}
CuckooTableIterator* iter;

@ -24,6 +24,7 @@ namespace rocksdb {
class Arena;
class TableReader;
class InternalIterator;
class CuckooTableReader: public TableReader {
public:
@ -43,7 +44,8 @@ class CuckooTableReader: public TableReader {
Status Get(const ReadOptions& read_options, const Slice& key,
GetContext* get_context) override;
Iterator* NewIterator(const ReadOptions&, Arena* arena = nullptr) override;
InternalIterator* NewIterator(const ReadOptions&,
Arena* arena = nullptr) override;
void Prepare(const Slice& target) override;
// Report an approximation of how much memory has been used.

@ -148,7 +148,7 @@ class CuckooReaderTest : public testing::Test {
CuckooTableReader reader(ioptions, std::move(file_reader), file_size, ucomp,
GetSliceHash);
ASSERT_OK(reader.status());
Iterator* it = reader.NewIterator(ReadOptions(), nullptr);
InternalIterator* it = reader.NewIterator(ReadOptions(), nullptr);
ASSERT_OK(it->status());
ASSERT_TRUE(!it->Valid());
it->SeekToFirst();
@ -196,7 +196,7 @@ class CuckooReaderTest : public testing::Test {
ASSERT_TRUE(keys[num_items/2] == it->key());
ASSERT_TRUE(values[num_items/2] == it->value());
ASSERT_OK(it->status());
it->~Iterator();
it->~InternalIterator();
}
std::vector<std::string> keys;

@ -0,0 +1,75 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#pragma once
#include "rocksdb/iterator.h"
#include "rocksdb/status.h"
namespace rocksdb {
class InternalIterator : public Cleanable {
public:
InternalIterator() {}
virtual ~InternalIterator() {}
// An iterator is either positioned at a key/value pair, or
// not valid. This method returns true iff the iterator is valid.
virtual bool Valid() const = 0;
// Position at the first key in the source. The iterator is Valid()
// after this call iff the source is not empty.
virtual void SeekToFirst() = 0;
// Position at the last key in the source. The iterator is
// Valid() after this call iff the source is not empty.
virtual void SeekToLast() = 0;
  // Position at the first key in the source that is at or past target
// The iterator is Valid() after this call iff the source contains
// an entry that comes at or past target.
virtual void Seek(const Slice& target) = 0;
// Moves to the next entry in the source. After this call, Valid() is
// true iff the iterator was not positioned at the last entry in the source.
// REQUIRES: Valid()
virtual void Next() = 0;
// Moves to the previous entry in the source. After this call, Valid() is
// true iff the iterator was not positioned at the first entry in source.
// REQUIRES: Valid()
virtual void Prev() = 0;
// Return the key for the current entry. The underlying storage for
// the returned slice is valid only until the next modification of
// the iterator.
// REQUIRES: Valid()
virtual Slice key() const = 0;
// Return the value for the current entry. The underlying storage for
// the returned slice is valid only until the next modification of
// the iterator.
  // REQUIRES: Valid()
virtual Slice value() const = 0;
// If an error has occurred, return it. Else return an ok status.
// If non-blocking IO is requested and this operation cannot be
// satisfied without doing some IO, then this returns Status::Incomplete().
virtual Status status() const = 0;
private:
// No copying allowed
InternalIterator(const InternalIterator&) = delete;
InternalIterator& operator=(const InternalIterator&) = delete;
};
// Return an empty iterator (yields nothing).
extern InternalIterator* NewEmptyInternalIterator();
// Return an empty iterator with the specified status.
extern InternalIterator* NewErrorInternalIterator(const Status& status);
} // namespace rocksdb
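For orientation, here is a minimal sketch of a concrete InternalIterator over a pre-sorted, in-memory vector of key/value pairs. It is illustrative only and not part of this change; the class name and the linear Seek() are assumptions made for brevity.

#include <cassert>
#include <string>
#include <utility>
#include <vector>

#include "table/internal_iterator.h"

namespace rocksdb {

// Illustrative only: iterates a pre-sorted vector of (key, value) pairs.
class SortedVectorIterator : public InternalIterator {
 public:
  explicit SortedVectorIterator(
      std::vector<std::pair<std::string, std::string>> data)
      : data_(std::move(data)), pos_(data_.size()) {}

  virtual bool Valid() const override { return pos_ < data_.size(); }
  virtual void SeekToFirst() override { pos_ = 0; }
  virtual void SeekToLast() override {
    pos_ = data_.empty() ? data_.size() : data_.size() - 1;
  }
  virtual void Seek(const Slice& target) override {
    // Linear scan for the first key >= target; a real iterator would binary
    // search or use restart points.
    for (pos_ = 0;
         pos_ < data_.size() && Slice(data_[pos_].first).compare(target) < 0;
         ++pos_) {
    }
  }
  virtual void Next() override {
    assert(Valid());
    ++pos_;
  }
  virtual void Prev() override {
    assert(Valid());
    pos_ = (pos_ == 0) ? data_.size() : pos_ - 1;  // past-the-end == invalid
  }
  virtual Slice key() const override { return data_[pos_].first; }
  virtual Slice value() const override { return data_[pos_].second; }
  virtual Status status() const override { return Status::OK(); }

 private:
  std::vector<std::pair<std::string, std::string>> data_;
  size_t pos_;
};

}  // namespace rocksdb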

@ -8,17 +8,18 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "rocksdb/iterator.h"
#include "table/internal_iterator.h"
#include "table/iterator_wrapper.h"
#include "util/arena.h"
namespace rocksdb {
Iterator::Iterator() {
Cleanable::Cleanable() {
cleanup_.function = nullptr;
cleanup_.next = nullptr;
}
Iterator::~Iterator() {
Cleanable::~Cleanable() {
if (cleanup_.function != nullptr) {
(*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
for (Cleanup* c = cleanup_.next; c != nullptr; ) {
@ -30,7 +31,7 @@ Iterator::~Iterator() {
}
}
void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
void Cleanable::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
assert(func != nullptr);
Cleanup* c;
if (cleanup_.function == nullptr) {
@ -68,31 +69,62 @@ class EmptyIterator : public Iterator {
private:
Status status_;
};
class EmptyInternalIterator : public InternalIterator {
public:
explicit EmptyInternalIterator(const Status& s) : status_(s) {}
virtual bool Valid() const override { return false; }
virtual void Seek(const Slice& target) override {}
virtual void SeekToFirst() override {}
virtual void SeekToLast() override {}
virtual void Next() override { assert(false); }
virtual void Prev() override { assert(false); }
Slice key() const override {
assert(false);
return Slice();
}
Slice value() const override {
assert(false);
return Slice();
}
virtual Status status() const override { return status_; }
private:
Status status_;
};
} // namespace
Iterator* NewEmptyIterator() {
return new EmptyIterator(Status::OK());
}
Iterator* NewEmptyIterator(Arena* arena) {
Iterator* NewErrorIterator(const Status& status) {
return new EmptyIterator(status);
}
InternalIterator* NewEmptyInternalIterator() {
return new EmptyInternalIterator(Status::OK());
}
InternalIterator* NewEmptyInternalIterator(Arena* arena) {
if (arena == nullptr) {
return NewEmptyIterator();
return NewEmptyInternalIterator();
} else {
auto mem = arena->AllocateAligned(sizeof(EmptyIterator));
return new (mem) EmptyIterator(Status::OK());
return new (mem) EmptyInternalIterator(Status::OK());
}
}
Iterator* NewErrorIterator(const Status& status) {
return new EmptyIterator(status);
InternalIterator* NewErrorInternalIterator(const Status& status) {
return new EmptyInternalIterator(status);
}
Iterator* NewErrorIterator(const Status& status, Arena* arena) {
InternalIterator* NewErrorInternalIterator(const Status& status, Arena* arena) {
if (arena == nullptr) {
return NewErrorIterator(status);
return NewErrorInternalIterator(status);
} else {
auto mem = arena->AllocateAligned(sizeof(EmptyIterator));
return new (mem) EmptyIterator(status);
return new (mem) EmptyInternalIterator(status);
}
}
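The Cleanable base extracted here keeps the cleanup-chain behavior that previously lived in Iterator's destructor: every registered function runs exactly once when the object is destroyed. A hedged usage sketch follows; PinBlockToIterator, DeletePinnedBuffer, and the pinned std::string buffer are invented for the example.

#include <string>

#include "table/internal_iterator.h"

namespace {

// Frees the buffer that backs the iterator's key()/value() slices.
void DeletePinnedBuffer(void* arg1, void* /*arg2*/) {
  delete static_cast<std::string*>(arg1);
}

// Keeps `contents` alive for as long as `iter` is alive; the registered
// cleanup runs from Cleanable's destructor when the iterator goes away.
void PinBlockToIterator(rocksdb::InternalIterator* iter,
                        std::string* contents) {
  iter->RegisterCleanup(&DeletePinnedBuffer, contents, nullptr);
}

}  // namespace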

@ -9,7 +9,7 @@
#pragma once
#include "rocksdb/iterator.h"
#include "table/internal_iterator.h"
namespace rocksdb {
@ -20,13 +20,15 @@ namespace rocksdb {
class IteratorWrapper {
public:
IteratorWrapper(): iter_(nullptr), valid_(false) { }
explicit IteratorWrapper(Iterator* _iter) : iter_(nullptr) { Set(_iter); }
explicit IteratorWrapper(InternalIterator* _iter) : iter_(nullptr) {
Set(_iter);
}
~IteratorWrapper() {}
Iterator* iter() const { return iter_; }
InternalIterator* iter() const { return iter_; }
// Takes ownership of "iter" and will delete it when destroyed, or
// when Set() is invoked again.
void Set(Iterator* _iter) {
void Set(InternalIterator* _iter) {
delete iter_;
iter_ = _iter;
if (iter_ == nullptr) {
@ -40,7 +42,7 @@ class IteratorWrapper {
if (!is_arena_mode) {
delete iter_;
} else {
iter_->~Iterator();
iter_->~InternalIterator();
}
}
@ -64,16 +66,17 @@ class IteratorWrapper {
}
}
Iterator* iter_;
InternalIterator* iter_;
bool valid_;
Slice key_;
};
class Arena;
// Return an empty iterator (yields nothing) allocated from arena.
extern Iterator* NewEmptyIterator(Arena* arena);
extern InternalIterator* NewEmptyInternalIterator(Arena* arena);
// Return an empty iterator with the specified status, allocated from arena.
extern Iterator* NewErrorIterator(const Status& status, Arena* arena);
extern InternalIterator* NewErrorInternalIterator(const Status& status,
Arena* arena);
} // namespace rocksdb
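IteratorWrapper keeps caching the child's Valid() and key() so heap comparisons in the merging iterator avoid a virtual call per access; only the wrapped type changes here. Below is a hedged sketch of the ownership rules; MakeChildIterator stands in for any real factory such as TableReader::NewIterator().

#include "rocksdb/slice.h"
#include "table/internal_iterator.h"
#include "table/iterator_wrapper.h"

namespace rocksdb {

// Hypothetical factory; real callers pass table, block or memtable iterators.
InternalIterator* MakeChildIterator();

void ScanThroughWrapper() {
  IteratorWrapper wrapper(MakeChildIterator());  // wrapper now forwards calls
  for (wrapper.SeekToFirst(); wrapper.Valid(); wrapper.Next()) {
    Slice k = wrapper.key();  // served from the cached copy of the child key
    (void)k;
  }
  // The wrapper's own destructor (shown above) is empty, so the child is
  // released explicitly; pass true instead when the child was
  // placement-constructed in an arena.
  wrapper.DeleteIter(/*is_arena_mode=*/false);
}

}  // namespace rocksdb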

@ -14,6 +14,7 @@
#include "rocksdb/comparator.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "table/internal_iterator.h"
#include "table/iter_heap.h"
#include "table/iterator_wrapper.h"
#include "util/arena.h"
@ -32,10 +33,10 @@ typedef BinaryHeap<IteratorWrapper*, MinIteratorComparator> MergerMinIterHeap;
const size_t kNumIterReserve = 4;
class MergingIterator : public Iterator {
class MergingIterator : public InternalIterator {
public:
MergingIterator(const Comparator* comparator, Iterator** children, int n,
bool is_arena_mode)
MergingIterator(const Comparator* comparator, InternalIterator** children,
int n, bool is_arena_mode)
: is_arena_mode_(is_arena_mode),
comparator_(comparator),
current_(nullptr),
@ -53,7 +54,7 @@ class MergingIterator : public Iterator {
current_ = CurrentForward();
}
virtual void AddIterator(Iterator* iter) {
virtual void AddIterator(InternalIterator* iter) {
assert(direction_ == kForward);
children_.emplace_back(iter);
auto new_wrapper = children_.back();
@ -288,11 +289,12 @@ void MergingIterator::InitMaxHeap() {
}
}
Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n,
Arena* arena) {
InternalIterator* NewMergingIterator(const Comparator* cmp,
InternalIterator** list, int n,
Arena* arena) {
assert(n >= 0);
if (n == 0) {
return NewEmptyIterator(arena);
return NewEmptyInternalIterator(arena);
} else if (n == 1) {
return list[0];
} else {
@ -313,7 +315,7 @@ MergeIteratorBuilder::MergeIteratorBuilder(const Comparator* comparator,
merge_iter = new (mem) MergingIterator(comparator, nullptr, 0, true);
}
void MergeIteratorBuilder::AddIterator(Iterator* iter) {
void MergeIteratorBuilder::AddIterator(InternalIterator* iter) {
if (!use_merging_iter && first_iter != nullptr) {
merge_iter->AddIterator(first_iter);
use_merging_iter = true;
@ -325,7 +327,7 @@ void MergeIteratorBuilder::AddIterator(Iterator* iter) {
}
}
Iterator* MergeIteratorBuilder::Finish() {
InternalIterator* MergeIteratorBuilder::Finish() {
if (!use_merging_iter) {
return first_iter;
} else {

@ -14,7 +14,7 @@
namespace rocksdb {
class Comparator;
class Iterator;
class InternalIterator;
class Env;
class Arena;
@ -26,9 +26,9 @@ class Arena;
// key is present in K child iterators, it will be yielded K times.
//
// REQUIRES: n >= 0
extern Iterator* NewMergingIterator(const Comparator* comparator,
Iterator** children, int n,
Arena* arena = nullptr);
extern InternalIterator* NewMergingIterator(const Comparator* comparator,
InternalIterator** children, int n,
Arena* arena = nullptr);
class MergingIterator;
@ -41,18 +41,18 @@ class MergeIteratorBuilder {
~MergeIteratorBuilder() {}
// Add iter to the merging iterator.
void AddIterator(Iterator* iter);
void AddIterator(InternalIterator* iter);
// Get arena used to build the merging iterator. It is called when a child
// iterator needs to be allocated.
Arena* GetArena() { return arena; }
// Return the result merging iterator.
Iterator* Finish();
InternalIterator* Finish();
private:
MergingIterator* merge_iter;
Iterator* first_iter;
InternalIterator* first_iter;
bool use_merging_iter;
Arena* arena;
};
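To show how the builder above is meant to be driven, here is a hedged sketch of merging several children allocated from the builder's arena. The MakeChildIter factory and BuildMergedIterator wrapper are invented; real callers add memtable, immutable-memtable and SST iterators.

#include "rocksdb/comparator.h"
#include "table/internal_iterator.h"
#include "table/merger.h"
#include "util/arena.h"

namespace rocksdb {

// Hypothetical child factory, e.g. a table or memtable iterator placed in the
// builder's arena.
InternalIterator* MakeChildIter(int which, Arena* arena);

InternalIterator* BuildMergedIterator(Arena* arena) {
  MergeIteratorBuilder builder(BytewiseComparator(), arena);
  for (int i = 0; i < 3; i++) {
    builder.AddIterator(MakeChildIter(i, builder.GetArena()));
  }
  // With one child Finish() returns it directly; with more it returns the
  // arena-allocated MergingIterator that owns all of them.
  return builder.Finish();
}

}  // namespace rocksdb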

@ -88,7 +88,7 @@ class MergerTest : public testing::Test {
void Generate(size_t num_iterators, size_t strings_per_iterator,
int letters_per_string) {
std::vector<Iterator*> small_iterators;
std::vector<InternalIterator*> small_iterators;
for (size_t i = 0; i < num_iterators; ++i) {
auto strings = GenerateStrings(strings_per_iterator, letters_per_string);
small_iterators.push_back(new test::VectorIterator(strings));
@ -102,8 +102,8 @@ class MergerTest : public testing::Test {
}
Random rnd_;
std::unique_ptr<Iterator> merging_iterator_;
std::unique_ptr<Iterator> single_iterator_;
std::unique_ptr<InternalIterator> merging_iterator_;
std::unique_ptr<InternalIterator> single_iterator_;
std::vector<std::string> all_keys_;
};

@ -12,6 +12,7 @@
#include "rocksdb/table_properties.h"
#include "table/block.h"
#include "table/format.h"
#include "table/internal_iterator.h"
#include "table/table_properties_internal.h"
#include "util/coding.h"
@ -152,7 +153,7 @@ Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file,
}
Block properties_block(std::move(block_contents));
std::unique_ptr<Iterator> iter(
std::unique_ptr<InternalIterator> iter(
properties_block.NewIterator(BytewiseComparator()));
auto new_table_properties = new TableProperties();
@ -237,7 +238,7 @@ Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size,
return s;
}
Block metaindex_block(std::move(metaindex_contents));
std::unique_ptr<Iterator> meta_iter(
std::unique_ptr<InternalIterator> meta_iter(
metaindex_block.NewIterator(BytewiseComparator()));
// -- Read property block
@ -258,7 +259,7 @@ Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size,
return s;
}
Status FindMetaBlock(Iterator* meta_index_iter,
Status FindMetaBlock(InternalIterator* meta_index_iter,
const std::string& meta_block_name,
BlockHandle* block_handle) {
meta_index_iter->Seek(meta_block_name);
@ -292,7 +293,7 @@ Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size,
}
Block metaindex_block(std::move(metaindex_contents));
std::unique_ptr<Iterator> meta_iter;
std::unique_ptr<InternalIterator> meta_iter;
meta_iter.reset(metaindex_block.NewIterator(BytewiseComparator()));
return FindMetaBlock(meta_iter.get(), meta_block_name, block_handle);
@ -323,7 +324,7 @@ Status ReadMetaBlock(RandomAccessFileReader* file, uint64_t file_size,
// Finding metablock
Block metaindex_block(std::move(metaindex_contents));
std::unique_ptr<Iterator> meta_iter;
std::unique_ptr<InternalIterator> meta_iter;
meta_iter.reset(metaindex_block.NewIterator(BytewiseComparator()));
BlockHandle block_handle;

@ -27,6 +27,7 @@ class Footer;
class Logger;
class RandomAccessFile;
struct TableProperties;
class InternalIterator;
class MetaIndexBuilder {
public:
@ -105,7 +106,7 @@ Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size,
Logger* info_log, TableProperties** properties);
// Find the meta block from the meta index block.
Status FindMetaBlock(Iterator* meta_index_iter,
Status FindMetaBlock(InternalIterator* meta_index_iter,
const std::string& meta_block_name,
BlockHandle* block_handle);

@ -28,7 +28,8 @@ stl_wrappers::KVMap MakeMockFile(
return stl_wrappers::KVMap(l, stl_wrappers::LessOfComparator(&icmp_));
}
Iterator* MockTableReader::NewIterator(const ReadOptions&, Arena* arena) {
InternalIterator* MockTableReader::NewIterator(const ReadOptions&,
Arena* arena) {
return new MockTableIterator(table_);
}

@ -17,6 +17,7 @@
#include "port/port.h"
#include "rocksdb/comparator.h"
#include "rocksdb/table.h"
#include "table/internal_iterator.h"
#include "table/table_builder.h"
#include "table/table_reader.h"
#include "util/mutexlock.h"
@ -39,7 +40,7 @@ class MockTableReader : public TableReader {
public:
explicit MockTableReader(const stl_wrappers::KVMap& table) : table_(table) {}
Iterator* NewIterator(const ReadOptions&, Arena* arena) override;
InternalIterator* NewIterator(const ReadOptions&, Arena* arena) override;
Status Get(const ReadOptions&, const Slice& key,
GetContext* get_context) override;
@ -58,7 +59,7 @@ class MockTableReader : public TableReader {
const stl_wrappers::KVMap& table_;
};
class MockTableIterator : public Iterator {
class MockTableIterator : public InternalIterator {
public:
explicit MockTableIterator(const stl_wrappers::KVMap& table) : table_(table) {
itr_ = table_.end();

@ -22,6 +22,7 @@
#include "table/bloom_block.h"
#include "table/filter_block.h"
#include "table/format.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/two_level_iterator.h"
#include "table/plain_table_factory.h"
@ -51,7 +52,7 @@ inline uint32_t GetFixed32Element(const char* base, size_t offset) {
} // namespace
// Iterator to iterate IndexedTable
class PlainTableIterator : public Iterator {
class PlainTableIterator : public InternalIterator {
public:
explicit PlainTableIterator(PlainTableReader* table, bool use_prefix_seek);
~PlainTableIterator();
@ -186,10 +187,10 @@ Status PlainTableReader::Open(const ImmutableCFOptions& ioptions,
void PlainTableReader::SetupForCompaction() {
}
Iterator* PlainTableReader::NewIterator(const ReadOptions& options,
Arena* arena) {
InternalIterator* PlainTableReader::NewIterator(const ReadOptions& options,
Arena* arena) {
if (options.total_order_seek && !IsTotalOrderMode()) {
return NewErrorIterator(
return NewErrorInternalIterator(
Status::InvalidArgument("total_order_seek not supported"), arena);
}
if (arena == nullptr) {

@ -38,6 +38,7 @@ class TableReader;
class InternalKeyComparator;
class PlainTableKeyDecoder;
class GetContext;
class InternalIterator;
using std::unique_ptr;
using std::unordered_map;
@ -77,7 +78,8 @@ class PlainTableReader: public TableReader {
size_t index_sparseness, size_t huge_page_tlb_size,
bool full_scan_mode);
Iterator* NewIterator(const ReadOptions&, Arena* arena = nullptr) override;
InternalIterator* NewIterator(const ReadOptions&,
Arena* arena = nullptr) override;
void Prepare(const Slice& target) override;

@ -7,22 +7,23 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "rocksdb/iterator.h"
#include "table/internal_iterator.h"
namespace rocksdb {
class ScopedArenaIterator {
public:
explicit ScopedArenaIterator(Iterator* iter = nullptr) : iter_(iter) {}
explicit ScopedArenaIterator(InternalIterator* iter = nullptr)
: iter_(iter) {}
Iterator* operator->() { return iter_; }
InternalIterator* operator->() { return iter_; }
void set(Iterator* iter) { iter_ = iter; }
void set(InternalIterator* iter) { iter_ = iter; }
Iterator* get() { return iter_; }
InternalIterator* get() { return iter_; }
~ScopedArenaIterator() { iter_->~Iterator(); }
~ScopedArenaIterator() { iter_->~InternalIterator(); }
private:
Iterator* iter_;
InternalIterator* iter_;
};
} // namespace rocksdb
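ScopedArenaIterator only switches the wrapped type, but it is the reason several call sites above invoke ~InternalIterator() directly: an arena-allocated iterator is placement-constructed inside the arena, so only its destructor may run and the arena reclaims the memory. A hedged sketch is below; ScanTable is an invented caller, any TableReader works.

#include "rocksdb/options.h"
#include "table/scoped_arena_iterator.h"
#include "table/table_reader.h"
#include "util/arena.h"

namespace rocksdb {

void ScanTable(TableReader* table_reader) {
  Arena arena;
  // NewIterator() placement-constructs the iterator inside `arena`, so it
  // must not be released with `delete`.
  ScopedArenaIterator iter(table_reader->NewIterator(ReadOptions(), &arena));
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    // use iter->key() / iter->value()
  }
  // Leaving scope runs iter->~InternalIterator(); `arena` then frees the
  // memory when it is destroyed.
}

}  // namespace rocksdb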

@ -8,6 +8,7 @@
#include "rocksdb/iterator.h"
#include "rocksdb/env.h"
#include "port/port.h"
#include "table/internal_iterator.h"
#include "util/string_util.h"
namespace rocksdb {
@ -114,7 +115,7 @@ extern const std::string kPropertiesBlockOldName = "rocksdb.stats";
// Seek to the properties block.
// Return true if it successfully seeks to the properties block.
Status SeekToPropertiesBlock(Iterator* meta_iter, bool* is_found) {
Status SeekToPropertiesBlock(InternalIterator* meta_iter, bool* is_found) {
*is_found = true;
meta_iter->Seek(kPropertiesBlock);
if (meta_iter->status().ok() &&

@ -10,9 +10,11 @@
namespace rocksdb {
class InternalIterator;
// Seek to the properties block.
// If it successfully seeks to the properties block, "is_found" will be
// set to true.
Status SeekToPropertiesBlock(Iterator* meta_iter, bool* is_found);
Status SeekToPropertiesBlock(InternalIterator* meta_iter, bool* is_found);
} // namespace rocksdb

@ -19,6 +19,7 @@ class Arena;
struct ReadOptions;
struct TableProperties;
class GetContext;
class InternalIterator;
// A Table is a sorted map from strings to strings. Tables are
// immutable and persistent. A Table may be safely accessed from
@ -34,7 +35,8 @@ class TableReader {
// When destroying the iterator, the caller will not call "delete"
  // but InternalIterator::~InternalIterator() directly. The destructor needs to destroy
// all the states but those allocated in arena.
virtual Iterator* NewIterator(const ReadOptions&, Arena* arena = nullptr) = 0;
virtual InternalIterator* NewIterator(const ReadOptions&,
Arena* arena = nullptr) = 0;
// Given a key, return an approximate byte offset in the file where
// the data for that key begins (or would begin if the key were

@ -19,6 +19,7 @@ int main() {
#include "db/db_impl.h"
#include "db/dbformat.h"
#include "table/block_based_table_factory.h"
#include "table/internal_iterator.h"
#include "table/plain_table_factory.h"
#include "table/table_builder.h"
#include "table/get_context.h"
@ -187,14 +188,17 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
std::string end_key = MakeKey(r1, r2 + r2_len, through_db);
uint64_t total_time = 0;
uint64_t start_time = Now(env, measured_by_nanosecond);
Iterator* iter;
Iterator* iter = nullptr;
InternalIterator* iiter = nullptr;
if (!through_db) {
iter = table_reader->NewIterator(read_options);
iiter = table_reader->NewIterator(read_options);
} else {
iter = db->NewIterator(read_options);
}
int count = 0;
for(iter->Seek(start_key); iter->Valid(); iter->Next()) {
for (through_db ? iter->Seek(start_key) : iiter->Seek(start_key);
through_db ? iter->Valid() : iiter->Valid();
through_db ? iter->Next() : iiter->Next()) {
if (if_query_empty_keys) {
break;
}

@ -36,11 +36,12 @@
#include "table/block_builder.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/plain_table_factory.h"
#include "table/scoped_arena_iterator.h"
#include "util/compression.h"
#include "util/random.h"
#include "util/scoped_arena_iterator.h"
#include "util/statistics.h"
#include "util/stl_wrappers.h"
#include "util/string_util.h"
@ -142,7 +143,7 @@ class Constructor {
const InternalKeyComparator& internal_comparator,
const stl_wrappers::KVMap& data) = 0;
virtual Iterator* NewIterator() const = 0;
virtual InternalIterator* NewIterator() const = 0;
virtual const stl_wrappers::KVMap& data() { return data_; }
@ -188,7 +189,7 @@ class BlockConstructor: public Constructor {
block_ = new Block(std::move(contents));
return Status::OK();
}
virtual Iterator* NewIterator() const override {
virtual InternalIterator* NewIterator() const override {
return block_->NewIterator(comparator_);
}
@ -201,13 +202,14 @@ class BlockConstructor: public Constructor {
};
// A helper class that converts internal format keys into user keys
class KeyConvertingIterator: public Iterator {
class KeyConvertingIterator : public InternalIterator {
public:
explicit KeyConvertingIterator(Iterator* iter, bool arena_mode = false)
explicit KeyConvertingIterator(InternalIterator* iter,
bool arena_mode = false)
: iter_(iter), arena_mode_(arena_mode) {}
virtual ~KeyConvertingIterator() {
if (arena_mode_) {
iter_->~Iterator();
iter_->~InternalIterator();
} else {
delete iter_;
}
@ -241,7 +243,7 @@ class KeyConvertingIterator: public Iterator {
private:
mutable Status status_;
Iterator* iter_;
InternalIterator* iter_;
bool arena_mode_;
// No copying allowed
@ -301,9 +303,9 @@ class TableConstructor: public Constructor {
std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
}
virtual Iterator* NewIterator() const override {
virtual InternalIterator* NewIterator() const override {
ReadOptions ro;
Iterator* iter = table_reader_->NewIterator(ro);
InternalIterator* iter = table_reader_->NewIterator(ro);
if (convert_to_internal_key_) {
return new KeyConvertingIterator(iter);
} else {
@ -390,7 +392,7 @@ class MemTableConstructor: public Constructor {
}
return Status::OK();
}
virtual Iterator* NewIterator() const override {
virtual InternalIterator* NewIterator() const override {
return new KeyConvertingIterator(
memtable_->NewIterator(ReadOptions(), &arena_), true);
}
@ -408,6 +410,23 @@ class MemTableConstructor: public Constructor {
std::shared_ptr<SkipListFactory> table_factory_;
};
class InternalIteratorFromIterator : public InternalIterator {
public:
explicit InternalIteratorFromIterator(Iterator* it) : it_(it) {}
virtual bool Valid() const override { return it_->Valid(); }
virtual void Seek(const Slice& target) override { it_->Seek(target); }
virtual void SeekToFirst() override { it_->SeekToFirst(); }
virtual void SeekToLast() override { it_->SeekToLast(); }
virtual void Next() override { it_->Next(); }
virtual void Prev() override { it_->Prev(); }
Slice key() const override { return it_->key(); }
Slice value() const override { return it_->value(); }
virtual Status status() const override { return it_->status(); }
private:
unique_ptr<Iterator> it_;
};
class DBConstructor: public Constructor {
public:
explicit DBConstructor(const Comparator* cmp)
@ -434,8 +453,9 @@ class DBConstructor: public Constructor {
}
return Status::OK();
}
virtual Iterator* NewIterator() const override {
return db_->NewIterator(ReadOptions());
virtual InternalIterator* NewIterator() const override {
return new InternalIteratorFromIterator(db_->NewIterator(ReadOptions()));
}
virtual DB* db() const override { return db_; }
@ -705,7 +725,7 @@ class HarnessTest : public testing::Test {
void TestForwardScan(const std::vector<std::string>& keys,
const stl_wrappers::KVMap& data) {
Iterator* iter = constructor_->NewIterator();
InternalIterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid());
iter->SeekToFirst();
for (stl_wrappers::KVMap::const_iterator model_iter = data.begin();
@ -715,7 +735,7 @@ class HarnessTest : public testing::Test {
}
ASSERT_TRUE(!iter->Valid());
if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
iter->~Iterator();
iter->~InternalIterator();
} else {
delete iter;
}
@ -723,7 +743,7 @@ class HarnessTest : public testing::Test {
void TestBackwardScan(const std::vector<std::string>& keys,
const stl_wrappers::KVMap& data) {
Iterator* iter = constructor_->NewIterator();
InternalIterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid());
iter->SeekToLast();
for (stl_wrappers::KVMap::const_reverse_iterator model_iter = data.rbegin();
@ -733,7 +753,7 @@ class HarnessTest : public testing::Test {
}
ASSERT_TRUE(!iter->Valid());
if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
iter->~Iterator();
iter->~InternalIterator();
} else {
delete iter;
}
@ -742,7 +762,7 @@ class HarnessTest : public testing::Test {
void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
const stl_wrappers::KVMap& data) {
static const bool kVerbose = false;
Iterator* iter = constructor_->NewIterator();
InternalIterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid());
stl_wrappers::KVMap::const_iterator model_iter = data.begin();
if (kVerbose) fprintf(stderr, "---\n");
@ -806,7 +826,7 @@ class HarnessTest : public testing::Test {
}
}
if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
iter->~Iterator();
iter->~InternalIterator();
} else {
delete iter;
}
@ -830,7 +850,7 @@ class HarnessTest : public testing::Test {
}
}
std::string ToString(const Iterator* it) {
std::string ToString(const InternalIterator* it) {
if (!it->Valid()) {
return "END";
} else {
@ -1191,7 +1211,7 @@ TEST_F(BlockBasedTableTest, TotalOrderSeekOnHashIndex) {
auto* reader = c.GetTableReader();
ReadOptions ro;
ro.total_order_seek = true;
std::unique_ptr<Iterator> iter(reader->NewIterator(ro));
std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));
iter->Seek(InternalKey("b", 0, kTypeValue).Encode());
ASSERT_OK(iter->status());
@ -1275,7 +1295,8 @@ TEST_F(TableTest, HashIndexTest) {
auto props = reader->GetTableProperties();
ASSERT_EQ(5u, props->num_data_blocks);
std::unique_ptr<Iterator> hash_iter(reader->NewIterator(ReadOptions()));
std::unique_ptr<InternalIterator> hash_iter(
reader->NewIterator(ReadOptions()));
  // -- Find keys that do not exist, but have common prefix.
std::vector<std::string> prefixes = {"001", "003", "005", "007", "009"};
@ -1545,7 +1566,7 @@ TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
// -- PART 1: Open with regular block cache.
// Since block_cache is disabled, no cache activities will be involved.
unique_ptr<Iterator> iter;
unique_ptr<InternalIterator> iter;
int64_t last_cache_bytes_read = 0;
// At first, no block will be accessed.
@ -1778,7 +1799,7 @@ TEST_F(BlockBasedTableTest, BlockCacheLeak) {
const ImmutableCFOptions ioptions(opt);
c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);
unique_ptr<Iterator> iter(c.NewIterator());
unique_ptr<InternalIterator> iter(c.NewIterator());
iter->SeekToFirst();
while (iter->Valid()) {
iter->key();

@ -19,10 +19,10 @@ namespace rocksdb {
namespace {
class TwoLevelIterator: public Iterator {
class TwoLevelIterator : public InternalIterator {
public:
explicit TwoLevelIterator(TwoLevelIteratorState* state,
Iterator* first_level_iter,
InternalIterator* first_level_iter,
bool need_free_iter_and_state);
virtual ~TwoLevelIterator() {
@ -68,7 +68,7 @@ class TwoLevelIterator: public Iterator {
}
void SkipEmptyDataBlocksForward();
void SkipEmptyDataBlocksBackward();
void SetSecondLevelIterator(Iterator* iter);
void SetSecondLevelIterator(InternalIterator* iter);
void InitDataBlock();
TwoLevelIteratorState* state_;
@ -82,7 +82,7 @@ class TwoLevelIterator: public Iterator {
};
TwoLevelIterator::TwoLevelIterator(TwoLevelIteratorState* state,
Iterator* first_level_iter,
InternalIterator* first_level_iter,
bool need_free_iter_and_state)
: state_(state),
first_level_iter_(first_level_iter),
@ -168,7 +168,7 @@ void TwoLevelIterator::SkipEmptyDataBlocksBackward() {
}
}
void TwoLevelIterator::SetSecondLevelIterator(Iterator* iter) {
void TwoLevelIterator::SetSecondLevelIterator(InternalIterator* iter) {
if (second_level_iter_.iter() != nullptr) {
SaveError(second_level_iter_.status());
}
@ -186,7 +186,7 @@ void TwoLevelIterator::InitDataBlock() {
// second_level_iter is already constructed with this iterator, so
// no need to change anything
} else {
Iterator* iter = state_->NewSecondaryIterator(handle);
InternalIterator* iter = state_->NewSecondaryIterator(handle);
data_block_handle_.assign(handle.data(), handle.size());
SetSecondLevelIterator(iter);
}
@ -195,9 +195,10 @@ void TwoLevelIterator::InitDataBlock() {
} // namespace
Iterator* NewTwoLevelIterator(TwoLevelIteratorState* state,
Iterator* first_level_iter, Arena* arena,
bool need_free_iter_and_state) {
InternalIterator* NewTwoLevelIterator(TwoLevelIteratorState* state,
InternalIterator* first_level_iter,
Arena* arena,
bool need_free_iter_and_state) {
if (arena == nullptr) {
return new TwoLevelIterator(state, first_level_iter,
need_free_iter_and_state);

@ -23,7 +23,7 @@ struct TwoLevelIteratorState {
: check_prefix_may_match(_check_prefix_may_match) {}
virtual ~TwoLevelIteratorState() {}
virtual Iterator* NewSecondaryIterator(const Slice& handle) = 0;
virtual InternalIterator* NewSecondaryIterator(const Slice& handle) = 0;
virtual bool PrefixMayMatch(const Slice& internal_key) = 0;
// If call PrefixMayMatch()
@ -45,9 +45,8 @@ struct TwoLevelIteratorState {
// all the states but those allocated in arena.
// need_free_iter_and_state: free `state` and `first_level_iter` if
// true. Otherwise, just call destructor.
extern Iterator* NewTwoLevelIterator(TwoLevelIteratorState* state,
Iterator* first_level_iter,
Arena* arena = nullptr,
bool need_free_iter_and_state = true);
extern InternalIterator* NewTwoLevelIterator(
TwoLevelIteratorState* state, InternalIterator* first_level_iter,
Arena* arena = nullptr, bool need_free_iter_and_state = true);
} // namespace rocksdb
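To make the state-callback contract above concrete, here is a hedged sketch of a minimal TwoLevelIteratorState: the first-level iterator yields handles, and NewSecondaryIterator turns each handle into a second-level iterator. DemoBlockSource, OpenBlock, DemoIteratorState and MakeTwoLevel are invented names; BlockBasedTable::BlockEntryIteratorState earlier in this diff is the real in-tree example.

#include "table/internal_iterator.h"
#include "table/two_level_iterator.h"

namespace rocksdb {

// Hypothetical block source standing in for BlockBasedTable::Rep.
class DemoBlockSource {
 public:
  // Decodes `handle` and returns an iterator over that block's entries.
  InternalIterator* OpenBlock(const Slice& handle);
};

class DemoIteratorState : public TwoLevelIteratorState {
 public:
  explicit DemoIteratorState(DemoBlockSource* source)
      : TwoLevelIteratorState(false /* _check_prefix_may_match */),
        source_(source) {}

  InternalIterator* NewSecondaryIterator(const Slice& handle) override {
    return source_->OpenBlock(handle);
  }
  bool PrefixMayMatch(const Slice& /*internal_key*/) override { return true; }

 private:
  DemoBlockSource* source_;
};

// `first_level_iter` yields block handles as its values; with the default
// need_free_iter_and_state == true the two-level iterator frees both the
// state and the first-level iterator when it is destroyed.
InternalIterator* MakeTwoLevel(DemoBlockSource* source,
                               InternalIterator* first_level_iter) {
  return NewTwoLevelIterator(new DemoIteratorState(source), first_level_iter);
}

}  // namespace rocksdb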

@ -21,11 +21,11 @@
#include "rocksdb/write_batch.h"
#include "rocksdb/cache.h"
#include "rocksdb/table_properties.h"
#include "table/scoped_arena_iterator.h"
#include "port/dirent.h"
#include "util/coding.h"
#include "util/sst_dump_tool_imp.h"
#include "util/string_util.h"
#include "util/scoped_arena_iterator.h"
#include "utilities/ttl/db_ttl_impl.h"
#include <cstdlib>

@ -127,7 +127,7 @@ uint64_t SstFileReader::CalculateCompressedTableSize(
tb_options,
TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
dest_writer.get()));
unique_ptr<Iterator> iter(table_reader_->NewIterator(ReadOptions()));
unique_ptr<InternalIterator> iter(table_reader_->NewIterator(ReadOptions()));
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
if (!iter->status().ok()) {
fputs(iter->status().ToString().c_str(), stderr);
@ -261,8 +261,8 @@ Status SstFileReader::ReadSequential(bool print_kv,
return init_result_;
}
Iterator* iter = table_reader_->NewIterator(ReadOptions(verify_checksum_,
false));
InternalIterator* iter =
table_reader_->NewIterator(ReadOptions(verify_checksum_, false));
uint64_t i = 0;
if (has_from) {
InternalKey ikey;

@ -17,6 +17,7 @@
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "rocksdb/slice.h"
#include "table/internal_iterator.h"
#include "util/mutexlock.h"
#include "util/random.h"
@ -127,7 +128,7 @@ class SimpleSuffixReverseComparator : public Comparator {
extern const Comparator* Uint64Comparator();
// Iterator over a vector of keys/values
class VectorIterator : public Iterator {
class VectorIterator : public InternalIterator {
public:
explicit VectorIterator(const std::vector<std::string>& keys)
: keys_(keys), current_(keys.size()) {
