From fe349db57b3f3ec817324912996cddd37cf80405 Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Sat, 19 Nov 2016 14:14:35 -0800
Subject: [PATCH] Remove Arena in RangeDelAggregator

Summary: The Arena construction/destruction introduced significant overhead
to read-heavy workloads just by creating empty vectors for its blocks, so
avoid it in RangeDelAggregator.
Closes https://github.com/facebook/rocksdb/pull/1547

Differential Revision: D4207781

Pulled By: ajkr

fbshipit-source-id: 9d1c130
---
 db/builder.cc              |  2 +-
 db/builder.h               |  2 +-
 db/db_impl.cc              |  8 ++++----
 db/flush_job.cc            |  6 +++---
 db/memtable.cc             | 14 ++++++--------
 db/memtable.h              |  3 +--
 db/memtable_list.cc        |  4 ++--
 db/range_del_aggregator.cc | 17 ++++-------------
 db/range_del_aggregator.h  |  6 +-----
 db/repair.cc               |  2 +-
 db/write_batch_test.cc     | 14 ++++++++++----
 table/table_test.cc        | 15 ++++++++++-----
 12 files changed, 44 insertions(+), 49 deletions(-)

diff --git a/db/builder.cc b/db/builder.cc
index 90f1b2150..e31371d1b 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -62,7 +62,7 @@ Status BuildTable(
     const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions,
     const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
     TableCache* table_cache, InternalIterator* iter,
-    ScopedArenaIterator&& range_del_iter, FileMetaData* meta,
+    std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
     const InternalKeyComparator& internal_comparator,
     const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
         int_tbl_prop_collector_factories,
diff --git a/db/builder.h b/db/builder.h
index 39872fb2f..4b166d1f8 100644
--- a/db/builder.h
+++ b/db/builder.h
@@ -65,7 +65,7 @@ extern Status BuildTable(
     const std::string& dbname, Env* env, const ImmutableCFOptions& options,
     const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
     TableCache* table_cache, InternalIterator* iter,
-    ScopedArenaIterator&& range_del_iter, FileMetaData* meta,
+    std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
     const InternalKeyComparator& internal_comparator,
     const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
         int_tbl_prop_collector_factories,
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 3abb36092..2830ee055 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1779,7 +1779,7 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
       s = BuildTable(
           dbname_, env_, *cfd->ioptions(), mutable_cf_options, env_options_,
           cfd->table_cache(), iter.get(),
-          ScopedArenaIterator(mem->NewRangeTombstoneIterator(ro, &arena)),
+          std::unique_ptr<InternalIterator>(mem->NewRangeTombstoneIterator(ro)),
          &meta, cfd->internal_comparator(),
          cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
          snapshot_seqs, earliest_write_conflict_snapshot,
@@ -3858,11 +3858,11 @@ InternalIterator* DBImpl::NewInternalIterator(
   // Collect iterator for mutable mem
   merge_iter_builder.AddIterator(
       super_version->mem->NewIterator(read_options, arena));
-  ScopedArenaIterator range_del_iter;
+  std::unique_ptr<InternalIterator> range_del_iter;
   Status s;
   if (!read_options.ignore_range_deletions) {
-    range_del_iter.set(
-        super_version->mem->NewRangeTombstoneIterator(read_options, arena));
+    range_del_iter.reset(
+        super_version->mem->NewRangeTombstoneIterator(read_options));
     s = range_del_agg->AddTombstones(std::move(range_del_iter));
   }
   // Collect all needed child iterators for immutable memtables
diff --git a/db/flush_job.cc b/db/flush_job.cc
index 2a3faf073..ff09a8a16 100644
--- a/db/flush_job.cc
+++ b/db/flush_job.cc
@@ -256,7 +256,7 @@ Status FlushJob::WriteLevel0Table() {
         "[%s] [JOB %d] Flushing memtable with next log file: %" PRIu64 "\n",
        cfd_->GetName().c_str(), job_context_->job_id, m->GetNextLogNumber());
    memtables.push_back(m->NewIterator(ro, &arena));
-    range_del_iters.push_back(m->NewRangeTombstoneIterator(ro, &arena));
+    range_del_iters.push_back(m->NewRangeTombstoneIterator(ro));
    total_num_entries += m->num_entries();
    total_num_deletes += m->num_deletes();
    total_memory_usage += m->ApproximateMemoryUsage();
@@ -274,9 +274,9 @@ Status FlushJob::WriteLevel0Table() {
    ScopedArenaIterator iter(
        NewMergingIterator(&cfd_->internal_comparator(), &memtables[0],
                           static_cast<int>(memtables.size()), &arena));
-    ScopedArenaIterator range_del_iter(NewMergingIterator(
+    std::unique_ptr<InternalIterator> range_del_iter(NewMergingIterator(
        &cfd_->internal_comparator(), &range_del_iters[0],
-        static_cast<int>(range_del_iters.size()), &arena));
+        static_cast<int>(range_del_iters.size())));
    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
        "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": started",
        cfd_->GetName().c_str(), job_context_->job_id, meta_.fd.GetNumber());
diff --git a/db/memtable.cc b/db/memtable.cc
index 9e059409f..e31a69a0e 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -374,14 +374,12 @@ InternalIterator* MemTable::NewIterator(const ReadOptions& read_options,
 }

 InternalIterator* MemTable::NewRangeTombstoneIterator(
-    const ReadOptions& read_options, Arena* arena) {
-  assert(arena != nullptr);
+    const ReadOptions& read_options) {
   if (read_options.ignore_range_deletions) {
-    return NewEmptyInternalIterator(arena);
+    return NewEmptyInternalIterator();
   }
-  auto mem = arena->AllocateAligned(sizeof(MemTableIterator));
-  return new (mem) MemTableIterator(*this, read_options, arena,
-                                    true /* use_range_del_table */);
+  return new MemTableIterator(*this, read_options, nullptr /* arena */,
+                              true /* use_range_del_table */);
 }

 port::RWMutex* MemTable::GetLock(const Slice& key) {
@@ -652,8 +650,8 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
     if (prefix_bloom_) {
       PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
     }
-    ScopedArenaIterator range_del_iter(
-        NewRangeTombstoneIterator(read_opts, range_del_agg->GetArena()));
+    std::unique_ptr<InternalIterator> range_del_iter(
+        NewRangeTombstoneIterator(read_opts));
     Status status = range_del_agg->AddTombstones(std::move(range_del_iter));
     if (!status.ok()) {
       *s = status;
diff --git a/db/memtable.h b/db/memtable.h
index 3fee7549f..8b68bd8a4 100644
--- a/db/memtable.h
+++ b/db/memtable.h
@@ -161,8 +161,7 @@ class MemTable {
   // those allocated in arena.
   InternalIterator* NewIterator(const ReadOptions& read_options, Arena* arena);

-  InternalIterator* NewRangeTombstoneIterator(const ReadOptions& read_options,
-                                              Arena* arena);
+  InternalIterator* NewRangeTombstoneIterator(const ReadOptions& read_options);

   // Add an entry into memtable that maps key to value at the
   // specified sequence number and with the specified type.
diff --git a/db/memtable_list.cc b/db/memtable_list.cc
index 4442cf55a..7252c9730 100644
--- a/db/memtable_list.cc
+++ b/db/memtable_list.cc
@@ -156,8 +156,8 @@ Status MemTableListVersion::AddRangeTombstoneIterators(
     RangeDelAggregator* range_del_agg) {
   assert(range_del_agg != nullptr);
   for (auto& m : memlist_) {
-    ScopedArenaIterator range_del_iter(
-        m->NewRangeTombstoneIterator(read_opts, arena));
+    std::unique_ptr<InternalIterator> range_del_iter(
+        m->NewRangeTombstoneIterator(read_opts));
     Status s = range_del_agg->AddTombstones(std::move(range_del_iter));
     if (!s.ok()) {
       return s;
diff --git a/db/range_del_aggregator.cc b/db/range_del_aggregator.cc
index 9ae63ffdb..3ea8cda4f 100644
--- a/db/range_del_aggregator.cc
+++ b/db/range_del_aggregator.cc
@@ -86,16 +86,11 @@ bool RangeDelAggregator::ShouldAddTombstones(
   return false;
 }

-Status RangeDelAggregator::AddTombstones(ScopedArenaIterator input) {
-  return AddTombstones(input.release(), true /* arena */);
-}
-
 Status RangeDelAggregator::AddTombstones(
     std::unique_ptr<InternalIterator> input) {
-  return AddTombstones(input.release(), false /* arena */);
-}
-
-Status RangeDelAggregator::AddTombstones(InternalIterator* input, bool arena) {
+  if (input == nullptr) {
+    return Status::OK();
+  }
   input->SeekToFirst();
   bool first_iter = true;
   while (input->Valid()) {
@@ -115,11 +110,7 @@ Status RangeDelAggregator::AddTombstones(InternalIterator* input, bool arena) {
     input->Next();
   }
   if (!first_iter) {
-    rep_->pinned_iters_mgr_.PinIterator(input, arena);
-  } else if (arena) {
-    input->~InternalIterator();
-  } else {
-    delete input;
+    rep_->pinned_iters_mgr_.PinIterator(input.release(), false /* arena */);
   }
   return Status::OK();
 }
diff --git a/db/range_del_aggregator.h b/db/range_del_aggregator.h
index 8b9ddc4eb..8781fb297 100644
--- a/db/range_del_aggregator.h
+++ b/db/range_del_aggregator.h
@@ -57,7 +57,6 @@ class RangeDelAggregator {
   // Adds tombstones to the tombstone aggregation structure maintained by this
   // object.
   // @return non-OK status if any of the tombstone keys are corrupted.
-  Status AddTombstones(ScopedArenaIterator input);
   Status AddTombstones(std::unique_ptr<InternalIterator> input);

   // Writes tombstones covering a range to a table builder.
@@ -83,7 +82,6 @@
   void AddToBuilder(TableBuilder* builder, const Slice* lower_bound,
                     const Slice* upper_bound, FileMetaData* meta,
                     bool bottommost_level = false);
-  Arena* GetArena() { return &arena_; }
   bool IsEmpty();

  private:
@@ -103,13 +101,11 @@
   // once the first range deletion is encountered.
   void InitRep(const std::vector<SequenceNumber>& snapshots);

-  Status AddTombstones(InternalIterator* input, bool arena);
   TombstoneMap& GetTombstoneMap(SequenceNumber seq);

   SequenceNumber upper_bound_;
-  Arena arena_;  // must be destroyed after pinned_iters_mgr_ which references
-                 // memory in this arena
   std::unique_ptr<Rep> rep_;
   const InternalKeyComparator icmp_;
 };
+
 }  // namespace rocksdb
diff --git a/db/repair.cc b/db/repair.cc
index 89a1c74db..6a70e853d 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -381,7 +381,7 @@ class Repairer {
       status = BuildTable(
          dbname_, env_, *cfd->ioptions(), *cfd->GetLatestMutableCFOptions(),
          env_options_, table_cache_, iter.get(),
-         ScopedArenaIterator(mem->NewRangeTombstoneIterator(ro, &arena)),
+         std::unique_ptr<InternalIterator>(mem->NewRangeTombstoneIterator(ro)),
          &meta, cfd->internal_comparator(),
          cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
          {}, kMaxSequenceNumber, kNoCompression, CompressionOptions(), false,
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index 75d9e3e7f..a68069237 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -45,10 +45,16 @@ static std::string PrintContents(WriteBatch* b) {
   int merge_count = 0;
   for (int i = 0; i < 2; ++i) {
     Arena arena;
-    auto iter =
-        i == 0 ? ScopedArenaIterator(mem->NewIterator(ReadOptions(), &arena))
-               : ScopedArenaIterator(
-                     mem->NewRangeTombstoneIterator(ReadOptions(), &arena));
+    ScopedArenaIterator arena_iter_guard;
+    std::unique_ptr<InternalIterator> iter_guard;
+    InternalIterator* iter;
+    if (i == 0) {
+      iter = mem->NewIterator(ReadOptions(), &arena);
+      arena_iter_guard.set(iter);
+    } else {
+      iter = mem->NewRangeTombstoneIterator(ReadOptions());
+      iter_guard.reset(iter);
+    }
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       ParsedInternalKey ikey;
       memset((void*)&ikey, 0, sizeof(ikey));
diff --git a/table/table_test.cc b/table/table_test.cc
index a61800933..f2670acaf 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -2449,11 +2449,16 @@ TEST_F(MemTableTest, Simple) {

   for (int i = 0; i < 2; ++i) {
     Arena arena;
-    ScopedArenaIterator iter =
-        i == 0
-            ? ScopedArenaIterator(memtable->NewIterator(ReadOptions(), &arena))
-            : ScopedArenaIterator(
-                  memtable->NewRangeTombstoneIterator(ReadOptions(), &arena));
+    ScopedArenaIterator arena_iter_guard;
+    std::unique_ptr<InternalIterator> iter_guard;
+    InternalIterator* iter;
+    if (i == 0) {
+      iter = memtable->NewIterator(ReadOptions(), &arena);
+      arena_iter_guard.set(iter);
+    } else {
+      iter = memtable->NewRangeTombstoneIterator(ReadOptions());
+      iter_guard.reset(iter);
+    }
     iter->SeekToFirst();
     while (iter->Valid()) {
       fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
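
For reference, a minimal sketch (not part of the patch) of the calling
convention this change moves to: the caller heap-allocates the memtable's
range tombstone iterator and hands ownership to
RangeDelAggregator::AddTombstones through a std::unique_ptr, instead of
placement-allocating it in the aggregator's Arena. It mirrors the call sites
the patch edits in db/db_impl.cc, db/memtable.cc, and db/memtable_list.cc;
the helper name AddMemTableTombstones is hypothetical.

    // Illustrative sketch only -- follows the pattern the patch applies at
    // its call sites; AddMemTableTombstones is a hypothetical helper.
    #include <memory>
    #include <utility>

    #include "db/memtable.h"
    #include "db/range_del_aggregator.h"
    #include "table/internal_iterator.h"

    namespace rocksdb {

    Status AddMemTableTombstones(MemTable* mem, const ReadOptions& read_opts,
                                 RangeDelAggregator* range_del_agg) {
      // NewRangeTombstoneIterator() no longer takes an Arena; the iterator is
      // plain heap-allocated, so a unique_ptr can own it until AddTombstones
      // takes over.
      std::unique_ptr<InternalIterator> range_del_iter(
          mem->NewRangeTombstoneIterator(read_opts));
      // AddTombstones pins the released iterator when it yields tombstones;
      // otherwise the unique_ptr deletes it (a null input returns OK).
      return range_del_agg->AddTombstones(std::move(range_del_iter));
    }

    }  // namespace rocksdb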