// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/iterator.h"

#include "memory/arena.h"
#include "table/internal_iterator.h"
#include "table/iterator_wrapper.h"

namespace ROCKSDB_NAMESPACE {

Cleanable::Cleanable() {
  cleanup_.function = nullptr;
  cleanup_.next = nullptr;
}

Cleanable::~Cleanable() { DoCleanup(); }

Cleanable::Cleanable(Cleanable&& other) {
  *this = std::move(other);
}

Cleanable& Cleanable::operator=(Cleanable&& other) {
  if (this != &other) {
    cleanup_ = other.cleanup_;
    other.cleanup_.function = nullptr;
    other.cleanup_.next = nullptr;
  }
  return *this;
}

// If the entire linked list were on the heap, we could simply attach one
// linked list to another. However, the head is an embedded object, to avoid
// the cost of creating objects for the common case where the Cleanable has
// only one Cleanup to do. We could put everything on the heap if benchmarks
// show no negative impact on performance.
// Also, we need to iterate over the linked list since there is no pointer to
// the tail. We could add a tail pointer, but maintaining it might negatively
// impact performance for the common case of one cleanup, where the tail
// pointer is not needed. Again, benchmarks could clarify that.
// Even without a tail pointer we could iterate over the list, find the tail,
// and update only that node without inserting the Cleanups one by one. This,
// however, would be redundant when the source Cleanable has only one or a
// few Cleanups, which is the case most of the time.
// TODO(myabandeh): if the list is too long we should maintain a tail pointer
// and have the entire list (minus the head, which has to be inserted
// separately) merged with the target linked list at once.
void Cleanable::DelegateCleanupsTo(Cleanable* other) {
  assert(other != nullptr);
  if (cleanup_.function == nullptr) {
    return;
  }
  Cleanup* c = &cleanup_;
  other->RegisterCleanup(c->function, c->arg1, c->arg2);
  c = c->next;
  while (c != nullptr) {
    Cleanup* next = c->next;
    other->RegisterCleanup(c);
    c = next;
  }
  cleanup_.function = nullptr;
  cleanup_.next = nullptr;
}
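
// A minimal usage sketch of the cleanup chain above (hypothetical caller
// code, not part of this file): the first cleanup occupies the embedded head,
// later ones go on the heap, and DelegateCleanupsTo() hands all of them to
// another Cleanable, which runs them when it is destroyed or cleaned up.
//
//   Cleanable source;
//   source.RegisterCleanup(
//       [](void* arg1, void* /*arg2*/) {
//         delete static_cast<std::string*>(arg1);
//       },
//       new std::string("pinned block"), nullptr);
//   {
//     Cleanable target;
//     source.DelegateCleanupsTo(&target);
//     // source no longer owns the cleanup; it runs when target is destroyed.
//   }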

void Cleanable::RegisterCleanup(Cleanable::Cleanup* c) {
  assert(c != nullptr);
  if (cleanup_.function == nullptr) {
    cleanup_.function = c->function;
    cleanup_.arg1 = c->arg1;
    cleanup_.arg2 = c->arg2;
    delete c;
  } else {
    c->next = cleanup_.next;
    cleanup_.next = c;
  }
}

void Cleanable::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
  assert(func != nullptr);
  Cleanup* c;
  if (cleanup_.function == nullptr) {
    c = &cleanup_;
  } else {
    c = new Cleanup;
    c->next = cleanup_.next;
    cleanup_.next = c;
  }
  c->function = func;
  c->arg1 = arg1;
  c->arg2 = arg2;
}

Status Iterator::GetProperty(std::string prop_name, std::string* prop) {
  if (prop == nullptr) {
    return Status::InvalidArgument("prop is nullptr");
  }
  if (prop_name == "rocksdb.iterator.is-key-pinned") {
    *prop = "0";
    return Status::OK();
  }
  return Status::InvalidArgument("Unidentified property.");
}
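
// Sketch of how this property is meant to be queried (hypothetical caller
// code): the base implementation above always reports "0"; an iterator that
// actually pins its keys could override GetProperty() and report "1".
//
//   std::string is_pinned;
//   Status s = iter->GetProperty("rocksdb.iterator.is-key-pinned", &is_pinned);
//   if (s.ok() && is_pinned == "1") {
//     // The Slice returned by key() is expected to stay valid for the
//     // iterator's lifetime.
//   }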

namespace {
class EmptyIterator : public Iterator {
 public:
  explicit EmptyIterator(const Status& s) : status_(s) {}
  bool Valid() const override { return false; }
  void Seek(const Slice& /*target*/) override {}
  void SeekForPrev(const Slice& /*target*/) override {}
  void SeekToFirst() override {}
  void SeekToLast() override {}
  void Next() override { assert(false); }
  void Prev() override { assert(false); }
  Slice key() const override {
    assert(false);
    return Slice();
  }
  Slice value() const override {
    assert(false);
    return Slice();
  }
  Status status() const override { return status_; }

 private:
  Status status_;
};

template <class TValue = Slice>
class EmptyInternalIterator : public InternalIteratorBase<TValue> {
 public:
  explicit EmptyInternalIterator(const Status& s) : status_(s) {}
  bool Valid() const override { return false; }
  void Seek(const Slice& /*target*/) override {}
  void SeekForPrev(const Slice& /*target*/) override {}
  void SeekToFirst() override {}
  void SeekToLast() override {}
  void Next() override { assert(false); }
  void Prev() override { assert(false); }
  Slice key() const override {
    assert(false);
    return Slice();
  }
  TValue value() const override {
    assert(false);
    return TValue();
  }
  Status status() const override { return status_; }

 private:
  Status status_;
};
}  // namespace

Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); }

Iterator* NewErrorIterator(const Status& status) {
  return new EmptyIterator(status);
}

template <class TValue>
InternalIteratorBase<TValue>* NewErrorInternalIterator(const Status& status) {
  return new EmptyInternalIterator<TValue>(status);
}

template InternalIteratorBase<IndexValue>* NewErrorInternalIterator(
    const Status& status);
template InternalIteratorBase<Slice>* NewErrorInternalIterator(
    const Status& status);

template <class TValue>
InternalIteratorBase<TValue>* NewErrorInternalIterator(const Status& status,
                                                       Arena* arena) {
  if (arena == nullptr) {
    return NewErrorInternalIterator<TValue>(status);
  } else {
    auto mem = arena->AllocateAligned(sizeof(EmptyInternalIterator<TValue>));
    return new (mem) EmptyInternalIterator<TValue>(status);
  }
}
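
// A minimal sketch of the arena contract assumed by the factory above
// (hypothetical caller code): when an Arena is supplied, the iterator is
// placement-new'ed into arena memory, so the caller must not delete it; the
// owner of the iterator tree invokes the destructor explicitly, and the
// memory itself is reclaimed when the arena is destroyed.
//
//   Arena arena;
//   InternalIteratorBase<Slice>* it = NewErrorInternalIterator<Slice>(
//       Status::Corruption("bad block"), &arena);
//   assert(!it->Valid() && it->status().IsCorruption());
//   it->~InternalIteratorBase<Slice>();  // no delete; arena owns the memory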

template InternalIteratorBase<IndexValue>* NewErrorInternalIterator(
    const Status& status, Arena* arena);
template InternalIteratorBase<Slice>* NewErrorInternalIterator(
    const Status& status, Arena* arena);

template <class TValue>
InternalIteratorBase<TValue>* NewEmptyInternalIterator() {
  return new EmptyInternalIterator<TValue>(Status::OK());
}

template InternalIteratorBase<IndexValue>* NewEmptyInternalIterator();
template InternalIteratorBase<Slice>* NewEmptyInternalIterator();

template <class TValue>
InternalIteratorBase<TValue>* NewEmptyInternalIterator(Arena* arena) {
  if (arena == nullptr) {
    return NewEmptyInternalIterator<TValue>();
  } else {
    auto mem = arena->AllocateAligned(sizeof(EmptyInternalIterator<TValue>));
    return new (mem) EmptyInternalIterator<TValue>(Status::OK());
  }
}

template InternalIteratorBase<IndexValue>* NewEmptyInternalIterator(
    Arena* arena);
template InternalIteratorBase<Slice>* NewEmptyInternalIterator(Arena* arena);

}  // namespace ROCKSDB_NAMESPACE