// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#ifndef MERGE_HELPER_H
|
|
|
|
#define MERGE_HELPER_H
|
|
|
|
|
|
|
|
#include <deque>
#include <memory>
#include <string>

#include "db/dbformat.h"
#include "rocksdb/slice.h"
#include "rocksdb/statistics.h"
namespace rocksdb {
|
|
|
|
|
|
|
|
class Comparator;
|
|
|
|
class Iterator;
|
|
|
|
class Logger;
|
|
|
|
class MergeOperator;
|
|
|
|
|
|
|
|
class MergeHelper {
|
|
|
|
public:
|
|
|
|
MergeHelper(const Comparator* user_comparator,
|
|
|
|
const MergeOperator* user_merge_operator,
|
|
|
|
Logger* logger,
|
|
|
|
bool assert_valid_internal_key)
|
|
|
|
: user_comparator_(user_comparator),
|
|
|
|
user_merge_operator_(user_merge_operator),
|
|
|
|
logger_(logger),
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
12 years ago
|
|
|
assert_valid_internal_key_(assert_valid_internal_key),
|
|
|
|
keys_(),
|
|
|
|
operands_(),
|
|
|
|
success_(false) {}
|
|
|
|
|
|
|
|
// Merge entries until we hit
|
|
|
|
// - a corrupted key
|
|
|
|
// - a Put/Delete,
|
|
|
|
// - a different user key,
|
|
|
|
// - a specific sequence number (snapshot boundary),
|
|
|
|
// or - the end of iteration
|
|
|
|
// iter: (IN) points to the first merge type entry
|
|
|
|
// (OUT) points to the first entry not included in the merge process
|
|
|
|
// stop_before: (IN) a sequence number that merge should not cross.
|
|
|
|
// 0 means no restriction
|
|
|
|
// at_bottom: (IN) true if the iterator covers the bottem level, which means
|
|
|
|
// we could reach the start of the history of this user key.
|
|
|
|
void MergeUntil(Iterator* iter, SequenceNumber stop_before = 0,
|
|
|
|
bool at_bottom = false, shared_ptr<Statistics> stats=nullptr);
|
|
|
|
|
|
|
|
// Query the merge result
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
12 years ago
|
|
|
// These are valid until the next MergeUntil call
|
|
|
|
// If the merging was successful:
|
|
|
|
// - IsSuccess() will be true
|
|
|
|
// - key() will have the latest sequence number of the merges.
|
|
|
|
// The type will be Put or Merge. See IMPORTANT 1 note, below.
|
|
|
|
// - value() will be the result of merging all the operands together
|
|
|
|
// - The user should ignore keys() and values().
|
|
|
|
//
|
|
|
|
// IMPORTANT 1: the key type could change after the MergeUntil call.
|
|
|
|
// Put/Delete + Merge + ... + Merge => Put
|
|
|
|
// Merge + ... + Merge => Merge
|
|
|
|
//
|
|
|
|
// If the merge operator is not associative, and if a Put/Delete is not found
|
|
|
|
// then the merging will be unsuccessful. In this case:
|
|
|
|
// - IsSuccess() will be false
|
|
|
|
// - keys() contains the list of internal keys seen in order of iteration.
|
|
|
|
// - values() contains the list of values (merges) seen in the same order.
|
|
|
|
// values() is parallel to keys() so that the first entry in
|
|
|
|
// keys() is the key associated with the first entry in values()
|
|
|
|
// and so on. These lists will be the same length.
|
|
|
|
// All of these pairs will be merges over the same user key.
|
|
|
|
// See IMPORTANT 2 note below.
|
|
|
|
// - The user should ignore key() and value().
|
|
|
|
//
|
|
|
|
// IMPORTANT 2: The entries were traversed in order from BACK to FRONT.
|
|
|
|
// So keys().back() was the first key seen by iterator.
|
|
|
|
// TODO: Re-style this comment to be like the first one
|
|
|
|
bool IsSuccess() { return success_; }
|
|
|
|
Slice key() { assert(success_); return Slice(keys_.back()); }
|
|
|
|
Slice value() { assert(success_); return Slice(operands_.back()); }
|
|
|
|
const std::deque<std::string>& keys() { assert(!success_); return keys_; }
|
|
|
|
const std::deque<std::string>& values() {
|
|
|
|
assert(!success_); return operands_;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
const Comparator* user_comparator_;
|
|
|
|
const MergeOperator* user_merge_operator_;
|
|
|
|
Logger* logger_;
|
|
|
|
bool assert_valid_internal_key_; // enforce no internal key corruption?
|
|
|
|
|
|
|
|
// the scratch area that holds the result of MergeUntil
|
|
|
|
// valid up to the next MergeUntil call
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
12 years ago
|
|
|
std::deque<std::string> keys_; // Keeps track of the sequence of keys seen
|
|
|
|
std::deque<std::string> operands_; // Parallel with keys_; stores the values
|
|
|
|
bool success_;
|
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace rocksdb
|
|
|
|
|
|
|
|
#endif
|