// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <inttypes.h>
#include <algorithm>
#include <array>
#include <map>
#include <string>
#include <tuple>

#include "db/column_family.h"
#include "db/compaction_job.h"
#include "db/error_handler.h"
#include "db/version_set.h"
#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/mock_table.h"
#include "util/file_reader_writer.h"
|
|
|
|
#include "util/string_util.h"
|
|
|
|
#include "test_util/testharness.h"
|
|
|
|
#include "test_util/testutil.h"
|
|
|
|
#include "utilities/merge_operators.h"
|
|
|
|
|
|
|
|
namespace rocksdb {
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
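// Asserts that a freshly constructed CompactionJobStats is zero-initialized
// (and marked as a manual compaction) before the job runs.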
void VerifyInitializationOfCompactionJobStats(
    const CompactionJobStats& compaction_job_stats) {
#if !defined(IOS_CROSS_COMPILE)
  ASSERT_EQ(compaction_job_stats.elapsed_micros, 0U);

  ASSERT_EQ(compaction_job_stats.num_input_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_input_files, 0U);
  ASSERT_EQ(compaction_job_stats.num_input_files_at_output_level, 0U);

  ASSERT_EQ(compaction_job_stats.num_output_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_output_files, 0U);

  ASSERT_EQ(compaction_job_stats.is_manual_compaction, true);

  ASSERT_EQ(compaction_job_stats.total_input_bytes, 0U);
  ASSERT_EQ(compaction_job_stats.total_output_bytes, 0U);

  ASSERT_EQ(compaction_job_stats.total_input_raw_key_bytes, 0U);
  ASSERT_EQ(compaction_job_stats.total_input_raw_value_bytes, 0U);

  ASSERT_EQ(compaction_job_stats.smallest_output_key_prefix[0], 0);
  ASSERT_EQ(compaction_job_stats.largest_output_key_prefix[0], 0);

  ASSERT_EQ(compaction_job_stats.num_records_replaced, 0U);

  ASSERT_EQ(compaction_job_stats.num_input_deletion_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_expired_deletion_records, 0U);

  ASSERT_EQ(compaction_job_stats.num_corrupt_keys, 0U);
#endif  // !defined(IOS_CROSS_COMPILE)
}

}  // namespace

// TODO(icanadi) Make it simpler once we mock out VersionSet
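// Test fixture that drives CompactionJob directly over files created with
// MockTableFactory, without going through DBImpl.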
class CompactionJobTest : public testing::Test {
 public:
  CompactionJobTest()
      : env_(Env::Default()),
        dbname_(test::PerThreadDBPath("compaction_job_test")),
        db_options_(),
        mutable_cf_options_(cf_options_),
        table_cache_(NewLRUCache(50000, 16)),
        write_buffer_manager_(db_options_.db_write_buffer_size),
        versions_(new VersionSet(dbname_, &db_options_, env_options_,
                                 table_cache_.get(), &write_buffer_manager_,
                                 &write_controller_)),
        shutting_down_(false),
        preserve_deletes_seqnum_(0),
        mock_table_factory_(new mock::MockTableFactory()),
        error_handler_(nullptr, db_options_, &mutex_) {
    EXPECT_OK(env_->CreateDirIfMissing(dbname_));
    db_options_.db_paths.emplace_back(dbname_,
                                      std::numeric_limits<uint64_t>::max());
  }

  std::string GenerateFileName(uint64_t file_number) {
    FileMetaData meta;
    std::vector<DbPath> db_paths;
    db_paths.emplace_back(dbname_, std::numeric_limits<uint64_t>::max());
    meta.fd = FileDescriptor(file_number, 0, 0);
    return TableFileName(db_paths, meta.fd.GetNumber(), meta.fd.GetPathId());
  }

  std::string KeyStr(const std::string& user_key, const SequenceNumber seq_num,
                     const ValueType t) {
    return InternalKey(user_key, seq_num, t).Encode().ToString();
  }

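  // Writes `contents` into a mock SST file, then adds that file to `level`
  // of the current version via LogAndApply(), recording its key range and
  // sequence number bounds.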
  void AddMockFile(const stl_wrappers::KVMap& contents, int level = 0) {
    assert(contents.size() > 0);

    bool first_key = true;
    std::string smallest, largest;
    InternalKey smallest_key, largest_key;
    SequenceNumber smallest_seqno = kMaxSequenceNumber;
    SequenceNumber largest_seqno = 0;
    for (auto kv : contents) {
      ParsedInternalKey key;
      std::string skey;
      std::string value;
      std::tie(skey, value) = kv;
      ParseInternalKey(skey, &key);

      smallest_seqno = std::min(smallest_seqno, key.sequence);
      largest_seqno = std::max(largest_seqno, key.sequence);

      if (first_key ||
          cfd_->user_comparator()->Compare(key.user_key, smallest) < 0) {
        smallest.assign(key.user_key.data(), key.user_key.size());
        smallest_key.DecodeFrom(skey);
      }
      if (first_key ||
          cfd_->user_comparator()->Compare(key.user_key, largest) > 0) {
        largest.assign(key.user_key.data(), key.user_key.size());
        largest_key.DecodeFrom(skey);
      }

      first_key = false;
    }

    uint64_t file_number = versions_->NewFileNumber();
    EXPECT_OK(mock_table_factory_->CreateMockTable(
        env_, GenerateFileName(file_number), std::move(contents)));

    VersionEdit edit;
    edit.AddFile(level, file_number, 0, 10, smallest_key, largest_key,
                 smallest_seqno, largest_seqno, false);

    mutex_.Lock();
    versions_->LogAndApply(versions_->GetColumnFamilySet()->GetDefault(),
                           mutable_cf_options_, &edit, &mutex_);
    mutex_.Unlock();
  }

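  // Advances the VersionSet's allocated, published, and last sequence
  // numbers to sequence_number + 1.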
  void SetLastSequence(const SequenceNumber sequence_number) {
    versions_->SetLastAllocatedSequence(sequence_number + 1);
    versions_->SetLastPublishedSequence(sequence_number + 1);
    versions_->SetLastSequence(sequence_number + 1);
  }

  // returns expected result after compaction
  stl_wrappers::KVMap CreateTwoFiles(bool gen_corrupted_keys) {
    auto expected_results = mock::MakeMockFile();
    const int kKeysPerFile = 10000;
    const int kCorruptKeysPerFile = 200;
    const int kMatchingKeys = kKeysPerFile / 2;
    SequenceNumber sequence_number = 0;

    auto corrupt_id = [&](int id) {
      return gen_corrupted_keys && id > 0 && id <= kCorruptKeysPerFile;
    };

    for (int i = 0; i < 2; ++i) {
      auto contents = mock::MakeMockFile();
      for (int k = 0; k < kKeysPerFile; ++k) {
        auto key = ToString(i * kMatchingKeys + k);
        auto value = ToString(i * kKeysPerFile + k);
        InternalKey internal_key(key, ++sequence_number, kTypeValue);

        // This is how the key will look once it's written to the bottommost
        // file
        InternalKey bottommost_internal_key(
            key, 0, kTypeValue);

        if (corrupt_id(k)) {
          test::CorruptKeyType(&internal_key);
          test::CorruptKeyType(&bottommost_internal_key);
        }
        contents.insert({ internal_key.Encode().ToString(), value });
        if (i == 1 || k < kMatchingKeys || corrupt_id(k - kMatchingKeys)) {
          expected_results.insert(
              { bottommost_internal_key.Encode().ToString(), value });
        }
      }

      AddMockFile(contents);
    }

    SetLastSequence(sequence_number);

    return expected_results;
  }

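  // Destroys any previous DB state, writes a fresh MANIFEST/CURRENT pair,
  // and recovers a VersionSet whose default column family uses the mock
  // table factory (plus any configured merge operator / compaction filter).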
  void NewDB() {
    DestroyDB(dbname_, Options());
    EXPECT_OK(env_->CreateDirIfMissing(dbname_));
    versions_.reset(new VersionSet(dbname_, &db_options_, env_options_,
                                   table_cache_.get(), &write_buffer_manager_,
                                   &write_controller_));
    compaction_job_stats_.Reset();

    VersionEdit new_db;
    new_db.SetLogNumber(0);
    new_db.SetNextFile(2);
    new_db.SetLastSequence(0);

    const std::string manifest = DescriptorFileName(dbname_, 1);
    std::unique_ptr<WritableFile> file;
    Status s = env_->NewWritableFile(
        manifest, &file, env_->OptimizeForManifestWrite(env_options_));
    ASSERT_OK(s);
    std::unique_ptr<WritableFileWriter> file_writer(
        new WritableFileWriter(std::move(file), manifest, env_options_));
    {
      log::Writer log(std::move(file_writer), 0, false);
      std::string record;
      new_db.EncodeTo(&record);
      s = log.AddRecord(record);
    }
    ASSERT_OK(s);
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1, nullptr);

    std::vector<ColumnFamilyDescriptor> column_families;
    cf_options_.table_factory = mock_table_factory_;
    cf_options_.merge_operator = merge_op_;
    cf_options_.compaction_filter = compaction_filter_.get();
    column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);

    EXPECT_OK(versions_->Recover(column_families, false));
    cfd_ = versions_->GetColumnFamilySet()->GetDefault();
  }

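  // Builds a Compaction over `input_files`, runs a CompactionJob on it,
  // installs the result, and (when `verify` is set) checks the job stats and
  // compares the newest output file against `expected_results`.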
  void RunCompaction(
      const std::vector<std::vector<FileMetaData*>>& input_files,
      const stl_wrappers::KVMap& expected_results,
      const std::vector<SequenceNumber>& snapshots = {},
      SequenceNumber earliest_write_conflict_snapshot = kMaxSequenceNumber,
      int output_level = 1, bool verify = true,
      SnapshotListFetchCallback* snapshot_fetcher =
          SnapshotListFetchCallback::kDisabled) {
    auto cfd = versions_->GetColumnFamilySet()->GetDefault();

    size_t num_input_files = 0;
    std::vector<CompactionInputFiles> compaction_input_files;
    for (size_t level = 0; level < input_files.size(); level++) {
      auto level_files = input_files[level];
      CompactionInputFiles compaction_level;
      compaction_level.level = static_cast<int>(level);
      compaction_level.files.insert(compaction_level.files.end(),
                                    level_files.begin(), level_files.end());
      compaction_input_files.push_back(compaction_level);
      num_input_files += level_files.size();
    }

    Compaction compaction(cfd->current()->storage_info(), *cfd->ioptions(),
                          *cfd->GetLatestMutableCFOptions(),
                          compaction_input_files, output_level, 1024 * 1024,
                          10 * 1024 * 1024, 0, kNoCompression,
                          cfd->ioptions()->compression_opts, 0, {}, true);
    compaction.SetInputVersion(cfd->current());

    LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get());
    mutex_.Lock();
    EventLogger event_logger(db_options_.info_log.get());
    // TODO(yiwu) add a mock snapshot checker and add test for it.
    SnapshotChecker* snapshot_checker = nullptr;
    CompactionJob compaction_job(
        0, &compaction, db_options_, env_options_, versions_.get(),
        &shutting_down_, preserve_deletes_seqnum_, &log_buffer, nullptr,
        nullptr, nullptr, &mutex_, &error_handler_, snapshots,
        earliest_write_conflict_snapshot, snapshot_checker, table_cache_,
        &event_logger, false, false, dbname_, &compaction_job_stats_,
        Env::Priority::USER, snapshot_fetcher);
    VerifyInitializationOfCompactionJobStats(compaction_job_stats_);

    compaction_job.Prepare();
    mutex_.Unlock();
    Status s;
    s = compaction_job.Run();
    ASSERT_OK(s);
    mutex_.Lock();
    ASSERT_OK(compaction_job.Install(*cfd->GetLatestMutableCFOptions()));
    mutex_.Unlock();

    if (verify) {
      if (expected_results.size() == 0) {
        ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
        ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);
        ASSERT_EQ(compaction_job_stats_.num_output_files, 0U);
      } else {
        ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
        ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);
        ASSERT_EQ(compaction_job_stats_.num_output_files, 1U);
        mock_table_factory_->AssertLatestFile(expected_results);
      }
    }
  }

  Env* env_;
  std::string dbname_;
  EnvOptions env_options_;
  ImmutableDBOptions db_options_;
  ColumnFamilyOptions cf_options_;
  MutableCFOptions mutable_cf_options_;
  std::shared_ptr<Cache> table_cache_;
  WriteController write_controller_;
  WriteBufferManager write_buffer_manager_;
  std::unique_ptr<VersionSet> versions_;
  InstrumentedMutex mutex_;
  std::atomic<bool> shutting_down_;
  SequenceNumber preserve_deletes_seqnum_;
  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
  CompactionJobStats compaction_job_stats_;
  ColumnFamilyData* cfd_;
  std::unique_ptr<CompactionFilter> compaction_filter_;
  std::shared_ptr<MergeOperator> merge_op_;
  ErrorHandler error_handler_;
};

TEST_F(CompactionJobTest, Simple) {
  NewDB();

  auto expected_results = CreateTwoFiles(false);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto files = cfd->current()->storage_info()->LevelFiles(0);
  ASSERT_EQ(2U, files.size());
  RunCompaction({ files }, expected_results);
}

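// Each of the two input files carries kCorruptKeysPerFile (200) corrupted
// keys, so the job should report 400 corrupt keys in its stats.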
TEST_F(CompactionJobTest, SimpleCorrupted) {
  NewDB();

  auto expected_results = CreateTwoFiles(true);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto files = cfd->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
  ASSERT_EQ(compaction_job_stats_.num_corrupt_keys, 400U);
}

TEST_F(CompactionJobTest, SimpleDeletion) {
  NewDB();

  auto file1 = mock::MakeMockFile({{KeyStr("c", 4U, kTypeDeletion), ""},
                                   {KeyStr("c", 3U, kTypeValue), "val"}});
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("b", 2U, kTypeValue), "val"},
                                   {KeyStr("b", 1U, kTypeValue), "val"}});
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("b", 0U, kTypeValue), "val"}});

  SetLastSequence(4U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

TEST_F(CompactionJobTest, OutputNothing) {
  NewDB();

  auto file1 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"}});

  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 2U, kTypeDeletion), ""}});

  AddMockFile(file2);

  auto expected_results = mock::MakeMockFile();

  SetLastSequence(4U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

TEST_F(CompactionJobTest, SimpleOverwrite) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 3U, kTypeValue), "val2"},
      {KeyStr("b", 4U, kTypeValue), "val3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
                                   {KeyStr("b", 2U, kTypeValue), "val"}});
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "val2"},
                          {KeyStr("b", 0U, kTypeValue), "val3"}});

  SetLastSequence(4U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

TEST_F(CompactionJobTest, SimpleNonLastLevel) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeValue), "val2"},
      {KeyStr("b", 6U, kTypeValue), "val3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"},
                                   {KeyStr("b", 4U, kTypeValue), "val"}});
  AddMockFile(file2, 1);

  auto file3 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
                                   {KeyStr("b", 2U, kTypeValue), "val"}});
  AddMockFile(file3, 2);

  // Because level 1 is not the last level, the sequence numbers of a and b
  // cannot be set to 0
  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"},
                          {KeyStr("b", 6U, kTypeValue), "val3"}});

  SetLastSequence(6U);
  auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
  auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
  RunCompaction({lvl0_files, lvl1_files}, expected_results);
}

TEST_F(CompactionJobTest, SimpleMerge) {
  merge_op_ = MergeOperators::CreateStringAppendOperator();
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeMerge), "5"},
      {KeyStr("a", 4U, kTypeMerge), "4"},
      {KeyStr("a", 3U, kTypeValue), "3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile(
      {{KeyStr("b", 2U, kTypeMerge), "2"}, {KeyStr("b", 1U, kTypeValue), "1"}});
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"},
                          {KeyStr("b", 0U, kTypeValue), "1,2"}});

  SetLastSequence(5U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

TEST_F(CompactionJobTest, NonAssocMerge) {
  merge_op_ = MergeOperators::CreateStringAppendTESTOperator();
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeMerge), "5"},
      {KeyStr("a", 4U, kTypeMerge), "4"},
      {KeyStr("a", 3U, kTypeMerge), "3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile(
      {{KeyStr("b", 2U, kTypeMerge), "2"}, {KeyStr("b", 1U, kTypeMerge), "1"}});
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"},
                          {KeyStr("b", 0U, kTypeValue), "1,2"}});

  SetLastSequence(5U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

// Filters merge operands with value 10.
TEST_F(CompactionJobTest, MergeOperandFilter) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  auto file1 = mock::MakeMockFile(
      {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)},
       {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)},  // Filtered
       {KeyStr("a", 3U, kTypeMerge), test::EncodeInt(3U)}});
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(2U)},
      {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}  // Filtered
  });
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), test::EncodeInt(8U)},
                          {KeyStr("b", 0U, kTypeValue), test::EncodeInt(2U)}});

  SetLastSequence(5U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

TEST_F(CompactionJobTest, FilterSomeMergeOperands) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  auto file1 = mock::MakeMockFile(
      {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)},
       {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)},  // Filtered
       {KeyStr("a", 3U, kTypeValue), test::EncodeInt(5U)},
       {KeyStr("d", 8U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(3U)},
                          {KeyStr("c", 1U, kTypeValue), test::EncodeInt(7U)},
                          {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}});
  AddMockFile(file2);

  auto file3 =
      mock::MakeMockFile({{KeyStr("a", 1U, kTypeMerge), test::EncodeInt(3U)}});
  AddMockFile(file3, 2);

  auto expected_results = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeValue), test::EncodeInt(10U)},
      {KeyStr("c", 2U, kTypeValue), test::EncodeInt(10U)},
      {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}
      // b does not appear because the operands are filtered
  });

  SetLastSequence(5U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

// Test where all operands/merge results are filtered out.
TEST_F(CompactionJobTest, FilterAllMergeOperands) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  auto file1 =
      mock::MakeMockFile({{KeyStr("a", 11U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("a", 10U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("a", 9U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{KeyStr("b", 8U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 7U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 6U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 5U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 4U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 3U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 1U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file2);

  auto file3 =
      mock::MakeMockFile({{KeyStr("a", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file3, 2);

  SetLastSequence(11U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);

  stl_wrappers::KVMap empty_map;
  RunCompaction({files}, empty_map);
}

TEST_F(CompactionJobTest, SimpleSingleDelete) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeDeletion), ""},
      {KeyStr("b", 6U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"},
                                   {KeyStr("b", 4U, kTypeValue), "val"}});
  AddMockFile(file2);

  auto file3 = mock::MakeMockFile({
      {KeyStr("a", 1U, kTypeValue), "val"},
  });
  AddMockFile(file3, 2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 5U, kTypeDeletion), ""}});

  SetLastSequence(6U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

TEST_F(CompactionJobTest, SingleDeleteSnapshots) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
      {KeyStr("a", 12U, kTypeSingleDeletion), ""},
      {KeyStr("b", 21U, kTypeSingleDeletion), ""},
      {KeyStr("c", 22U, kTypeSingleDeletion), ""},
      {KeyStr("d", 9U, kTypeSingleDeletion), ""},
      {KeyStr("f", 21U, kTypeSingleDeletion), ""},
      {KeyStr("j", 11U, kTypeSingleDeletion), ""},
      {KeyStr("j", 9U, kTypeSingleDeletion), ""},
      {KeyStr("k", 12U, kTypeSingleDeletion), ""},
      {KeyStr("k", 11U, kTypeSingleDeletion), ""},
      {KeyStr("l", 3U, kTypeSingleDeletion), ""},
      {KeyStr("l", 2U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("0", 2U, kTypeSingleDeletion), ""},
      {KeyStr("a", 11U, kTypeValue), "val1"},
      {KeyStr("b", 11U, kTypeValue), "val2"},
      {KeyStr("c", 21U, kTypeValue), "val3"},
      {KeyStr("d", 8U, kTypeValue), "val4"},
      {KeyStr("e", 2U, kTypeSingleDeletion), ""},
      {KeyStr("f", 1U, kTypeValue), "val1"},
      {KeyStr("g", 11U, kTypeSingleDeletion), ""},
      {KeyStr("h", 2U, kTypeSingleDeletion), ""},
      {KeyStr("m", 12U, kTypeValue), "val1"},
      {KeyStr("m", 11U, kTypeSingleDeletion), ""},
      {KeyStr("m", 8U, kTypeValue), "val2"},
  });
AddMockFile(file2);
|
|
|
|
|
|
|
|
auto file3 = mock::MakeMockFile({
|
|
|
|
{KeyStr("A", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("e", 1U, kTypeValue), "val"},
|
|
|
|
});
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
9 years ago
|
|
|
AddMockFile(file3, 2);
|
|
|
|
|
|
|
|
auto expected_results = mock::MakeMockFile({
|
|
|
|
{KeyStr("A", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("a", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("a", 11U, kTypeValue), ""},
|
|
|
|
{KeyStr("b", 21U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("b", 11U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("c", 22U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("c", 21U, kTypeValue), ""},
|
|
|
|
{KeyStr("e", 2U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("f", 21U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("f", 1U, kTypeValue), "val1"},
|
|
|
|
{KeyStr("g", 11U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("j", 11U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("k", 11U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("m", 12U, kTypeValue), "val1"},
|
|
|
|
{KeyStr("m", 11U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("m", 8U, kTypeValue), "val2"},
|
|
|
|
});
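
  // A single deletion and its matching value are only dropped together when
  // no snapshot separates them. The compaction below runs with snapshots at
  // sequence numbers 10 and 20, so the entries above that straddle those
  // snapshots have to survive in the expected output.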

  SetLastSequence(22U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results, {10U, 20U}, 10U);
}

TEST_F(CompactionJobTest, EarliestWriteConflictSnapshot) {
  NewDB();

  // Test multiple snapshots where the earliest snapshot is not a
  // write-conflict snapshot.
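  //
  // Three snapshots (sequence numbers 10, 20 and 30) are passed to the
  // RunCompaction call below, with 20 passed as the earliest write-conflict
  // snapshot.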

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 24U, kTypeSingleDeletion), ""},
      {KeyStr("A", 23U, kTypeValue), "val"},
      {KeyStr("B", 24U, kTypeSingleDeletion), ""},
      {KeyStr("B", 23U, kTypeValue), "val"},
      {KeyStr("D", 24U, kTypeSingleDeletion), ""},
      {KeyStr("G", 32U, kTypeSingleDeletion), ""},
      {KeyStr("G", 31U, kTypeValue), "val"},
      {KeyStr("G", 24U, kTypeSingleDeletion), ""},
      {KeyStr("G", 23U, kTypeValue), "val2"},
      {KeyStr("H", 31U, kTypeValue), "val"},
      {KeyStr("H", 24U, kTypeSingleDeletion), ""},
      {KeyStr("H", 23U, kTypeValue), "val"},
      {KeyStr("I", 35U, kTypeSingleDeletion), ""},
      {KeyStr("I", 34U, kTypeValue), "val2"},
      {KeyStr("I", 33U, kTypeSingleDeletion), ""},
      {KeyStr("I", 32U, kTypeValue), "val3"},
      {KeyStr("I", 31U, kTypeSingleDeletion), ""},
      {KeyStr("J", 34U, kTypeValue), "val"},
      {KeyStr("J", 33U, kTypeSingleDeletion), ""},
      {KeyStr("J", 25U, kTypeValue), "val2"},
      {KeyStr("J", 24U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("A", 14U, kTypeSingleDeletion), ""},
      {KeyStr("A", 13U, kTypeValue), "val2"},
      {KeyStr("C", 14U, kTypeSingleDeletion), ""},
      {KeyStr("C", 13U, kTypeValue), "val"},
      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
      {KeyStr("F", 4U, kTypeSingleDeletion), ""},
      {KeyStr("F", 3U, kTypeValue), "val"},
      {KeyStr("G", 14U, kTypeSingleDeletion), ""},
      {KeyStr("G", 13U, kTypeValue), "val3"},
      {KeyStr("H", 14U, kTypeSingleDeletion), ""},
      {KeyStr("H", 13U, kTypeValue), "val2"},
      {KeyStr("I", 13U, kTypeValue), "val4"},
      {KeyStr("I", 12U, kTypeSingleDeletion), ""},
      {KeyStr("I", 11U, kTypeValue), "val5"},
      {KeyStr("J", 15U, kTypeValue), "val3"},
      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file2);

  auto expected_results = mock::MakeMockFile({
      {KeyStr("A", 24U, kTypeSingleDeletion), ""},
      {KeyStr("A", 23U, kTypeValue), ""},
      {KeyStr("B", 24U, kTypeSingleDeletion), ""},
      {KeyStr("B", 23U, kTypeValue), ""},
      {KeyStr("D", 24U, kTypeSingleDeletion), ""},
      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
      {KeyStr("G", 32U, kTypeSingleDeletion), ""},
      {KeyStr("G", 31U, kTypeValue), ""},
      {KeyStr("H", 31U, kTypeValue), "val"},
      {KeyStr("I", 35U, kTypeSingleDeletion), ""},
      {KeyStr("I", 34U, kTypeValue), ""},
      {KeyStr("I", 31U, kTypeSingleDeletion), ""},
      {KeyStr("I", 13U, kTypeValue), "val4"},
      {KeyStr("J", 34U, kTypeValue), "val"},
      {KeyStr("J", 33U, kTypeSingleDeletion), ""},
      {KeyStr("J", 25U, kTypeValue), "val2"},
      {KeyStr("J", 24U, kTypeSingleDeletion), ""},
      {KeyStr("J", 15U, kTypeValue), "val3"},
      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
  });

  SetLastSequence(24U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results, {10U, 20U, 30U}, 20U);
}

TEST_F(CompactionJobTest, SingleDeleteZeroSeq) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 10U, kTypeSingleDeletion), ""},
      {KeyStr("dummy", 5U, kTypeValue), "val2"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("A", 0U, kTypeValue), "val"},
  });
  AddMockFile(file2);

  auto expected_results = mock::MakeMockFile({
      {KeyStr("dummy", 0U, kTypeValue), "val2"},
  });
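
  // No snapshots are passed to RunCompaction below, so the SingleDelete of
  // "A" at sequence 10 and the value of "A" at sequence 0 are dropped
  // together; only "dummy" survives, with its sequence number zeroed out.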

  SetLastSequence(22U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results, {});
}

TEST_F(CompactionJobTest, MultiSingleDelete) {
  // Tests scenarios involving multiple single delete/put pairs:
  //
  // A: Put Snapshot SDel Put SDel -> Put Snapshot SDel
  // B: Snapshot Put SDel Put SDel Snapshot -> Snapshot SDel Snapshot
  // C: SDel Put SDel Snapshot Put -> Snapshot Put
  // D: (Put) SDel Snapshot Put SDel -> (Put) SDel Snapshot SDel
  // E: Put SDel Snapshot Put SDel -> Snapshot SDel
  // F: Put SDel Put SDel Snapshot -> removed
  // G: Snapshot SDel Put SDel Put -> Snapshot Put SDel
  // H: (Put) Put SDel Put SDel Snapshot -> removed
  // I: (Put) Snapshot Put SDel Put SDel -> SDel
  // J: Put Put SDel Put SDel SDel Snapshot Put Put SDel SDel Put
  //    -> Snapshot Put
  // K: SDel SDel Put SDel Put Put Snapshot SDel Put SDel SDel Put SDel
  //    -> Snapshot Put Snapshot SDel
  // L: SDel Put Del Put SDel Snapshot Del Put Del SDel Put SDel
  //    -> Snapshot SDel
  // M: (Put) SDel Put Del Put SDel Snapshot Put Del SDel Put SDel Del
  //    -> SDel Snapshot Del
  //
  // (Scenario A is spelled out in user-level operations in the sketch below.)
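
  // Illustrative sketch only: scenario A above corresponds roughly to a user
  // issuing the following operations on key "A", with a snapshot taken right
  // after the first put; the sequence numbers in parentheses match the mock
  // files below:
  //
  //   db->Put(WriteOptions(), "A", "val");    // (10)
  //   const Snapshot* snap = db->GetSnapshot();
  //   db->SingleDelete(WriteOptions(), "A");  // (12)
  //   db->Put(WriteOptions(), "A", "val5");   // (13)
  //   db->SingleDelete(WriteOptions(), "A");  // (14)
  //
  // The test bypasses the write path and feeds the equivalent internal keys
  // directly into mock SST files.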

  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 14U, kTypeSingleDeletion), ""},
      {KeyStr("A", 13U, kTypeValue), "val5"},
      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
      {KeyStr("B", 14U, kTypeSingleDeletion), ""},
      {KeyStr("B", 13U, kTypeValue), "val2"},
      {KeyStr("C", 14U, kTypeValue), "val3"},
      {KeyStr("D", 12U, kTypeSingleDeletion), ""},
      {KeyStr("D", 11U, kTypeValue), "val4"},
      {KeyStr("G", 15U, kTypeValue), "val"},
      {KeyStr("G", 14U, kTypeSingleDeletion), ""},
      {KeyStr("G", 13U, kTypeValue), "val"},
      {KeyStr("I", 14U, kTypeSingleDeletion), ""},
      {KeyStr("I", 13U, kTypeValue), "val"},
      {KeyStr("J", 15U, kTypeValue), "val"},
      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
      {KeyStr("J", 13U, kTypeSingleDeletion), ""},
      {KeyStr("J", 12U, kTypeValue), "val"},
      {KeyStr("J", 11U, kTypeValue), "val"},
      {KeyStr("K", 16U, kTypeSingleDeletion), ""},
      {KeyStr("K", 15U, kTypeValue), "val1"},
      {KeyStr("K", 14U, kTypeSingleDeletion), ""},
      {KeyStr("K", 13U, kTypeSingleDeletion), ""},
      {KeyStr("K", 12U, kTypeValue), "val2"},
      {KeyStr("K", 11U, kTypeSingleDeletion), ""},
      {KeyStr("L", 16U, kTypeSingleDeletion), ""},
      {KeyStr("L", 15U, kTypeValue), "val"},
      {KeyStr("L", 14U, kTypeSingleDeletion), ""},
      {KeyStr("L", 13U, kTypeDeletion), ""},
      {KeyStr("L", 12U, kTypeValue), "val"},
      {KeyStr("L", 11U, kTypeDeletion), ""},
      {KeyStr("M", 16U, kTypeDeletion), ""},
      {KeyStr("M", 15U, kTypeSingleDeletion), ""},
      {KeyStr("M", 14U, kTypeValue), "val"},
      {KeyStr("M", 13U, kTypeSingleDeletion), ""},
      {KeyStr("M", 12U, kTypeDeletion), ""},
      {KeyStr("M", 11U, kTypeValue), "val"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("A", 10U, kTypeValue), "val"},
      {KeyStr("B", 12U, kTypeSingleDeletion), ""},
      {KeyStr("B", 11U, kTypeValue), "val2"},
      {KeyStr("C", 10U, kTypeSingleDeletion), ""},
      {KeyStr("C", 9U, kTypeValue), "val6"},
      {KeyStr("C", 8U, kTypeSingleDeletion), ""},
      {KeyStr("D", 10U, kTypeSingleDeletion), ""},
      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
      {KeyStr("E", 11U, kTypeValue), "val"},
      {KeyStr("E", 5U, kTypeSingleDeletion), ""},
      {KeyStr("E", 4U, kTypeValue), "val"},
      {KeyStr("F", 6U, kTypeSingleDeletion), ""},
      {KeyStr("F", 5U, kTypeValue), "val"},
      {KeyStr("F", 4U, kTypeSingleDeletion), ""},
      {KeyStr("F", 3U, kTypeValue), "val"},
      {KeyStr("G", 12U, kTypeSingleDeletion), ""},
      {KeyStr("H", 6U, kTypeSingleDeletion), ""},
      {KeyStr("H", 5U, kTypeValue), "val"},
      {KeyStr("H", 4U, kTypeSingleDeletion), ""},
      {KeyStr("H", 3U, kTypeValue), "val"},
      {KeyStr("I", 12U, kTypeSingleDeletion), ""},
      {KeyStr("I", 11U, kTypeValue), "val"},
      {KeyStr("J", 6U, kTypeSingleDeletion), ""},
      {KeyStr("J", 5U, kTypeSingleDeletion), ""},
      {KeyStr("J", 4U, kTypeValue), "val"},
      {KeyStr("J", 3U, kTypeSingleDeletion), ""},
      {KeyStr("J", 2U, kTypeValue), "val"},
      {KeyStr("K", 8U, kTypeValue), "val3"},
      {KeyStr("K", 7U, kTypeValue), "val4"},
      {KeyStr("K", 6U, kTypeSingleDeletion), ""},
      {KeyStr("K", 5U, kTypeValue), "val5"},
      {KeyStr("K", 2U, kTypeSingleDeletion), ""},
      {KeyStr("K", 1U, kTypeSingleDeletion), ""},
      {KeyStr("L", 5U, kTypeSingleDeletion), ""},
      {KeyStr("L", 4U, kTypeValue), "val"},
      {KeyStr("L", 3U, kTypeDeletion), ""},
      {KeyStr("L", 2U, kTypeValue), "val"},
      {KeyStr("L", 1U, kTypeSingleDeletion), ""},
      {KeyStr("M", 10U, kTypeSingleDeletion), ""},
      {KeyStr("M", 7U, kTypeValue), "val"},
      {KeyStr("M", 5U, kTypeDeletion), ""},
      {KeyStr("M", 4U, kTypeValue), "val"},
      {KeyStr("M", 3U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file2);

  auto file3 = mock::MakeMockFile({
      {KeyStr("D", 1U, kTypeValue), "val"},
      {KeyStr("H", 1U, kTypeValue), "val"},
      {KeyStr("I", 2U, kTypeValue), "val"},
  });
  AddMockFile(file3, 2);

  auto file4 = mock::MakeMockFile({
      {KeyStr("M", 1U, kTypeValue), "val"},
  });
  AddMockFile(file4, 2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("A", 14U, kTypeSingleDeletion), ""},
                          {KeyStr("A", 13U, kTypeValue), ""},
                          {KeyStr("A", 12U, kTypeSingleDeletion), ""},
                          {KeyStr("A", 10U, kTypeValue), "val"},
                          {KeyStr("B", 14U, kTypeSingleDeletion), ""},
                          {KeyStr("B", 13U, kTypeValue), ""},
                          {KeyStr("C", 14U, kTypeValue), "val3"},
                          {KeyStr("D", 12U, kTypeSingleDeletion), ""},
                          {KeyStr("D", 11U, kTypeValue), ""},
                          {KeyStr("D", 10U, kTypeSingleDeletion), ""},
                          {KeyStr("E", 12U, kTypeSingleDeletion), ""},
                          {KeyStr("E", 11U, kTypeValue), ""},
                          {KeyStr("G", 15U, kTypeValue), "val"},
                          {KeyStr("G", 12U, kTypeSingleDeletion), ""},
                          {KeyStr("I", 14U, kTypeSingleDeletion), ""},
                          {KeyStr("I", 13U, kTypeValue), ""},
                          {KeyStr("J", 15U, kTypeValue), "val"},
                          {KeyStr("K", 16U, kTypeSingleDeletion), ""},
                          {KeyStr("K", 15U, kTypeValue), ""},
                          {KeyStr("K", 11U, kTypeSingleDeletion), ""},
                          {KeyStr("K", 8U, kTypeValue), "val3"},
                          {KeyStr("L", 16U, kTypeSingleDeletion), ""},
                          {KeyStr("L", 15U, kTypeValue), ""},
                          {KeyStr("M", 16U, kTypeDeletion), ""},
                          {KeyStr("M", 3U, kTypeSingleDeletion), ""}});

  SetLastSequence(22U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results, {10U}, 10U);
}

// This test documents the behavior where a corrupt key follows a deletion or a
// single deletion and the (single) deletion gets removed while the corrupt key
// gets written out. TODO(noetzli): We probably want a better way to treat
// corrupt keys.
TEST_F(CompactionJobTest, CorruptionAfterDeletion) {
  NewDB();
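
  // The trailing "true" argument to test::KeyStr() below marks the generated
  // internal key as corrupt.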

  auto file1 =
      mock::MakeMockFile({{test::KeyStr("A", 6U, kTypeValue), "val3"},
                          {test::KeyStr("a", 5U, kTypeDeletion), ""},
                          {test::KeyStr("a", 4U, kTypeValue, true), "val"}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{test::KeyStr("b", 3U, kTypeSingleDeletion), ""},
                          {test::KeyStr("b", 2U, kTypeValue, true), "val"},
                          {test::KeyStr("c", 1U, kTypeValue), "val2"}});
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{test::KeyStr("A", 0U, kTypeValue), "val3"},
                          {test::KeyStr("a", 0U, kTypeValue, true), "val"},
                          {test::KeyStr("b", 0U, kTypeValue, true), "val"},
                          {test::KeyStr("c", 0U, kTypeValue), "val2"}});

  SetLastSequence(6U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected_results);
}

// Test the snapshot fetcher in compaction
TEST_F(CompactionJobTest, SnapshotRefresh) {
  uint64_t time_seed = env_->NowMicros();
  printf("time_seed is %" PRIu64 "\n", time_seed);  // helps reproduce failures
  Random64 rand(time_seed);
  std::vector<SequenceNumber> db_snapshots;
  class SnapshotListFetchCallbackTest : public SnapshotListFetchCallback {
   public:
    SnapshotListFetchCallbackTest(Env* env, Random64& rand,
                                  std::vector<SequenceNumber>* snapshots)
        : SnapshotListFetchCallback(env, 0 /*no time delay*/,
                                    1 /*fetch after each key*/),
          rand_(rand),
          snapshots_(snapshots) {}
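
    // Called by the compaction whenever it refreshes its snapshot list. With
    // probability 1/2 this test callback releases one randomly chosen
    // snapshot from the shared list and publishes the shrunken list back to
    // the compaction.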
    virtual void Refresh(std::vector<SequenceNumber>* snapshots,
                         SequenceNumber) override {
      assert(snapshots->size());
      assert(snapshots_->size());
      assert(snapshots_->size() == snapshots->size());
      if (rand_.OneIn(2)) {
        uint64_t release_index = rand_.Uniform(snapshots_->size());
        snapshots_->erase(snapshots_->begin() + release_index);
        *snapshots = *snapshots_;
      }
    }

   private:
    Random64 rand_;
    std::vector<SequenceNumber>* snapshots_;
  } snapshot_fetcher(env_, rand, &db_snapshots);

  std::vector<std::pair<const std::string, std::string>> file1_kvs, file2_kvs;
  std::array<ValueType, 3> types = {kTypeValue, kTypeDeletion,
                                    kTypeSingleDeletion};
  SequenceNumber last_seq = 0;
  for (int i = 1; i < 100; i++) {
    SequenceNumber seq = last_seq + 1;
    last_seq = seq;
    if (rand.OneIn(2)) {
      auto type = types[rand.Uniform(types.size())];
      file1_kvs.push_back(
          {test::KeyStr("k" + ToString(i), seq, type), "v" + ToString(i)});
    }
  }
  auto file1 = mock::MakeMockFile(file1_kvs);
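
  // file2 reuses the same user keys but at strictly larger sequence numbers,
  // so wherever a key appears in both files its newest version is in file2.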
  for (int i = 1; i < 100; i++) {
    SequenceNumber seq = last_seq + 1;
    last_seq++;
    if (rand.OneIn(2)) {
      auto type = types[rand.Uniform(types.size())];
      file2_kvs.push_back(
          {test::KeyStr("k" + ToString(i), seq, type), "v" + ToString(i)});
    }
  }
  auto file2 = mock::MakeMockFile(file2_kvs);
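
  // Register roughly one in five sequence numbers as a snapshot; these are
  // the snapshots the fetcher above can release while the first compaction is
  // running.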
  for (SequenceNumber i = 1; i < last_seq + 1; i++) {
    if (rand.OneIn(5)) {
      db_snapshots.push_back(i);
    }
  }

  const bool kVerify = true;
  const int output_level_0 = 0;
  NewDB();
  AddMockFile(file1);
  AddMockFile(file2);
  SetLastSequence(last_seq);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  // Put the output on L0 since that makes it easier to feed it into the 2nd
  // compaction.
  RunCompaction({files}, file1, db_snapshots, kMaxSequenceNumber,
                output_level_0, !kVerify, &snapshot_fetcher);

  // Now db_snapshots has changed. Run the compaction again without the
  // snapshot fetcher but with the updated snapshot list.
  compaction_job_stats_.Reset();
  files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, file1, db_snapshots, kMaxSequenceNumber,
                output_level_0 + 1, !kVerify);
  // The result should match what we would get by running the compaction
  // without the snapshot fetcher on the updated list of snapshots.
  auto expected = mock_table_factory_->output();

  NewDB();
  AddMockFile(file1);
  AddMockFile(file2);
  SetLastSequence(last_seq);
  files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected, db_snapshots, kMaxSequenceNumber,
                output_level_0, !kVerify);
  // The 2nd compaction above would get rid of useless delete markers. To get
  // the output here to match exactly what we got above after two compactions,
  // we also run the compaction a 2nd time.
  compaction_job_stats_.Reset();
  files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({files}, expected, db_snapshots, kMaxSequenceNumber,
                output_level_0 + 1, !kVerify);
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

#else
#include <stdio.h>

int main(int /*argc*/, char** /*argv*/) {
  fprintf(stderr,
          "SKIPPED as CompactionJobStats is not supported in ROCKSDB_LITE\n");
  return 0;
}

#endif  // ROCKSDB_LITE