Smarter purging during flush

Summary:
Currently, we only purge duplicate keys and deletions during flush if `earliest_seqno_in_memtable > newest_snapshot` — that is, only when the newest snapshot happened before we first created the memtable. This condition is almost never true for MyRocks and MongoRocks, so they effectively never benefit from purging during flush.

This patch makes purging during flush able to understand snapshots. The main logic is copied from compaction_job.cc, although the logic over there is much more complicated and extensive. However, we should try to merge the common functionality at some point.

I need this patch to implement no_overwrite_i_promise functionality for flush. We'll also need this to support SingleDelete() during Flush(). @yoshinorim requested the feature.

Test Plan:
make check
I had to adjust some unit tests to account for this new behavior.

Reviewers: yhchiang, yoshinorim, anthony, sdong, noetzli

Reviewed By: noetzli

Subscribers: yoshinorim, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D42087
main
Igor Canadi 9 years ago
parent 4c81ac0c59
commit 4ab26c5ad1
  1. 231
      db/builder.cc
  2. 4
      db/builder.h
  3. 9
      db/db_impl.cc
  4. 16
      db/db_test.cc
  5. 15
      db/flush_job.cc
  6. 11
      db/flush_job.h
  7. 81
      db/flush_job_test.cc
  8. 2
      db/repair.cc

@ -9,6 +9,7 @@
#include "db/builder.h" #include "db/builder.h"
#include <algorithm>
#include <deque> #include <deque>
#include <vector> #include <vector>
@ -31,6 +32,28 @@
namespace rocksdb { namespace rocksdb {
namespace {
// Returns the sequence number of the earliest snapshot in which a key with
// sequence number `in` is visible, i.e. the smallest element of `snapshots`
// that is >= `in`. Returns kMaxSequenceNumber when no snapshot can see `in`
// (the key is only visible at the tip). `*prev_snapshot` is set to the
// largest snapshot strictly older than the returned one, or 0 when there is
// none. `snapshots` must be sorted in ascending order; an empty vector means
// there are no snapshots at all.
inline SequenceNumber EarliestVisibleSnapshot(
    SequenceNumber in, const std::vector<SequenceNumber>& snapshots,
    SequenceNumber* prev_snapshot) {
  assert(std::is_sorted(snapshots.begin(), snapshots.end()));
  // First snapshot whose sequence number is >= `in`; binary search replaces
  // the previous linear scan (snapshots is sorted).
  const auto it = std::lower_bound(snapshots.begin(), snapshots.end(), in);
  // 0 means no previous snapshot. This also covers the empty-vector case.
  *prev_snapshot = (it == snapshots.begin()) ? 0 : *(it - 1);
  return (it == snapshots.end()) ? kMaxSequenceNumber : *it;
}
}  // namespace
class TableFactory; class TableFactory;
TableBuilder* NewTableBuilder( TableBuilder* NewTableBuilder(
@ -53,9 +76,7 @@ Status BuildTable(
FileMetaData* meta, const InternalKeyComparator& internal_comparator, FileMetaData* meta, const InternalKeyComparator& internal_comparator,
const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>* const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
int_tbl_prop_collector_factories, int_tbl_prop_collector_factories,
const SequenceNumber newest_snapshot, std::vector<SequenceNumber> snapshots, const CompressionType compression,
const SequenceNumber earliest_seqno_in_memtable,
const CompressionType compression,
const CompressionOptions& compression_opts, bool paranoid_file_checks, const CompressionOptions& compression_opts, bool paranoid_file_checks,
InternalStats* internal_stats, const Env::IOPriority io_priority, InternalStats* internal_stats, const Env::IOPriority io_priority,
TableProperties* table_properties) { TableProperties* table_properties) {
@ -66,14 +87,6 @@ Status BuildTable(
meta->smallest_seqno = meta->largest_seqno = 0; meta->smallest_seqno = meta->largest_seqno = 0;
iter->SeekToFirst(); iter->SeekToFirst();
// If the sequence number of the smallest entry in the memtable is
// smaller than the most recent snapshot, then we do not trigger
// removal of duplicate/deleted keys as part of this builder.
bool purge = true;
if (earliest_seqno_in_memtable <= newest_snapshot) {
purge = false;
}
std::string fname = TableFileName(ioptions.db_paths, meta->fd.GetNumber(), std::string fname = TableFileName(ioptions.db_paths, meta->fd.GetNumber(),
meta->fd.GetPathId()); meta->fd.GetPathId());
if (iter->Valid()) { if (iter->Valid()) {
@ -107,112 +120,112 @@ Status BuildTable(
ioptions.min_partial_merge_operands, ioptions.min_partial_merge_operands,
true /* internal key corruption is not ok */); true /* internal key corruption is not ok */);
if (purge) { IterKey current_user_key;
bool has_current_user_key = false;
// If has_current_user_key == true, this variable remembers the earliest
// snapshot in which this current key already exists. If two internal keys
// have the same user key AND the earlier one should be visible in the
// snapshot in which we already have a user key, we can drop the earlier
// user key
SequenceNumber current_user_key_exists_in_snapshot = kMaxSequenceNumber;
while (iter->Valid()) {
// Get current key
ParsedInternalKey ikey;
Slice key = iter->key();
Slice value = iter->value();
// In-memory key corruption is not ok;
// TODO: find a clean way to treat in memory key corruption
// Ugly walkaround to avoid compiler error for release build // Ugly walkaround to avoid compiler error for release build
bool ok __attribute__((unused)) = true; bool ok __attribute__((unused)) = true;
ok = ParseInternalKey(key, &ikey);
assert(ok);
meta->smallest_seqno = std::min(meta->smallest_seqno, ikey.sequence);
meta->largest_seqno = std::max(meta->largest_seqno, ikey.sequence);
// If the key is the same as the previous key (and it is not the
// first key), then we skip it, since it is an older version.
// Otherwise we output the key and mark it as the "new" previous key.
if (!has_current_user_key ||
internal_comparator.user_comparator()->Compare(
ikey.user_key, current_user_key.GetKey()) != 0) {
// First occurrence of this user key
current_user_key.SetKey(ikey.user_key);
has_current_user_key = true;
current_user_key_exists_in_snapshot = 0;
}
// Will write to builder if current key != prev key // If there are no snapshots, then this kv affect visibility at tip.
ParsedInternalKey prev_ikey; // Otherwise, search though all existing snapshots to find
std::string prev_key; // the earlist snapshot that is affected by this kv.
bool is_first_key = true; // Also write if this is the very first key SequenceNumber prev_snapshot = 0; // 0 means no previous snapshot
SequenceNumber key_needs_to_exist_in_snapshot =
while (iter->Valid()) { EarliestVisibleSnapshot(ikey.sequence, snapshots, &prev_snapshot);
bool iterator_at_next = false;
if (current_user_key_exists_in_snapshot ==
// Get current key key_needs_to_exist_in_snapshot) {
ParsedInternalKey this_ikey; // If this user key already exists in snapshot in which it needs to
Slice key = iter->key(); // exist, we can drop it.
Slice value = iter->value(); // In other words, if the earliest snapshot is which this key is visible
// in is the same as the visibily of a previous instance of the
// In-memory key corruption is not ok; // same key, then this kv is not visible in any snapshot.
// TODO: find a clean way to treat in memory key corruption // Hidden by an newer entry for same user key
ok = ParseInternalKey(key, &this_ikey); iter->Next();
assert(ok); } else if (ikey.type == kTypeMerge) {
assert(this_ikey.sequence >= earliest_seqno_in_memtable); meta->largest.DecodeFrom(key);
// If the key is the same as the previous key (and it is not the
// first key), then we skip it, since it is an older version.
// Otherwise we output the key and mark it as the "new" previous key.
if (!is_first_key && !internal_comparator.user_comparator()->Compare(
prev_ikey.user_key, this_ikey.user_key)) {
// seqno within the same key are in decreasing order
assert(this_ikey.sequence < prev_ikey.sequence);
} else {
is_first_key = false;
if (this_ikey.type == kTypeMerge) {
// TODO(tbd): Add a check here to prevent RocksDB from crash when
// reopening a DB w/o properly specifying the merge operator. But
// currently we observed a memory leak on failing in RocksDB
// recovery, so we decide to let it crash instead of causing
// memory leak for now before we have identified the real cause
// of the memory leak.
// Handle merge-type keys using the MergeHelper
// TODO: pass statistics to MergeUntil
merge.MergeUntil(iter, 0 /* don't worry about snapshot */);
iterator_at_next = true;
// Write them out one-by-one. (Proceed back() to front())
// If the merge successfully merged the input into
// a kTypeValue, the list contains a single element.
const std::deque<std::string>& keys = merge.keys();
const std::deque<std::string>& values = merge.values();
assert(keys.size() == values.size() && keys.size() >= 1);
std::deque<std::string>::const_reverse_iterator key_iter;
std::deque<std::string>::const_reverse_iterator value_iter;
for (key_iter = keys.rbegin(), value_iter = values.rbegin();
key_iter != keys.rend() && value_iter != values.rend();
++key_iter, ++value_iter) {
builder->Add(Slice(*key_iter), Slice(*value_iter));
}
// Sanity check. Both iterators should end at the same time
assert(key_iter == keys.rend() && value_iter == values.rend());
prev_key.assign(keys.front());
ok = ParseInternalKey(Slice(prev_key), &prev_ikey);
assert(ok);
} else {
// Handle Put/Delete-type keys by simply writing them
builder->Add(key, value);
prev_key.assign(key.data(), key.size());
ok = ParseInternalKey(Slice(prev_key), &prev_ikey);
assert(ok);
}
}
if (io_priority == Env::IO_HIGH && // TODO(tbd): Add a check here to prevent RocksDB from crash when
IOSTATS(bytes_written) >= kReportFlushIOStatsEvery) { // reopening a DB w/o properly specifying the merge operator. But
ThreadStatusUtil::IncreaseThreadOperationProperty( // currently we observed a memory leak on failing in RocksDB
ThreadStatus::FLUSH_BYTES_WRITTEN, // recovery, so we decide to let it crash instead of causing
IOSTATS(bytes_written)); // memory leak for now before we have identified the real cause
IOSTATS_RESET(bytes_written); // of the memory leak.
// Handle merge-type keys using the MergeHelper
// TODO: pass statistics to MergeUntil
merge.MergeUntil(iter, prev_snapshot, false, nullptr, env);
// IMPORTANT: Slice key doesn't point to a valid value anymore!!
const auto& keys = merge.keys();
const auto& values = merge.values();
assert(!keys.empty());
assert(keys.size() == values.size());
// largest possible sequence number in a merge queue is already stored
// in ikey.sequence.
// we additionally have to consider the front of the merge queue, which
// might have the smallest sequence number (out of all the merges with
// the same key)
meta->smallest_seqno =
std::min(meta->smallest_seqno, GetInternalKeySeqno(keys.front()));
// We have a list of keys to write, write all keys in the list.
for (auto key_iter = keys.rbegin(), value_iter = values.rbegin();
key_iter != keys.rend(); key_iter++, value_iter++) {
key = Slice(*key_iter);
value = Slice(*value_iter);
bool valid_key __attribute__((__unused__)) =
ParseInternalKey(key, &ikey);
// MergeUntil stops when it encounters a corrupt key and does not
// include them in the result, so we expect the keys here to valid.
assert(valid_key);
builder->Add(key, value);
} }
if (!iterator_at_next) iter->Next(); } else { // just write out the key-value
builder->Add(key, value);
meta->largest.DecodeFrom(key);
iter->Next();
} }
// The last key is the largest key current_user_key_exists_in_snapshot = key_needs_to_exist_in_snapshot;
meta->largest.DecodeFrom(Slice(prev_key));
SequenceNumber seqno = GetInternalKeySeqno(Slice(prev_key));
meta->smallest_seqno = std::min(meta->smallest_seqno, seqno);
meta->largest_seqno = std::max(meta->largest_seqno, seqno);
} else { if (io_priority == Env::IO_HIGH &&
for (; iter->Valid(); iter->Next()) { IOSTATS(bytes_written) >= kReportFlushIOStatsEvery) {
Slice key = iter->key(); ThreadStatusUtil::IncreaseThreadOperationProperty(
meta->largest.DecodeFrom(key); ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
builder->Add(key, iter->value()); IOSTATS_RESET(bytes_written);
SequenceNumber seqno = GetInternalKeySeqno(key);
meta->smallest_seqno = std::min(meta->smallest_seqno, seqno);
meta->largest_seqno = std::max(meta->largest_seqno, seqno);
if (io_priority == Env::IO_HIGH &&
IOSTATS(bytes_written) >= kReportFlushIOStatsEvery) {
ThreadStatusUtil::IncreaseThreadOperationProperty(
ThreadStatus::FLUSH_BYTES_WRITTEN,
IOSTATS(bytes_written));
IOSTATS_RESET(bytes_written);
}
} }
} }

@ -52,9 +52,7 @@ extern Status BuildTable(
FileMetaData* meta, const InternalKeyComparator& internal_comparator, FileMetaData* meta, const InternalKeyComparator& internal_comparator,
const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>* const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
int_tbl_prop_collector_factories, int_tbl_prop_collector_factories,
const SequenceNumber newest_snapshot, std::vector<SequenceNumber> snapshots, const CompressionType compression,
const SequenceNumber earliest_seqno_in_memtable,
const CompressionType compression,
const CompressionOptions& compression_opts, bool paranoid_file_checks, const CompressionOptions& compression_opts, bool paranoid_file_checks,
InternalStats* internal_stats, InternalStats* internal_stats,
const Env::IOPriority io_priority = Env::IO_HIGH, const Env::IOPriority io_priority = Env::IO_HIGH,

@ -1274,9 +1274,6 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
TableProperties table_properties; TableProperties table_properties;
{ {
ScopedArenaIterator iter(mem->NewIterator(ro, &arena)); ScopedArenaIterator iter(mem->NewIterator(ro, &arena));
const SequenceNumber newest_snapshot = snapshots_.GetNewest();
const SequenceNumber earliest_seqno_in_memtable =
mem->GetFirstSequenceNumber();
Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log, Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
"[%s] [WriteLevel0TableForRecovery]" "[%s] [WriteLevel0TableForRecovery]"
" Level-0 table #%" PRIu64 ": started", " Level-0 table #%" PRIu64 ": started",
@ -1290,8 +1287,8 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
s = BuildTable( s = BuildTable(
dbname_, env_, *cfd->ioptions(), env_options_, cfd->table_cache(), dbname_, env_, *cfd->ioptions(), env_options_, cfd->table_cache(),
iter.get(), &meta, cfd->internal_comparator(), iter.get(), &meta, cfd->internal_comparator(),
cfd->int_tbl_prop_collector_factories(), newest_snapshot, cfd->int_tbl_prop_collector_factories(), snapshots_.GetAll(),
earliest_seqno_in_memtable, GetCompressionFlush(*cfd->ioptions()), GetCompressionFlush(*cfd->ioptions()),
cfd->ioptions()->compression_opts, paranoid_file_checks, cfd->ioptions()->compression_opts, paranoid_file_checks,
cfd->internal_stats(), Env::IO_HIGH, &info.table_properties); cfd->internal_stats(), Env::IO_HIGH, &info.table_properties);
LogFlush(db_options_.info_log); LogFlush(db_options_.info_log);
@ -1348,7 +1345,7 @@ Status DBImpl::FlushMemTableToOutputFile(
FlushJob flush_job(dbname_, cfd, db_options_, mutable_cf_options, FlushJob flush_job(dbname_, cfd, db_options_, mutable_cf_options,
env_options_, versions_.get(), &mutex_, &shutting_down_, env_options_, versions_.get(), &mutex_, &shutting_down_,
snapshots_.GetNewest(), job_context, log_buffer, snapshots_.GetAll(), job_context, log_buffer,
directories_.GetDbDir(), directories_.GetDataDir(0U), directories_.GetDbDir(), directories_.GetDataDir(0U),
GetCompressionFlush(*cfd->ioptions()), stats_, GetCompressionFlush(*cfd->ioptions()), stats_,
&event_logger_); &event_logger_);

@ -3280,22 +3280,16 @@ TEST_F(DBTest, CompactBetweenSnapshots) {
Put(1, "foo", "sixth"); Put(1, "foo", "sixth");
// All entries (including duplicates) exist // All entries (including duplicates) exist
// before any compaction is triggered. // before any compaction or flush is triggered.
ASSERT_OK(Flush(1));
ASSERT_EQ("sixth", Get(1, "foo"));
ASSERT_EQ("fourth", Get(1, "foo", snapshot2));
ASSERT_EQ("first", Get(1, "foo", snapshot1));
ASSERT_EQ(AllEntriesFor("foo", 1), ASSERT_EQ(AllEntriesFor("foo", 1),
"[ sixth, fifth, fourth, third, second, first ]"); "[ sixth, fifth, fourth, third, second, first ]");
// After a compaction, "second", "third" and "fifth" should
// be removed
FillLevels("a", "z", 1);
dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
nullptr);
ASSERT_EQ("sixth", Get(1, "foo")); ASSERT_EQ("sixth", Get(1, "foo"));
ASSERT_EQ("fourth", Get(1, "foo", snapshot2)); ASSERT_EQ("fourth", Get(1, "foo", snapshot2));
ASSERT_EQ("first", Get(1, "foo", snapshot1)); ASSERT_EQ("first", Get(1, "foo", snapshot1));
// After a flush, "second", "third" and "fifth" should
// be removed
ASSERT_OK(Flush(1));
ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fourth, first ]"); ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fourth, first ]");
// after we release the snapshot1, only two values left // after we release the snapshot1, only two values left

@ -60,9 +60,9 @@ FlushJob::FlushJob(const std::string& dbname, ColumnFamilyData* cfd,
const EnvOptions& env_options, VersionSet* versions, const EnvOptions& env_options, VersionSet* versions,
InstrumentedMutex* db_mutex, InstrumentedMutex* db_mutex,
std::atomic<bool>* shutting_down, std::atomic<bool>* shutting_down,
SequenceNumber newest_snapshot, JobContext* job_context, std::vector<SequenceNumber> existing_snapshots,
LogBuffer* log_buffer, Directory* db_directory, JobContext* job_context, LogBuffer* log_buffer,
Directory* output_file_directory, Directory* db_directory, Directory* output_file_directory,
CompressionType output_compression, Statistics* stats, CompressionType output_compression, Statistics* stats,
EventLogger* event_logger) EventLogger* event_logger)
: dbname_(dbname), : dbname_(dbname),
@ -73,7 +73,7 @@ FlushJob::FlushJob(const std::string& dbname, ColumnFamilyData* cfd,
versions_(versions), versions_(versions),
db_mutex_(db_mutex), db_mutex_(db_mutex),
shutting_down_(shutting_down), shutting_down_(shutting_down),
newest_snapshot_(newest_snapshot), existing_snapshots_(std::move(existing_snapshots)),
job_context_(job_context), job_context_(job_context),
log_buffer_(log_buffer), log_buffer_(log_buffer),
db_directory_(db_directory), db_directory_(db_directory),
@ -188,8 +188,6 @@ Status FlushJob::WriteLevel0Table(const autovector<MemTable*>& mems,
// path 0 for level 0 file. // path 0 for level 0 file.
meta->fd = FileDescriptor(versions_->NewFileNumber(), 0, 0); meta->fd = FileDescriptor(versions_->NewFileNumber(), 0, 0);
const SequenceNumber earliest_seqno_in_memtable =
mems[0]->GetFirstSequenceNumber();
Version* base = cfd_->current(); Version* base = cfd_->current();
base->Ref(); // it is likely that we do not need this reference base->Ref(); // it is likely that we do not need this reference
Status s; Status s;
@ -234,9 +232,8 @@ Status FlushJob::WriteLevel0Table(const autovector<MemTable*>& mems,
s = BuildTable( s = BuildTable(
dbname_, db_options_.env, *cfd_->ioptions(), env_options_, dbname_, db_options_.env, *cfd_->ioptions(), env_options_,
cfd_->table_cache(), iter.get(), meta, cfd_->internal_comparator(), cfd_->table_cache(), iter.get(), meta, cfd_->internal_comparator(),
cfd_->int_tbl_prop_collector_factories(), newest_snapshot_, cfd_->int_tbl_prop_collector_factories(), existing_snapshots_,
earliest_seqno_in_memtable, output_compression_, output_compression_, cfd_->ioptions()->compression_opts,
cfd_->ioptions()->compression_opts,
mutable_cf_options_.paranoid_file_checks, cfd_->internal_stats(), mutable_cf_options_.paranoid_file_checks, cfd_->internal_stats(),
Env::IO_HIGH, &info.table_properties); Env::IO_HIGH, &info.table_properties);
LogFlush(db_options_.info_log); LogFlush(db_options_.info_log);

@ -57,10 +57,11 @@ class FlushJob {
const MutableCFOptions& mutable_cf_options, const MutableCFOptions& mutable_cf_options,
const EnvOptions& env_options, VersionSet* versions, const EnvOptions& env_options, VersionSet* versions,
InstrumentedMutex* db_mutex, std::atomic<bool>* shutting_down, InstrumentedMutex* db_mutex, std::atomic<bool>* shutting_down,
SequenceNumber newest_snapshot, JobContext* job_context, std::vector<SequenceNumber> existing_snapshots,
LogBuffer* log_buffer, Directory* db_directory, JobContext* job_context, LogBuffer* log_buffer,
Directory* output_file_directory, CompressionType output_compression, Directory* db_directory, Directory* output_file_directory,
Statistics* stats, EventLogger* event_logger); CompressionType output_compression, Statistics* stats,
EventLogger* event_logger);
~FlushJob(); ~FlushJob();
@ -80,7 +81,7 @@ class FlushJob {
VersionSet* versions_; VersionSet* versions_;
InstrumentedMutex* db_mutex_; InstrumentedMutex* db_mutex_;
std::atomic<bool>* shutting_down_; std::atomic<bool>* shutting_down_;
SequenceNumber newest_snapshot_; std::vector<SequenceNumber> existing_snapshots_;
JobContext* job_context_; JobContext* job_context_;
LogBuffer* log_buffer_; LogBuffer* log_buffer_;
Directory* db_directory_; Directory* db_directory_;

@ -3,6 +3,7 @@
// LICENSE file in the root directory of this source tree. An additional grant // LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory. // of patent rights can be found in the PATENTS file in the same directory.
#include <algorithm>
#include <map> #include <map>
#include <string> #include <string>
@ -91,7 +92,7 @@ TEST_F(FlushJobTest, Empty) {
FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(), FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
db_options_, *cfd->GetLatestMutableCFOptions(), db_options_, *cfd->GetLatestMutableCFOptions(),
env_options_, versions_.get(), &mutex_, &shutting_down_, env_options_, versions_.get(), &mutex_, &shutting_down_,
SequenceNumber(), &job_context, nullptr, nullptr, nullptr, {}, &job_context, nullptr, nullptr, nullptr,
kNoCompression, nullptr, &event_logger); kNoCompression, nullptr, &event_logger);
ASSERT_OK(flush_job.Run()); ASSERT_OK(flush_job.Run());
job_context.Clean(); job_context.Clean();
@ -104,9 +105,17 @@ TEST_F(FlushJobTest, NonEmpty) {
kMaxSequenceNumber); kMaxSequenceNumber);
new_mem->Ref(); new_mem->Ref();
mock::MockFileContents inserted_keys; mock::MockFileContents inserted_keys;
// Test data:
// seqno [ 1, 2 ... 8998, 8999, 9000, 9001, 9002 ... 9999 ]
// key [ 1001, 1002 ... 9998, 9999, 0, 1, 2 ... 999 ]
// Expected:
// smallest_key = "0"
// largest_key = "9999"
// smallest_seqno = 1
// smallest_seqno = 9999
for (int i = 1; i < 10000; ++i) { for (int i = 1; i < 10000; ++i) {
std::string key(ToString(i)); std::string key(ToString((i + 1000) % 10000));
std::string value("value" + ToString(i)); std::string value("value" + key);
new_mem->Add(SequenceNumber(i), kTypeValue, key, value); new_mem->Add(SequenceNumber(i), kTypeValue, key, value);
InternalKey internal_key(key, SequenceNumber(i), kTypeValue); InternalKey internal_key(key, SequenceNumber(i), kTypeValue);
inserted_keys.insert({internal_key.Encode().ToString(), value}); inserted_keys.insert({internal_key.Encode().ToString(), value});
@ -122,7 +131,71 @@ TEST_F(FlushJobTest, NonEmpty) {
FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(), FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
db_options_, *cfd->GetLatestMutableCFOptions(), db_options_, *cfd->GetLatestMutableCFOptions(),
env_options_, versions_.get(), &mutex_, &shutting_down_, env_options_, versions_.get(), &mutex_, &shutting_down_,
SequenceNumber(), &job_context, nullptr, nullptr, nullptr, {}, &job_context, nullptr, nullptr, nullptr,
kNoCompression, nullptr, &event_logger);
FileMetaData fd;
mutex_.Lock();
ASSERT_OK(flush_job.Run(&fd));
mutex_.Unlock();
ASSERT_EQ(ToString(0), fd.smallest.user_key().ToString());
ASSERT_EQ(ToString(9999), fd.largest.user_key().ToString());
ASSERT_EQ(1, fd.smallest_seqno);
ASSERT_EQ(9999, fd.largest_seqno);
mock_table_factory_->AssertSingleFile(inserted_keys);
job_context.Clean();
}
TEST_F(FlushJobTest, Snapshots) {
JobContext job_context(0);
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
kMaxSequenceNumber);
std::vector<SequenceNumber> snapshots;
std::set<SequenceNumber> snapshots_set;
int keys = 10000;
int max_inserts_per_keys = 8;
Random rnd(301);
for (int i = 0; i < keys / 2; ++i) {
snapshots.push_back(rnd.Uniform(keys * (max_inserts_per_keys / 2)) + 1);
snapshots_set.insert(snapshots.back());
}
std::sort(snapshots.begin(), snapshots.end());
new_mem->Ref();
SequenceNumber current_seqno = 0;
mock::MockFileContents inserted_keys;
for (int i = 1; i < keys; ++i) {
std::string key(ToString(i));
int insertions = rnd.Uniform(max_inserts_per_keys);
for (int j = 0; j < insertions; ++j) {
std::string value(test::RandomHumanReadableString(&rnd, 10));
auto seqno = ++current_seqno;
new_mem->Add(SequenceNumber(seqno), kTypeValue, key, value);
// a key is visible only if:
// 1. it's the last one written (j == insertions - 1)
// 2. there's a snapshot pointing at it
bool visible = (j == insertions - 1) ||
(snapshots_set.find(seqno) != snapshots_set.end());
if (visible) {
InternalKey internal_key(key, seqno, kTypeValue);
inserted_keys.insert({internal_key.Encode().ToString(), value});
}
}
}
autovector<MemTable*> to_delete;
cfd->imm()->Add(new_mem, &to_delete);
for (auto& m : to_delete) {
delete m;
}
EventLogger event_logger(db_options_.info_log.get());
FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
db_options_, *cfd->GetLatestMutableCFOptions(),
env_options_, versions_.get(), &mutex_, &shutting_down_,
snapshots, &job_context, nullptr, nullptr, nullptr,
kNoCompression, nullptr, &event_logger); kNoCompression, nullptr, &event_logger);
mutex_.Lock(); mutex_.Lock();
ASSERT_OK(flush_job.Run()); ASSERT_OK(flush_job.Run());

@ -292,7 +292,7 @@ class Repairer {
ScopedArenaIterator iter(mem->NewIterator(ro, &arena)); ScopedArenaIterator iter(mem->NewIterator(ro, &arena));
status = BuildTable(dbname_, env_, ioptions_, env_options_, table_cache_, status = BuildTable(dbname_, env_, ioptions_, env_options_, table_cache_,
iter.get(), &meta, icmp_, iter.get(), &meta, icmp_,
&int_tbl_prop_collector_factories_, 0, 0, &int_tbl_prop_collector_factories_, {},
kNoCompression, CompressionOptions(), false, nullptr); kNoCompression, CompressionOptions(), false, nullptr);
} }
delete mem->Unref(); delete mem->Unref();

Loading…
Cancel
Save