// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/version_set.h"

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <inttypes.h>
#include <stdio.h>
#include <algorithm>
#include <map>
#include <set>
#include <climits>
#include <unordered_map>
#include <vector>
#include <string>

#include "db/compaction.h"
#include "db/filename.h"
#include "db/internal_stats.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/merge_context.h"
#include "db/merge_helper.h"
#include "db/pinned_iterators_manager.h"
#include "db/table_cache.h"
#include "db/version_builder.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/merging_iterator.h"
#include "table/meta_blocks.h"
#include "table/plain_table_factory.h"
#include "table/table_reader.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/file_reader_writer.h"
#include "util/logging.h"
#include "util/perf_context_imp.h"
#include "util/stop_watch.h"
#include "util/sync_point.h"

namespace rocksdb {

namespace {

// Find File in LevelFilesBrief data structure
// Within an index range defined by left and right
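// Returns the smallest index i in [left, right) such that
// file_level.files[i].largest_key >= key, or `right` if there is no such
// file. Example (ignoring sequence numbers): with largest keys {"b", "d",
// "f"} and search key "e", the result is index 2 (the file ending at "f").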
int FindFileInRange(const InternalKeyComparator& icmp,
    const LevelFilesBrief& file_level,
    const Slice& key,
    uint32_t left,
    uint32_t right) {
  while (left < right) {
    uint32_t mid = (left + right) / 2;
    const FdWithKeyRange& f = file_level.files[mid];
    if (icmp.InternalKeyComparator::Compare(f.largest_key, key) < 0) {
      // Key at "mid.largest" is < "target".  Therefore all
      // files at or before "mid" are uninteresting.
      left = mid + 1;
    } else {
      // Key at "mid.largest" is >= "target".  Therefore all files
      // after "mid" are uninteresting.
      right = mid;
    }
  }
  return right;
}

// Class to help choose the next file to search for the particular key.
// Searches and returns files level by level.
// We can search level-by-level since entries never hop across
// levels.  Therefore we are guaranteed that if we find data
// in a smaller level, later levels are irrelevant (unless we
// are MergeInProgress).
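// Rough usage sketch (Version::Get() is the caller in this file):
//   FilePicker fp(...);
//   for (FdWithKeyRange* f = fp.GetNextFile(); f != nullptr;
//        f = fp.GetNextFile()) {
//     // probe table file *f for the key
//   }
// Within level 0 files are visited newest-to-oldest; in levels >= 1 only
// files whose key range can contain the key are visited, in key order.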
class FilePicker {
 public:
  FilePicker(std::vector<FileMetaData*>* files, const Slice& user_key,
             const Slice& ikey, autovector<LevelFilesBrief>* file_levels,
             unsigned int num_levels, FileIndexer* file_indexer,
             const Comparator* user_comparator,
             const InternalKeyComparator* internal_comparator)
      : num_levels_(num_levels),
        curr_level_(static_cast<unsigned int>(-1)),
        returned_file_level_(static_cast<unsigned int>(-1)),
        hit_file_level_(static_cast<unsigned int>(-1)),
        search_left_bound_(0),
        search_right_bound_(FileIndexer::kLevelMaxIndex),
#ifndef NDEBUG
        files_(files),
#endif
        level_files_brief_(file_levels),
        is_hit_file_last_in_level_(false),
        user_key_(user_key),
        ikey_(ikey),
        file_indexer_(file_indexer),
        user_comparator_(user_comparator),
        internal_comparator_(internal_comparator) {
    // Setup member variables to search first level.
    search_ended_ = !PrepareNextLevel();
    if (!search_ended_) {
      // Prefetch Level 0 table data to avoid cache miss if possible.
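      // (Prepare() is only a hint: it passes the upcoming lookup key to the
      // table reader so it can prefetch whatever it needs for that key.)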
      for (unsigned int i = 0; i < (*level_files_brief_)[0].num_files; ++i) {
        auto* r = (*level_files_brief_)[0].files[i].fd.table_reader;
        if (r) {
          r->Prepare(ikey);
        }
      }
    }
  }

  int GetCurrentLevel() { return returned_file_level_; }

  FdWithKeyRange* GetNextFile() {
    while (!search_ended_) {  // Loops over different levels.
      while (curr_index_in_curr_level_ < curr_file_level_->num_files) {
        // Loops over all files in current level.
        FdWithKeyRange* f = &curr_file_level_->files[curr_index_in_curr_level_];
        hit_file_level_ = curr_level_;
        is_hit_file_last_in_level_ =
            curr_index_in_curr_level_ == curr_file_level_->num_files - 1;
        int cmp_largest = -1;

        // Do key range filtering of files and/or fractional cascading if:
        // (1) not all the files are in level 0, or
        // (2) there are more than 3 level-0 files.
        // If there are only 3 or fewer level-0 files, we skip the key range
        // filtering: such a system is likely tuned to minimize the number of
        // tables queried by each read, so key range filtering is unlikely to
        // be cheaper than simply querying the files.
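        // cmp_smallest / cmp_largest record where user_key_ falls relative
        // to this file's [smallest, largest] user-key range; they also feed
        // the file indexer below to narrow the next level's search bounds.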
        if (num_levels_ > 1 || curr_file_level_->num_files > 3) {
          // Check if key is within a file's range. If search left bound and
          // right bound point to the same file, we are sure key falls in
          // range.
          assert(
              curr_level_ == 0 ||
              curr_index_in_curr_level_ == start_index_in_curr_level_ ||
              user_comparator_->Compare(user_key_,
                ExtractUserKey(f->smallest_key)) <= 0);

          int cmp_smallest = user_comparator_->Compare(user_key_,
              ExtractUserKey(f->smallest_key));
          if (cmp_smallest >= 0) {
            cmp_largest = user_comparator_->Compare(user_key_,
                ExtractUserKey(f->largest_key));
          }

          // Setup file search bound for the next level based on the
          // comparison results
          if (curr_level_ > 0) {
            file_indexer_->GetNextLevelIndex(curr_level_,
                                             curr_index_in_curr_level_,
                                             cmp_smallest, cmp_largest,
                                             &search_left_bound_,
                                             &search_right_bound_);
          }
          // Key falls out of current file's range
          if (cmp_smallest < 0 || cmp_largest > 0) {
            if (curr_level_ == 0) {
              ++curr_index_in_curr_level_;
              continue;
            } else {
              // Search next level.
              break;
            }
          }
        }
#ifndef NDEBUG
        // Sanity check to make sure that the files are correctly sorted
        if (prev_file_) {
          if (curr_level_ != 0) {
            int comp_sign = internal_comparator_->Compare(
                prev_file_->largest_key, f->smallest_key);
            assert(comp_sign < 0);
          } else {
            // level == 0, the current file cannot be newer than the previous
            // one. The compressed data structure (FdWithKeyRange) carries no
            // seqno, so check against the full FileMetaData in files_.
            assert(curr_index_in_curr_level_ > 0);
            assert(!NewestFirstBySeqNo(files_[0][curr_index_in_curr_level_],
                  files_[0][curr_index_in_curr_level_-1]));
          }
        }
        prev_file_ = f;
#endif
        returned_file_level_ = curr_level_;
        if (curr_level_ > 0 && cmp_largest < 0) {
          // No more files to search in this level.
          search_ended_ = !PrepareNextLevel();
        } else {
          ++curr_index_in_curr_level_;
        }
        return f;
      }
      // Start searching next level.
      search_ended_ = !PrepareNextLevel();
    }
    // Search ended.
    return nullptr;
  }

  // getter for current file level
  // for GET_HIT_L0, GET_HIT_L1 & GET_HIT_L2_AND_UP counts
  unsigned int GetHitFileLevel() { return hit_file_level_; }

  // Returns true if the most recent "hit file" (i.e., one returned by
  // GetNextFile()) is at the last index in its level.
  bool IsHitFileLastInLevel() { return is_hit_file_last_in_level_; }

 private:
  unsigned int num_levels_;
  unsigned int curr_level_;
  unsigned int returned_file_level_;
  unsigned int hit_file_level_;
  int32_t search_left_bound_;
  int32_t search_right_bound_;
#ifndef NDEBUG
  std::vector<FileMetaData*>* files_;
#endif
  autovector<LevelFilesBrief>* level_files_brief_;
  bool search_ended_;
  bool is_hit_file_last_in_level_;
  LevelFilesBrief* curr_file_level_;
  unsigned int curr_index_in_curr_level_;
  unsigned int start_index_in_curr_level_;
  Slice user_key_;
  Slice ikey_;
  FileIndexer* file_indexer_;
  const Comparator* user_comparator_;
  const InternalKeyComparator* internal_comparator_;
#ifndef NDEBUG
  FdWithKeyRange* prev_file_;
#endif

  // Setup local variables to search next level.
  // Returns false if there are no more levels to search.
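  // On entry, search_left_bound_ / search_right_bound_ hold the inclusive
  // index range suggested by the file indexer for this level;
  // [0, FileIndexer::kLevelMaxIndex] means the whole level.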
  bool PrepareNextLevel() {
    curr_level_++;
    while (curr_level_ < num_levels_) {
      curr_file_level_ = &(*level_files_brief_)[curr_level_];
      if (curr_file_level_->num_files == 0) {
        // When current level is empty, the search bound generated from upper
        // level must be [0, -1] or [0, FileIndexer::kLevelMaxIndex] if it is
        // also empty.
        assert(search_left_bound_ == 0);
        assert(search_right_bound_ == -1 ||
               search_right_bound_ == FileIndexer::kLevelMaxIndex);
        // Since current level is empty, it will need to search all files in
        // the next level
        search_left_bound_ = 0;
        search_right_bound_ = FileIndexer::kLevelMaxIndex;
        curr_level_++;
        continue;
      }

      // Some files may overlap each other. We find
      // all files that overlap user_key and process them in order from
      // newest to oldest. In the context of merge-operator, this can occur at
      // any level. Otherwise, it only occurs at Level-0 (since Put/Deletes
      // are always compacted into a single entry).
      int32_t start_index;
      if (curr_level_ == 0) {
        // On Level-0, we read through all files to check for overlap.
        start_index = 0;
      } else {
        // On Level-n (n>=1), files are sorted. Binary search to find the
        // earliest file whose largest key >= ikey. Search left bound and
        // right bound are used to narrow the range.
        if (search_left_bound_ == search_right_bound_) {
          start_index = search_left_bound_;
        } else if (search_left_bound_ < search_right_bound_) {
          if (search_right_bound_ == FileIndexer::kLevelMaxIndex) {
            search_right_bound_ =
                static_cast<int32_t>(curr_file_level_->num_files) - 1;
          }
          start_index =
              FindFileInRange(*internal_comparator_, *curr_file_level_, ikey_,
                              static_cast<uint32_t>(search_left_bound_),
                              static_cast<uint32_t>(search_right_bound_));
        } else {
          // search_left_bound > search_right_bound, key does not exist in
          // this level. Since no comparison is done in this level, it will
          // need to search all files in the next level.
          search_left_bound_ = 0;
          search_right_bound_ = FileIndexer::kLevelMaxIndex;
          curr_level_++;
          continue;
        }
      }
      start_index_in_curr_level_ = start_index;
      curr_index_in_curr_level_ = start_index;
#ifndef NDEBUG
      prev_file_ = nullptr;
#endif
      return true;
    }
    // curr_level_ = num_levels_. So, no more levels to search.
    return false;
  }
};
}  // anonymous namespace

VersionStorageInfo::~VersionStorageInfo() { delete[] files_; }

Version::~Version() {
  assert(refs_ == 0);

  // Remove from linked list
  prev_->next_ = next_;
  next_->prev_ = prev_;

  // Drop references to files
  for (int level = 0; level < storage_info_.num_levels_; level++) {
    for (size_t i = 0; i < storage_info_.files_[level].size(); i++) {
      FileMetaData* f = storage_info_.files_[level][i];
      assert(f->refs > 0);
      f->refs--;
      if (f->refs <= 0) {
        if (f->table_reader_handle) {
          cfd_->table_cache()->EraseHandle(f->fd, f->table_reader_handle);
          f->table_reader_handle = nullptr;
        }
        vset_->obsolete_files_.push_back(f);
      }
    }
  }
}

int FindFile(const InternalKeyComparator& icmp,
             const LevelFilesBrief& file_level,
             const Slice& key) {
  return FindFileInRange(icmp, file_level, key, 0,
                         static_cast<uint32_t>(file_level.num_files));
}

void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
                               const std::vector<FileMetaData*>& files,
                               Arena* arena) {
  assert(file_level);
  assert(arena);

  size_t num = files.size();
  file_level->num_files = num;
  char* mem = arena->AllocateAligned(num * sizeof(FdWithKeyRange));
  file_level->files = new (mem)FdWithKeyRange[num];
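  // Arena layout: one aligned block holding all FdWithKeyRange entries,
  // followed (per file) by a block holding that file's smallest and largest
  // internal keys back to back; the Slices set up below point into it.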

  for (size_t i = 0; i < num; i++) {
    Slice smallest_key = files[i]->smallest.Encode();
    Slice largest_key = files[i]->largest.Encode();

    // Copy key slice to sequential memory
    size_t smallest_size = smallest_key.size();
    size_t largest_size = largest_key.size();
    mem = arena->AllocateAligned(smallest_size + largest_size);
    memcpy(mem, smallest_key.data(), smallest_size);
    memcpy(mem + smallest_size, largest_key.data(), largest_size);

    FdWithKeyRange& f = file_level->files[i];
    f.fd = files[i]->fd;
    f.smallest_key = Slice(mem, smallest_size);
    f.largest_key = Slice(mem + smallest_size, largest_size);
  }
}

static bool AfterFile(const Comparator* ucmp,
                      const Slice* user_key, const FdWithKeyRange* f) {
  // nullptr user_key occurs before all keys and is therefore never after *f
  return (user_key != nullptr &&
          ucmp->Compare(*user_key, ExtractUserKey(f->largest_key)) > 0);
}

static bool BeforeFile(const Comparator* ucmp,
                       const Slice* user_key, const FdWithKeyRange* f) {
  // nullptr user_key occurs after all keys and is therefore never before *f
  return (user_key != nullptr &&
          ucmp->Compare(*user_key, ExtractUserKey(f->smallest_key)) < 0);
}

bool SomeFileOverlapsRange(
    const InternalKeyComparator& icmp,
    bool disjoint_sorted_files,
    const LevelFilesBrief& file_level,
    const Slice* smallest_user_key,
    const Slice* largest_user_key) {
  const Comparator* ucmp = icmp.user_comparator();
  if (!disjoint_sorted_files) {
    // Need to check against all files
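    // (Linear scan; this branch is taken for level 0, where files may
    // overlap each other.)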
    for (size_t i = 0; i < file_level.num_files; i++) {
      const FdWithKeyRange* f = &(file_level.files[i]);
      if (AfterFile(ucmp, smallest_user_key, f) ||
          BeforeFile(ucmp, largest_user_key, f)) {
        // No overlap
      } else {
        return true;  // Overlap
      }
    }
    return false;
  }

  // Binary search over file list
  uint32_t index = 0;
  if (smallest_user_key != nullptr) {
    // Find the earliest possible internal key for smallest_user_key
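    // (With kMaxSequenceNumber this internal key sorts before every entry
    // for smallest_user_key, so FindFile cannot skip a file that still
    // contains that user key.)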
    InternalKey small;
    small.SetMaxPossibleForUserKey(*smallest_user_key);
    index = FindFile(icmp, file_level, small.Encode());
  }

  if (index >= file_level.num_files) {
    // beginning of range is after all files, so no overlap.
    return false;
  }

  return !BeforeFile(ucmp, largest_user_key, &file_level.files[index]);
}

namespace {

// An internal iterator.  For a given version/level pair, yields
// information about the files in the level.  For a given entry, key()
// is the largest key that occurs in the file, and value() is the
// file's FileDescriptor, returned as a Slice over its in-memory
// representation.
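// It is meant to serve as the first ("index") level of a two-level
// iterator: the second level opens the table identified by value() and
// iterates over its contents.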
class LevelFileNumIterator : public InternalIterator {
 public:
  LevelFileNumIterator(const InternalKeyComparator& icmp,
                       const LevelFilesBrief* flevel)
      : icmp_(icmp),
        flevel_(flevel),
        index_(static_cast<uint32_t>(flevel->num_files)),
        current_value_(0, 0, 0) {  // Marks as invalid
  }
  virtual bool Valid() const override { return index_ < flevel_->num_files; }
  virtual void Seek(const Slice& target) override {
    index_ = FindFile(icmp_, *flevel_, target);
  }
  virtual void SeekForPrev(const Slice& target) override {
    SeekForPrevImpl(target, &icmp_);
  }

  virtual void SeekToFirst() override { index_ = 0; }
  virtual void SeekToLast() override {
    index_ = (flevel_->num_files == 0)
                 ? 0
                 : static_cast<uint32_t>(flevel_->num_files) - 1;
  }
  virtual void Next() override {
    assert(Valid());
    index_++;
  }
  virtual void Prev() override {
    assert(Valid());
    if (index_ == 0) {
      index_ = static_cast<uint32_t>(flevel_->num_files);  // Marks as invalid
    } else {
      index_--;
    }
  }
  Slice key() const override {
    assert(Valid());
    return flevel_->files[index_].largest_key;
  }
  Slice value() const override {
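    // The returned Slice aliases current_value_, so it is only valid until
    // the next call to value() or until this iterator is destroyed.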
|
|
|
|
assert(Valid());
|
|
|
|
|
|
|
|
auto file_meta = flevel_->files[index_];
|
|
|
|
current_value_ = file_meta.fd;
|
|
|
|
return Slice(reinterpret_cast<const char*>(¤t_value_),
|
|
|
|
sizeof(FileDescriptor));
|
|
|
|
}
|
|
|
|
virtual Status status() const override { return Status::OK(); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
const InternalKeyComparator icmp_;
|
|
|
|
const LevelFilesBrief* flevel_;
|
|
|
|
uint32_t index_;
|
|
|
|
mutable FileDescriptor current_value_;
|
|
|
|
};
|
|
|
|
|
|
|
|
class LevelFileIteratorState : public TwoLevelIteratorState {
|
|
|
|
public:
|
Skip bottom-level filter block caching when hit-optimized
Summary:
When Get() or NewIterator() trigger file loads, skip caching the filter block if
(1) optimize_filters_for_hits is set and (2) the file is on the bottommost
level. Also skip checking filters under the same conditions, which means that
for a preloaded file or a file that was trivially-moved to the bottom level, its
filter block will eventually expire from the cache.
- added parameters/instance variables in various places in order to propagate the config ("skip_filters") from version_set to block_based_table_reader
- in BlockBasedTable::Rep, this optimization prevents filter from being loaded when the file is opened simply by setting filter_policy = nullptr
- in BlockBasedTable::Get/BlockBasedTable::NewIterator, this optimization prevents filter from being used (even if it was loaded already) by setting filter = nullptr
Test Plan:
updated unit test:
$ ./db_test --gtest_filter=DBTest.OptimizeFiltersForHits
will also run 'make check'
Reviewers: sdong, igor, paultuckfield, anthony, rven, kradhakrishnan, IslamAbdelRahman, yhchiang
Reviewed By: yhchiang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D51633
9 years ago
|
|
|
  // @param skip_filters Disables loading/accessing the filter block
  LevelFileIteratorState(TableCache* table_cache,
                         const ReadOptions& read_options,
                         const EnvOptions& env_options,
                         const InternalKeyComparator& icomparator,
                         HistogramImpl* file_read_hist, bool for_compaction,
                         bool prefix_enabled, bool skip_filters, int level,
                         RangeDelAggregator* range_del_agg)
      : TwoLevelIteratorState(prefix_enabled),
        table_cache_(table_cache),
        read_options_(read_options),
        env_options_(env_options),
        icomparator_(icomparator),
        file_read_hist_(file_read_hist),
        for_compaction_(for_compaction),
        skip_filters_(skip_filters),
        level_(level),
        range_del_agg_(range_del_agg) {}

  InternalIterator* NewSecondaryIterator(const Slice& meta_handle) override {
    if (meta_handle.size() != sizeof(FileDescriptor)) {
      return NewErrorInternalIterator(
          Status::Corruption("FileReader invoked with unexpected value"));
    }
    const FileDescriptor* fd =
        reinterpret_cast<const FileDescriptor*>(meta_handle.data());
    return table_cache_->NewIterator(
        read_options_, env_options_, icomparator_, *fd, range_del_agg_,
        nullptr /* don't need reference to table */, file_read_hist_,
        for_compaction_, nullptr /* arena */, skip_filters_, level_);
  }

  bool PrefixMayMatch(const Slice& internal_key) override {
    return true;
  }

 private:
  TableCache* table_cache_;
  const ReadOptions read_options_;
  const EnvOptions& env_options_;
  const InternalKeyComparator& icomparator_;
  HistogramImpl* file_read_hist_;
  bool for_compaction_;
  bool skip_filters_;
  int level_;
  RangeDelAggregator* range_del_agg_;
};
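
// Note (added, illustrative): LevelFileIteratorState is the glue consumed by
// NewTwoLevelIterator() in AddIteratorsForLevel() further below. A minimal
// wiring sketch, where table_cache, icomparator, level_files_brief, arena and
// the remaining arguments are placeholder names for the caller's objects:
//
//   auto* state = new (arena->AllocateAligned(sizeof(LevelFileIteratorState)))
//       LevelFileIteratorState(table_cache, read_options, env_options,
//                              icomparator, file_read_hist,
//                              false /* for_compaction */,
//                              true /* prefix_enabled */,
//                              false /* skip_filters */, level, range_del_agg);
//   auto* index_iter = new (arena->AllocateAligned(sizeof(LevelFileNumIterator)))
//       LevelFileNumIterator(icomparator, &level_files_brief);
//   InternalIterator* iter =
//       NewTwoLevelIterator(state, index_iter, arena, false);
//
// The first-level iterator yields encoded FileDescriptor handles, and
// NewSecondaryIterator() opens the corresponding tables lazily.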

// A wrapper of version builder that references the current version in its
// constructor and unreferences it in its destructor.
// Both the constructor and the destructor must be called while holding the
// DB mutex.
class BaseReferencedVersionBuilder {
 public:
  explicit BaseReferencedVersionBuilder(ColumnFamilyData* cfd)
      : version_builder_(new VersionBuilder(
            cfd->current()->version_set()->env_options(), cfd->table_cache(),
            cfd->current()->storage_info(), cfd->ioptions()->info_log)),
        version_(cfd->current()) {
    version_->Ref();
  }
  ~BaseReferencedVersionBuilder() {
    delete version_builder_;
    version_->Unref();
  }
  VersionBuilder* version_builder() { return version_builder_; }

 private:
  VersionBuilder* version_builder_;
  Version* version_;
};
}  // anonymous namespace
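
// Usage sketch (added, illustrative; `edit` and `new_version` are placeholder
// names, and the DB mutex is assumed to be held for both construction and
// destruction, per the comment above):
//
//   BaseReferencedVersionBuilder builder_guard(cfd);
//   builder_guard.version_builder()->Apply(&edit);
//   builder_guard.version_builder()->SaveTo(new_version->storage_info());
//
// Referencing the base Version keeps its files alive while edits are applied.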

Status Version::GetTableProperties(std::shared_ptr<const TableProperties>* tp,
                                   const FileMetaData* file_meta,
                                   const std::string* fname) const {
  auto table_cache = cfd_->table_cache();
  auto ioptions = cfd_->ioptions();
  Status s = table_cache->GetTableProperties(
      vset_->env_options_, cfd_->internal_comparator(), file_meta->fd,
      tp, true /* no io */);
  if (s.ok()) {
    return s;
  }

  // We only ignore the `Incomplete` error type, since it is expected by
  // design when the table is not present in the table cache.
  if (!s.IsIncomplete()) {
    return s;
  }

  // 2. The table is not present in the table cache: read the table properties
  // directly from the properties block in the file.
  std::unique_ptr<RandomAccessFile> file;
  if (fname != nullptr) {
    s = ioptions->env->NewRandomAccessFile(
        *fname, &file, vset_->env_options_);
  } else {
    s = ioptions->env->NewRandomAccessFile(
        TableFileName(vset_->db_options_->db_paths, file_meta->fd.GetNumber(),
                      file_meta->fd.GetPathId()),
        &file, vset_->env_options_);
  }
  if (!s.ok()) {
    return s;
  }

  TableProperties* raw_table_properties;
  // By setting the magic number to kInvalidTableMagicNumber, we can bypass
  // the magic number check in the footer.
  std::unique_ptr<RandomAccessFileReader> file_reader(
      new RandomAccessFileReader(std::move(file)));
  s = ReadTableProperties(
      file_reader.get(), file_meta->fd.GetFileSize(),
      Footer::kInvalidTableMagicNumber /* table's magic number */, *ioptions,
      &raw_table_properties);
  if (!s.ok()) {
    return s;
  }
  RecordTick(ioptions->statistics, NUMBER_DIRECT_LOAD_TABLE_PROPERTIES);

  *tp = std::shared_ptr<const TableProperties>(raw_table_properties);
  return s;
}

Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props) {
  Status s;
  for (int level = 0; level < storage_info_.num_levels_; level++) {
    s = GetPropertiesOfAllTables(props, level);
    if (!s.ok()) {
      return s;
    }
  }

  return Status::OK();
}

Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props,
                                         int level) {
  for (const auto& file_meta : storage_info_.files_[level]) {
    auto fname =
        TableFileName(vset_->db_options_->db_paths, file_meta->fd.GetNumber(),
                      file_meta->fd.GetPathId());
    // 1. If the table is already present in table cache, load table
    // properties from there.
    std::shared_ptr<const TableProperties> table_properties;
    Status s = GetTableProperties(&table_properties, file_meta, &fname);
    if (s.ok()) {
      props->insert({fname, table_properties});
    } else {
      return s;
    }
  }

  return Status::OK();
}
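
// Example (added, illustrative): the per-level variant above backs the public
// DB::GetPropertiesOfAllTables() API. A caller-side sketch, with `db` assumed
// to be an open DB instance:
//
//   TablePropertiesCollection props;
//   Status s = db->GetPropertiesOfAllTables(db->DefaultColumnFamily(), &props);
//   if (s.ok()) {
//     for (const auto& entry : props) {
//       // entry.first is the table file name, entry.second its properties,
//       // e.g. entry.second->num_entries.
//     }
//   }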

Status Version::GetPropertiesOfTablesInRange(
    const Range* range, std::size_t n, TablePropertiesCollection* props) const {
  for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) {
    for (decltype(n) i = 0; i < n; i++) {
      // Convert user_key into a corresponding internal key.
      InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
      InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
      std::vector<FileMetaData*> files;
      storage_info_.GetOverlappingInputs(level, &k1, &k2, &files, -1, nullptr,
                                         false);
      for (const auto& file_meta : files) {
        auto fname =
            TableFileName(vset_->db_options_->db_paths,
                          file_meta->fd.GetNumber(), file_meta->fd.GetPathId());
        if (props->count(fname) == 0) {
          // 1. If the table is already present in table cache, load table
          // properties from there.
          std::shared_ptr<const TableProperties> table_properties;
          Status s = GetTableProperties(&table_properties, file_meta, &fname);
          if (s.ok()) {
            props->insert({fname, table_properties});
          } else {
            return s;
          }
        }
      }
    }
  }

  return Status::OK();
}
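
// Example (added, illustrative): the range-restricted variant is exposed as
// DB::GetPropertiesOfTablesInRange(). Sketch, with `db`, `start` and `limit`
// assumed to be defined by the caller:
//
//   Range r(start, limit);
//   TablePropertiesCollection props;
//   Status s = db->GetPropertiesOfTablesInRange(db->DefaultColumnFamily(),
//                                               &r, 1 /* n */, &props);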

Status Version::GetAggregatedTableProperties(
    std::shared_ptr<const TableProperties>* tp, int level) {
  TablePropertiesCollection props;
  Status s;
  if (level < 0) {
    s = GetPropertiesOfAllTables(&props);
  } else {
    s = GetPropertiesOfAllTables(&props, level);
  }
  if (!s.ok()) {
    return s;
  }

  auto* new_tp = new TableProperties();
  for (const auto& item : props) {
    new_tp->Add(*item.second);
  }
  tp->reset(new_tp);
  return Status::OK();
}
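
// Note (added): the aggregated result is what the "aggregated table
// properties" DB properties report (believed to be
// "rocksdb.aggregated-table-properties" plus a per-level variant; the exact
// property strings are an assumption here). Illustrative sketch:
//
//   std::string out;
//   db->GetProperty("rocksdb.aggregated-table-properties", &out);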

size_t Version::GetMemoryUsageByTableReaders() {
  size_t total_usage = 0;
  for (auto& file_level : storage_info_.level_files_brief_) {
    for (size_t i = 0; i < file_level.num_files; i++) {
      total_usage += cfd_->table_cache()->GetMemoryUsageByTableReader(
          vset_->env_options_, cfd_->internal_comparator(),
          file_level.files[i].fd);
    }
  }
  return total_usage;
}
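
// Note (added): this total is the figure surfaced by the table-reader memory
// DB property (believed to be "rocksdb.estimate-table-readers-mem"; the exact
// property string is an assumption here). Illustrative sketch:
//
//   std::string mem;
//   db->GetProperty("rocksdb.estimate-table-readers-mem", &mem);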

void Version::GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta) {
  assert(cf_meta);
  assert(cfd_);

  cf_meta->name = cfd_->GetName();
  cf_meta->size = 0;
  cf_meta->file_count = 0;
  cf_meta->levels.clear();

  auto* ioptions = cfd_->ioptions();
  auto* vstorage = storage_info();

  for (int level = 0; level < cfd_->NumberLevels(); level++) {
    uint64_t level_size = 0;
    cf_meta->file_count += vstorage->LevelFiles(level).size();
    std::vector<SstFileMetaData> files;
    for (const auto& file : vstorage->LevelFiles(level)) {
      uint32_t path_id = file->fd.GetPathId();
      std::string file_path;
      if (path_id < ioptions->db_paths.size()) {
        file_path = ioptions->db_paths[path_id].path;
      } else {
        assert(!ioptions->db_paths.empty());
        file_path = ioptions->db_paths.back().path;
      }
      files.emplace_back(
          MakeTableFileName("", file->fd.GetNumber()), file_path,
          file->fd.GetFileSize(), file->smallest_seqno, file->largest_seqno,
          file->smallest.user_key().ToString(),
          file->largest.user_key().ToString(), file->being_compacted);
      level_size += file->fd.GetFileSize();
    }
    cf_meta->levels.emplace_back(level, level_size, std::move(files));
    cf_meta->size += level_size;
  }
}
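
// Example (added, illustrative): this is the worker behind
// DB::GetColumnFamilyMetaData(), used for instance by
// example/compact_files_example.cc to drive CompactFiles(). Caller-side
// sketch, with `db` assumed to be an open DB instance:
//
//   ColumnFamilyMetaData cf_meta;
//   db->GetColumnFamilyMetaData(db->DefaultColumnFamily(), &cf_meta);
//   for (const auto& level : cf_meta.levels) {
//     for (const auto& file : level.files) {
//       // file.name, file.size, file.being_compacted, ...
//     }
//   }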

uint64_t VersionStorageInfo::GetEstimatedActiveKeys() const {
  // Estimation will be inaccurate when:
  // (1) there exist merge keys
  // (2) keys are directly overwritten
  // (3) deletions are issued for non-existing keys
  // (4) the number of samples is low
  if (current_num_samples_ == 0) {
    return 0;
  }

  if (current_num_non_deletions_ <= current_num_deletions_) {
    return 0;
  }

  uint64_t est = current_num_non_deletions_ - current_num_deletions_;

  uint64_t file_count = 0;
  for (int level = 0; level < num_levels_; ++level) {
    file_count += files_[level].size();
  }

  if (current_num_samples_ < file_count) {
    // casting to avoid overflowing
    return static_cast<uint64_t>(
        (est * static_cast<double>(file_count) / current_num_samples_));
  } else {
    return est;
  }
}
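
// Worked example (added): with current_num_non_deletions_ = 1,000,000,
// current_num_deletions_ = 200,000, current_num_samples_ = 10 and 40 files in
// total, est = 800,000 and the function returns 800,000 * 40 / 10 = 3,200,000,
// i.e. the per-sample estimate scaled up to the full file count. Once every
// file has been sampled, est is returned unscaled.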

double VersionStorageInfo::GetEstimatedCompressionRatioAtLevel(
    int level) const {
  assert(level < num_levels_);
  uint64_t sum_file_size_bytes = 0;
  uint64_t sum_data_size_bytes = 0;
  for (auto* file_meta : files_[level]) {
    sum_file_size_bytes += file_meta->fd.GetFileSize();
    sum_data_size_bytes += file_meta->raw_key_size + file_meta->raw_value_size;
  }
  if (sum_file_size_bytes == 0) {
    return -1.0;
  }
  return static_cast<double>(sum_data_size_bytes) / sum_file_size_bytes;
}
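
// Worked example (added): if the files on a level hold 100 MB of raw key and
// value data but occupy 25 MB on disk, the function returns 100 / 25 = 4.0;
// values above 1 indicate compression, and -1.0 flags an empty level.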

void Version::AddIterators(const ReadOptions& read_options,
                           const EnvOptions& soptions,
                           MergeIteratorBuilder* merge_iter_builder,
                           RangeDelAggregator* range_del_agg) {
  assert(storage_info_.finalized_);

  for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) {
    AddIteratorsForLevel(read_options, soptions, merge_iter_builder, level,
                         range_del_agg);
  }
}

void Version::AddIteratorsForLevel(const ReadOptions& read_options,
                                   const EnvOptions& soptions,
                                   MergeIteratorBuilder* merge_iter_builder,
                                   int level,
                                   RangeDelAggregator* range_del_agg) {
  assert(storage_info_.finalized_);
  if (level >= storage_info_.num_non_empty_levels()) {
    // This is an empty level
    return;
  } else if (storage_info_.LevelFilesBrief(level).num_files == 0) {
    // No files in this level
    return;
  }

  auto* arena = merge_iter_builder->GetArena();
  if (level == 0) {
    // Merge all level zero files together since they may overlap
    for (size_t i = 0; i < storage_info_.LevelFilesBrief(0).num_files; i++) {
      const auto& file = storage_info_.LevelFilesBrief(0).files[i];
      merge_iter_builder->AddIterator(cfd_->table_cache()->NewIterator(
          read_options, soptions, cfd_->internal_comparator(), file.fd,
          range_del_agg, nullptr, cfd_->internal_stats()->GetFileReadHist(0),
          false, arena, false /* skip_filters */, 0 /* level */));
    }
  } else {
    // For levels > 0, we can use a concatenating iterator that sequentially
    // walks through the non-overlapping files in the level, opening them
    // lazily.
    auto* mem = arena->AllocateAligned(sizeof(LevelFileIteratorState));
    auto* state = new (mem)
        LevelFileIteratorState(cfd_->table_cache(), read_options, soptions,
                               cfd_->internal_comparator(),
                               cfd_->internal_stats()->GetFileReadHist(level),
                               false /* for_compaction */,
                               cfd_->ioptions()->prefix_extractor != nullptr,
                               IsFilterSkipped(level), level, range_del_agg);
    mem = arena->AllocateAligned(sizeof(LevelFileNumIterator));
    auto* first_level_iter = new (mem) LevelFileNumIterator(
        cfd_->internal_comparator(), &storage_info_.LevelFilesBrief(level));
    merge_iter_builder->AddIterator(
        NewTwoLevelIterator(state, first_level_iter, arena, false));
  }
}
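
// Note (added): the asymmetry above is deliberate. Level-0 files may overlap,
// so each one contributes its own child iterator to the merge; levels above 0
// hold disjoint, sorted files, so a single lazily-opening two-level iterator
// suffices. Placement-new from the merge builder's arena keeps the iterator
// tree in one allocation region instead of separate heap allocations.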

VersionStorageInfo::VersionStorageInfo(
    const InternalKeyComparator* internal_comparator,
    const Comparator* user_comparator, int levels,
    CompactionStyle compaction_style, VersionStorageInfo* ref_vstorage,
    bool _force_consistency_checks)
    : internal_comparator_(internal_comparator),
      user_comparator_(user_comparator),
      // cfd is nullptr if Version is dummy
      num_levels_(levels),
      num_non_empty_levels_(0),
      file_indexer_(user_comparator),
      compaction_style_(compaction_style),
      files_(new std::vector<FileMetaData*>[num_levels_]),
      base_level_(num_levels_ == 1 ? -1 : 1),
      files_by_compaction_pri_(num_levels_),
      level0_non_overlapping_(false),
      next_file_to_compact_by_size_(num_levels_),
      compaction_score_(num_levels_),
      compaction_level_(num_levels_),
      l0_delay_trigger_count_(0),
      accumulated_file_size_(0),
      accumulated_raw_key_size_(0),
      accumulated_raw_value_size_(0),
      accumulated_num_non_deletions_(0),
      accumulated_num_deletions_(0),
      current_num_non_deletions_(0),
      current_num_deletions_(0),
      current_num_samples_(0),
      estimated_compaction_needed_bytes_(0),
      finalized_(false),
      force_consistency_checks_(_force_consistency_checks) {
  if (ref_vstorage != nullptr) {
    accumulated_file_size_ = ref_vstorage->accumulated_file_size_;
    accumulated_raw_key_size_ = ref_vstorage->accumulated_raw_key_size_;
    accumulated_raw_value_size_ = ref_vstorage->accumulated_raw_value_size_;
    accumulated_num_non_deletions_ =
        ref_vstorage->accumulated_num_non_deletions_;
    accumulated_num_deletions_ = ref_vstorage->accumulated_num_deletions_;
    current_num_non_deletions_ = ref_vstorage->current_num_non_deletions_;
    current_num_deletions_ = ref_vstorage->current_num_deletions_;
    current_num_samples_ = ref_vstorage->current_num_samples_;
  }
}

Version::Version(ColumnFamilyData* column_family_data, VersionSet* vset,
                 uint64_t version_number)
    : env_(vset->env_),
      cfd_(column_family_data),
      info_log_((cfd_ == nullptr) ? nullptr : cfd_->ioptions()->info_log),
      db_statistics_((cfd_ == nullptr) ? nullptr
                                       : cfd_->ioptions()->statistics),
      table_cache_((cfd_ == nullptr) ? nullptr : cfd_->table_cache()),
      merge_operator_((cfd_ == nullptr) ? nullptr
                                        : cfd_->ioptions()->merge_operator),
      storage_info_(
          (cfd_ == nullptr) ? nullptr : &cfd_->internal_comparator(),
          (cfd_ == nullptr) ? nullptr : cfd_->user_comparator(),
          cfd_ == nullptr ? 0 : cfd_->NumberLevels(),
          cfd_ == nullptr ? kCompactionStyleLevel
                          : cfd_->ioptions()->compaction_style,
          (cfd_ == nullptr || cfd_->current() == nullptr)
              ? nullptr
              : cfd_->current()->storage_info(),
          cfd_ == nullptr ? false : cfd_->ioptions()->force_consistency_checks),
      vset_(vset),
      next_(this),
      prev_(this),
      refs_(0),
      version_number_(version_number) {}

void Version::Get(const ReadOptions& read_options, const LookupKey& k,
                  std::string* value, Status* status,
                  MergeContext* merge_context,
                  RangeDelAggregator* range_del_agg, bool* value_found,
                  bool* key_exists, SequenceNumber* seq) {
  Slice ikey = k.internal_key();
  Slice user_key = k.user_key();

  assert(status->ok() || status->IsMergeInProgress());

  if (key_exists != nullptr) {
    // will falsify below if not found
    *key_exists = true;
  }

  PinnedIteratorsManager pinned_iters_mgr;
  GetContext get_context(
      user_comparator(), merge_operator_, info_log_, db_statistics_,
      status->ok() ? GetContext::kNotFound : GetContext::kMerge, user_key,
      value, value_found, merge_context, range_del_agg, this->env_, seq,
      merge_operator_ ? &pinned_iters_mgr : nullptr);

  // Pin blocks that we read to hold merge operands
  if (merge_operator_) {
    pinned_iters_mgr.StartPinning();
  }

  FilePicker fp(
      storage_info_.files_, user_key, ikey, &storage_info_.level_files_brief_,
      storage_info_.num_non_empty_levels_, &storage_info_.file_indexer_,
      user_comparator(), internal_comparator());
  FdWithKeyRange* f = fp.GetNextFile();
  while (f != nullptr) {
    *status = table_cache_->Get(
        read_options, *internal_comparator(), f->fd, ikey, &get_context,
        cfd_->internal_stats()->GetFileReadHist(fp.GetHitFileLevel()),
        IsFilterSkipped(static_cast<int>(fp.GetHitFileLevel()),
                        fp.IsHitFileLastInLevel()),
        fp.GetCurrentLevel());
    // TODO: examine the behavior for corrupted key
    if (!status->ok()) {
      return;
    }

    switch (get_context.State()) {
      case GetContext::kNotFound:
        // Keep searching in other files
        break;
      case GetContext::kFound:
        if (fp.GetHitFileLevel() == 0) {
          RecordTick(db_statistics_, GET_HIT_L0);
        } else if (fp.GetHitFileLevel() == 1) {
          RecordTick(db_statistics_, GET_HIT_L1);
        } else if (fp.GetHitFileLevel() >= 2) {
          RecordTick(db_statistics_, GET_HIT_L2_AND_UP);
        }
        return;
|
|
|
|
case GetContext::kDeleted:
|
|
|
|
// Use empty error message for speed
|
|
|
|
*status = Status::NotFound();
|
|
|
|
return;
|
|
|
|
case GetContext::kCorrupt:
|
|
|
|
*status = Status::Corruption("corrupted key for ", user_key);
|
|
|
|
return;
|
|
|
|
case GetContext::kMerge:
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
11 years ago
|
|
|
break;
|
|
|
|
}
|
|
|
|
f = fp.GetNextFile();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (GetContext::kMerge == get_context.State()) {
|
|
|
|
if (!merge_operator_) {
|
|
|
|
*status = Status::InvalidArgument(
|
|
|
|
"merge_operator is not properly initialized.");
|
|
|
|
return;
|
|
|
|
}
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
11 years ago
|
|
|
// merge_operands are in saver and we hit the beginning of the key history
|
|
|
|
// do a final merge of nullptr and operands;
|
|
|
|
*status = MergeHelper::TimedFullMerge(merge_operator_, user_key, nullptr,
|
|
|
|
merge_context->GetOperands(), value,
|
|
|
|
info_log_, db_statistics_, env_);
|
|
|
|
} else {
|
Use SST files for Transaction conflict detection
Summary:
Currently, transactions can fail even if there is no actual write conflict. This is due to relying on only the memtables to check for write-conflicts. Users have to tune memtable settings to try to avoid this, but it's hard to figure out exactly how to tune these settings.
With this diff, TransactionDB will use both memtables and SST files to determine if there are any write conflicts. This relies on the fact that BlockBasedTable stores sequence numbers for all writes that happen after any open snapshot. Also, D50295 is needed to prevent SingleDelete from disappearing writes (the TODOs in this test code will be fixed once the other diff is approved and merged).
Note that Optimistic transactions will still rely on tuning memtable settings as we do not want to read from SST while on the write thread. Also, memtable settings can still be used to reduce how often TransactionDB needs to read SST files.
Test Plan: unit tests, db bench
Reviewers: rven, yhchiang, kradhakrishnan, IslamAbdelRahman, sdong
Reviewed By: sdong
Subscribers: dhruba, leveldb, yoshinorim
Differential Revision: https://reviews.facebook.net/D50475
9 years ago
|
|
|
if (key_exists != nullptr) {
|
|
|
|
*key_exists = false;
|
|
|
|
}
|
|
|
|
*status = Status::NotFound(); // Use an empty error message for speed
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
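// Illustrative example for the predicate below, assuming
// optimize_filters_for_hits is enabled and num_non_empty_levels() == 5:
// a lookup that reaches a file on level 4 skips the bloom-filter check,
// and so does the last L0 file when L0 is the only non-empty level.
// Reaching the bottommost data means every upper level already missed, so
// the read is predicted to be a hit and the filter probe is saved.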
bool Version::IsFilterSkipped(int level, bool is_file_last_in_level) {
  // Reaching the bottom level implies misses at all upper levels, so we'll
  // skip checking the filters when we predict a hit.
  return cfd_->ioptions()->optimize_filters_for_hits &&
         (level > 0 || is_file_last_in_level) &&
         level == storage_info_.num_non_empty_levels() - 1;
}

void VersionStorageInfo::GenerateLevelFilesBrief() {
  level_files_brief_.resize(num_non_empty_levels_);
  for (int level = 0; level < num_non_empty_levels_; level++) {
    DoGenerateLevelFilesBrief(
        &level_files_brief_[level], files_[level], &arena_);
  }
}

void Version::PrepareApply(
    const MutableCFOptions& mutable_cf_options,
    bool update_stats) {
  UpdateAccumulatedStats(update_stats);
  storage_info_.UpdateNumNonEmptyLevels();
  storage_info_.CalculateBaseBytes(*cfd_->ioptions(), mutable_cf_options);
  storage_info_.UpdateFilesByCompactionPri(cfd_->ioptions()->compaction_pri);
  storage_info_.GenerateFileIndexer();
  storage_info_.GenerateLevelFilesBrief();
  storage_info_.GenerateLevel0NonOverlapping();
}

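// In short: the helper below lazily fills per-file statistics from the
// file's table properties. It returns true only when the properties were
// loaded by this call, which tells the caller that the accumulated stats
// still need to be updated with this file's numbers.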
bool Version::MaybeInitializeFileMetaData(FileMetaData* file_meta) {
  if (file_meta->init_stats_from_file ||
      file_meta->compensated_file_size > 0) {
    return false;
  }
  std::shared_ptr<const TableProperties> tp;
  Status s = GetTableProperties(&tp, file_meta);
  file_meta->init_stats_from_file = true;
  if (!s.ok()) {
    Log(InfoLogLevel::ERROR_LEVEL, vset_->db_options_->info_log,
        "Unable to load table properties for file %" PRIu64 " --- %s\n",
        file_meta->fd.GetNumber(), s.ToString().c_str());
    return false;
  }
  if (tp.get() == nullptr) return false;
  file_meta->num_entries = tp->num_entries;
  file_meta->num_deletions = GetDeletedKeys(tp->user_collected_properties);
  file_meta->raw_value_size = tp->raw_value_size;
  file_meta->raw_key_size = tp->raw_key_size;

  return true;
}

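// Bookkeeping note: the accumulated_* counters below only ever grow, while
// the current_* counters are per-sample values that RemoveCurrentStats()
// subtracts again when a file goes away. The average value size consumed
// later by ComputeCompensatedSizes() (via GetAverageValueSize()) is,
// presumably, derived from these accumulated totals.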
void VersionStorageInfo::UpdateAccumulatedStats(FileMetaData* file_meta) {
  assert(file_meta->init_stats_from_file);
  accumulated_file_size_ += file_meta->fd.GetFileSize();
  accumulated_raw_key_size_ += file_meta->raw_key_size;
  accumulated_raw_value_size_ += file_meta->raw_value_size;
  accumulated_num_non_deletions_ +=
      file_meta->num_entries - file_meta->num_deletions;
  accumulated_num_deletions_ += file_meta->num_deletions;

  current_num_non_deletions_ +=
      file_meta->num_entries - file_meta->num_deletions;
  current_num_deletions_ += file_meta->num_deletions;
  current_num_samples_++;
}

void VersionStorageInfo::RemoveCurrentStats(FileMetaData* file_meta) {
  if (file_meta->init_stats_from_file) {
    current_num_non_deletions_ -=
        file_meta->num_entries - file_meta->num_deletions;
    current_num_deletions_ -= file_meta->num_deletions;
    current_num_samples_--;
  }
}

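// Example of the sampling cap below: with kMaxInitCount == 20, at most 20
// files per Version creation pay the cost of reading table properties,
// except when max_open_files == -1 -- then every table reader is already
// open, the properties are already in memory, and the loop initializes all
// files without counting them against the cap.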
void Version::UpdateAccumulatedStats(bool update_stats) {
  if (update_stats) {
    // maximum number of table properties loaded from files.
    const int kMaxInitCount = 20;
    int init_count = 0;
    // Here only the first kMaxInitCount files which haven't been
    // initialized from file will be updated with num_deletions.
    // The motivation is to cap the maximum I/O per Version creation.
    // The reason for choosing files from lower levels instead of higher
    // levels is that this design propagates the initialization upward:
    // when the num_deletions of lower-level files are updated, those files
    // get an accurate compensated_file_size, which triggers lower-level to
    // higher-level compactions, which in turn create higher-level files
    // whose num_deletions will be updated here.
    for (int level = 0;
         level < storage_info_.num_levels_ && init_count < kMaxInitCount;
         ++level) {
      for (auto* file_meta : storage_info_.files_[level]) {
        if (MaybeInitializeFileMetaData(file_meta)) {
          // each FileMeta will be initialized only once.
          storage_info_.UpdateAccumulatedStats(file_meta);
          // when option "max_open_files" is -1, all the file metadata has
          // already been read, so MaybeInitializeFileMetaData() won't incur
          // any I/O cost.
          if (vset_->db_options_->max_open_files == -1) {
            continue;
          }
          if (++init_count >= kMaxInitCount) {
            break;
          }
        }
      }
    }
    // In case all sampled files contain only deletion entries, we load the
    // table properties of a file in a higher level to initialize that value.
    for (int level = storage_info_.num_levels_ - 1;
         storage_info_.accumulated_raw_value_size_ == 0 && level >= 0;
         --level) {
      for (int i = static_cast<int>(storage_info_.files_[level].size()) - 1;
           storage_info_.accumulated_raw_value_size_ == 0 && i >= 0; --i) {
        if (MaybeInitializeFileMetaData(storage_info_.files_[level][i])) {
          storage_info_.UpdateAccumulatedStats(storage_info_.files_[level][i]);
        }
      }
    }
  }

  storage_info_.ComputeCompensatedSizes();
}

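// Worked example of the compensation below (illustrative numbers): a file
// with num_entries == 200, num_deletions == 150, an average value size of
// 100 bytes and kDeletionWeightOnCompaction == 2 gets
//   (150 * 2 - 200) * 100 * 2 = 20000
// extra bytes added to its compensated_file_size, so deletion-heavy files
// look larger to the compaction picker than their raw size suggests.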
void VersionStorageInfo::ComputeCompensatedSizes() {
  static const int kDeletionWeightOnCompaction = 2;
  uint64_t average_value_size = GetAverageValueSize();

  // compute the compensated size
  for (int level = 0; level < num_levels_; level++) {
    for (auto* file_meta : files_[level]) {
      // Here we only compute compensated_file_size for those file_meta
      // whose compensated_file_size is uninitialized (== 0). This is true only
      // for files that have been created right now and no other thread has
      // access to them. That's why we can safely mutate compensated_file_size.
      if (file_meta->compensated_file_size == 0) {
        file_meta->compensated_file_size = file_meta->fd.GetFileSize();
        // We only boost the size of deletion entries of a file when the
        // number of deletion entries is greater than the number of
        // non-deletion entries in the file. The motivation here is that in
        // a stable workload, the number of deletion entries should be roughly
        // equal to the number of non-deletion entries. If we compensated the
        // size of deletion entries in a stable workload, the deletion
        // compensation logic might introduce unwanted effects that change the
        // shape of the LSM tree.
        if (file_meta->num_deletions * 2 >= file_meta->num_entries) {
          file_meta->compensated_file_size +=
              (file_meta->num_deletions * 2 - file_meta->num_entries) *
              average_value_size * kDeletionWeightOnCompaction;
        }
      }
    }
  }
}

int VersionStorageInfo::MaxInputLevel() const {
  if (compaction_style_ == kCompactionStyleLevel) {
    return num_levels() - 2;
  }
  return 0;
}

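// Rough example of the estimate below (made-up sizes): suppose L0 holds
// 40 MB and has hit its file-count trigger, the base level (L1) holds 80 MB
// against a 64 MB target, and L2 is under target. The estimate then counts
// the 40 MB of L0 input, the 80 MB of L1 data it merges into, and the
// (40 + 80) - 64 = 56 MB of overflow that must be pushed on into L2, scaled
// by the observed L1-to-L2 size ratio -- an approximation of the total bytes
// compactions still have to rewrite to bring every level back under target.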
void VersionStorageInfo::EstimateCompactionBytesNeeded(
    const MutableCFOptions& mutable_cf_options) {
  // Only implemented for level-based compaction
  if (compaction_style_ != kCompactionStyleLevel) {
    estimated_compaction_needed_bytes_ = 0;
    return;
  }

  // Start from Level 0: if level 0 qualifies for compaction to level 1,
  // we estimate the size of that compaction.
  // Then we move on to the next level and see whether it qualifies for
  // compaction to the next level. The size of the level is estimated as the
  // actual size on the level plus the input bytes from the previous level,
  // if there are any. If it exceeds the level target, take the exceeded bytes
  // as compaction input and add the size of that compaction to the total.
  // We keep doing this for level 2, 3, etc., until the last level, and return
  // the accumulated bytes.

  uint64_t bytes_compact_to_next_level = 0;
  // Level 0
  bool level0_compact_triggered = false;
  if (static_cast<int>(files_[0].size()) >
      mutable_cf_options.level0_file_num_compaction_trigger) {
    level0_compact_triggered = true;
    for (auto* f : files_[0]) {
      bytes_compact_to_next_level += f->fd.GetFileSize();
    }
    estimated_compaction_needed_bytes_ = bytes_compact_to_next_level;
  } else {
    estimated_compaction_needed_bytes_ = 0;
  }

  // Level 1 and up.
  uint64_t bytes_next_level = 0;
  for (int level = base_level(); level <= MaxInputLevel(); level++) {
    uint64_t level_size = 0;
    if (bytes_next_level > 0) {
#ifndef NDEBUG
      uint64_t level_size2 = 0;
      for (auto* f : files_[level]) {
        level_size2 += f->fd.GetFileSize();
      }
      assert(level_size2 == bytes_next_level);
#endif
      level_size = bytes_next_level;
      bytes_next_level = 0;
    } else {
      for (auto* f : files_[level]) {
        level_size += f->fd.GetFileSize();
      }
    }
    if (level == base_level() && level0_compact_triggered) {
      // Add base level size to compaction if level0 compaction triggered.
      estimated_compaction_needed_bytes_ += level_size;
    }
    // Add size added by previous compaction
    level_size += bytes_compact_to_next_level;
    bytes_compact_to_next_level = 0;
    uint64_t level_target = MaxBytesForLevel(level);
    if (level_size > level_target) {
      bytes_compact_to_next_level = level_size - level_target;
      // Estimate the actual compaction fan-out ratio as the size ratio
      // between the two levels.

      assert(bytes_next_level == 0);
      if (level + 1 < num_levels_) {
        for (auto* f : files_[level + 1]) {
          bytes_next_level += f->fd.GetFileSize();
        }
      }
      if (bytes_next_level > 0) {
        assert(level_size > 0);
        estimated_compaction_needed_bytes_ += static_cast<uint64_t>(
            static_cast<double>(bytes_compact_to_next_level) *
            (static_cast<double>(bytes_next_level) /
                 static_cast<double>(level_size) +
             1));
      }
    }
  }
}

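// Scoring example (illustrative numbers): with
// level0_file_num_compaction_trigger == 4 and six non-compacting L0 files,
// L0 scores 6 / 4 = 1.5. A level whose non-compacting bytes total 150 MB
// against a 100 MB MaxBytesForLevel() target also scores 1.5. A score above
// 1.0 means the level needs compaction; the sort at the end of this function
// puts the most urgent level first.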
void VersionStorageInfo::ComputeCompactionScore(
    const ImmutableCFOptions& immutable_cf_options,
    const MutableCFOptions& mutable_cf_options) {
  for (int level = 0; level <= MaxInputLevel(); level++) {
    double score;
    if (level == 0) {
      // We treat level-0 specially by bounding the number of files
      // instead of number of bytes for two reasons:
      //
      // (1) With larger write-buffer sizes, it is nice not to do too
      // many level-0 compactions.
      //
      // (2) The files in level-0 are merged on every read and
      // therefore we wish to avoid too many files when the individual
      // file size is small (perhaps because of a small write-buffer
      // setting, or very high compression ratios, or lots of
      // overwrites/deletions).
      int num_sorted_runs = 0;
      uint64_t total_size = 0;
      for (auto* f : files_[level]) {
        if (!f->being_compacted) {
          total_size += f->compensated_file_size;
          num_sorted_runs++;
        }
      }
      if (compaction_style_ == kCompactionStyleUniversal) {
        // For universal compaction, we use level0 score to indicate
        // compaction score for the whole DB. Adding other levels as if
        // they are L0 files.
        for (int i = 1; i < num_levels(); i++) {
          if (!files_[i].empty() && !files_[i][0]->being_compacted) {
            num_sorted_runs++;
          }
        }
      }

      if (compaction_style_ == kCompactionStyleFIFO) {
        score =
            static_cast<double>(total_size) /
            immutable_cf_options.compaction_options_fifo.max_table_files_size;
      } else {
        score = static_cast<double>(num_sorted_runs) /
                mutable_cf_options.level0_file_num_compaction_trigger;
      }
    } else {
      // Compute the ratio of current size to size limit.
      uint64_t level_bytes_no_compacting = 0;
      for (auto f : files_[level]) {
        if (!f->being_compacted) {
          level_bytes_no_compacting += f->compensated_file_size;
        }
      }
      score = static_cast<double>(level_bytes_no_compacting) /
              MaxBytesForLevel(level);
    }
    compaction_level_[level] = level;
    compaction_score_[level] = score;
  }

  // Sort all the levels based on their score. Higher scores get listed
  // first. Use bubble sort because the number of entries is small.
  for (int i = 0; i < num_levels() - 2; i++) {
    for (int j = i + 1; j < num_levels() - 1; j++) {
      if (compaction_score_[i] < compaction_score_[j]) {
        double score = compaction_score_[i];
        int level = compaction_level_[i];
        compaction_score_[i] = compaction_score_[j];
        compaction_level_[i] = compaction_level_[j];
        compaction_score_[j] = score;
        compaction_level_[j] = level;
      }
    }
  }
  ComputeFilesMarkedForCompaction();
  EstimateCompactionBytesNeeded(mutable_cf_options);
}

void VersionStorageInfo::ComputeFilesMarkedForCompaction() {
  files_marked_for_compaction_.clear();
  int last_qualify_level = 0;

  // Do not include files from the last level with data.
  // If the table properties collector suggests a file on the last level,
  // we should not move it to a new level.
  for (int level = num_levels() - 1; level >= 1; level--) {
    if (!files_[level].empty()) {
      last_qualify_level = level - 1;
      break;
    }
  }

  for (int level = 0; level <= last_qualify_level; level++) {
    for (auto* f : files_[level]) {
      if (!f->being_compacted && f->marked_for_compaction) {
        files_marked_for_compaction_.emplace_back(level, f);
      }
    }
  }
}

namespace {

// used to sort files by size
struct Fsize {
  size_t index;
  FileMetaData* file;
};

// Comparator that is used to sort files based on their size
// In normal mode: descending size
bool CompareCompensatedSizeDescending(const Fsize& first, const Fsize& second) {
  return (first.file->compensated_file_size >
          second.file->compensated_file_size);
}
}  // anonymous namespace

void VersionStorageInfo::AddFile(int level, FileMetaData* f, Logger* info_log) {
  auto* level_files = &files_[level];
  // Must not overlap
#ifndef NDEBUG
  if (level > 0 && !level_files->empty() &&
      internal_comparator_->Compare(
          (*level_files)[level_files->size() - 1]->largest, f->smallest) >= 0) {
    auto* f2 = (*level_files)[level_files->size() - 1];
    if (info_log != nullptr) {
      Error(info_log, "Adding new file %" PRIu64
                      " range (%s, %s) to level %d but overlapping "
                      "with existing file %" PRIu64 " %s %s",
            f->fd.GetNumber(), f->smallest.DebugString(true).c_str(),
            f->largest.DebugString(true).c_str(), level, f2->fd.GetNumber(),
            f2->smallest.DebugString(true).c_str(),
            f2->largest.DebugString(true).c_str());
      LogFlush(info_log);
    }
    assert(false);
  }
#endif
  f->refs++;
  level_files->push_back(f);
}

// Version::PrepareApply() needs to be called before calling this function,
// or the following functions must have been called:
// 1. UpdateNumNonEmptyLevels();
// 2. CalculateBaseBytes();
// 3. UpdateFilesByCompactionPri();
// 4. GenerateFileIndexer();
// 5. GenerateLevelFilesBrief();
// 6. GenerateLevel0NonOverlapping();
void VersionStorageInfo::SetFinalized() {
  finalized_ = true;
#ifndef NDEBUG
  if (compaction_style_ != kCompactionStyleLevel) {
    // Not level based compaction.
    return;
  }
  assert(base_level_ < 0 || num_levels() == 1 ||
         (base_level_ >= 1 && base_level_ < num_levels()));
  // Verify that levels 1 .. base_level-1 are empty.
  for (int level = 1; level < base_level(); level++) {
    assert(NumLevelBytes(level) == 0);
  }
  uint64_t max_bytes_prev_level = 0;
  for (int level = base_level(); level < num_levels() - 1; level++) {
    if (LevelFiles(level).size() == 0) {
      continue;
    }
    assert(MaxBytesForLevel(level) >= max_bytes_prev_level);
    max_bytes_prev_level = MaxBytesForLevel(level);
  }
  int num_empty_non_l0_level = 0;
  for (int level = 0; level < num_levels(); level++) {
    assert(LevelFiles(level).size() == 0 ||
           LevelFiles(level).size() == LevelFilesBrief(level).num_files);
    if (level > 0 && NumLevelBytes(level) > 0) {
      num_empty_non_l0_level++;
    }
    if (LevelFiles(level).size() > 0) {
      assert(level < num_non_empty_levels());
    }
  }
  assert(compaction_level_.size() > 0);
  assert(compaction_level_.size() == compaction_score_.size());
#endif
}

void VersionStorageInfo::UpdateNumNonEmptyLevels() {
  num_non_empty_levels_ = num_levels_;
  for (int i = num_levels_ - 1; i >= 0; i--) {
    if (files_[i].size() != 0) {
      return;
    } else {
      num_non_empty_levels_ = i;
    }
  }
}

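// Example of the sort key computed below (illustrative numbers): a 10 MB
// file whose key range overlaps 25 MB of data in the next level gets
//   25 MB * 1024 / 10 MB = 2560,
// while a 10 MB file overlapping only 5 MB gets 512. Files are sorted by
// this key in ascending order, so the file that is cheapest to push down
// (least rewrite per byte of input) is compacted first.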
namespace {
// Sort `temp` based on ratio of overlapping size over file size
void SortFileByOverlappingRatio(
    const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& files,
    const std::vector<FileMetaData*>& next_level_files,
    std::vector<Fsize>* temp) {
  std::unordered_map<uint64_t, uint64_t> file_to_order;
  auto next_level_it = next_level_files.begin();

  for (auto& file : files) {
    uint64_t overlapping_bytes = 0;
    // Skip files in the next level that are entirely smaller than the
    // current file
    while (next_level_it != next_level_files.end() &&
           icmp.Compare((*next_level_it)->largest, file->smallest) < 0) {
      next_level_it++;
    }

    while (next_level_it != next_level_files.end() &&
           icmp.Compare((*next_level_it)->smallest, file->largest) < 0) {
      overlapping_bytes += (*next_level_it)->fd.file_size;

      if (icmp.Compare((*next_level_it)->largest, file->largest) > 0) {
        // next level file crosses the largest key boundary of current file.
        break;
      }
      next_level_it++;
    }

    assert(file->fd.file_size != 0);
    file_to_order[file->fd.GetNumber()] =
        overlapping_bytes * 1024u / file->fd.file_size;
  }

  std::sort(temp->begin(), temp->end(),
            [&](const Fsize& f1, const Fsize& f2) -> bool {
              return file_to_order[f1.file->fd.GetNumber()] <
                     file_to_order[f2.file->fd.GetNumber()];
            });
}
}  // namespace

void VersionStorageInfo::UpdateFilesByCompactionPri(
|
|
|
|
CompactionPri compaction_pri) {
|
|
|
|
if (compaction_style_ == kCompactionStyleFIFO ||
|
|
|
|
compaction_style_ == kCompactionStyleUniversal) {
|
|
|
|
// don't need this
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// No need to sort the highest level because it is never compacted.
|
|
|
|
for (int level = 0; level < num_levels() - 1; level++) {
|
|
|
|
const std::vector<FileMetaData*>& files = files_[level];
|
|
|
|
auto& files_by_compaction_pri = files_by_compaction_pri_[level];
|
|
|
|
assert(files_by_compaction_pri.size() == 0);
|
|
|
|
|
|
|
|
// populate a temp vector for sorting based on size
|
|
|
|
std::vector<Fsize> temp(files.size());
|
|
|
|
for (size_t i = 0; i < files.size(); i++) {
|
|
|
|
temp[i].index = i;
|
|
|
|
temp[i].file = files[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
// sort the top number_of_files_to_sort_ based on file size
|
|
|
|
size_t num = VersionStorageInfo::kNumberFilesToSort;
|
|
|
|
if (num > temp.size()) {
|
|
|
|
num = temp.size();
|
|
|
|
}
|
|
|
|
switch (compaction_pri) {
|
|
|
|
case kByCompensatedSize:
|
|
|
|
std::partial_sort(temp.begin(), temp.begin() + num, temp.end(),
|
|
|
|
CompareCompensatedSizeDescending);
|
|
|
|
break;
|
|
|
|
case kOldestLargestSeqFirst:
|
|
|
|
std::sort(temp.begin(), temp.end(),
|
|
|
|
[this](const Fsize& f1, const Fsize& f2) -> bool {
|
|
|
|
return f1.file->largest_seqno < f2.file->largest_seqno;
|
|
|
|
});
|
|
|
|
break;
|
|
|
|
case kOldestSmallestSeqFirst:
|
|
|
|
std::sort(temp.begin(), temp.end(),
|
|
|
|
[this](const Fsize& f1, const Fsize& f2) -> bool {
|
|
|
|
return f1.file->smallest_seqno < f2.file->smallest_seqno;
|
|
|
|
});
|
|
|
|
break;
|
|
|
|
case kMinOverlappingRatio:
|
|
|
|
SortFileByOverlappingRatio(*internal_comparator_, files_[level],
|
|
|
|
files_[level + 1], &temp);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
assert(false);
|
|
|
|
}
|
|
|
|
assert(temp.size() == files.size());
|
|
|
|
|
|
|
|
// initialize files_by_compaction_pri_
|
|
|
|
for (size_t i = 0; i < temp.size(); i++) {
|
|
|
|
files_by_compaction_pri.push_back(static_cast<int>(temp[i].index));
|
|
|
|
}
|
|
|
|
next_file_to_compact_by_size_[level] = 0;
|
|
|
|
assert(files_[level].size() == files_by_compaction_pri_[level].size());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Allowing L0 -> L1 trivial move on sorted data
Summary:
This diff updates the logic of how we do trivial move, now trivial move can run on any number of files in input level as long as they are not overlapping
The conditions for trivial move have been updated
Introduced conditions:
- Trivial move cannot happen if we have a compaction filter (except if the compaction is not manual)
- Input level files cannot be overlapping
Removed conditions:
- Trivial move only run when the compaction is not manual
- Input level should can contain only 1 file
More context on what tests failed because of Trivial move
```
DBTest.CompactionsGenerateMultipleFiles
This test is expecting compaction on a file in L0 to generate multiple files in L1, this test will fail with trivial move because we end up with one file in L1
```
```
DBTest.NoSpaceCompactRange
This test expect compaction to fail when we force environment to report running out of space, of course this is not valid in trivial move situation
because trivial move does not need any extra space, and did not check for that
```
```
DBTest.DropWrites
Similar to DBTest.NoSpaceCompactRange
```
```
DBTest.DeleteObsoleteFilesPendingOutputs
This test expect that a file in L2 is deleted after it's moved to L3, this is not valid with trivial move because although the file was moved it is now used by L3
```
```
CuckooTableDBTest.CompactionIntoMultipleFiles
Same as DBTest.CompactionsGenerateMultipleFiles
```
This diff is based on work by @sdong https://reviews.facebook.net/D34149
Test Plan: make -j64 check
Reviewers: rven, sdong, igor
Reviewed By: igor
Subscribers: yhchiang, ott, march, dhruba, sdong
Differential Revision: https://reviews.facebook.net/D34797
10 years ago
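
As a rough illustration of the non-overlap precondition described above (this is not the picker code itself; KeyRange and the comparison on plain strings are simplifying assumptions), the check amounts to sorting the candidate input files by smallest key and verifying that no file starts at or before the point where its predecessor ends:

```cpp
#include <algorithm>
#include <string>
#include <vector>

// Hypothetical key-range record; the real code works on FileMetaData /
// FdWithKeyRange with an InternalKeyComparator.
struct KeyRange {
  std::string smallest;
  std::string largest;
};

// True if the candidate input files are pairwise non-overlapping, i.e. a
// trivial move of all of them could be considered.
bool FilesAreNonOverlapping(std::vector<KeyRange> files) {
  std::sort(files.begin(), files.end(),
            [](const KeyRange& a, const KeyRange& b) {
              return a.smallest < b.smallest;
            });
  for (size_t i = 1; i < files.size(); ++i) {
    if (files[i - 1].largest >= files[i].smallest) {
      return false;  // this file starts at or before the previous one ends
    }
  }
  return true;
}
```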
|
|
|
void VersionStorageInfo::GenerateLevel0NonOverlapping() {
|
|
|
|
assert(!finalized_);
|
|
|
|
level0_non_overlapping_ = true;
|
|
|
|
if (level_files_brief_.size() == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// A copy of L0 files sorted by smallest key
|
|
|
|
std::vector<FdWithKeyRange> level0_sorted_file(
|
|
|
|
level_files_brief_[0].files,
|
|
|
|
level_files_brief_[0].files + level_files_brief_[0].num_files);
|
|
|
|
std::sort(level0_sorted_file.begin(), level0_sorted_file.end(),
|
|
|
|
[this](const FdWithKeyRange& f1, const FdWithKeyRange& f2) -> bool {
|
|
|
|
return (internal_comparator_->Compare(f1.smallest_key,
|
|
|
|
f2.smallest_key) < 0);
|
|
|
|
});
|
|
|
|
|
|
|
|
for (size_t i = 1; i < level0_sorted_file.size(); ++i) {
|
|
|
|
FdWithKeyRange& f = level0_sorted_file[i];
|
|
|
|
FdWithKeyRange& prev = level0_sorted_file[i - 1];
|
|
|
|
if (internal_comparator_->Compare(prev.largest_key, f.smallest_key) >= 0) {
|
|
|
|
level0_non_overlapping_ = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Version::Ref() {
|
|
|
|
++refs_;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Version::Unref() {
|
|
|
|
assert(refs_ >= 1);
|
|
|
|
--refs_;
|
|
|
|
if (refs_ == 0) {
|
|
|
|
delete this;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool VersionStorageInfo::OverlapInLevel(int level,
|
|
|
|
const Slice* smallest_user_key,
|
|
|
|
const Slice* largest_user_key) {
|
|
|
|
if (level >= num_non_empty_levels_) {
|
|
|
|
// empty level, no overlap
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return SomeFileOverlapsRange(*internal_comparator_, (level > 0),
|
|
|
|
level_files_brief_[level], smallest_user_key,
|
|
|
|
largest_user_key);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
|
|
|
// If hint_index is specified, then it points to a file in the
|
|
|
|
// overlapping range.
|
|
|
|
// If file_index is non-nullptr, it is set to the index of one file in the overlapping range.
|
|
|
|
void VersionStorageInfo::GetOverlappingInputs(
|
|
|
|
int level, const InternalKey* begin, const InternalKey* end,
|
|
|
|
std::vector<FileMetaData*>* inputs, int hint_index, int* file_index,
|
|
|
|
bool expand_range) const {
|
|
|
|
if (level >= num_non_empty_levels_) {
|
|
|
|
// this level is empty, no overlapping inputs
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
inputs->clear();
|
|
|
|
Slice user_begin, user_end;
|
|
|
|
if (begin != nullptr) {
|
|
|
|
user_begin = begin->user_key();
|
|
|
|
}
|
|
|
|
if (end != nullptr) {
|
|
|
|
user_end = end->user_key();
|
|
|
|
}
|
Assertion failure while running with unit tests with OPT=-g
Summary:
When we expand the range of keys for a level 0 compaction, we
need to invoke ParentFilesInCompaction() only once for the
entire range of keys that is being compacted. We were invoking
it for each file that was being compacted, but this triggers
an assertion because the files' ranges were contiguous but
non-overlapping.
I renamed ParentFilesInCompaction to ParentRangeInCompaction
to adequately represent that it is the range-of-keys and
not individual files that we compact in a single compaction run.
Here is the assertion that is fixed by this patch.
db_test: db/version_set.cc:585: void leveldb::Version::ExtendOverlappingInputs(int, const leveldb::Slice&, const leveldb::Slice&, std::vector<leveldb::FileMetaData*, std::allocator<leveldb::FileMetaData*> >*, int): Assertion `user_cmp->Compare(flimit, user_begin) >= 0' failed.
Test Plan: make clean check OPT=-g
Reviewers: sheki
Reviewed By: sheki
CC: MarkCallaghan, emayanke, leveldb
Differential Revision: https://reviews.facebook.net/D6963
12 years ago
|
|
|
if (file_index) {
|
|
|
|
*file_index = -1;
|
|
|
|
}
|
|
|
|
const Comparator* user_cmp = user_comparator_;
|
|
|
|
if (begin != nullptr && end != nullptr && level > 0) {
|
level compaction expansion
Summary:
reimplement the compaction expansion on lower level.
Considering such a case:
input level file: 1[B E] 2[F G] 3[H I] 4 [J M]
output level file: 5[A C] 6[D K] 7[L O]
If we initially pick file 2, now we will compact file 2 and 6. But we can safely compact 2, 3 and 6 without expanding the output level.
The previous code is messy and wrong.
In this diff, I first determine the input range [a, b], and output range [c, d],
then we get the range [e,f] = [min(a, c), max(b, d)] and put all eligible clean-cut files within [e, f] into this compaction.
**Note: clean-cut means the files don't have the same user key on the boundaries of some files that are not chosen in this compaction**.
Closes https://github.com/facebook/rocksdb/pull/1760
Differential Revision: D4395564
Pulled By: lightmark
fbshipit-source-id: 2dc2c5c
8 years ago
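
A minimal sketch of the range arithmetic described above, assuming plain string user keys instead of InternalKey/Slice (types and names here are illustrative, not RocksDB API): take the input-level range [a, b] and the output-level range [c, d], then expand to [e, f] = [min(a, c), max(b, d)] before gathering clean-cut files inside [e, f].

```cpp
#include <algorithm>
#include <string>
#include <utility>

using Range = std::pair<std::string, std::string>;  // {smallest, largest}

// Union of the input-level and output-level key ranges: [e, f].
Range ExpandedRange(const Range& input, const Range& output) {
  return {std::min(input.first, output.first),
          std::max(input.second, output.second)};
}

// Example from the commit message: if the chosen input files cover [F, I]
// and output file 6 covers [D, K], the expanded range is [D, K]:
//   Range e_f = ExpandedRange({"F", "I"}, {"D", "K"});  // {"D", "K"}
```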
|
|
|
GetOverlappingInputsRangeBinarySearch(level, user_begin, user_end, inputs,
|
|
|
|
hint_index, file_index);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t i = 0; i < level_files_brief_[level].num_files; ) {
|
|
|
|
FdWithKeyRange* f = &(level_files_brief_[level].files[i++]);
|
create compressed_levels_ in Version, allocate its space using arena. Make Version::Get, Version::FindFile faster
Summary:
Define CompressedFileMetaData that just contains fd, smallest_slice, largest_slice. Create compressed_levels_ in Version, the space is allocated using arena
This increases file metadata locality and speeds up "Get" and "FindFile".
A benchmark with in-memory tmpfs shows about a 4% improvement under "random read" and a 2% improvement under "read while writing".
benchmark command:
./db_bench --db=/mnt/db/rocksdb --num_levels=6 --key_size=20 --prefix_size=20 --keys_per_prefix=0 --value_size=100 --block_size=4096 --cache_size=17179869184 --cache_numshardbits=6 --compression_type=none --compression_ratio=1 --min_level_to_compress=-1 --disable_seek_compaction=1 --hard_rate_limit=2 --write_buffer_size=134217728 --max_write_buffer_number=2 --level0_file_num_compaction_trigger=8 --target_file_size_base=33554432 --max_bytes_for_level_base=1073741824 --disable_wal=0 --sync=0 --disable_data_sync=1 --verify_checksum=1 --delete_obsolete_files_period_micros=314572800 --max_grandparent_overlap_factor=10 --max_background_compactions=4 --max_background_flushes=0 --level0_slowdown_writes_trigger=16 --level0_stop_writes_trigger=24 --statistics=0 --stats_per_interval=0 --stats_interval=1048576 --histogram=0 --use_plain_table=1 --open_files=-1 --mmap_read=1 --mmap_write=0 --memtablerep=prefix_hash --bloom_bits=10 --bloom_locality=1 --perf_level=0 --benchmarks=readwhilewriting,readwhilewriting,readwhilewriting --use_existing_db=1 --num=52428800 --threads=1 —writes_per_second=81920
Read Random:
From 1.8363 ms/op, improve to 1.7587 ms/op.
Read while writing:
From 2.985 ms/op, improve to 2.924 ms/op.
Test Plan:
make all check
Reviewers: ljin, haobo, yhchiang, sdong
Reviewed By: sdong
Subscribers: dhruba, igor
Differential Revision: https://reviews.facebook.net/D19419
11 years ago
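
The locality idea above can be sketched as a small, flat per-file record (file number plus boundary keys) kept in one contiguous array per level, so that FindFile-style binary searches touch adjacent memory. The struct and function below are illustrative only, not the real FdWithKeyRange / LevelFilesBrief layout:

```cpp
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>

// Illustrative compact per-file record, kept contiguously for each level.
struct FileBrief {
  uint64_t file_number;
  std::string_view smallest_key;  // would point into arena-owned memory
  std::string_view largest_key;
};

struct LevelBrief {
  std::vector<FileBrief> files;  // sorted by smallest_key on levels > 0
};

// Index of the first file whose largest_key >= key, or files.size() if none;
// the contiguous layout keeps this binary search cache-friendly.
size_t FindFirstFileNotBefore(const LevelBrief& level, std::string_view key) {
  size_t lo = 0, hi = level.files.size();
  while (lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    if (level.files[mid].largest_key < key) {
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  return lo;
}
```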
|
|
|
const Slice file_start = ExtractUserKey(f->smallest_key);
|
|
|
|
const Slice file_limit = ExtractUserKey(f->largest_key);
|
|
|
|
if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
|
|
|
|
// "f" is completely before specified range; skip it
|
|
|
|
} else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
|
|
|
|
// "f" is completely after specified range; skip it
|
|
|
|
} else {
|
|
|
|
inputs->push_back(files_[level][i-1]);
|
|
|
|
if (level == 0 && expand_range) {
|
|
|
|
// Level-0 files may overlap each other. So check if the newly
|
|
|
|
// added file has expanded the range. If so, restart search.
|
|
|
|
if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
|
|
|
|
user_begin = file_start;
|
|
|
|
inputs->clear();
|
|
|
|
i = 0;
|
|
|
|
} else if (end != nullptr
|
|
|
|
&& user_cmp->Compare(file_limit, user_end) > 0) {
|
|
|
|
user_end = file_limit;
|
|
|
|
inputs->clear();
|
|
|
|
i = 0;
|
|
|
|
}
|
|
|
|
} else if (file_index) {
|
|
|
|
*file_index = static_cast<int>(i) - 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
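
The level-0 branch of the function above restarts its scan whenever a newly accepted file widens the search range. A standalone sketch of that restart loop over simplified string ranges (illustrative types and names, not the RocksDB API):

```cpp
#include <cstddef>
#include <string>
#include <vector>

struct KeyRange {
  std::string smallest;
  std::string largest;
};

// Collect the indices of every range overlapping [begin, end], widening
// [begin, end] and restarting from the beginning whenever an accepted range
// extends past the current bounds -- the same restart pattern as the
// level-0 branch above.
std::vector<size_t> OverlappingWithExpansion(const std::vector<KeyRange>& files,
                                             std::string begin,
                                             std::string end) {
  std::vector<size_t> picked;
  for (size_t i = 0; i < files.size();) {
    const KeyRange& f = files[i++];
    if (f.largest < begin || f.smallest > end) {
      continue;  // completely outside the current range; skip it
    }
    picked.push_back(i - 1);
    if (f.smallest < begin) {        // range grew to the left: restart
      begin = f.smallest;
      picked.clear();
      i = 0;
    } else if (f.largest > end) {    // range grew to the right: restart
      end = f.largest;
      picked.clear();
      i = 0;
    }
  }
  return picked;
}
```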
|
|
|
|
|
|
|
|
// Store in "*inputs" files in "level" that within range [begin,end]
|
|
|
|
// Guarantee a "clean cut" boundary between the files in inputs
|
|
|
|
// and the surrounding files, while including the maximum number of files.
|
|
|
|
// This will ensure that no parts of a key are lost during compaction.
|
|
|
|
// If hint_index is specified, then it points to a file in the range.
|
|
|
|
// If file_index is non-nullptr, it is set to the index of one file in the range.
|
|
|
|
void VersionStorageInfo::GetCleanInputsWithinInterval(
|
|
|
|
int level, const InternalKey* begin, const InternalKey* end,
|
|
|
|
std::vector<FileMetaData*>* inputs, int hint_index, int* file_index) const {
|
|
|
|
if (level >= num_non_empty_levels_) {
|
|
|
|
// this level is empty, no inputs within range
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
inputs->clear();
|
|
|
|
Slice user_begin, user_end;
|
|
|
|
if (begin != nullptr) {
|
|
|
|
user_begin = begin->user_key();
|
|
|
|
}
|
|
|
|
if (end != nullptr) {
|
|
|
|
user_end = end->user_key();
|
|
|
|
}
|
|
|
|
if (file_index) {
|
|
|
|
*file_index = -1;
|
|
|
|
}
|
|
|
|
if (begin != nullptr && end != nullptr && level > 0) {
|
|
|
|
GetOverlappingInputsRangeBinarySearch(level, user_begin, user_end, inputs,
|
|
|
|
hint_index, file_index,
|
|
|
|
true /* within_interval */);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
|
|
|
// Employ binary search to find at least one file that overlaps the
|
|
|
|
// specified range. From that file, iterate backwards and
|
|
|
|
// forwards to find all overlapping files.
|
|
|
|
// If within_interval is set, then only store the maximum set of clean inputs
|
|
|
|
// within the range [begin, end]. "clean" means there is a boundary
|
|
|
|
// between the files in "*inputs" and the surrounding files
|
|
|
|
void VersionStorageInfo::GetOverlappingInputsRangeBinarySearch(
|
|
|
|
int level, const Slice& user_begin, const Slice& user_end,
|
|
|
|
std::vector<FileMetaData*>* inputs, int hint_index, int* file_index,
|
|
|
|
bool within_interval) const {
|
|
|
|
assert(level > 0);
|
|
|
|
int min = 0;
|
|
|
|
int mid = 0;
|
|
|
|
int max = static_cast<int>(files_[level].size()) - 1;
|
|
|
|
bool foundOverlap = false;
|
|
|
|
const Comparator* user_cmp = user_comparator_;
|
|
|
|
|
|
|
|
// if the caller already knows the index of a file that has overlap,
|
|
|
|
// then we can skip the binary search.
|
|
|
|
if (hint_index != -1) {
|
|
|
|
mid = hint_index;
|
|
|
|
foundOverlap = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (!foundOverlap && min <= max) {
|
|
|
|
mid = (min + max)/2;
|
|
|
|
FdWithKeyRange* f = &(level_files_brief_[level].files[mid]);
|
|
|
|
const Slice file_start = ExtractUserKey(f->smallest_key);
|
|
|
|
const Slice file_limit = ExtractUserKey(f->largest_key);
|
|
|
|
if ((!within_interval && user_cmp->Compare(file_limit, user_begin) < 0) ||
|
|
|
|
(within_interval && user_cmp->Compare(file_start, user_begin) < 0)) {
|
|
|
|
min = mid + 1;
|
|
|
|
} else if ((!within_interval &&
|
|
|
|
user_cmp->Compare(user_end, file_start) < 0) ||
|
|
|
|
(within_interval &&
|
|
|
|
user_cmp->Compare(user_end, file_limit) < 0)) {
|
|
|
|
max = mid - 1;
|
|
|
|
} else {
|
|
|
|
foundOverlap = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If there were no overlapping files, return immediately.
|
|
|
|
if (!foundOverlap) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// returns the index where an overlap is found
|
|
|
|
if (file_index) {
|
|
|
|
*file_index = mid;
|
|
|
|
}
|
|
|
|
|
|
|
|
int start_index, end_index;
|
|
|
|
if (within_interval) {
|
|
|
|
ExtendFileRangeWithinInterval(level, user_begin, user_end, mid, &start_index,
|
|
|
|
&end_index);
|
|
|
|
} else {
|
|
|
|
ExtendFileRangeOverlappingInterval(level, user_begin, user_end, mid,
|
|
|
|
&start_index, &end_index);
|
|
|
|
}
|
|
|
|
assert(end_index >= start_index);
|
|
|
|
// insert overlapping files into vector
|
|
|
|
for (int i = start_index; i <= end_index; i++) {
|
|
|
|
inputs->push_back(files_[level][i]);
|
|
|
|
}
|
|
|
|
}
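
A standalone sketch of the binary-search step above, over ranges that are sorted and pairwise non-overlapping (which levels > 0 guarantee); names and types here are illustrative, not the RocksDB API:

```cpp
#include <string>
#include <vector>

struct KeyRange {
  std::string smallest;
  std::string largest;
};

// Binary search over ranges sorted by smallest key and pairwise disjoint
// (guaranteed on levels > 0). Returns the index of one range intersecting
// [begin, end], or -1 if there is none.
int FindAnyOverlap(const std::vector<KeyRange>& files,
                   const std::string& begin, const std::string& end) {
  int lo = 0;
  int hi = static_cast<int>(files.size()) - 1;
  while (lo <= hi) {
    int mid = lo + (hi - lo) / 2;
    if (files[mid].largest < begin) {
      lo = mid + 1;   // entirely before the target range
    } else if (end < files[mid].smallest) {
      hi = mid - 1;   // entirely after the target range
    } else {
      return mid;     // intersects [begin, end]
    }
  }
  return -1;
}
```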
|
|
|
|
|
|
|
|
// Store in *start_index and *end_index the range of all files in
|
|
|
|
// "level" that overlap [begin,end]
|
|
|
|
// The mid_index specifies the index of at least one file that
|
|
|
|
// overlaps the specified range. From that file, iterate backward
|
|
|
|
// and forward to find all overlapping files.
|
|
|
|
// Use the level_files_brief_ (FileLevel) index to make the search faster.
|
|
|
|
void VersionStorageInfo::ExtendFileRangeOverlappingInterval(
|
|
|
|
int level, const Slice& user_begin, const Slice& user_end,
|
|
|
|
unsigned int mid_index, int* start_index, int* end_index) const {
|
|
|
|
const Comparator* user_cmp = user_comparator_;
|
|
|
|
const FdWithKeyRange* files = level_files_brief_[level].files;
|
|
|
|
#ifndef NDEBUG
|
|
|
|
{
|
|
|
|
// assert that the file at mid_index overlaps with the range
|
|
|
|
assert(mid_index < level_files_brief_[level].num_files);
|
|
|
|
const FdWithKeyRange* f = &files[mid_index];
|
|
|
|
const Slice fstart = ExtractUserKey(f->smallest_key);
|
|
|
|
const Slice flimit = ExtractUserKey(f->largest_key);
|
|
|
|
if (user_cmp->Compare(fstart, user_begin) >= 0) {
|
|
|
|
assert(user_cmp->Compare(fstart, user_end) <= 0);
|
|
|
|
} else {
|
|
|
|
assert(user_cmp->Compare(flimit, user_begin) >= 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
*start_index = mid_index + 1;
|
|
|
|
*end_index = mid_index;
|
|
|
|
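  // Debug-only counter: it is incremented inside asserts via the comma
  // operator, so release builds never touch it; the final assert below
  // checks it matches the size of [*start_index, *end_index].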
int count __attribute__((unused)) = 0;
|
|
|
|
|
|
|
|
// check backwards from 'mid' to lower indices
|
|
|
|
for (int i = mid_index; i >= 0 ; i--) {
|
|
|
|
const FdWithKeyRange* f = &files[i];
|
|
|
|
const Slice file_limit = ExtractUserKey(f->largest_key);
|
|
|
|
if (user_cmp->Compare(file_limit, user_begin) >= 0) {
|
|
|
|
*start_index = i;
|
|
|
|
assert((count++, true));
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// check forward from 'mid+1' to higher indices
|
|
|
|
for (unsigned int i = mid_index+1;
|
|
|
|
i < level_files_brief_[level].num_files; i++) {
|
|
|
|
const FdWithKeyRange* f = &files[i];
|
|
|
|
const Slice file_start = ExtractUserKey(f->smallest_key);
|
|
|
|
if (user_cmp->Compare(file_start, user_end) <= 0) {
|
|
|
|
assert((count++, true));
|
|
|
|
*end_index = i;
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert(count == *end_index - *start_index + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store in *start_index and *end_index the clean range of all files in
|
|
|
|
// "level" within [begin,end]
|
|
|
|
// The mid_index specifies the index of at least one file within
|
|
|
|
// the specified range. From that file, iterate backward
|
|
|
|
// and forward to find all overlapping files and then "shrink" to
|
|
|
|
// the clean range required.
|
|
|
|
// Use the level_files_brief_ (FileLevel) index to make the search faster.
|
|
|
|
void VersionStorageInfo::ExtendFileRangeWithinInterval(
|
|
|
|
int level, const Slice& user_begin, const Slice& user_end,
|
|
|
|
unsigned int mid_index, int* start_index, int* end_index) const {
|
|
|
|
assert(level != 0);
|
|
|
|
const Comparator* user_cmp = user_comparator_;
|
|
|
|
const FdWithKeyRange* files = level_files_brief_[level].files;
|
|
|
|
#ifndef NDEBUG
|
|
|
|
{
|
|
|
|
// assert that the file at mid_index is within the range
|
|
|
|
assert(mid_index < level_files_brief_[level].num_files);
|
|
|
|
const FdWithKeyRange* f = &files[mid_index];
|
|
|
|
const Slice fstart = ExtractUserKey(f->smallest_key);
|
|
|
|
const Slice flimit = ExtractUserKey(f->largest_key);
|
|
|
|
assert(user_cmp->Compare(fstart, user_begin) >= 0 &&
|
|
|
|
user_cmp->Compare(flimit, user_end) <= 0);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
ExtendFileRangeOverlappingInterval(level, user_begin, user_end, mid_index,
|
|
|
|
start_index, end_index);
|
|
|
|
int left = *start_index;
|
|
|
|
int right = *end_index;
|
|
|
|
// shrink from left to right
|
|
|
|
while (left <= right) {
|
|
|
|
const Slice& first_key_in_range = ExtractUserKey(files[left].smallest_key);
|
|
|
|
if (user_cmp->Compare(first_key_in_range, user_begin) < 0) {
|
|
|
|
left++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (left > 0) { // If not first file
|
|
|
|
const Slice& last_key_before =
|
|
|
|
ExtractUserKey(files[left - 1].largest_key);
|
|
|
|
if (user_cmp->Equal(first_key_in_range, last_key_before)) {
|
|
|
|
// The first user key in range overlaps with the previous file's last
|
|
|
|
// key
|
|
|
|
left++;
|
|
|
|
continue;
|
|
|
|
}
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
11 years ago
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// shrink from right to left
|
|
|
|
while (left <= right) {
|
|
|
|
const Slice last_key_in_range = ExtractUserKey(files[right].largest_key);
|
|
|
|
if (user_cmp->Compare(last_key_in_range, user_end) > 0) {
|
|
|
|
right--;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (right < static_cast<int>(level_files_brief_[level].num_files) -
|
|
|
|
1) { // If not the last file
|
|
|
|
const Slice first_key_after =
|
|
|
|
ExtractUserKey(files[right + 1].smallest_key);
|
|
|
|
if (user_cmp->Equal(last_key_in_range, first_key_after)) {
|
|
|
|
// The last user key in range overlaps with the next file's first key
|
|
|
|
right--;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
*start_index = left;
|
|
|
|
*end_index = right;
|
|
|
|
}
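
// Editor's note (illustrative): the loop above shrinks [left, right] so the
// selected range stays "clean-cut" -- no file left out of the compaction
// shares a boundary user key with a selected file. For example, if the chosen
// range ends at user key "k" and the next file also starts at "k", the range
// is shrunk so different versions of "k" are not split across the compaction.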

uint64_t VersionStorageInfo::NumLevelBytes(int level) const {
  assert(level >= 0);
  assert(level < num_levels());
  return TotalFileSize(files_[level]);
}

const char* VersionStorageInfo::LevelSummary(
    LevelSummaryStorage* scratch) const {
  int len = 0;
  if (compaction_style_ == kCompactionStyleLevel && num_levels() > 1) {
    assert(base_level_ < static_cast<int>(level_max_bytes_.size()));
    len = snprintf(scratch->buffer, sizeof(scratch->buffer),
                   "base level %d max bytes base %" PRIu64 " ", base_level_,
                   level_max_bytes_[base_level_]);
  }
  len +=
      snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, "files[");
  for (int i = 0; i < num_levels(); i++) {
    int sz = sizeof(scratch->buffer) - len;
    int ret = snprintf(scratch->buffer + len, sz, "%d ", int(files_[i].size()));
    if (ret < 0 || ret >= sz) break;
    len += ret;
  }
  if (len > 0) {
    // overwrite the last space
    --len;
  }
  len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
                  "] max score %.2f", compaction_score_[0]);

  if (!files_marked_for_compaction_.empty()) {
    snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
             " (%" ROCKSDB_PRIszt " files need compaction)",
             files_marked_for_compaction_.size());
  }

  return scratch->buffer;
}
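
// Example of the summary string produced by LevelSummary() above, as it
// appears in a typical LOG line:
//   base level 1 max bytes base 10240 files[0 1 9 32 12 0 0 0] max score 0.96
//   (2 files need compaction)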

const char* VersionStorageInfo::LevelFileSummary(FileSummaryStorage* scratch,
                                                 int level) const {
  int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
  for (const auto& f : files_[level]) {
    int sz = sizeof(scratch->buffer) - len;
    char sztxt[16];
    AppendHumanBytes(f->fd.GetFileSize(), sztxt, sizeof(sztxt));
    int ret = snprintf(scratch->buffer + len, sz,
                       "#%" PRIu64 "(seq=%" PRIu64 ",sz=%s,%d) ",
                       f->fd.GetNumber(), f->smallest_seqno, sztxt,
                       static_cast<int>(f->being_compacted));
    if (ret < 0 || ret >= sz)
      break;
    len += ret;
  }
  // overwrite the last space (only if files_[level].size() is non-zero)
  if (files_[level].size() && len > 0) {
    --len;
  }
  snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, "]");
  return scratch->buffer;
}
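
// Illustrative output of LevelFileSummary() (editor's note: file numbers,
// sequence numbers and the human-readable sizes are made up):
//   files_size[#17(seq=101,sz=4KB,0) #20(seq=150,sz=3KB,1)]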

int64_t VersionStorageInfo::MaxNextLevelOverlappingBytes() {
  uint64_t result = 0;
  std::vector<FileMetaData*> overlaps;
  for (int level = 1; level < num_levels() - 1; level++) {
    for (const auto& f : files_[level]) {
      GetOverlappingInputs(level + 1, &f->smallest, &f->largest, &overlaps);
      const uint64_t sum = TotalFileSize(overlaps);
      if (sum > result) {
        result = sum;
      }
    }
  }
  return result;
}

uint64_t VersionStorageInfo::MaxBytesForLevel(int level) const {
  // Note: the result for level zero is not really used since we set
  // the level-0 compaction threshold based on number of files.
  assert(level >= 0);
  assert(level < static_cast<int>(level_max_bytes_.size()));
  return level_max_bytes_[level];
}

void VersionStorageInfo::CalculateBaseBytes(const ImmutableCFOptions& ioptions,
                                            const MutableCFOptions& options) {
  // Special logic to set number of sorted runs.
  // It is to match the previous behavior when all files are in L0.
  int num_l0_count = static_cast<int>(files_[0].size());
  if (compaction_style_ == kCompactionStyleUniversal) {
    // For universal compaction, we use level0 score to indicate
    // compaction score for the whole DB. Adding other levels as if
    // they are L0 files.
    for (int i = 1; i < num_levels(); i++) {
      if (!files_[i].empty()) {
        num_l0_count++;
      }
    }
  }
  set_l0_delay_trigger_count(num_l0_count);

  level_max_bytes_.resize(ioptions.num_levels);
  if (!ioptions.level_compaction_dynamic_level_bytes) {
    base_level_ = (ioptions.compaction_style == kCompactionStyleLevel) ? 1 : -1;

    // Calculate for static bytes base case
    for (int i = 0; i < ioptions.num_levels; ++i) {
      if (i == 0 && ioptions.compaction_style == kCompactionStyleUniversal) {
        level_max_bytes_[i] = options.max_bytes_for_level_base;
      } else if (i > 1) {
        level_max_bytes_[i] = MultiplyCheckOverflow(
            MultiplyCheckOverflow(level_max_bytes_[i - 1],
                                  options.max_bytes_for_level_multiplier),
            options.MaxBytesMultiplerAdditional(i - 1));
      } else {
        level_max_bytes_[i] = options.max_bytes_for_level_base;
      }
    }
  } else {
    uint64_t max_level_size = 0;

    int first_non_empty_level = -1;
    // Find size of non-L0 level of most data.
    // Cannot use the size of the last level because it can be empty or less
    // than previous levels after compaction.
    for (int i = 1; i < num_levels_; i++) {
      uint64_t total_size = 0;
      for (const auto& f : files_[i]) {
        total_size += f->fd.GetFileSize();
      }
      if (total_size > 0 && first_non_empty_level == -1) {
        first_non_empty_level = i;
      }
      if (total_size > max_level_size) {
        max_level_size = total_size;
      }
    }

    // Prefill every level's max bytes to disallow compaction from there.
    for (int i = 0; i < num_levels_; i++) {
      level_max_bytes_[i] = std::numeric_limits<uint64_t>::max();
    }

    if (max_level_size == 0) {
      // No data for L1 and up. L0 compacts to last level directly.
      // No compaction from L1+ needs to be scheduled.
      base_level_ = num_levels_ - 1;
    } else {
      uint64_t base_bytes_max = options.max_bytes_for_level_base;
      uint64_t base_bytes_min = static_cast<uint64_t>(
          base_bytes_max / options.max_bytes_for_level_multiplier);

      // Try whether we can make last level's target size to be max_level_size
      uint64_t cur_level_size = max_level_size;
      for (int i = num_levels_ - 2; i >= first_non_empty_level; i--) {
        // Round up after dividing
        cur_level_size = static_cast<uint64_t>(
            cur_level_size / options.max_bytes_for_level_multiplier);
      }

      // Calculate base level and its size.
      uint64_t base_level_size;
      if (cur_level_size <= base_bytes_min) {
        // Case 1. If we make target size of last level to be max_level_size,
        // target size of the first non-empty level would be smaller than
        // base_bytes_min. We set it to be base_bytes_min.
        base_level_size = base_bytes_min + 1U;
        base_level_ = first_non_empty_level;
        Warn(ioptions.info_log,
             "More existing levels in DB than needed. "
             "max_bytes_for_level_multiplier may not be guaranteed.");
      } else {
        // Find base level (where L0 data is compacted to).
        base_level_ = first_non_empty_level;
        while (base_level_ > 1 && cur_level_size > base_bytes_max) {
          --base_level_;
          cur_level_size = static_cast<uint64_t>(
              cur_level_size / options.max_bytes_for_level_multiplier);
        }
        if (cur_level_size > base_bytes_max) {
          // Even L1 will be too large
          assert(base_level_ == 1);
          base_level_size = base_bytes_max;
        } else {
          base_level_size = cur_level_size;
        }
      }

      uint64_t level_size = base_level_size;
      for (int i = base_level_; i < num_levels_; i++) {
        if (i > base_level_) {
          level_size = MultiplyCheckOverflow(
              level_size, options.max_bytes_for_level_multiplier);
        }
        level_max_bytes_[i] = level_size;
      }
    }
  }
}
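
// Editor's note, illustrative example (numbers are hypothetical): with
// level_compaction_dynamic_level_bytes=true, num_levels=7,
// max_bytes_for_level_base=256MB, max_bytes_for_level_multiplier=10 and
// roughly 10GB of non-L0 data, all of it currently in the last level (L6),
// the code above walks down from the last level dividing by the multiplier
// until the target fits under max_bytes_for_level_base: base_level_ becomes 4
// with targets L4=100MB, L5=1GB, L6=10GB, while L1-L3 stay unused
// (their level_max_bytes_ remain UINT64_MAX).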

uint64_t VersionStorageInfo::EstimateLiveDataSize() const {
  // Estimate the live data size by adding up the size of the last level for all
  // key ranges. Note: Estimate depends on the ordering of files in level 0
  // because files in level 0 can be overlapping.
  uint64_t size = 0;

  auto ikey_lt = [this](InternalKey* x, InternalKey* y) {
    return internal_comparator_->Compare(*x, *y) < 0;
  };
  // (Ordered) map of largest keys in non-overlapping files
  std::map<InternalKey*, FileMetaData*, decltype(ikey_lt)> ranges(ikey_lt);

  for (int l = num_levels_ - 1; l >= 0; l--) {
    bool found_end = false;
    for (auto file : files_[l]) {
      // Find the first file where the largest key is larger than the smallest
      // key of the current file. If this file does not overlap with the
      // current file, none of the files in the map does. If there is
      // no potential overlap, we can safely insert the rest of this level
      // (if the level is not 0) into the map without checking again because
      // the elements in the level are sorted and non-overlapping.
      auto lb = (found_end && l != 0) ?
        ranges.end() : ranges.lower_bound(&file->smallest);
      found_end = (lb == ranges.end());
      if (found_end || internal_comparator_->Compare(
            file->largest, (*lb).second->smallest) < 0) {
        ranges.emplace_hint(lb, &file->largest, file);
        size += file->fd.file_size;
      }
    }
  }
  return size;
}
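
// Editor's note (illustrative): levels are scanned bottom-up, so if the last
// level already contributed a file covering [a..m], a newer file in a higher
// level whose range overlaps [a..m] is not inserted into `ranges` and its size
// is not counted -- only the bottom-most copy of each key range contributes to
// the estimate.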

void Version::AddLiveFiles(std::vector<FileDescriptor>* live) {
  for (int level = 0; level < storage_info_.num_levels(); level++) {
    const std::vector<FileMetaData*>& files = storage_info_.files_[level];
    for (const auto& file : files) {
      live->push_back(file->fd);
    }
  }
}

std::string Version::DebugString(bool hex) const {
  std::string r;
  for (int level = 0; level < storage_info_.num_levels_; level++) {
    // E.g.,
    //   --- level 1 ---
    //   17:123['a' .. 'd']
    //   20:43['e' .. 'g']
    r.append("--- level ");
    AppendNumberTo(&r, level);
    r.append(" --- version# ");
    AppendNumberTo(&r, version_number_);
    r.append(" ---\n");
    const std::vector<FileMetaData*>& files = storage_info_.files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      r.push_back(' ');
      AppendNumberTo(&r, files[i]->fd.GetNumber());
      r.push_back(':');
      AppendNumberTo(&r, files[i]->fd.GetFileSize());
      r.append("[");
      r.append(files[i]->smallest.DebugString(hex));
      r.append(" .. ");
      r.append(files[i]->largest.DebugString(hex));
      r.append("]\n");
    }
  }
  return r;
}

// this is used to batch writes to the manifest file
struct VersionSet::ManifestWriter {
  Status status;
  bool done;
  InstrumentedCondVar cv;
  ColumnFamilyData* cfd;
  const autovector<VersionEdit*>& edit_list;

  explicit ManifestWriter(InstrumentedMutex* mu, ColumnFamilyData* _cfd,
                          const autovector<VersionEdit*>& e)
      : done(false), cv(mu), cfd(_cfd), edit_list(e) {}
};
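
// Editor's note: LogAndApply() below uses ManifestWriter as a group-commit
// queue. Each caller enqueues a ManifestWriter and waits on its condition
// variable; the writer at the front of the queue batches compatible edits for
// the same column family, writes them to the MANIFEST once, and then marks
// the batched writers done and signals the next writer in the queue.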

VersionSet::VersionSet(const std::string& dbname,
                       const ImmutableDBOptions* db_options,
                       const EnvOptions& storage_options, Cache* table_cache,
                       WriteBufferManager* write_buffer_manager,
                       WriteController* write_controller)
    : column_family_set_(
          new ColumnFamilySet(dbname, db_options, storage_options, table_cache,
                              write_buffer_manager, write_controller)),
      env_(db_options->env),
      dbname_(dbname),
      db_options_(db_options),
      next_file_number_(2),
      manifest_file_number_(0),  // Filled by Recover()
      pending_manifest_file_number_(0),
      last_sequence_(0),
      prev_log_number_(0),
      current_version_number_(0),
      manifest_file_size_(0),
      env_options_(storage_options),
      env_options_compactions_(env_options_) {}

void CloseTables(void* ptr, size_t) {
  TableReader* table_reader = reinterpret_cast<TableReader*>(ptr);
  table_reader->Close();
}

VersionSet::~VersionSet() {
  // we need to delete column_family_set_ because its destructor depends on
  // VersionSet
  column_family_set_->get_table_cache()->ApplyToAllCacheEntries(&CloseTables,
                                                                false);
  column_family_set_.reset();
  for (auto file : obsolete_files_) {
    delete file;
  }
  obsolete_files_.clear();
}

void VersionSet::AppendVersion(ColumnFamilyData* column_family_data,
                               Version* v) {
  // compute new compaction score
  v->storage_info()->ComputeCompactionScore(
      *column_family_data->ioptions(),
      *column_family_data->GetLatestMutableCFOptions());

  // Mark v finalized
  v->storage_info_.SetFinalized();

  // Make "v" current
  assert(v->refs_ == 0);
  Version* current = column_family_data->current();
  assert(v != current);
  if (current != nullptr) {
    assert(current->refs_ > 0);
    current->Unref();
  }
  column_family_data->SetCurrent(v);
  v->Ref();

  // Append to linked list
  v->prev_ = column_family_data->dummy_versions()->prev_;
  v->next_ = column_family_data->dummy_versions();
  v->prev_->next_ = v;
  v->next_->prev_ = v;
}
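
// Editor's note: the new Version arrives with zero refs (asserted above).
// SetCurrent() plus Ref() make it the column family's current version, the
// previous current version is Unref()'d, and linking it into the
// doubly-linked list headed by dummy_versions() keeps every still-referenced
// Version reachable until its final Unref().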

Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
                               const MutableCFOptions& mutable_cf_options,
                               const autovector<VersionEdit*>& edit_list,
                               InstrumentedMutex* mu, Directory* db_directory,
                               bool new_descriptor_log,
                               const ColumnFamilyOptions* new_cf_options) {
  mu->AssertHeld();
  // num of edits
  auto num_edits = edit_list.size();
  if (num_edits == 0) {
    return Status::OK();
  } else if (num_edits > 1) {
#ifndef NDEBUG
    // no group commits for column family add or drop
    for (auto& edit : edit_list) {
      assert(!edit->IsColumnFamilyManipulation());
    }
#endif
  }

  // column_family_data can be nullptr only if this is column_family_add.
  // in that case, we also need to specify ColumnFamilyOptions
  if (column_family_data == nullptr) {
    assert(num_edits == 1);
    assert(edit_list[0]->is_column_family_add_);
    assert(new_cf_options != nullptr);
  }

  // queue our request
  ManifestWriter w(mu, column_family_data, edit_list);
  manifest_writers_.push_back(&w);
  while (!w.done && &w != manifest_writers_.front()) {
    w.cv.Wait();
  }
  if (w.done) {
    return w.status;
  }
  if (column_family_data != nullptr && column_family_data->IsDropped()) {
    // if column family is dropped by the time we get here, no need to write
    // anything to the manifest
    manifest_writers_.pop_front();
    // Notify new head of write queue
    if (!manifest_writers_.empty()) {
      manifest_writers_.front()->cv.Signal();
    }
    // we steal this code to also inform about cf-drop
    return Status::ShutdownInProgress();
  }

  autovector<VersionEdit*> batch_edits;
  Version* v = nullptr;
  std::unique_ptr<BaseReferencedVersionBuilder> builder_guard(nullptr);

  // process all requests in the queue
  ManifestWriter* last_writer = &w;
  assert(!manifest_writers_.empty());
  assert(manifest_writers_.front() == &w);
  if (w.edit_list.front()->IsColumnFamilyManipulation()) {
    // no group commits for column family add or drop
    LogAndApplyCFHelper(w.edit_list.front());
    batch_edits.push_back(w.edit_list.front());
  } else {
    v = new Version(column_family_data, this, current_version_number_++);
    builder_guard.reset(new BaseReferencedVersionBuilder(column_family_data));
    auto* builder = builder_guard->version_builder();
    for (const auto& writer : manifest_writers_) {
      if (writer->edit_list.front()->IsColumnFamilyManipulation() ||
          writer->cfd->GetID() != column_family_data->GetID()) {
        // no group commits for column family add or drop
        // also, group commits across column families are not supported
        break;
      }
      last_writer = writer;
      for (const auto& edit : writer->edit_list) {
        LogAndApplyHelper(column_family_data, builder, v, edit, mu);
        batch_edits.push_back(edit);
      }
    }
    builder->SaveTo(v->storage_info());
  }

  // Initialize new descriptor log file if necessary by creating
  // a temporary file that contains a snapshot of the current version.
  uint64_t new_manifest_file_size = 0;
  Status s;

  assert(pending_manifest_file_number_ == 0);
  if (!descriptor_log_ ||
      manifest_file_size_ > db_options_->max_manifest_file_size) {
    pending_manifest_file_number_ = NewFileNumber();
    batch_edits.back()->SetNextFile(next_file_number_.load());
    new_descriptor_log = true;
  } else {
    pending_manifest_file_number_ = manifest_file_number_;
  }

  if (new_descriptor_log) {
    // if we're writing out a new snapshot make sure to persist max column family
    if (column_family_set_->GetMaxColumnFamily() > 0) {
      w.edit_list.front()->SetMaxColumnFamily(
          column_family_set_->GetMaxColumnFamily());
    }
  }

  // Unlock during expensive operations. New writes cannot get here
  // because &w is ensuring that all new writes get queued.
  {
    mu->Unlock();

    TEST_SYNC_POINT("VersionSet::LogAndApply:WriteManifest");
    if (!w.edit_list.front()->IsColumnFamilyManipulation() &&
        db_options_->max_open_files == -1) {
      // unlimited table cache. Pre-load table handle now.
      // Need to do it out of the mutex.
      builder_guard->version_builder()->LoadTableHandlers(
          column_family_data->internal_stats(),
          column_family_data->ioptions()->optimize_filters_for_hits,
          true /* prefetch_index_and_filter_in_cache */);
    }

    // This is fine because everything inside of this block is serialized --
    // only one thread can be here at the same time
    if (new_descriptor_log) {
      // create manifest file
      Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
          "Creating manifest %" PRIu64 "\n", pending_manifest_file_number_);
      unique_ptr<WritableFile> descriptor_file;
      EnvOptions opt_env_opts = env_->OptimizeForManifestWrite(env_options_);
      s = NewWritableFile(
          env_, DescriptorFileName(dbname_, pending_manifest_file_number_),
          &descriptor_file, opt_env_opts);
      if (s.ok()) {
        descriptor_file->SetPreallocationBlockSize(
            db_options_->manifest_preallocation_size);

        unique_ptr<WritableFileWriter> file_writer(
            new WritableFileWriter(std::move(descriptor_file), opt_env_opts));
        descriptor_log_.reset(
            new log::Writer(std::move(file_writer), 0, false));
        s = WriteSnapshot(descriptor_log_.get());
      }
    }

    if (!w.edit_list.front()->IsColumnFamilyManipulation()) {
      // These are cpu-heavy operations, which should be called outside mutex.
      v->PrepareApply(mutable_cf_options, true);
    }

    // Write new record to MANIFEST log
    if (s.ok()) {
      for (auto& e : batch_edits) {
        std::string record;
        if (!e->EncodeTo(&record)) {
          s = Status::Corruption(
              "Unable to Encode VersionEdit:" + e->DebugString(true));
          break;
        }
        TEST_KILL_RANDOM("VersionSet::LogAndApply:BeforeAddRecord",
                         rocksdb_kill_odds * REDUCE_ODDS2);
        s = descriptor_log_->AddRecord(record);
        if (!s.ok()) {
          break;
        }
      }
      if (s.ok()) {
        s = SyncManifest(env_, db_options_, descriptor_log_->file());
      }
      if (!s.ok()) {
        Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log,
            "MANIFEST write: %s\n", s.ToString().c_str());
      }
    }

    // If we just created a new descriptor file, install it by writing a
    // new CURRENT file that points to it.
    if (s.ok() && new_descriptor_log) {
      s = SetCurrentFile(env_, dbname_, pending_manifest_file_number_,
                         db_directory);
    }

    if (s.ok()) {
      // find offset in manifest file where this version is stored.
      new_manifest_file_size = descriptor_log_->file()->GetFileSize();
    }

    if (w.edit_list.front()->is_column_family_drop_) {
      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:0");
      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:1");
      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:2");
    }

    LogFlush(db_options_->info_log);
    TEST_SYNC_POINT("VersionSet::LogAndApply:WriteManifestDone");
    mu->Lock();
  }

  // Append the old manifest file to the obsolete_manifests_ list to be deleted
  // by PurgeObsoleteFiles later.
  if (s.ok() && new_descriptor_log) {
    obsolete_manifests_.emplace_back(
        DescriptorFileName("", manifest_file_number_));
  }

  // Install the new version
  if (s.ok()) {
    if (w.edit_list.front()->is_column_family_add_) {
      // no group commit on column family add
      assert(batch_edits.size() == 1);
      assert(new_cf_options != nullptr);
      CreateColumnFamily(*new_cf_options, w.edit_list.front());
    } else if (w.edit_list.front()->is_column_family_drop_) {
      assert(batch_edits.size() == 1);
      column_family_data->SetDropped();
      if (column_family_data->Unref()) {
        delete column_family_data;
      }
    } else {
      uint64_t max_log_number_in_batch = 0;
      for (auto& e : batch_edits) {
        if (e->has_log_number_) {
          max_log_number_in_batch =
              std::max(max_log_number_in_batch, e->log_number_);
        }
      }
      if (max_log_number_in_batch != 0) {
        assert(column_family_data->GetLogNumber() <= max_log_number_in_batch);
        column_family_data->SetLogNumber(max_log_number_in_batch);
      }
      AppendVersion(column_family_data, v);
    }

    manifest_file_number_ = pending_manifest_file_number_;
    manifest_file_size_ = new_manifest_file_size;
    prev_log_number_ = w.edit_list.front()->prev_log_number_;
  } else {
    std::string version_edits;
    for (auto& e : batch_edits) {
      version_edits = version_edits + "\n" + e->DebugString(true);
    }
    Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log,
        "[%s] Error in committing version edit to MANIFEST: %s",
        column_family_data ? column_family_data->GetName().c_str() : "<null>",
        version_edits.c_str());
    delete v;
    if (new_descriptor_log) {
      Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
          "Deleting manifest %" PRIu64 " current manifest %" PRIu64 "\n",
          manifest_file_number_, pending_manifest_file_number_);
      descriptor_log_.reset();
      env_->DeleteFile(
          DescriptorFileName(dbname_, pending_manifest_file_number_));
    }
  }
  pending_manifest_file_number_ = 0;

  // wake up all the waiting writers
  while (true) {
    ManifestWriter* ready = manifest_writers_.front();
    manifest_writers_.pop_front();
    if (ready != &w) {
      ready->status = s;
      ready->done = true;
      ready->cv.Signal();
    }
    if (ready == last_writer) break;
  }
  // Notify new head of write queue
  if (!manifest_writers_.empty()) {
    manifest_writers_.front()->cv.Signal();
  }
  return s;
}
|
|
|
|
|
|
|
|
void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) {
|
|
|
|
assert(edit->IsColumnFamilyManipulation());
|
|
|
|
edit->SetNextFile(next_file_number_.load());
|
|
|
|
edit->SetLastSequence(last_sequence_);
|
|
|
|
if (edit->is_column_family_drop_) {
|
|
|
|
// if we drop a column family, we have to make sure to save the max column family,
|
|
|
|
// so that we don't reuse an existing ID
|
|
|
|
edit->SetMaxColumnFamily(column_family_set_->GetMaxColumnFamily());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd,
|
|
|
|
VersionBuilder* builder, Version* v,
|
|
|
|
VersionEdit* edit, InstrumentedMutex* mu) {
|
|
|
|
mu->AssertHeld();
|
|
|
|
assert(!edit->IsColumnFamilyManipulation());
|
|
|
|
|
|
|
|
if (edit->has_log_number_) {
|
|
|
|
assert(edit->log_number_ >= cfd->GetLogNumber());
|
|
|
|
assert(edit->log_number_ < next_file_number_.load());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!edit->has_prev_log_number_) {
|
|
|
|
edit->SetPrevLogNumber(prev_log_number_);
|
|
|
|
}
|
|
|
|
edit->SetNextFile(next_file_number_.load());
|
|
|
|
edit->SetLastSequence(last_sequence_);
|
|
|
|
|
|
|
|
builder->Apply(edit);
|
|
|
|
}
|
|
|
|
|
|
|
|
Status VersionSet::Recover(
|
|
|
|
const std::vector<ColumnFamilyDescriptor>& column_families,
|
|
|
|
bool read_only) {
|
|
|
|
std::unordered_map<std::string, ColumnFamilyOptions> cf_name_to_options;
|
|
|
|
for (auto cf : column_families) {
|
|
|
|
cf_name_to_options.insert({cf.name, cf.options});
|
|
|
|
}
|
|
|
|
// keeps track of column families in manifest that were not found in
|
|
|
|
// column families parameters. if those column families are not dropped
|
|
|
|
// by subsequent manifest records, Recover() will return failure status
|
|
|
|
std::unordered_map<int, std::string> column_families_not_found;
|
|
|
|
|
|
|
|
// Read "CURRENT" file, which contains a pointer to the current manifest file
|
|
|
|
std::string manifest_filename;
|
|
|
|
Status s = ReadFileToString(
|
|
|
|
env_, CurrentFileName(dbname_), &manifest_filename
|
|
|
|
);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
if (manifest_filename.empty() ||
|
|
|
|
manifest_filename.back() != '\n') {
|
|
|
|
return Status::Corruption("CURRENT file does not end with newline");
|
|
|
|
}
|
|
|
|
// remove the trailing '\n'
|
|
|
|
manifest_filename.resize(manifest_filename.size() - 1);
|
|
|
|
FileType type;
|
|
|
|
bool parse_ok =
|
|
|
|
ParseFileName(manifest_filename, &manifest_file_number_, &type);
|
|
|
|
if (!parse_ok || type != kDescriptorFile) {
|
|
|
|
return Status::Corruption("CURRENT file corrupted");
|
|
|
|
}
|
|
|
|
|
|
|
|
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
|
|
|
|
"Recovering from manifest file: %s\n",
|
|
|
|
manifest_filename.c_str());
|
|
|
|
|
|
|
|
manifest_filename = dbname_ + "/" + manifest_filename;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a thin layer for better portability. Less platform-dependent code should be moved out of Env. In this patch, I create a wrapper for file readers and writers, and move rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Envs in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
9 years ago
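A rough illustration of the wrapper idea from this change, using made-up types rather than RocksDB's Env or SequentialFileReader: the base file type stays minimal, and instrumentation is layered on in a thin reader wrapper:
```
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>

// Bare-bones "environment" file type: it only knows how to hand out bytes.
struct RawFile {
  std::string data;
  std::size_t pos = 0;

  std::size_t Read(std::size_t n, std::string* out) {
    std::size_t avail = data.size() - pos;
    std::size_t take = n < avail ? n : avail;
    out->assign(data, pos, take);
    pos += take;
    return take;
  }
};

// Thin wrapper layered on top; instrumentation (here, a byte counter)
// lives in the wrapper, not in the environment type.
class CountingReader {
 public:
  explicit CountingReader(RawFile file) : file_(std::move(file)) {}

  std::size_t Read(std::size_t n, std::string* out) {
    std::size_t got = file_.Read(n, out);
    bytes_read_ += got;
    return got;
  }

  std::size_t bytes_read() const { return bytes_read_; }

 private:
  RawFile file_;
  std::size_t bytes_read_ = 0;
};

int main() {
  RawFile file;
  file.data = "MANIFEST-000001\n";
  CountingReader reader(std::move(file));
  std::string chunk;
  while (reader.Read(4, &chunk) > 0) {
    // drain the file in small chunks
  }
  std::cout << "read " << reader.bytes_read() << " bytes\n";
  return 0;
}
```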
|
|
|
unique_ptr<SequentialFileReader> manifest_file_reader;
|
|
|
|
{
|
|
|
|
unique_ptr<SequentialFile> manifest_file;
|
|
|
|
s = env_->NewSequentialFile(manifest_filename, &manifest_file,
|
|
|
|
env_options_);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
manifest_file_reader.reset(
|
|
|
|
new SequentialFileReader(std::move(manifest_file)));
|
|
|
|
}
|
|
|
|
uint64_t current_manifest_file_size;
|
|
|
|
s = env_->GetFileSize(manifest_filename, ¤t_manifest_file_size);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool have_log_number = false;
|
|
|
|
bool have_prev_log_number = false;
|
|
|
|
bool have_next_file = false;
|
|
|
|
bool have_last_sequence = false;
|
|
|
|
uint64_t next_file = 0;
|
|
|
|
uint64_t last_sequence = 0;
|
|
|
|
uint64_t log_number = 0;
|
|
|
|
uint64_t previous_log_number = 0;
|
|
|
|
uint32_t max_column_family = 0;
|
|
|
|
std::unordered_map<uint32_t, BaseReferencedVersionBuilder*> builders;
|
|
|
|
|
|
|
|
// add default column family
|
|
|
|
auto default_cf_iter = cf_name_to_options.find(kDefaultColumnFamilyName);
|
|
|
|
if (default_cf_iter == cf_name_to_options.end()) {
|
|
|
|
return Status::InvalidArgument("Default column family not specified");
|
|
|
|
}
|
|
|
|
VersionEdit default_cf_edit;
|
|
|
|
default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
|
|
|
|
default_cf_edit.SetColumnFamily(0);
|
|
|
|
ColumnFamilyData* default_cfd =
|
|
|
|
CreateColumnFamily(default_cf_iter->second, &default_cf_edit);
|
|
|
|
builders.insert({0, new BaseReferencedVersionBuilder(default_cfd)});
|
|
|
|
|
|
|
|
{
|
|
|
|
VersionSet::LogReporter reporter;
|
|
|
|
reporter.status = &s;
|
|
|
|
log::Reader reader(NULL, std::move(manifest_file_reader), &reporter,
|
|
|
|
true /*checksum*/, 0 /*initial_offset*/, 0);
|
|
|
|
Slice record;
|
|
|
|
std::string scratch;
|
|
|
|
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
|
|
|
|
VersionEdit edit;
|
|
|
|
s = edit.DecodeFrom(record);
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Not found means that user didn't supply that column
|
|
|
|
// family option AND we encountered column family add
|
|
|
|
// record. Once we encounter column family drop record,
|
|
|
|
// we will delete the column family from
|
|
|
|
// column_families_not_found.
|
|
|
|
bool cf_in_not_found =
|
|
|
|
column_families_not_found.find(edit.column_family_) !=
|
|
|
|
column_families_not_found.end();
|
|
|
|
// in builders means that user supplied that column family
|
|
|
|
// option AND that we encountered column family add record
|
|
|
|
bool cf_in_builders =
|
|
|
|
builders.find(edit.column_family_) != builders.end();
|
|
|
|
|
|
|
|
// they can't both be true
|
|
|
|
assert(!(cf_in_not_found && cf_in_builders));
|
|
|
|
|
|
|
|
ColumnFamilyData* cfd = nullptr;
|
|
|
|
|
|
|
|
if (edit.is_column_family_add_) {
|
|
|
|
if (cf_in_builders || cf_in_not_found) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest adding the same column family twice");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
auto cf_options = cf_name_to_options.find(edit.column_family_name_);
|
|
|
|
if (cf_options == cf_name_to_options.end()) {
|
|
|
|
column_families_not_found.insert(
|
|
|
|
{edit.column_family_, edit.column_family_name_});
|
|
|
|
} else {
|
|
|
|
cfd = CreateColumnFamily(cf_options->second, &edit);
|
|
|
|
builders.insert(
|
|
|
|
{edit.column_family_, new BaseReferencedVersionBuilder(cfd)});
|
|
|
|
}
|
|
|
|
} else if (edit.is_column_family_drop_) {
|
|
|
|
if (cf_in_builders) {
|
|
|
|
auto builder = builders.find(edit.column_family_);
|
|
|
|
assert(builder != builders.end());
|
|
|
|
delete builder->second;
|
|
|
|
builders.erase(builder);
|
|
|
|
cfd = column_family_set_->GetColumnFamily(edit.column_family_);
|
|
|
|
if (cfd->Unref()) {
|
|
|
|
delete cfd;
|
|
|
|
cfd = nullptr;
|
|
|
|
} else {
|
|
|
|
// who else can have reference to cfd!?
|
|
|
|
assert(false);
|
|
|
|
}
|
|
|
|
} else if (cf_in_not_found) {
|
|
|
|
column_families_not_found.erase(edit.column_family_);
|
|
|
|
} else {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest - dropping non-existing column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else if (!cf_in_not_found) {
|
|
|
|
if (!cf_in_builders) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest record referencing unknown column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
cfd = column_family_set_->GetColumnFamily(edit.column_family_);
|
|
|
|
// this should never happen since cf_in_builders is true
|
|
|
|
assert(cfd != nullptr);
|
|
|
|
if (edit.max_level_ >= cfd->current()->storage_info()->num_levels()) {
|
|
|
|
s = Status::InvalidArgument(
|
|
|
|
"db has more levels than options.num_levels");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// if it is not column family add or column family drop,
|
|
|
|
// then it's a file add/delete, which should be forwarded
|
|
|
|
// to builder
|
|
|
|
auto builder = builders.find(edit.column_family_);
|
|
|
|
assert(builder != builders.end());
|
|
|
|
builder->second->version_builder()->Apply(&edit);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cfd != nullptr) {
|
|
|
|
if (edit.has_log_number_) {
|
|
|
|
if (cfd->GetLogNumber() > edit.log_number_) {
|
|
|
|
Log(InfoLogLevel::WARN_LEVEL, db_options_->info_log,
|
|
|
|
"MANIFEST corruption detected, but ignored - Log numbers in "
|
|
|
|
"records NOT monotonically increasing");
|
|
|
|
} else {
|
|
|
|
cfd->SetLogNumber(edit.log_number_);
|
|
|
|
have_log_number = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (edit.has_comparator_ &&
|
|
|
|
edit.comparator_ != cfd->user_comparator()->Name()) {
|
|
|
|
s = Status::InvalidArgument(
|
|
|
|
cfd->user_comparator()->Name(),
|
|
|
|
"does not match existing comparator " + edit.comparator_);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_prev_log_number_) {
|
|
|
|
previous_log_number = edit.prev_log_number_;
|
|
|
|
have_prev_log_number = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_next_file_number_) {
|
|
|
|
next_file = edit.next_file_number_;
|
|
|
|
have_next_file = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_max_column_family_) {
|
|
|
|
max_column_family = edit.max_column_family_;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_last_sequence_) {
|
|
|
|
last_sequence = edit.last_sequence_;
|
|
|
|
have_last_sequence = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
if (!have_next_file) {
|
|
|
|
s = Status::Corruption("no meta-nextfile entry in descriptor");
|
|
|
|
} else if (!have_log_number) {
|
|
|
|
s = Status::Corruption("no meta-lognumber entry in descriptor");
|
|
|
|
} else if (!have_last_sequence) {
|
|
|
|
s = Status::Corruption("no last-sequence-number entry in descriptor");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!have_prev_log_number) {
|
|
|
|
previous_log_number = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
column_family_set_->UpdateMaxColumnFamily(max_column_family);
|
|
|
|
|
|
|
|
MarkFileNumberUsedDuringRecovery(previous_log_number);
|
|
|
|
MarkFileNumberUsedDuringRecovery(log_number);
|
|
|
|
}
|
|
|
|
|
|
|
|
// there were some column families in the MANIFEST that weren't specified
|
|
|
|
// in the argument. This is OK in read_only mode
|
|
|
|
if (read_only == false && !column_families_not_found.empty()) {
|
|
|
|
std::string list_of_not_found;
|
|
|
|
for (const auto& cf : column_families_not_found) {
|
|
|
|
list_of_not_found += ", " + cf.second;
|
|
|
|
}
|
|
|
|
list_of_not_found = list_of_not_found.substr(2);
|
|
|
|
s = Status::InvalidArgument(
|
|
|
|
"You have to open all column families. Column families not opened: " +
|
|
|
|
list_of_not_found);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
if (cfd->IsDropped()) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
auto builders_iter = builders.find(cfd->GetID());
|
|
|
|
assert(builders_iter != builders.end());
|
|
|
|
auto* builder = builders_iter->second->version_builder();
|
|
|
|
|
|
|
|
if (db_options_->max_open_files == -1) {
|
|
|
|
// unlimited table cache. Pre-load table handle now.
|
|
|
|
// Need to do it out of the mutex.
|
|
|
|
builder->LoadTableHandlers(
|
|
|
|
cfd->internal_stats(), db_options_->max_file_opening_threads,
|
|
|
|
false /* prefetch_index_and_filter_in_cache */);
|
|
|
|
}
|
|
|
|
|
|
|
|
Version* v = new Version(cfd, this, current_version_number_++);
|
|
|
|
builder->SaveTo(v->storage_info());
|
Prevent segfault because SizeUnderCompaction was called without any locks.
Summary:
SizeBeingCompacted was called without any lock protection. This causes
crashes, especially when running db_bench with value_size=128K.
The fix is to compute SizeUnderCompaction while holding the mutex and
pass these values into the call to Finalize.
(gdb) where
#4 leveldb::VersionSet::SizeBeingCompacted (this=this@entry=0x7f0b490931c0, level=level@entry=4) at db/version_set.cc:1827
#5 0x000000000043a3c8 in leveldb::VersionSet::Finalize (this=this@entry=0x7f0b490931c0, v=v@entry=0x7f0b3b86b480) at db/version_set.cc:1420
#6 0x00000000004418d1 in leveldb::VersionSet::LogAndApply (this=0x7f0b490931c0, edit=0x7f0b3dc8c200, mu=0x7f0b490835b0, new_descriptor_log=<optimized out>) at db/version_set.cc:1016
#7 0x00000000004222b2 in leveldb::DBImpl::InstallCompactionResults (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1473
#8 0x0000000000426027 in leveldb::DBImpl::DoCompactionWork (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1757
#9 0x0000000000426690 in leveldb::DBImpl::BackgroundCompaction (this=this@entry=0x7f0b49083400, madeProgress=madeProgress@entry=0x7f0b41bf2d1e, deletion_state=...) at db/db_impl.cc:1268
#10 0x0000000000428f42 in leveldb::DBImpl::BackgroundCall (this=0x7f0b49083400) at db/db_impl.cc:1170
#11 0x000000000045348e in BGThread (this=0x7f0b49023100) at util/env_posix.cc:941
#12 leveldb::(anonymous namespace)::PosixEnv::BGThreadWrapper (arg=0x7f0b49023100) at util/env_posix.cc:874
#13 0x00007f0b4a7cf10d in start_thread (arg=0x7f0b41bf3700) at pthread_create.c:301
#14 0x00007f0b49b4b11d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115
Test Plan:
make check
I am running db_bench with a value size of 128K to see if the segfault is fixed.
Reviewers: MarkCallaghan, sheki, emayanke
Reviewed By: sheki
CC: leveldb
Differential Revision: https://reviews.facebook.net/D9279
12 years ago
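A hedged sketch of the locking pattern described in this fix (hypothetical names, not the real VersionSet members): the aggregate is computed while the mutex is held and handed to the consumer by value, so the consumer never reads shared state unlocked:
```
#include <iostream>
#include <mutex>
#include <numeric>
#include <vector>

struct SharedState {
  std::mutex mu;
  std::vector<long> bytes_being_compacted;  // guarded by mu
};

// Consumer only sees an immutable snapshot; no locking needed here.
long Finalize(long size_being_compacted) {
  return size_being_compacted / 2;  // placeholder "scoring" step
}

int main() {
  SharedState state;
  state.bytes_being_compacted = {100, 250, 50};

  long snapshot;
  {
    std::lock_guard<std::mutex> lock(state.mu);  // compute under the mutex
    snapshot = std::accumulate(state.bytes_being_compacted.begin(),
                               state.bytes_being_compacted.end(), 0L);
  }
  std::cout << "score: " << Finalize(snapshot) << "\n";
  return 0;
}
```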
|
|
|
|
|
|
|
// Install recovered version
|
|
|
|
v->PrepareApply(*cfd->GetLatestMutableCFOptions(),
|
|
|
|
!(db_options_->skip_stats_update_on_db_open));
|
|
|
|
AppendVersion(cfd, v);
|
|
|
|
}
|
|
|
|
|
|
|
|
manifest_file_size_ = current_manifest_file_size;
|
|
|
|
next_file_number_.store(next_file + 1);
|
|
|
|
last_sequence_ = last_sequence;
|
|
|
|
prev_log_number_ = previous_log_number;
|
|
|
|
|
|
|
|
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
|
|
|
|
"Recovered from manifest file:%s succeeded,"
|
|
|
|
"manifest_file_number is %lu, next_file_number is %lu, "
|
|
|
|
"last_sequence is %lu, log_number is %lu,"
|
|
|
|
"prev_log_number is %lu,"
|
|
|
|
"max_column_family is %u\n",
|
|
|
|
manifest_filename.c_str(), (unsigned long)manifest_file_number_,
|
|
|
|
(unsigned long)next_file_number_.load(), (unsigned long)last_sequence_,
|
|
|
|
(unsigned long)log_number, (unsigned long)prev_log_number_,
|
|
|
|
column_family_set_->GetMaxColumnFamily());
|
|
|
|
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
if (cfd->IsDropped()) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
|
|
|
|
"Column family [%s] (ID %u), log number is %" PRIu64 "\n",
|
|
|
|
cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto& builder : builders) {
|
|
|
|
delete builder.second;
|
|
|
|
}
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
Status VersionSet::ListColumnFamilies(std::vector<std::string>* column_families,
|
|
|
|
const std::string& dbname, Env* env) {
|
|
|
|
// these are just for performance reasons, not correctness,
|
|
|
|
// so we're fine using the defaults
|
|
|
|
EnvOptions soptions;
|
|
|
|
// Read "CURRENT" file, which contains a pointer to the current manifest file
|
|
|
|
std::string current;
|
|
|
|
Status s = ReadFileToString(env, CurrentFileName(dbname), ¤t);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
if (current.empty() || current[current.size()-1] != '\n') {
|
|
|
|
return Status::Corruption("CURRENT file does not end with newline");
|
|
|
|
}
|
|
|
|
current.resize(current.size() - 1);
|
|
|
|
|
|
|
|
std::string dscname = dbname + "/" + current;
|
|
|
|
|
|
|
|
unique_ptr<SequentialFileReader> file_reader;
|
|
|
|
{
|
|
|
|
unique_ptr<SequentialFile> file;
|
|
|
|
s = env->NewSequentialFile(dscname, &file, soptions);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
file_reader.reset(new SequentialFileReader(std::move(file)));
|
|
|
|
}
|
|
|
|
|
|
|
|
std::map<uint32_t, std::string> column_family_names;
|
|
|
|
// default column family is always implicitly there
|
|
|
|
column_family_names.insert({0, kDefaultColumnFamilyName});
|
|
|
|
VersionSet::LogReporter reporter;
|
|
|
|
reporter.status = &s;
|
|
|
|
log::Reader reader(NULL, std::move(file_reader), &reporter, true /*checksum*/,
|
|
|
|
0 /*initial_offset*/, 0);
|
|
|
|
Slice record;
|
|
|
|
std::string scratch;
|
|
|
|
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
|
|
|
|
VersionEdit edit;
|
|
|
|
s = edit.DecodeFrom(record);
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (edit.is_column_family_add_) {
|
|
|
|
if (column_family_names.find(edit.column_family_) !=
|
|
|
|
column_family_names.end()) {
|
|
|
|
s = Status::Corruption("Manifest adding the same column family twice");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
column_family_names.insert(
|
|
|
|
{edit.column_family_, edit.column_family_name_});
|
|
|
|
} else if (edit.is_column_family_drop_) {
|
|
|
|
if (column_family_names.find(edit.column_family_) ==
|
|
|
|
column_family_names.end()) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest - dropping non-existing column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
column_family_names.erase(edit.column_family_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
column_families->clear();
|
|
|
|
if (s.ok()) {
|
|
|
|
for (const auto& iter : column_family_names) {
|
|
|
|
column_families->push_back(iter.second);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE
|
Make VersionSet::ReduceNumberOfLevels() static
Summary:
A lot of our code implicitly assumes number_levels to be static. ReduceNumberOfLevels() breaks that assumption. For example, after calling ReduceNumberOfLevels(), DBImpl::NumberLevels() will be different from VersionSet::NumberLevels(). This is dangerous. Thankfully, it's not in public headers and is only used from the LDB cmd tool. The LDB tool only uses it statically, i.e. it never calls it with a running DB instance. With this diff, we make it explicitly static. This way, we can assume number_levels to be immutable and not break the assumption that a lot of our code relies upon. The LDB tool can still use the method.
Also, I removed the method from a separate file since it breaks filename completion. version_se<TAB> now completes to "version_set." instead of "version_set" (without the dot). I don't see a big reason that the function should be in a different file.
Test Plan: reduce_levels_test
Reviewers: dhruba, haobo, kailiu, sdong
Reviewed By: kailiu
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15303
11 years ago
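A tiny sketch of the static-vs-member distinction (hypothetical class, not VersionSet itself): the static helper builds its own short-lived state from a path, so it can never leave a live instance's cached level count out of sync:
```
#include <cassert>
#include <string>

class LevelSet {
 public:
  explicit LevelSet(int num_levels) : num_levels_(num_levels) {}
  int num_levels() const { return num_levels_; }

  // Static: callers pass a path to an offline database; nothing about an
  // already-open LevelSet object can be left stale by this call.
  static int ReduceNumberOfLevels(const std::string& /*dbname*/,
                                  int new_levels) {
    LevelSet scratch(new_levels);  // private, short-lived instance
    return scratch.num_levels();
  }

 private:
  int num_levels_;
};

int main() {
  LevelSet open_db(7);
  int reduced = LevelSet::ReduceNumberOfLevels("/tmp/closed_db", 3);
  assert(reduced == 3);
  assert(open_db.num_levels() == 7);  // untouched by the static helper
  return 0;
}
```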
|
|
|
Status VersionSet::ReduceNumberOfLevels(const std::string& dbname,
|
|
|
|
const Options* options,
|
|
|
|
const EnvOptions& env_options,
|
|
|
|
int new_levels) {
|
|
|
|
if (new_levels <= 1) {
|
|
|
|
return Status::InvalidArgument(
|
|
|
|
"Number of levels needs to be bigger than 1");
|
|
|
|
}
|
|
|
|
|
|
|
|
ImmutableDBOptions db_options(*options);
|
[CF] Rethink table cache
Summary:
Adapting the table cache to column families is interesting. We want the table cache to be a global LRU, so if some column families are not used as often as others, we want them to be evicted from the cache. However, the current TableCache object also constructs tables on its own. If a table is not found in the cache, TableCache automatically creates a new one. We want each column family to be able to specify a different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underlying cache is shared by all TableCache objects.
This allows us to have a global LRU while still supporting different table factories for different column families. In the future, it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
11 years ago
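A rough sketch of the sharing scheme described above, with hypothetical types (a plain map stands in for the real LRU cache): each per-column-family front end keeps its own construction logic but inserts into one shared store, so eviction decisions would be global:
```
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

using SharedStore = std::unordered_map<std::string, std::string>;

class TableCacheFrontEnd {
 public:
  TableCacheFrontEnd(std::shared_ptr<SharedStore> store,
                     std::function<std::string(const std::string&)> factory)
      : store_(std::move(store)), factory_(std::move(factory)) {}

  const std::string& Get(const std::string& file) {
    auto it = store_->find(file);
    if (it == store_->end()) {
      // Miss: build the "table" with this column family's own factory,
      // but insert it into the store shared by all column families.
      it = store_->emplace(file, factory_(file)).first;
    }
    return it->second;
  }

 private:
  std::shared_ptr<SharedStore> store_;
  std::function<std::string(const std::string&)> factory_;
};

int main() {
  auto store = std::make_shared<SharedStore>();
  TableCacheFrontEnd cf_default(store,
                                [](const std::string& f) { return "block:" + f; });
  TableCacheFrontEnd cf_meta(store,
                             [](const std::string& f) { return "plain:" + f; });

  cf_default.Get("000012.sst");
  cf_meta.Get("000034.sst");
  std::cout << "shared entries: " << store->size() << "\n";  // prints 2
  return 0;
}
```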
|
|
|
ColumnFamilyOptions cf_options(*options);
|
|
|
|
std::shared_ptr<Cache> tc(NewLRUCache(options->max_open_files - 10,
|
|
|
|
options->table_cache_numshardbits));
|
|
|
|
WriteController wc(options->delayed_write_rate);
|
|
|
|
WriteBufferManager wb(options->db_write_buffer_size);
|
|
|
|
VersionSet versions(dbname, &db_options, env_options, tc.get(), &wb, &wc);
|
|
|
|
Status status;
|
|
|
|
|
|
|
|
std::vector<ColumnFamilyDescriptor> dummy;
|
|
|
|
ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
|
|
|
|
ColumnFamilyOptions(*options));
|
|
|
|
dummy.push_back(dummy_descriptor);
|
|
|
|
status = versions.Recover(dummy);
|
|
|
|
if (!status.ok()) {
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
Version* current_version =
|
|
|
|
versions.GetColumnFamilySet()->GetDefault()->current();
|
|
|
|
auto* vstorage = current_version->storage_info();
|
|
|
|
int current_levels = vstorage->num_levels();
|
|
|
|
|
|
|
|
if (current_levels <= new_levels) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure there are files on only one level from
|
|
|
|
// (new_levels-1) to (current_levels-1)
|
|
|
|
int first_nonempty_level = -1;
|
|
|
|
int first_nonempty_level_filenum = 0;
|
|
|
|
for (int i = new_levels - 1; i < current_levels; i++) {
|
|
|
|
int file_num = vstorage->NumLevelFiles(i);
|
|
|
|
if (file_num != 0) {
|
|
|
|
if (first_nonempty_level < 0) {
|
|
|
|
first_nonempty_level = i;
|
|
|
|
first_nonempty_level_filenum = file_num;
|
|
|
|
} else {
|
|
|
|
char msg[255];
|
|
|
|
snprintf(msg, sizeof(msg),
|
|
|
|
"Found at least two levels containing files: "
|
|
|
|
"[%d:%d],[%d:%d].\n",
|
|
|
|
first_nonempty_level, first_nonempty_level_filenum, i,
|
|
|
|
file_num);
|
|
|
|
return Status::InvalidArgument(msg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// we need to allocate an array with the old number of levels size to
|
|
|
|
// avoid SIGSEGV in WriteSnapshot()
|
|
|
|
// however, all levels bigger or equal to new_levels will be empty
|
|
|
|
std::vector<FileMetaData*>* new_files_list =
|
|
|
|
new std::vector<FileMetaData*>[current_levels];
|
|
|
|
for (int i = 0; i < new_levels - 1; i++) {
|
|
|
|
new_files_list[i] = vstorage->LevelFiles(i);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (first_nonempty_level > 0) {
|
|
|
|
new_files_list[new_levels - 1] = vstorage->LevelFiles(first_nonempty_level);
|
|
|
|
}
|
|
|
|
|
|
|
|
delete[] vstorage->files_;
|
|
|
|
vstorage->files_ = new_files_list;
|
|
|
|
vstorage->num_levels_ = new_levels;
|
|
|
|
|
|
|
|
MutableCFOptions mutable_cf_options(*options);
|
|
|
|
VersionEdit ve;
|
|
|
|
InstrumentedMutex dummy_mutex;
|
|
|
|
InstrumentedMutexLock l(&dummy_mutex);
|
|
|
|
return versions.LogAndApply(
|
|
|
|
versions.GetColumnFamilySet()->GetDefault(),
|
|
|
|
mutable_cf_options, &ve, &dummy_mutex, nullptr, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
Status VersionSet::DumpManifest(Options& options, std::string& dscname,
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage:**
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
9 years ago
|
|
|
bool verbose, bool hex, bool json) {
|
|
|
|
// Open the specified manifest file.
|
|
|
|
unique_ptr<SequentialFileReader> file_reader;
|
|
|
|
Status s;
|
|
|
|
{
|
|
|
|
unique_ptr<SequentialFile> file;
|
|
|
|
s = options.env->NewSequentialFile(dscname, &file, env_options_);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
file_reader.reset(new SequentialFileReader(std::move(file)));
|
|
|
|
}
|
|
|
|
|
|
|
|
bool have_prev_log_number = false;
|
|
|
|
bool have_next_file = false;
|
|
|
|
bool have_last_sequence = false;
|
|
|
|
uint64_t next_file = 0;
|
|
|
|
uint64_t last_sequence = 0;
|
|
|
|
uint64_t previous_log_number = 0;
|
|
|
|
int count = 0;
|
|
|
|
std::unordered_map<uint32_t, std::string> comparators;
|
|
|
|
std::unordered_map<uint32_t, BaseReferencedVersionBuilder*> builders;
|
|
|
|
|
|
|
|
// add default column family
|
|
|
|
VersionEdit default_cf_edit;
|
|
|
|
default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
|
|
|
|
default_cf_edit.SetColumnFamily(0);
|
|
|
|
ColumnFamilyData* default_cfd =
|
|
|
|
CreateColumnFamily(ColumnFamilyOptions(options), &default_cf_edit);
|
|
|
|
builders.insert({0, new BaseReferencedVersionBuilder(default_cfd)});
|
|
|
|
|
|
|
|
{
|
|
|
|
VersionSet::LogReporter reporter;
|
|
|
|
reporter.status = &s;
|
|
|
|
log::Reader reader(NULL, std::move(file_reader), &reporter,
|
|
|
|
true /*checksum*/, 0 /*initial_offset*/, 0);
|
|
|
|
Slice record;
|
|
|
|
std::string scratch;
|
|
|
|
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
|
|
|
|
VersionEdit edit;
|
|
|
|
s = edit.DecodeFrom(record);
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write out each individual edit
|
|
|
|
if (verbose && !json) {
|
|
|
|
printf("%s\n", edit.DebugString(hex).c_str());
|
|
|
|
} else if (json) {
|
|
|
|
printf("%s\n", edit.DebugJSON(count, hex).c_str());
|
|
|
|
}
|
|
|
|
count++;
|
|
|
|
|
|
|
|
bool cf_in_builders =
|
|
|
|
builders.find(edit.column_family_) != builders.end();
|
|
|
|
|
|
|
|
if (edit.has_comparator_) {
|
|
|
|
comparators.insert({edit.column_family_, edit.comparator_});
|
|
|
|
}
|
|
|
|
|
|
|
|
ColumnFamilyData* cfd = nullptr;
|
|
|
|
|
|
|
|
if (edit.is_column_family_add_) {
|
|
|
|
if (cf_in_builders) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest adding the same column family twice");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
cfd = CreateColumnFamily(ColumnFamilyOptions(options), &edit);
|
|
|
|
builders.insert(
|
|
|
|
{edit.column_family_, new BaseReferencedVersionBuilder(cfd)});
|
|
|
|
} else if (edit.is_column_family_drop_) {
|
|
|
|
if (!cf_in_builders) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest - dropping non-existing column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
auto builder_iter = builders.find(edit.column_family_);
|
|
|
|
delete builder_iter->second;
|
|
|
|
builders.erase(builder_iter);
|
|
|
|
comparators.erase(edit.column_family_);
|
|
|
|
cfd = column_family_set_->GetColumnFamily(edit.column_family_);
|
|
|
|
assert(cfd != nullptr);
|
|
|
|
cfd->Unref();
|
|
|
|
delete cfd;
|
|
|
|
cfd = nullptr;
|
|
|
|
} else {
|
|
|
|
if (!cf_in_builders) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest record referencing unknown column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
cfd = column_family_set_->GetColumnFamily(edit.column_family_);
|
|
|
|
// this should never happen since cf_in_builders is true
|
|
|
|
assert(cfd != nullptr);
|
|
|
|
|
|
|
|
// if it is not column family add or column family drop,
|
|
|
|
// then it's a file add/delete, which should be forwarded
|
|
|
|
// to builder
|
|
|
|
auto builder = builders.find(edit.column_family_);
|
|
|
|
assert(builder != builders.end());
|
|
|
|
builder->second->version_builder()->Apply(&edit);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cfd != nullptr && edit.has_log_number_) {
|
|
|
|
cfd->SetLogNumber(edit.log_number_);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_prev_log_number_) {
|
|
|
|
previous_log_number = edit.prev_log_number_;
|
|
|
|
have_prev_log_number = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_next_file_number_) {
|
|
|
|
next_file = edit.next_file_number_;
|
|
|
|
have_next_file = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_last_sequence_) {
|
|
|
|
last_sequence = edit.last_sequence_;
|
|
|
|
have_last_sequence = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_max_column_family_) {
|
|
|
|
column_family_set_->UpdateMaxColumnFamily(edit.max_column_family_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
file_reader.reset();
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
if (!have_next_file) {
|
|
|
|
s = Status::Corruption("no meta-nextfile entry in descriptor");
|
|
|
|
printf("no meta-nextfile entry in descriptor");
|
|
|
|
} else if (!have_last_sequence) {
|
|
|
|
printf("no last-sequence-number entry in descriptor");
|
|
|
|
s = Status::Corruption("no last-sequence-number entry in descriptor");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!have_prev_log_number) {
|
|
|
|
previous_log_number = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
if (cfd->IsDropped()) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
auto builders_iter = builders.find(cfd->GetID());
|
|
|
|
assert(builders_iter != builders.end());
|
|
|
|
auto builder = builders_iter->second->version_builder();
|
|
|
|
|
|
|
|
Version* v = new Version(cfd, this, current_version_number_++);
|
|
|
|
builder->SaveTo(v->storage_info());
|
|
|
|
v->PrepareApply(*cfd->GetLatestMutableCFOptions(), false);
|
|
|
|
|
|
|
|
printf("--------------- Column family \"%s\" (ID %u) --------------\n",
|
|
|
|
cfd->GetName().c_str(), (unsigned int)cfd->GetID());
|
|
|
|
printf("log number: %lu\n", (unsigned long)cfd->GetLogNumber());
|
|
|
|
auto comparator = comparators.find(cfd->GetID());
|
|
|
|
if (comparator != comparators.end()) {
|
|
|
|
printf("comparator: %s\n", comparator->second.c_str());
|
|
|
|
} else {
|
|
|
|
printf("comparator: <NO COMPARATOR>\n");
|
|
|
|
}
|
|
|
|
printf("%s \n", v->DebugString(hex).c_str());
|
|
|
|
delete v;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Free builders
|
|
|
|
for (auto& builder : builders) {
|
|
|
|
delete builder.second;
|
|
|
|
}
|
|
|
|
|
|
|
|
next_file_number_.store(next_file + 1);
|
|
|
|
last_sequence_ = last_sequence;
|
|
|
|
prev_log_number_ = previous_log_number;
|
|
|
|
|
|
|
|
printf(
|
|
|
|
"next_file_number %lu last_sequence "
|
|
|
|
"%lu prev_log_number %lu max_column_family %u\n",
|
|
|
|
(unsigned long)next_file_number_.load(), (unsigned long)last_sequence,
|
|
|
|
(unsigned long)previous_log_number,
|
|
|
|
column_family_set_->GetMaxColumnFamily());
|
|
|
|
}
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
|
|
|
|
void VersionSet::MarkFileNumberUsedDuringRecovery(uint64_t number) {
|
|
|
|
// only called during recovery which is single threaded, so this works because
|
|
|
|
// there can't be concurrent calls
|
|
|
|
if (next_file_number_.load(std::memory_order_relaxed) <= number) {
|
|
|
|
next_file_number_.store(number + 1, std::memory_order_relaxed);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Status VersionSet::WriteSnapshot(log::Writer* log) {
|
|
|
|
// TODO: Break up into multiple records to reduce memory usage on recovery?
|
|
|
|
|
|
|
|
// WARNING: This method doesn't hold a mutex!!
|
|
|
|
|
|
|
|
// This is done without DB mutex lock held, but only within single-threaded
|
|
|
|
// LogAndApply. Column family manipulations can only happen within LogAndApply
|
|
|
|
// (the same single thread), so we're safe to iterate.
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
if (cfd->IsDropped()) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
{
|
|
|
|
// Store column family info
|
|
|
|
VersionEdit edit;
|
|
|
|
if (cfd->GetID() != 0) {
|
|
|
|
// default column family is always there,
|
|
|
|
// no need to explicitly write it
|
|
|
|
edit.AddColumnFamily(cfd->GetName());
|
|
|
|
edit.SetColumnFamily(cfd->GetID());
|
|
|
|
}
|
|
|
|
edit.SetComparatorName(
|
|
|
|
cfd->internal_comparator().user_comparator()->Name());
|
|
|
|
std::string record;
|
|
|
|
if (!edit.EncodeTo(&record)) {
|
|
|
|
return Status::Corruption(
|
|
|
|
"Unable to Encode VersionEdit:" + edit.DebugString(true));
|
|
|
|
}
|
|
|
|
Status s = log->AddRecord(record);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// Save files
|
|
|
|
VersionEdit edit;
|
|
|
|
edit.SetColumnFamily(cfd->GetID());
|
|
|
|
|
|
|
|
for (int level = 0; level < cfd->NumberLevels(); level++) {
|
|
|
|
for (const auto& f :
|
|
|
|
cfd->current()->storage_info()->LevelFiles(level)) {
|
|
|
|
edit.AddFile(level, f->fd.GetNumber(), f->fd.GetPathId(),
|
|
|
|
f->fd.GetFileSize(), f->smallest, f->largest,
|
|
|
|
f->smallest_seqno, f->largest_seqno,
|
|
|
|
f->marked_for_compaction);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
edit.SetLogNumber(cfd->GetLogNumber());
|
|
|
|
std::string record;
|
|
|
|
if (!edit.EncodeTo(&record)) {
|
|
|
|
return Status::Corruption(
|
|
|
|
"Unable to Encode VersionEdit:" + edit.DebugString(true));
|
|
|
|
}
|
|
|
|
Status s = log->AddRecord(record);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(aekmekji): in CompactionJob::GenSubcompactionBoundaries(), this
|
|
|
|
// function is called repeatedly with consecutive pairs of slices. For example
|
|
|
|
// if the slice list is [a, b, c, d] this function is called with arguments
|
|
|
|
// (a,b) then (b,c) then (c,d). Knowing this, an optimization is possible where
|
|
|
|
// we avoid doing binary search for the keys b and c twice and instead somehow
|
|
|
|
// maintain state of where they first appear in the files.
|
|
|
|
uint64_t VersionSet::ApproximateSize(Version* v, const Slice& start,
|
|
|
|
const Slice& end, int start_level,
|
|
|
|
int end_level) {
|
|
|
|
// pre-condition
|
|
|
|
assert(v->cfd_->internal_comparator().Compare(start, end) <= 0);
|
|
|
|
|
|
|
|
uint64_t size = 0;
|
|
|
|
const auto* vstorage = v->storage_info();
|
|
|
|
end_level = end_level == -1
|
|
|
|
? vstorage->num_non_empty_levels()
|
|
|
|
: std::min(end_level, vstorage->num_non_empty_levels());
|
|
|
|
|
|
|
|
assert(start_level <= end_level);
|
|
|
|
|
|
|
|
for (int level = start_level; level < end_level; level++) {
|
|
|
|
const LevelFilesBrief& files_brief = vstorage->LevelFilesBrief(level);
|
|
|
|
if (!files_brief.num_files) {
|
|
|
|
// empty level, skip exploration
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!level) {
|
|
|
|
// level 0 files are not range-partitioned and may overlap, so handle this case explicitly
|
|
|
|
size += ApproximateSizeLevel0(v, files_brief, start, end);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(level > 0);
|
|
|
|
assert(files_brief.num_files > 0);
|
|
|
|
|
|
|
|
// identify the file position for starting key
|
|
|
|
const uint64_t idx_start = FindFileInRange(
|
|
|
|
v->cfd_->internal_comparator(), files_brief, start,
|
|
|
|
/*start=*/0, static_cast<uint32_t>(files_brief.num_files - 1));
|
|
|
|
assert(idx_start < files_brief.num_files);
|
|
|
|
|
|
|
|
// scan all files from the starting position until the ending position
|
|
|
|
// inferred from the sorted order
|
|
|
|
for (uint64_t i = idx_start; i < files_brief.num_files; i++) {
|
|
|
|
uint64_t val;
|
|
|
|
val = ApproximateSize(v, files_brief.files[i], end);
|
|
|
|
if (!val) {
|
|
|
|
// the files after this will not have the range
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
size += val;
|
|
|
|
|
|
|
|
if (i == idx_start) {
|
|
|
|
// subtract the bytes needed to be scanned to get to the starting
|
|
|
|
// key
|
|
|
|
val = ApproximateSize(v, files_brief.files[i], start);
|
|
|
|
assert(size >= val);
|
|
|
|
size -= val;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t VersionSet::ApproximateSizeLevel0(Version* v,
|
|
|
|
const LevelFilesBrief& files_brief,
|
|
|
|
const Slice& key_start,
|
|
|
|
const Slice& key_end) {
|
|
|
|
// level 0 files are not in sorted order, we need to iterate through
|
|
|
|
// the list to compute the total bytes that require scanning
|
|
|
|
uint64_t size = 0;
|
|
|
|
for (size_t i = 0; i < files_brief.num_files; i++) {
|
|
|
|
const uint64_t start = ApproximateSize(v, files_brief.files[i], key_start);
|
|
|
|
const uint64_t end = ApproximateSize(v, files_brief.files[i], key_end);
|
|
|
|
assert(end >= start);
|
|
|
|
size += end - start;
|
|
|
|
}
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t VersionSet::ApproximateSize(Version* v, const FdWithKeyRange& f,
|
|
|
|
const Slice& key) {
|
|
|
|
// pre-condition
|
|
|
|
assert(v);
|
|
|
|
|
|
|
|
uint64_t result = 0;
|
|
|
|
if (v->cfd_->internal_comparator().Compare(f.largest_key, key) <= 0) {
|
|
|
|
// Entire file is before "key", so just add the file size
|
|
|
|
result = f.fd.GetFileSize();
|
|
|
|
} else if (v->cfd_->internal_comparator().Compare(f.smallest_key, key) > 0) {
|
|
|
|
// Entire file is after "key", so ignore
|
|
|
|
result = 0;
|
|
|
|
} else {
|
|
|
|
// "key" falls in the range for this table. Add the
|
|
|
|
// approximate offset of "key" within the table.
|
|
|
|
TableReader* table_reader_ptr;
|
|
|
|
InternalIterator* iter = v->cfd_->table_cache()->NewIterator(
|
|
|
|
ReadOptions(), env_options_, v->cfd_->internal_comparator(), f.fd,
|
|
|
|
nullptr /* range_del_agg */, &table_reader_ptr);
|
|
|
|
if (table_reader_ptr != nullptr) {
|
|
|
|
result = table_reader_ptr->ApproximateOffsetOf(key);
|
|
|
|
}
|
|
|
|
delete iter;
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void VersionSet::AddLiveFiles(std::vector<FileDescriptor>* live_list) {
|
[RocksDB] [Performance] Speed up FindObsoleteFiles
Summary:
FindObsoleteFiles was slow while holding the single big lock, resulting in bad p99 behavior.
Didn't profile anything, but several things could be improved:
1. VersionSet::AddLiveFiles works with std::set, which is slow by itself (a tree).
You also don't know how many dynamic allocations occur just to build up this tree.
Switched to std::vector, and added logic to pre-calculate the total size so only one allocation is done.
2. Don't see why env_->GetChildren() needs to be mutex protected; moved it to PurgeObsoleteFiles where
the mutex can be unlocked.
3. Switched std::set to std::unordered_set; the conversion from the vector also happens inside PurgeObsoleteFiles.
I have a feeling this should pretty much fix it.
Test Plan: make check; db_stress
Reviewers: dhruba, heyongqiang, MarkCallaghan
Reviewed By: dhruba
CC: leveldb, zshao
Differential Revision: https://reviews.facebook.net/D10197
12 years ago
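A minimal sketch of the reserve-then-append pattern described above; the helper name and inputs are illustrative, not the actual RocksDB code:

#include <cstdint>
#include <unordered_set>
#include <vector>

// Pre-compute the total count, reserve once, append, and only then convert to
// a hash set (e.g. outside the DB mutex) for O(1) membership checks.
void CollectLiveFileNumbers(
    const std::vector<std::vector<uint64_t>>& files_per_level,
    std::unordered_set<uint64_t>* live_set) {
  size_t total = 0;
  for (const auto& level : files_per_level) {
    total += level.size();
  }
  std::vector<uint64_t> live;
  live.reserve(total);  // one allocation instead of many tree-node allocations
  for (const auto& level : files_per_level) {
    live.insert(live.end(), level.begin(), level.end());
  }
  live_set->insert(live.begin(), live.end());
}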
|
|
|
// pre-calculate space requirement
|
|
|
|
int64_t total_files = 0;
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
Version* dummy_versions = cfd->dummy_versions();
|
|
|
|
for (Version* v = dummy_versions->next_; v != dummy_versions;
|
|
|
|
v = v->next_) {
|
|
|
|
const auto* vstorage = v->storage_info();
|
|
|
|
for (int level = 0; level < vstorage->num_levels(); level++) {
|
|
|
|
total_files += vstorage->LevelFiles(level).size();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// just one time extension to the right size
|
|
|
|
live_list->reserve(live_list->size() + static_cast<size_t>(total_files));
|
|
|
|
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
auto* current = cfd->current();
|
|
|
|
bool found_current = false;
|
|
|
|
Version* dummy_versions = cfd->dummy_versions();
|
|
|
|
for (Version* v = dummy_versions->next_; v != dummy_versions;
|
|
|
|
v = v->next_) {
|
|
|
|
v->AddLiveFiles(live_list);
|
|
|
|
if (v == current) {
|
|
|
|
found_current = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found_current && current != nullptr) {
|
|
|
|
// Should never happen unless it is a bug.
|
|
|
|
assert(false);
|
|
|
|
current->AddLiveFiles(live_list);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Compaction Support for Range Deletion
Summary:
This diff introduces RangeDelAggregator, which takes ownership of iterators
provided to it via AddTombstones(). The tombstones are organized in a two-level
map (snapshot stripe -> begin key -> tombstone). Tombstone creation avoids data
copy by holding Slices returned by the iterator, which remain valid thanks to pinning.
For compaction, we create a hierarchical range tombstone iterator with structure
matching the iterator over compaction input data. An aggregator based on that
iterator is used by CompactionIterator to determine which keys are covered by
range tombstones. In the case of merge operands, the same aggregator is used by
MergeHelper. Upon finishing each file in the compaction, relevant range tombstones
are added to the output file's range tombstone meta-block and file boundaries are
updated accordingly.
To check whether a key is covered by a range tombstone, RangeDelAggregator::ShouldDelete()
considers tombstones in the key's snapshot stripe. When this function is used outside of
compaction, it also checks newer stripes, which can contain covering tombstones. Currently
the intra-stripe check involves a linear scan; however, in the future we plan to collapse ranges
within a stripe such that binary search can be used.
RangeDelAggregator::AddToBuilder() adds all range tombstones in the table's key-range
to a new table's range tombstone meta-block. Since range tombstones may fall in the gap
between files, we may need to extend some files' key-ranges. The strategy is (1) first file
extends as far left as possible and other files do not extend left, (2) all files extend right
until either the start of the next file or the end of the last range tombstone in the gap,
whichever comes first.
One other notable change is adding release/move semantics to ScopedArenaIterator
such that it can be used to transfer ownership of an arena-allocated iterator, similar to
how unique_ptr is used for malloc'd data.
Depends on D61473
Test Plan: compaction_iterator_test, mock_table, end-to-end tests in D63927
Reviewers: sdong, IslamAbdelRahman, wanning, yhchiang, lightmark
Reviewed By: lightmark
Subscribers: andrewkr, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D62205
8 years ago
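A simplified sketch of the two-level map (snapshot stripe -> begin key -> tombstone) and the intra-stripe linear scan described above; the types and names here are illustrative, not the real RangeDelAggregator:

#include <cstdint>
#include <map>
#include <string>

struct RangeTombstone {
  std::string start_key;  // inclusive
  std::string end_key;    // exclusive
  uint64_t seq;
};

// snapshot stripe (upper sequence bound) -> begin key -> tombstone
using StripeMap = std::map<uint64_t, std::map<std::string, RangeTombstone>>;

// True if a key written at sequence number 'seq' is covered by some tombstone
// in the stripe; the scan within a stripe is linear, as noted above.
bool ShouldDeleteInStripe(const std::map<std::string, RangeTombstone>& stripe,
                          const std::string& user_key, uint64_t seq) {
  for (const auto& entry : stripe) {
    const RangeTombstone& t = entry.second;
    if (t.seq > seq && t.start_key <= user_key && user_key < t.end_key) {
      return true;
    }
  }
  return false;
}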
|
|
|
InternalIterator* VersionSet::MakeInputIterator(
|
|
|
|
const Compaction* c, RangeDelAggregator* range_del_agg) {
|
|
|
|
auto cfd = c->column_family_data();
|
|
|
|
ReadOptions read_options;
|
|
|
|
read_options.verify_checksums = true;
|
|
|
|
read_options.fill_cache = false;
|
|
|
|
if (c->ShouldFormSubcompactions()) {
|
Parallelize L0-L1 Compaction: Restructure Compaction Job
Summary:
As of now compactions involving files from Level 0 and Level 1 are single
threaded because the files in L0, although sorted, are not range partitioned like
the other levels. This means that during L0-L1 compaction each file from L1
needs to be merged with potentially all the files from L0.
This attempt to parallelize L0-L1 compaction assigns a thread and a
corresponding iterator to each L1 file; each iterator then considers only the key range
found in that L1 file, only the L0 files that contain those keys, and only the
specific portions of those L0 files in which those keys are found. In this way
the overlap between different iterators focusing on the same files is minimized
and potentially eliminated.
The first step is to restructure the compaction logic to break L0-L1 compactions
into multiple, smaller, sequential compactions. Eventually each of these smaller
jobs will be run simultaneously. Areas to pay extra attention to are
# Correct aggregation of compaction job statistics across multiple threads
# Proper opening/closing of output files (make sure each thread's is unique)
# Keys that span multiple L1 files
# Skewed distributions of keys within L0 files
Test Plan: Make and run db_test (newer version has separate compaction tests) and compaction_job_stats_test
Reviewers: igor, noetzli, anthony, sdong, yhchiang
Reviewed By: yhchiang
Subscribers: MarkCallaghan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D42699
9 years ago
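A rough sketch of deriving subcompaction boundaries from the non-overlapping L1 file ranges, so each slice can be compacted independently (hypothetical helper, not the actual CompactionJob logic):

#include <string>
#include <vector>

struct FileRange {
  std::string smallest;
  std::string largest;
};

// The largest key of each L1 file (except the last) becomes a boundary; the
// resulting slices only overlap the L0 data that falls inside each slice.
std::vector<std::string> SubcompactionBoundaries(
    const std::vector<FileRange>& l1_files) {
  std::vector<std::string> boundaries;
  for (size_t i = 0; i + 1 < l1_files.size(); ++i) {
    boundaries.push_back(l1_files[i].largest);
  }
  return boundaries;
}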
|
|
|
read_options.total_order_seek = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Level-0 files have to be merged together. For other levels,
|
|
|
|
// we will make a concatenating iterator per level.
|
|
|
|
// TODO(opt): use concatenating iterator for level-0 if there is no overlap
|
|
|
|
const size_t space = (c->level() == 0 ? c->input_levels(0)->num_files +
|
|
|
|
c->num_input_levels() - 1
|
|
|
|
: c->num_input_levels());
|
|
|
|
InternalIterator** list = new InternalIterator* [space];
|
|
|
|
size_t num = 0;
|
|
|
|
for (size_t which = 0; which < c->num_input_levels(); which++) {
|
|
|
|
if (c->input_levels(which)->num_files != 0) {
|
|
|
|
if (c->level(which) == 0) {
|
|
|
|
const LevelFilesBrief* flevel = c->input_levels(which);
|
|
|
|
for (size_t i = 0; i < flevel->num_files; i++) {
|
|
|
|
list[num++] = cfd->table_cache()->NewIterator(
|
|
|
|
read_options, env_options_compactions_,
|
|
|
|
cfd->internal_comparator(), flevel->files[i].fd, range_del_agg,
|
|
|
|
nullptr /* table_reader_ptr */,
|
|
|
|
nullptr /* no per level latency histogram */,
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block-based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when an L0 block-based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, i.e. the LRU cache will never push them out. Meanwhile, further accesses through the table reader will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
9 years ago
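A hedged sketch of how these table options would be wired up; the option names follow the summary above, but should be checked against the headers of the RocksDB release in use:

#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options MakeOptionsWithPinnedL0Metadata() {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = true;            // keep index/filter blocks in the block cache
  table_options.pin_l0_filter_and_index_blocks_in_cache = true;  // and pin them for L0 files
  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}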
|
|
|
true /* for_compaction */, nullptr /* arena */,
|
|
|
|
false /* skip_filters */, (int)which /* level */);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Create concatenating iterator for the files from this level
|
Measure file read latency histogram per level
Summary: In internal stats, remember the per-level read latency histogram if statistics are enabled. It can be retrieved from DB::GetProperty() with the "rocksdb.dbstats" property.
Test Plan: Manually run db_bench, print out "rocksdb.dbstats" by hand, and make sure it prints as expected
Reviewers: igor, IslamAbdelRahman, rven, kradhakrishnan, anthony, yhchiang
Reviewed By: yhchiang
Subscribers: MarkCallaghan, leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D44193
9 years ago
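A hedged usage sketch for reading these stats back out; it assumes statistics were enabled on the Options used to open the database:

#include <iostream>
#include <string>
#include "rocksdb/db.h"

void DumpDbStats(rocksdb::DB* db) {
  // Populated only when options.statistics was set before DB::Open(),
  // e.g. options.statistics = rocksdb::CreateDBStatistics();
  std::string stats;
  if (db->GetProperty("rocksdb.dbstats", &stats)) {
    std::cout << stats << std::endl;
  }
}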
|
|
|
list[num++] = NewTwoLevelIterator(
|
|
|
|
new LevelFileIteratorState(
|
|
|
|
cfd->table_cache(), read_options, env_options_,
|
|
|
|
cfd->internal_comparator(),
|
|
|
|
nullptr /* no per level latency histogram */,
|
Skip bottom-level filter block caching when hit-optimized
Summary:
When Get() or NewIterator() trigger file loads, skip caching the filter block if
(1) optimize_filters_for_hits is set and (2) the file is on the bottommost
level. Also skip checking filters under the same conditions, which means that
for a preloaded file or a file that was trivially moved to the bottom level, its
filter block will eventually expire from the cache.
- added parameters/instance variables in various places in order to propagate the config ("skip_filters") from version_set to block_based_table_reader
- in BlockBasedTable::Rep, this optimization prevents filter from being loaded when the file is opened simply by setting filter_policy = nullptr
- in BlockBasedTable::Get/BlockBasedTable::NewIterator, this optimization prevents filter from being used (even if it was loaded already) by setting filter = nullptr
Test Plan:
updated unit test:
$ ./db_test --gtest_filter=DBTest.OptimizeFiltersForHits
will also run 'make check'
Reviewers: sdong, igor, paultuckfield, anthony, rven, kradhakrishnan, IslamAbdelRahman, yhchiang
Reviewed By: yhchiang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D51633
9 years ago
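A hedged sketch of enabling the hit-optimized mode described above:

#include "rocksdb/options.h"

rocksdb::Options MakeHitOptimizedOptions() {
  rocksdb::Options options;
  // Skip caching (and checking) filter blocks for bottommost-level files;
  // a good fit when most reads are expected to find their key.
  options.optimize_filters_for_hits = true;
  return options;
}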
|
|
|
true /* for_compaction */, false /* prefix enabled */,
|
|
|
|
false /* skip_filters */, (int)which /* level */,
|
|
|
|
range_del_agg),
|
|
|
|
new LevelFileNumIterator(cfd->internal_comparator(),
|
|
|
|
c->input_levels(which)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert(num <= space);
|
|
|
|
InternalIterator* result =
|
|
|
|
NewMergingIterator(&c->column_family_data()->internal_comparator(), list,
|
|
|
|
static_cast<int>(num));
|
|
|
|
delete[] list;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
// verify that the files listed in this compaction are present
|
|
|
|
// in the current version
|
|
|
|
bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
|
|
|
|
#ifndef NDEBUG
|
|
|
|
Version* version = c->column_family_data()->current();
|
|
|
|
const VersionStorageInfo* vstorage = version->storage_info();
|
|
|
|
if (c->input_version() != version) {
|
|
|
|
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
|
|
|
|
"[%s] compaction output being applied to a different base version from"
|
|
|
|
" input version",
|
|
|
|
c->column_family_data()->GetName().c_str());
|
|
|
|
|
|
|
|
if (vstorage->compaction_style_ == kCompactionStyleLevel &&
|
|
|
|
c->start_level() == 0 && c->num_input_levels() > 2U) {
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the size of the largest level and the second-largest can range from 0 to the multiplier. This frequently makes the LSM tree irregular and unpredictable, and it can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce the parameter options.level_compaction_dynamic_level_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base / options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that the real level ratios are close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
10 years ago
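A hedged configuration sketch of the dynamic level sizing described above (the concrete values are only examples):

#include "rocksdb/options.h"

rocksdb::Options MakeDynamicLevelOptions() {
  rocksdb::Options options;
  options.level_compaction_dynamic_level_bytes = true;  // let RocksDB pick level size targets
  options.max_bytes_for_level_base = 256 * 1048576;     // 256 MB base-level target (example)
  options.max_bytes_for_level_multiplier = 10;          // ~10x growth per level (example)
  return options;
}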
|
|
|
// We are doing a L0->base_level compaction. The assumption is if
|
|
|
|
// base level is not L1, levels from L1 to base_level - 1 is empty.
|
|
|
|
// This is ensured by having one compaction from L0 going on at the
|
|
|
|
// same time in level-based compaction. So that during the time, no
|
|
|
|
// compaction/flush can put files to those levels.
|
|
|
|
for (int l = c->start_level() + 1; l < c->output_level(); l++) {
|
|
|
|
if (vstorage->NumLevelFiles(l) != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t input = 0; input < c->num_input_levels(); ++input) {
|
|
|
|
int level = c->level(input);
|
|
|
|
for (size_t i = 0; i < c->num_input_files(input); ++i) {
|
|
|
|
uint64_t number = c->input(input, i)->fd.GetNumber();
|
|
|
|
bool found = false;
|
|
|
|
for (size_t j = 0; j < vstorage->files_[level].size(); j++) {
|
|
|
|
FileMetaData* f = vstorage->files_[level][j];
|
|
|
|
if (f->fd.GetNumber() == number) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
return false; // input files do not exist in the current version
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
return true; // everything good
|
|
|
|
}
|
|
|
|
|
|
|
|
Status VersionSet::GetMetadataForFile(uint64_t number, int* filelevel,
|
|
|
|
FileMetaData** meta,
|
|
|
|
ColumnFamilyData** cfd) {
|
|
|
|
for (auto cfd_iter : *column_family_set_) {
|
|
|
|
Version* version = cfd_iter->current();
|
|
|
|
const auto* vstorage = version->storage_info();
|
|
|
|
for (int level = 0; level < vstorage->num_levels(); level++) {
|
|
|
|
for (const auto& file : vstorage->LevelFiles(level)) {
|
|
|
|
if (file->fd.GetNumber() == number) {
|
|
|
|
*meta = file;
|
|
|
|
*filelevel = level;
|
|
|
|
*cfd = cfd_iter;
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Status::NotFound("File not present in any level");
|
|
|
|
}
|
|
|
|
|
|
|
|
void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
if (cfd->IsDropped()) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
for (int level = 0; level < cfd->NumberLevels(); level++) {
|
|
|
|
for (const auto& file :
|
|
|
|
cfd->current()->storage_info()->LevelFiles(level)) {
|
|
|
|
LiveFileMetaData filemetadata;
|
|
|
|
filemetadata.column_family_name = cfd->GetName();
|
|
|
|
uint32_t path_id = file->fd.GetPathId();
|
|
|
|
if (path_id < db_options_->db_paths.size()) {
|
|
|
|
filemetadata.db_path = db_options_->db_paths[path_id].path;
|
|
|
|
} else {
|
|
|
|
assert(!db_options_->db_paths.empty());
|
|
|
|
filemetadata.db_path = db_options_->db_paths.back().path;
|
|
|
|
}
|
|
|
|
filemetadata.name = MakeTableFileName("", file->fd.GetNumber());
|
|
|
|
filemetadata.level = level;
|
|
|
|
filemetadata.size = file->fd.GetFileSize();
|
|
|
|
filemetadata.smallestkey = file->smallest.user_key().ToString();
|
|
|
|
filemetadata.largestkey = file->largest.user_key().ToString();
|
|
|
|
filemetadata.smallest_seqno = file->smallest_seqno;
|
|
|
|
filemetadata.largest_seqno = file->largest_seqno;
|
|
|
|
metadata->push_back(filemetadata);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void VersionSet::GetObsoleteFiles(std::vector<FileMetaData*>* files,
|
|
|
|
std::vector<std::string>* manifest_filenames,
|
|
|
|
uint64_t min_pending_output) {
|
|
|
|
assert(manifest_filenames->empty());
|
|
|
|
obsolete_manifests_.swap(*manifest_filenames);
|
|
|
|
std::vector<FileMetaData*> pending_files;
|
|
|
|
for (auto f : obsolete_files_) {
|
|
|
|
if (f->fd.GetNumber() < min_pending_output) {
|
|
|
|
files->push_back(f);
|
|
|
|
} else {
|
|
|
|
pending_files.push_back(f);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
obsolete_files_.swap(pending_files);
|
|
|
|
}
|
|
|
|
|
|
|
|
ColumnFamilyData* VersionSet::CreateColumnFamily(
|
|
|
|
const ColumnFamilyOptions& cf_options, VersionEdit* edit) {
|
|
|
|
assert(edit->is_column_family_add_);
|
|
|
|
|
|
|
|
Version* dummy_versions = new Version(nullptr, this);
|
|
|
|
// Ref() dummy version once so that later we can call Unref() to delete it
|
|
|
|
// by avoiding calling "delete" explicitly (~Version is private)
|
|
|
|
dummy_versions->Ref();
|
|
|
|
auto new_cfd = column_family_set_->CreateColumnFamily(
|
|
|
|
edit->column_family_name_, edit->column_family_, dummy_versions,
|
|
|
|
cf_options);
|
|
|
|
|
|
|
|
Version* v = new Version(new_cfd, this, current_version_number_++);
|
|
|
|
|
|
|
|
// Fill level target base information.
|
|
|
|
v->storage_info()->CalculateBaseBytes(*new_cfd->ioptions(),
|
|
|
|
*new_cfd->GetLatestMutableCFOptions());
|
|
|
|
AppendVersion(new_cfd, v);
|
|
|
|
// GetLatestMutableCFOptions() is safe here without mutex since the
|
|
|
|
// cfd is not available to client
|
|
|
|
new_cfd->CreateNewMemtable(*new_cfd->GetLatestMutableCFOptions(),
|
|
|
|
LastSequence());
|
|
|
|
new_cfd->SetLogNumber(edit->log_number_);
|
|
|
|
return new_cfd;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t VersionSet::GetNumLiveVersions(Version* dummy_versions) {
|
|
|
|
uint64_t count = 0;
|
|
|
|
for (Version* v = dummy_versions->next_; v != dummy_versions; v = v->next_) {
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t VersionSet::GetTotalSstFilesSize(Version* dummy_versions) {
|
|
|
|
std::unordered_set<uint64_t> unique_files;
|
|
|
|
uint64_t total_files_size = 0;
|
|
|
|
for (Version* v = dummy_versions->next_; v != dummy_versions; v = v->next_) {
|
|
|
|
VersionStorageInfo* storage_info = v->storage_info();
|
|
|
|
for (int level = 0; level < storage_info->num_levels_; level++) {
|
|
|
|
for (const auto& file_meta : storage_info->LevelFiles(level)) {
|
|
|
|
if (unique_files.find(file_meta->fd.packed_number_and_path_id) ==
|
|
|
|
unique_files.end()) {
|
|
|
|
unique_files.insert(file_meta->fd.packed_number_and_path_id);
|
|
|
|
total_files_size += file_meta->fd.GetFileSize();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return total_files_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace rocksdb
|