Fix a bug in db_stress and an incorrect assertion in FilePickerMultiGet (#5301)

Summary:
This PR has two fixes for crash test failures:
1. Fix a bug in TestMultiGet() in db_stress that built each MultiGet() batch with the keys grouped the wrong way (by prefix rather than by random key), which guaranteed that the actual values could never match the expected values (see the first sketch after this summary)
2. Remove an incorrect assertion in FilePickerMultiGet::GetNextFileInLevelWithKeys() that checked that files in a level are visited in sorted order during the lookup. That does not hold for MultiGet(): when the batch contains duplicate keys, the search may have to go back one file for the next key. Such a sorted-order check also makes more sense when a new version is created, rather than at lookup time (see the second sketch after this summary)
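
For fix 1, here is a minimal standalone sketch of the corrected batching shape that the tools/db_stress.cc diff below implements: each random key is looked up once, as a single MultiGet() batch over its ten prefixed variants. The sketch assumes an open rocksdb::DB* db and a ColumnFamilyHandle* cfh; base_key stands in for the formatted random key (Key(rand_keys[i]) in db_stress), and CheckPrefixedCopies is a hypothetical helper name, not part of db_stress.

// Minimal sketch, not db_stress code: fetch the ten prefixed copies of one
// key in a single batched MultiGet() and cross-check the returned values.
#include <array>
#include <cstdio>
#include <string>
#include <vector>

#include "rocksdb/db.h"

// `db` and `cfh` are assumed to be an open DB and one of its column family
// handles; `base_key` plays the role of Key(rand_keys[i]) in db_stress.
bool CheckPrefixedCopies(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cfh,
                         const std::string& base_key) {
  const std::array<std::string, 10> prefixes = {"0", "1", "2", "3", "4",
                                                "5", "6", "7", "8", "9"};
  std::vector<std::string> key_str;
  std::vector<rocksdb::Slice> key_slices;
  key_str.reserve(prefixes.size());     // no reallocation, so the Slices
  key_slices.reserve(prefixes.size());  // below stay valid
  for (const auto& prefix : prefixes) {
    key_str.emplace_back(prefix + base_key);  // e.g. "3" + K
    key_slices.emplace_back(key_str.back());
  }

  std::vector<rocksdb::PinnableSlice> values(prefixes.size());
  std::vector<rocksdb::Status> statuses(prefixes.size());
  rocksdb::ReadOptions read_options;
  // One batch per random key, covering all ten prefixed variants.
  db->MultiGet(read_options, cfh, key_slices.size(), key_slices.data(),
               values.data(), statuses.data());

  // The stress test writes identical values under every prefix, so any two
  // values that were found must match.
  for (size_t i = 1; i < values.size(); ++i) {
    if (statuses[0].ok() && statuses[i].ok() && values[i] != values[0]) {
      std::fprintf(stderr, "inconsistent values for key %s\n",
                   key_str[i].c_str());
      return false;
    }
  }
  return true;
}

With this shape, statuses[i] and values[i] always correspond to keys[i] within one batch; the old nesting batched different random keys under a single prefix, so the final consistency check compared values of unrelated keys.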
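
For fix 2, the property that the removed assertion was checking (files in a level are sorted by key range and non-overlapping) is an invariant of the version's file metadata, not of any particular lookup, so a check of that shape belongs where the version's file list is built. The following standalone sketch only illustrates that idea and is not RocksDB code; FileMeta and LevelFilesAreSorted are hypothetical names, and plain string comparison stands in for the internal key comparator.

// Standalone sketch, not the RocksDB implementation: the kind of one-time
// sorted-order check that belongs at version creation time.
#include <cstdio>
#include <string>
#include <vector>

struct FileMeta {
  std::string smallest_key;
  std::string largest_key;
};

// For a non-L0 level, files must be sorted by key range and must not
// overlap. L0 is excluded: its files may overlap and are ordered by
// recency, as the removed NewestFirstBySeqNo branch reflected.
bool LevelFilesAreSorted(const std::vector<FileMeta>& files) {
  for (size_t i = 1; i < files.size(); ++i) {
    if (files[i - 1].largest_key >= files[i].smallest_key) {
      return false;  // out of order or overlapping
    }
  }
  return true;
}

int main() {
  const std::vector<FileMeta> level = {{"a", "c"}, {"d", "f"}, {"g", "k"}};
  // Check once when the level's file list is built, not on every lookup:
  // during a MultiGet() the picker may legitimately revisit an earlier
  // file when the batch contains duplicate keys.
  if (!LevelFilesAreSorted(level)) {
    std::fprintf(stderr, "level files out of order\n");
    return 1;
  }
  return 0;
}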

Test -
asan_crash and ubsan_crash tests
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5301

Differential Revision: D15337383

Pulled By: anand1976

fbshipit-source-id: 35092cb15bbc1700e5e823cbe07bfa62f1e9e6c6
Branch: main
Author: anand76, committed by Facebook Github Bot
Parent: f383641a1d
Commit: 6492430eaf

Changed files:
  db/version_set.cc (41)
  tools/db_stress.cc (28)

db/version_set.cc:

@@ -353,7 +353,7 @@ class FilePickerMultiGet {
   struct FilePickerContext;

  public:
-  FilePickerMultiGet(std::vector<FileMetaData*>* files, MultiGetRange* range,
+  FilePickerMultiGet(MultiGetRange* range,
                      autovector<LevelFilesBrief>* file_levels,
                      unsigned int num_levels, FileIndexer* file_indexer,
                      const Comparator* user_comparator,
@@ -368,18 +368,12 @@ class FilePickerMultiGet {
         maybe_repeat_key_(false),
         current_level_range_(*range, range->begin(), range->end()),
         current_file_range_(*range, range->begin(), range->end()),
-#ifndef NDEBUG
-        files_(files),
-#endif
         level_files_brief_(file_levels),
         is_hit_file_last_in_level_(false),
         curr_file_level_(nullptr),
         file_indexer_(file_indexer),
         user_comparator_(user_comparator),
         internal_comparator_(internal_comparator) {
-#ifdef NDEBUG
-    (void)files;
-#endif
     for (auto iter = range_->begin(); iter != range_->end(); ++iter) {
       fp_ctx_array_[iter.index()] =
           FilePickerContext(0, FileIndexer::kLevelMaxIndex);
@@ -485,25 +479,6 @@ class FilePickerMultiGet {
         } else {
           file_hit = true;
         }
-#ifndef NDEBUG
-        // Sanity check to make sure that the files are correctly sorted
-        if (f != prev_file_) {
-          if (prev_file_) {
-            if (curr_level_ != 0) {
-              int comp_sign = internal_comparator_->Compare(
-                  prev_file_->largest_key, f->smallest_key);
-              assert(comp_sign < 0);
-            } else if (fp_ctx.curr_index_in_curr_level > 0) {
-              // level == 0, the current file cannot be newer than the previous
-              // one. Use compressed data structure, has no attribute seqNo
-              assert(!NewestFirstBySeqNo(
-                  files_[0][fp_ctx.curr_index_in_curr_level],
-                  files_[0][fp_ctx.curr_index_in_curr_level - 1]));
-            }
-          }
-          prev_file_ = f;
-        }
-#endif
         if (cmp_largest == 0) {
           // cmp_largest is 0, which means the next key will not be in this
           // file, so stop looking further. Also don't increment megt_iter_
@@ -635,9 +610,6 @@ class FilePickerMultiGet {
   bool maybe_repeat_key_;
   MultiGetRange current_level_range_;
   MultiGetRange current_file_range_;
-#ifndef NDEBUG
-  std::vector<FileMetaData*>* files_;
-#endif
   autovector<LevelFilesBrief>* level_files_brief_;
   bool search_ended_;
   bool is_hit_file_last_in_level_;
@@ -645,9 +617,6 @@ class FilePickerMultiGet {
   FileIndexer* file_indexer_;
   const Comparator* user_comparator_;
   const InternalKeyComparator* internal_comparator_;
-#ifndef NDEBUG
-  FdWithKeyRange* prev_file_;
-#endif

   // Setup local variables to search next level.
   // Returns false if there are no more levels to search.
@@ -656,9 +625,6 @@ class FilePickerMultiGet {
     MultiGetRange::Iterator mget_iter = current_level_range_.begin();
     if (fp_ctx_array_[mget_iter.index()].curr_index_in_curr_level <
         curr_file_level_->num_files) {
-#ifndef NDEBUG
-      prev_file_ = nullptr;
-#endif
       batch_iter_prev_ = current_level_range_.begin();
       batch_iter_ = current_level_range_.begin();
       return true;
@@ -754,9 +720,6 @@ class FilePickerMultiGet {
       fp_ctx.curr_index_in_curr_level = start_index;
     }
     if (level_contains_keys) {
-#ifndef NDEBUG
-      prev_file_ = nullptr;
-#endif
       batch_iter_prev_ = current_level_range_.begin();
       batch_iter_ = current_level_range_.begin();
       return true;
@@ -1800,7 +1763,7 @@ void Version::MultiGet(const ReadOptions& read_options, MultiGetRange* range,
   MultiGetRange file_picker_range(*range, range->begin(), range->end());
   FilePickerMultiGet fp(
-      storage_info_.files_, &file_picker_range,
+      &file_picker_range,
       &storage_info_.level_files_brief_, storage_info_.num_non_empty_levels_,
       &storage_info_.file_indexer_, user_comparator(), internal_comparator());
   FdWithKeyRange* f = fp.GetNextFile();

tools/db_stress.cc:

@@ -3609,36 +3609,40 @@ class BatchedOpsStressTest : public StressTest {
                                  const std::vector<int>& rand_column_families,
                                  const std::vector<int64_t>& rand_keys) {
     size_t num_keys = rand_keys.size();
-    std::vector<Status> statuses(num_keys);
-    std::string keys[10] = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"};
-    for (int key = 0; key < 10; ++key) {
+    std::vector<Status> ret_status(num_keys);
+    std::array<std::string, 10> keys = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"};
+    size_t num_prefixes = keys.size();
+    for (size_t rand_key = 0; rand_key < num_keys; ++rand_key) {
       std::vector<Slice> key_slices;
-      std::vector<PinnableSlice> values(num_keys);
+      std::vector<PinnableSlice> values(num_prefixes);
+      std::vector<Status> statuses(num_prefixes);
       ReadOptions readoptionscopy = readoptions;
       readoptionscopy.snapshot = db_->GetSnapshot();
       std::vector<std::string> key_str;
-      key_str.reserve(num_keys);
-      key_slices.reserve(num_keys);
+      key_str.reserve(num_prefixes);
+      key_slices.reserve(num_prefixes);
       std::string from_db;
       ColumnFamilyHandle* cfh = column_families_[rand_column_families[0]];
-      for (size_t rand_key = 0; rand_key < num_keys; ++rand_key) {
+      for (size_t key = 0; key < num_prefixes; ++key) {
        key_str.emplace_back(keys[key] + Key(rand_keys[rand_key]));
        key_slices.emplace_back(key_str.back());
       }
-      db_->MultiGet(readoptionscopy, cfh, num_keys, key_slices.data(),
+      db_->MultiGet(readoptionscopy, cfh, num_prefixes, key_slices.data(),
                     values.data(), statuses.data());
-      for (size_t i = 0; i < num_keys; i++) {
+      for (size_t i = 0; i < num_prefixes; i++) {
         Status s = statuses[i];
         if (!s.ok() && !s.IsNotFound()) {
           fprintf(stderr, "get error: %s\n", s.ToString().c_str());
           thread->stats.AddErrors(1);
+          ret_status[rand_key] = s;
           // we continue after error rather than exiting so that we can
           // find more errors if any
         } else if (s.IsNotFound()) {
           thread->stats.AddGets(1, 0);
+          ret_status[rand_key] = s;
         } else {
-          char expected_prefix = (keys[key])[0];
+          char expected_prefix = (keys[i])[0];
           char actual_prefix = (values[i])[0];
           if (actual_prefix != expected_prefix) {
             fprintf(stderr, "error expected prefix = %c actual = %c\n",
@@ -3655,7 +3659,7 @@ class BatchedOpsStressTest : public StressTest {
       db_->ReleaseSnapshot(readoptionscopy.snapshot);
       // Now that we retrieved all values, check that they all match
-      for (size_t i = 1; i < num_keys; i++) {
+      for (size_t i = 1; i < num_prefixes; i++) {
         if (values[i] != values[0]) {
           fprintf(stderr, "error : inconsistent values for key %s: %s, %s\n",
                   key_str[i].c_str(),
@@ -3667,7 +3671,7 @@ class BatchedOpsStressTest : public StressTest {
        }
      }
-    return statuses;
+    return ret_status;
   }

   // Given a key, this does prefix scans for "0"+P, "1"+P,..."9"+P
