Add more tests to ASSERT_STATUS_CHECKED (3), API change (#7715)

Summary:
Third batch of adding more tests to ASSERT_STATUS_CHECKED.

* db_compaction_filter_test
* db_compaction_test
* db_dynamic_level_test
* db_inplace_update_test
* db_sst_test
* db_tailing_iter_test
* db_io_failure_test

Also update the GetApproximateSizes APIs so that all overloads return Status.
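
For callers, the practical effect is that the returned Status must now be consumed. A minimal sketch of the new C++ contract (the GetApproximateSizes overload is from this PR; the helper function and key range are hypothetical):

    #include <cinttypes>
    #include <cstdio>
    #include "rocksdb/db.h"

    // Hedged sketch: after this change, ignoring the returned Status would
    // trip an assertion in ASSERT_STATUS_CHECKED builds.
    void PrintApproximateSize(rocksdb::DB* db) {
      rocksdb::Range r("a", "z");  // hypothetical key range
      uint64_t size = 0;
      rocksdb::Status s = db->GetApproximateSizes(&r, 1, &size);
      if (s.ok()) {
        std::printf("approximate size: %" PRIu64 "\n", size);
      } else {
        std::printf("GetApproximateSizes failed: %s\n", s.ToString().c_str());
      }
    }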

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7715

Reviewed By: jay-zhuang

Differential Revision: D25806896

Pulled By: pdillinger

fbshipit-source-id: 6cb9d62ba5a756c645812754c596ad3995d7c262
Author: Adam Retter (committed by Facebook GitHub Bot)
Commit: 6e0f62f2b6 (parent 5792b73fdc), branch main
25 changed files (lines changed in parentheses):

  HISTORY.md (4)
  Makefile (6)
  db/c.cc (33)
  db/c_test.c (4)
  db/compaction/compaction_job_stats_test.cc (9)
  db/db_compaction_filter_test.cc (97)
  db/db_compaction_test.cc (626)
  db/db_dynamic_level_test.cc (41)
  db/db_impl/db_impl.cc (22)
  db/db_impl/db_impl_compaction_flush.cc (6)
  db/db_io_failure_test.cc (106)
  db/db_sst_test.cc (54)
  db/db_tailing_iter_test.cc (18)
  db/db_test.cc (119)
  db/db_test_util.cc (41)
  db/db_test_util.h (8)
  db/db_with_timestamp_basic_test.cc (2)
  db/event_helpers.cc (7)
  file/delete_scheduler.cc (12)
  file/sst_file_manager_impl.cc (4)
  file/sst_file_manager_impl.h (4)
  include/rocksdb/c.h (4)
  include/rocksdb/db.h (14)
  table/block_based/block_based_table_builder.cc (7)
  tools/ldb_cmd.cc (10)

HISTORY.md
@@ -7,6 +7,10 @@
 ### Behavior Changes
 * Attempting to write a merge operand without explicitly configuring `merge_operator` now fails immediately, causing the DB to enter read-only mode. Previously, failure was deferred until the `merge_operator` was needed by a user read or a background operation.
+### API Changes
+* `rocksdb_approximate_sizes` and `rocksdb_approximate_sizes_cf` in the C API now require an error pointer (`char** errptr`) for receiving any error.
+* All overloads of DB::GetApproximateSizes now return Status, so that any failure to obtain the sizes is indicated to the caller.
 ### Bug Fixes
 * Truncated WALs ending in incomplete records can no longer produce gaps in the recovered data when `WALRecoveryMode::kPointInTimeRecovery` is used. Gaps are still possible when WALs are truncated exactly on record boundaries; for complete protection, users should enable `track_and_verify_wals_in_manifest`.
 * Fix a bug where compressed blocks read by MultiGet are not inserted into the compressed block cache when use_direct_reads = true.
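
As a usage sketch of the amended C API (the function name and errptr parameter are from this diff; the key/range setup is hypothetical, and the error handling follows the usual rocksdb C-API convention where a non-NULL *errptr is a heap-allocated message the caller frees):

    const char* start_keys[1] = {"a"};
    const size_t start_lens[1] = {1};
    const char* limit_keys[1] = {"z"};
    const size_t limit_lens[1] = {1};
    uint64_t sizes[1];
    char* err = NULL;
    rocksdb_approximate_sizes(db, 1, start_keys, start_lens, limit_keys,
                              limit_lens, sizes, &err);
    if (err != NULL) {
      fprintf(stderr, "approximate sizes failed: %s\n", err);
      free(err);  /* error strings from the C API are heap-allocated */
    }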

Makefile
@@ -612,7 +612,12 @@ ifdef ASSERT_STATUS_CHECKED
   db_blob_basic_test \
   db_blob_index_test \
   db_block_cache_test \
+  db_compaction_test \
+  db_compaction_filter_test \
+  db_dynamic_level_test \
   db_flush_test \
+  db_inplace_update_test \
+  db_io_failure_test \
   db_iterator_test \
   db_logical_block_size_cache_test \
   db_memtable_test \
@@ -629,6 +634,7 @@ ifdef ASSERT_STATUS_CHECKED
   deletefile_test \
   external_sst_file_test \
   options_file_test \
+  db_sst_test \
   db_statistics_test \
   db_table_properties_test \
   db_tailing_iter_test \
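
For context, this is the build mode the listed test targets run under: with ASSERT_STATUS_CHECKED, a Status asserts on destruction if it was never examined. A minimal sketch of the contract the tests must satisfy (behavior as exercised by this PR; the demo function is hypothetical):

    #include <cassert>
    #include "rocksdb/db.h"

    void DemoStatusChecking(rocksdb::DB* db) {
      // Checking the Status (via ok(), ASSERT_OK, etc.) marks it as seen.
      rocksdb::Status s = db->Flush(rocksdb::FlushOptions());
      assert(s.ok());
      // When a failure is deliberately ignored, opt out explicitly.
      // This is the PermitUncheckedError() used in the db_impl.cc hunk below.
      db->Flush(rocksdb::FlushOptions()).PermitUncheckedError();
    }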

db/c.cc
@@ -1388,34 +1388,39 @@ char* rocksdb_property_value_cf(
   }
 }

-void rocksdb_approximate_sizes(
-    rocksdb_t* db,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
+void rocksdb_approximate_sizes(rocksdb_t* db, int num_ranges,
+                               const char* const* range_start_key,
+                               const size_t* range_start_key_len,
+                               const char* const* range_limit_key,
+                               const size_t* range_limit_key_len,
+                               uint64_t* sizes, char** errptr) {
   Range* ranges = new Range[num_ranges];
   for (int i = 0; i < num_ranges; i++) {
     ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
     ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
   }
-  db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
+  Status s = db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
+  if (!s.ok()) {
+    SaveError(errptr, s);
+  }
   delete[] ranges;
 }

 void rocksdb_approximate_sizes_cf(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* column_family,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
+    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
+    int num_ranges, const char* const* range_start_key,
+    const size_t* range_start_key_len, const char* const* range_limit_key,
+    const size_t* range_limit_key_len, uint64_t* sizes, char** errptr) {
   Range* ranges = new Range[num_ranges];
   for (int i = 0; i < num_ranges; i++) {
     ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
     ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
   }
-  db->rep->GetApproximateSizes(column_family->rep, ranges, num_ranges, sizes);
+  Status s = db->rep->GetApproximateSizes(column_family->rep, ranges,
+                                          num_ranges, sizes);
+  if (!s.ok()) {
+    SaveError(errptr, s);
+  }
   delete[] ranges;
 }

db/c_test.c
@@ -988,7 +988,9 @@ int main(int argc, char** argv) {
                            &err);
       CheckNoError(err);
     }
-    rocksdb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes);
+    rocksdb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes,
+                              &err);
+    CheckNoError(err);
     CheckCondition(sizes[0] > 0);
     CheckCondition(sizes[1] > 0);
   }

db/compaction/compaction_job_stats_test.cc
@@ -297,15 +297,14 @@ class CompactionJobStatsTest : public testing::Test,
     return result;
   }

-  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0) {
+  Status Size(uint64_t* size, const Slice& start, const Slice& limit,
+              int cf = 0) {
     Range r(start, limit);
-    uint64_t size;
     if (cf == 0) {
-      db_->GetApproximateSizes(&r, 1, &size);
+      return db_->GetApproximateSizes(&r, 1, size);
     } else {
-      db_->GetApproximateSizes(handles_[1], &r, 1, &size);
+      return db_->GetApproximateSizes(handles_[1], &r, 1, size);
     }
-    return size;
   }

   void Compact(int cf, const Slice& start, const Slice& limit,

db/db_compaction_filter_test.cc
@@ -42,7 +42,7 @@ class DBTestCompactionFilterWithCompactParam
         option_config_ == kUniversalSubcompactions) {
       assert(options.max_subcompactions > 1);
     }
-    TryReopen(options);
+    Reopen(options);
   }
 };
@@ -276,7 +276,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   for (int i = 0; i < 100000; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
-    Put(1, key, value);
+    ASSERT_OK(Put(1, key, value));
   }
   ASSERT_OK(Flush(1));
@@ -284,10 +284,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   // the compaction is each level invokes the filter for
   // all the keys in that level.
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);

   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
@@ -321,6 +321,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
       }
       iter->Next();
     }
+    ASSERT_OK(iter->status());
   }
   ASSERT_EQ(total, 100000);
   ASSERT_EQ(count, 0);
@@ -337,10 +338,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   // means that all keys should pass at least once
   // via the compaction filter
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);
   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
@@ -369,10 +370,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   // verify that at the end of the compaction process,
   // nothing is left.
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 0);
   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
@@ -387,6 +388,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
       count++;
      iter->Next();
    }
+    ASSERT_OK(iter->status());
    ASSERT_EQ(count, 0);
  }
@@ -427,9 +429,9 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
   // put some data
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
+      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }

   // this will produce empty file (delete compaction filter)
@@ -440,6 +442,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
   Iterator* itr = db_->NewIterator(ReadOptions());
   itr->SeekToFirst();
+  ASSERT_OK(itr->status());
   // empty db
   ASSERT_TRUE(!itr->Valid());
@@ -463,25 +466,25 @@ TEST_P(DBTestCompactionFilterWithCompactParam,
   for (int i = 0; i < 100001; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
-    Put(1, key, value);
+    ASSERT_OK(Put(1, key, value));
   }

   // push all files to lower levels
   ASSERT_OK(Flush(1));
   if (option_config_ != kUniversalCompactionMultiLevel &&
       option_config_ != kUniversalSubcompactions) {
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
+    ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   } else {
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
+    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1],
+                                     nullptr, nullptr));
   }

   // re-write all data again
   for (int i = 0; i < 100001; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
-    Put(1, key, value);
+    ASSERT_OK(Put(1, key, value));
   }

   // push all files to lower levels. This should
@@ -489,11 +492,11 @@ TEST_P(DBTestCompactionFilterWithCompactParam,
   ASSERT_OK(Flush(1));
   if (option_config_ != kUniversalCompactionMultiLevel &&
       option_config_ != kUniversalSubcompactions) {
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
+    ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   } else {
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
+    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1],
+                                     nullptr, nullptr));
   }

   // verify that all keys now have the new value that
@@ -531,7 +534,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   ASSERT_OK(Flush());
   std::string newvalue = Get("foo");
   ASSERT_EQ(newvalue, three);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("foo");
   ASSERT_EQ(newvalue, three);
@@ -539,12 +542,12 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   // merge keys.
   ASSERT_OK(db_->Put(WriteOptions(), "bar", two));
   ASSERT_OK(Flush());
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("bar");
   ASSERT_EQ("NOT_FOUND", newvalue);
   ASSERT_OK(db_->Merge(WriteOptions(), "bar", two));
   ASSERT_OK(Flush());
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("bar");
   ASSERT_EQ(two, two);
@@ -555,7 +558,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   ASSERT_OK(Flush());
   newvalue = Get("foobar");
   ASSERT_EQ(newvalue, three);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("foobar");
   ASSERT_EQ(newvalue, three);
@@ -568,7 +571,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   ASSERT_OK(Flush());
   newvalue = Get("barfoo");
   ASSERT_EQ(newvalue, four);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("barfoo");
   ASSERT_EQ(newvalue, four);
 }
@@ -590,21 +593,21 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
     for (int i = 0; i < num_keys_per_file; i++) {
       char key[100];
       snprintf(key, sizeof(key), "B%08d%02d", i, j);
-      Put(key, value);
+      ASSERT_OK(Put(key, value));
     }
-    dbfull()->TEST_FlushMemTable();
+    ASSERT_OK(dbfull()->TEST_FlushMemTable());
     // Make sure next file is much smaller so automatic compaction will not
     // be triggered.
     num_keys_per_file /= 2;
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Force a manual compaction
   cfilter_count = 0;
   filter->expect_manual_compaction_.store(true);
   filter->expect_full_compaction_.store(true);
   filter->expect_cf_id_.store(0);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ(cfilter_count, 700);
   ASSERT_EQ(NumSortedRuns(0), 1);
   ASSERT_TRUE(filter->compaction_filter_created());
@@ -654,14 +657,14 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) {
     for (int i = 0; i < num_keys_per_file; i++) {
       char key[100];
       snprintf(key, sizeof(key), "B%08d%02d", i, j);
-      Put(1, key, value);
+      ASSERT_OK(Put(1, key, value));
     }
-    Flush(1);
+    ASSERT_OK(Flush(1));
     // Make sure next file is much smaller so automatic compaction will not
     // be triggered.
     num_keys_per_file /= 2;
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_TRUE(filter->compaction_filter_created());
 }
@@ -680,9 +683,9 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
   const Snapshot* snapshot = nullptr;
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10; ++i) {
-      Put(ToString(table * 100 + i), "val");
+      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
     }
-    Flush();
+    ASSERT_OK(Flush());

     if (table == 0) {
       snapshot = db_->GetSnapshot();
@@ -702,6 +705,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
   read_options.snapshot = snapshot;
   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
   iter->SeekToFirst();
+  ASSERT_OK(iter->status());
   int count = 0;
   while (iter->Valid()) {
     count++;
@@ -710,6 +714,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
   ASSERT_EQ(count, 6);
   read_options.snapshot = nullptr;
   std::unique_ptr<Iterator> iter1(db_->NewIterator(read_options));
+  ASSERT_OK(iter1->status());
   iter1->SeekToFirst();
   count = 0;
   while (iter1->Valid()) {
@@ -740,9 +745,9 @@ TEST_F(DBTestCompactionFilter, SkipUntil) {
     for (int i = table * 6; i < 39 + table * 11; ++i) {
       char key[100];
       snprintf(key, sizeof(key), "%010d", table * 100 + i);
-      Put(key, std::to_string(table * 1000 + i));
+      ASSERT_OK(Put(key, std::to_string(table * 1000 + i)));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }

   cfilter_skips = 0;
@@ -781,10 +786,10 @@ TEST_F(DBTestCompactionFilter, SkipUntilWithBloomFilter) {
   options.create_if_missing = true;
   DestroyAndReopen(options);

-  Put("0000000010", "v10");
-  Put("0000000020", "v20");  // skipped
-  Put("0000000050", "v50");
-  Flush();
+  ASSERT_OK(Put("0000000010", "v10"));
+  ASSERT_OK(Put("0000000020", "v20"));  // skipped
+  ASSERT_OK(Put("0000000050", "v50"));
+  ASSERT_OK(Flush());

   cfilter_skips = 0;
   EXPECT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@@ -822,13 +827,13 @@ TEST_F(DBTestCompactionFilter, IgnoreSnapshotsFalse) {
   options.compaction_filter = new TestNotSupportedFilter();
   DestroyAndReopen(options);

-  Put("a", "v10");
-  Put("z", "v20");
-  Flush();
+  ASSERT_OK(Put("a", "v10"));
+  ASSERT_OK(Put("z", "v20"));
+  ASSERT_OK(Flush());

-  Put("a", "v10");
-  Put("z", "v20");
-  Flush();
+  ASSERT_OK(Put("a", "v10"));
+  ASSERT_OK(Put("z", "v20"));
+  ASSERT_OK(Flush());

   // Comapction should fail because IgnoreSnapshots() = false
   EXPECT_TRUE(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)

db/db_compaction_test.cc
(file diff suppressed because it is too large: 626 lines changed)

db/db_dynamic_level_test.cc
@@ -102,7 +102,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) {
     }

     // Test compact range works
-    dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+    ASSERT_OK(
+        dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
     // All data should be in the last level.
     ColumnFamilyMetaData cf_meta;
     db_->GetColumnFamilyMetaData(&cf_meta);
@@ -166,8 +167,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
   }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(4U, int_prop);
@@ -184,8 +185,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
   }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(3U, int_prop);
   ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop));
@@ -205,8 +206,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
   }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(3U, int_prop);
@@ -234,8 +235,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   }));

   TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:0");
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(2U, int_prop);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
@@ -264,7 +265,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   }

   TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:2");
-  Flush();
+  ASSERT_OK(Flush());

   thread.join();
@@ -302,7 +303,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
   DestroyAndReopen(options);

   // Compact against empty DB
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));

   uint64_t int_prop;
   std::string str_prop;
@@ -316,13 +317,13 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
     ASSERT_OK(
         Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))), rnd.RandomString(80)));
   }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   if (NumTableFilesAtLevel(0) == 0) {
     // Make sure level 0 is not empty
     ASSERT_OK(
         Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))), rnd.RandomString(80)));
-    Flush();
+    ASSERT_OK(Flush());
   }

   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
@@ -343,7 +344,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
       });
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ(output_levels.size(), 2);
   ASSERT_TRUE(output_levels.find(3) != output_levels.end());
   ASSERT_TRUE(output_levels.find(4) != output_levels.end());
@@ -389,8 +390,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) {
     PutFixed32(&value, static_cast<uint32_t>(i));
     ASSERT_OK(Put(Key(i), value));
   }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();

   ASSERT_EQ(non_trivial, 0);
@@ -449,7 +450,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
     ASSERT_OK(Delete(Key(i / 10)));
   }
   verify_func(total_keys, false);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   options.level_compaction_dynamic_level_bytes = true;
   options.disable_auto_compactions = true;
@@ -464,7 +465,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
     CompactRangeOptions compact_options;
     compact_options.change_level = true;
     compact_options.target_level = options.num_levels - 1;
-    dbfull()->CompactRange(compact_options, nullptr, nullptr);
+    ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
     compaction_finished.store(true);
   });
   do {
@@ -484,7 +485,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
   }
   verify_func(total_keys2, false);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   verify_func(total_keys2, false);

   // Base level is not level 1

db/db_impl/db_impl.cc
@@ -306,19 +306,22 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) {
   mutex_.AssertHeld();
   WaitForBackgroundWork();

-  Status bg_error = error_handler_.GetBGError();
   Status s;
   if (shutdown_initiated_) {
     // Returning shutdown status to SFM during auto recovery will cause it
     // to abort the recovery and allow the shutdown to progress
     s = Status::ShutdownInProgress();
   }
-  if (s.ok() && bg_error.severity() > Status::Severity::kHardError) {
-    ROCKS_LOG_INFO(
-        immutable_db_options_.info_log,
-        "DB resume requested but failed due to Fatal/Unrecoverable error");
-    s = bg_error;
+
+  if (s.ok()) {
+    Status bg_error = error_handler_.GetBGError();
+    if (bg_error.severity() > Status::Severity::kHardError) {
+      ROCKS_LOG_INFO(
+          immutable_db_options_.info_log,
+          "DB resume requested but failed due to Fatal/Unrecoverable error");
+      s = bg_error;
+    }
   }

   // Make sure the IO Status stored in version set is set to OK.
   bool file_deletion_disabled = !IsFileDeletionsEnabled();
@@ -392,6 +395,11 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) {
   FindObsoleteFiles(&job_context, true);
   if (s.ok()) {
     s = error_handler_.ClearBGError();
+  } else {
+    // NOTE: this is needed to pass ASSERT_STATUS_CHECKED
+    // in the DBSSTTest.DBWithMaxSpaceAllowedRandomized test.
+    // See https://github.com/facebook/rocksdb/pull/7715#issuecomment-754947952
+    error_handler_.GetRecoveryError().PermitUncheckedError();
   }
   mutex_.Unlock();
@@ -408,6 +416,12 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) {
     if (file_deletion_disabled) {
       // Always return ok
       s = EnableFileDeletions(/*force=*/true);
+      if (!s.ok()) {
+        ROCKS_LOG_INFO(
+            immutable_db_options_.info_log,
+            "DB resume requested but could not enable file deletions [%s]",
+            s.ToString().c_str());
+      }
     }
     ROCKS_LOG_INFO(immutable_db_options_.info_log, "Successfully resumed DB");
   }
@@ -3573,7 +3587,7 @@ Status DBImpl::DeleteFile(std::string name) {
 Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
                                    const RangePtr* ranges, size_t n,
                                    bool include_end) {
-  Status status;
+  Status status = Status::OK();
   auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
   ColumnFamilyData* cfd = cfh->cfd();
   VersionEdit edit;
@@ -3632,7 +3646,7 @@ Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
   }
   if (edit.GetDeletedFiles().empty()) {
     job_context.Clean();
-    return Status::OK();
+    return status;
   }
   input_version->Ref();
   status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),

db/db_impl/db_impl_compaction_flush.cc
@@ -35,8 +35,10 @@ bool DBImpl::EnoughRoomForCompaction(
     // Pass the current bg_error_ to SFM so it can decide what checks to
     // perform. If this DB instance hasn't seen any error yet, the SFM can be
     // optimistic and not do disk space checks
-    enough_room =
-        sfm->EnoughRoomForCompaction(cfd, inputs, error_handler_.GetBGError());
+    Status bg_error = error_handler_.GetBGError();
+    enough_room = sfm->EnoughRoomForCompaction(cfd, inputs, bg_error);
+    bg_error.PermitUncheckedError();  // bg_error is just a copy of the Status
+                                      // from the error_handler_
     if (enough_room) {
       *sfm_reserved_compact_space = true;
     }

db/db_io_failure_test.cc
@@ -43,11 +43,15 @@ TEST_F(DBIOFailureTest, DropWrites) {
         if (level > 0 && level == dbfull()->NumberLevels() - 1) {
           break;
         }
-        dbfull()->TEST_CompactRange(level, nullptr, nullptr, nullptr,
-                                    true /* disallow trivial move */);
+        Status s =
+            dbfull()->TEST_CompactRange(level, nullptr, nullptr, nullptr,
+                                        true /* disallow trivial move */);
+        ASSERT_TRUE(s.ok() || s.IsCorruption());
       }
     } else {
-      dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+      Status s =
+          dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+      ASSERT_TRUE(s.ok() || s.IsCorruption());
     }
   }
@@ -56,7 +60,8 @@ TEST_F(DBIOFailureTest, DropWrites) {
   ASSERT_EQ("5", property_value);

   env_->drop_writes_.store(false, std::memory_order_release);
-  ASSERT_LT(CountFiles(), num_files + 3);
+  const size_t count = CountFiles();
+  ASSERT_LT(count, num_files + 3);

   // Check that compaction attempts slept after errors
   // TODO @krad: Figure out why ASSERT_EQ 5 keeps failing in certain compiler
@@ -82,7 +87,8 @@ TEST_F(DBIOFailureTest, DropWritesFlush) {
   ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
   ASSERT_EQ("0", property_value);

-  dbfull()->TEST_FlushMemTable(true);
+  // ASSERT file is too short
+  ASSERT_TRUE(dbfull()->TEST_FlushMemTable(true).IsCorruption());

   ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
   ASSERT_EQ("1", property_value);
@@ -166,7 +172,7 @@ TEST_F(DBIOFailureTest, ManifestWriteError) {
   ASSERT_EQ("bar", Get("foo"));

   // Memtable compaction (will succeed)
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_EQ("bar", Get("foo"));
   const int last = 2;
   MoveFilesToLevel(2);
@@ -174,7 +180,8 @@ TEST_F(DBIOFailureTest, ManifestWriteError) {
   // Merging compaction (will fail)
   error_type->store(true, std::memory_order_release);
-  dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
+  ASSERT_NOK(
+      dbfull()->TEST_CompactRange(last, nullptr, nullptr));  // Should fail
   ASSERT_EQ("bar", Get("foo"));
   error_type->store(false, std::memory_order_release);
@@ -192,7 +199,13 @@ TEST_F(DBIOFailureTest, ManifestWriteError) {
     // Merging compaction (will fail)
     error_type->store(true, std::memory_order_release);
-    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
+    Status s =
+        dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
+    if (iter == 0) {
+      ASSERT_OK(s);
+    } else {
+      ASSERT_TRUE(s.IsIOError());
+    }
     ASSERT_EQ("bar", Get("foo"));

     // Recovery: should not lose data
@@ -220,18 +233,15 @@ TEST_F(DBIOFailureTest, PutFailsParanoid) {
   options.paranoid_checks = true;
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo1", "bar1"));
   // simulate error
   env_->log_write_error_.store(true, std::memory_order_release);
-  s = Put(1, "foo2", "bar2");
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(Put(1, "foo2", "bar2"));
   env_->log_write_error_.store(false, std::memory_order_release);
-  s = Put(1, "foo3", "bar3");
   // the next put should fail, too
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(Put(1, "foo3", "bar3"));
   // but we're still able to read
   ASSERT_EQ("bar", Get(1, "foo"));
@@ -244,12 +254,10 @@ TEST_F(DBIOFailureTest, PutFailsParanoid) {
   ASSERT_OK(Put(1, "foo1", "bar1"));
   // simulate error
   env_->log_write_error_.store(true, std::memory_order_release);
-  s = Put(1, "foo2", "bar2");
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(Put(1, "foo2", "bar2"));
   env_->log_write_error_.store(false, std::memory_order_release);
-  s = Put(1, "foo3", "bar3");
   // the next put should NOT fail
-  ASSERT_TRUE(s.ok());
+  ASSERT_OK(Put(1, "foo3", "bar3"));
 }
 #if !(defined NDEBUG) || !defined(OS_WIN)
 TEST_F(DBIOFailureTest, FlushSstRangeSyncError) {
@@ -269,14 +277,14 @@ TEST_F(DBIOFailureTest, FlushSstRangeSyncError) {
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

+  const char* io_error_msg = "range sync dummy error";
   std::atomic<int> range_sync_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::RangeSync", [&](void* arg) {
         if (range_sync_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("range sync dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
@@ -298,7 +306,9 @@ TEST_F(DBIOFailureTest, FlushSstRangeSyncError) {
   ASSERT_OK(Put(1, "foo3_2", rnd_str));
   ASSERT_OK(Put(1, "foo3_3", rnd_str));
   ASSERT_OK(Put(1, "foo4", "bar"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as flush failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -328,7 +338,6 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) {
   options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

   Random rnd(301);
   std::string rnd_str =
@@ -342,21 +351,22 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) {
   ASSERT_OK(Put(1, "foo1_1", rnd_str));
   ASSERT_OK(Put(1, "foo1_2", rnd_str));
   ASSERT_OK(Put(1, "foo1_3", rnd_str));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo3_1", rnd_str));
   ASSERT_OK(Put(1, "foo3_2", rnd_str));
   ASSERT_OK(Put(1, "foo3_3", rnd_str));
   ASSERT_OK(Put(1, "foo4", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  ASSERT_OK(Flush(1));
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));

+  const char* io_error_msg = "range sync dummy error";
   std::atomic<int> range_sync_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::RangeSync", [&](void* arg) {
         if (range_sync_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("range sync dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
@@ -365,7 +375,9 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) {
   {
       {"disable_auto_compactions", "false"},
   }));
-  dbfull()->TEST_WaitForCompact();
+  Status s = dbfull()->TEST_WaitForCompact();
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as flush failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -389,13 +401,14 @@ TEST_F(DBIOFailureTest, FlushSstCloseError) {
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

+  const char* io_error_msg = "close dummy error";
   std::atomic<int> close_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::Close", [&](void* arg) {
         if (close_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });
@@ -404,7 +417,9 @@ TEST_F(DBIOFailureTest, FlushSstCloseError) {
   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo1", "bar1"));
   ASSERT_OK(Put(1, "foo", "bar2"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as flush failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -429,25 +444,25 @@ TEST_F(DBIOFailureTest, CompactionSstCloseError) {
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar2"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar3"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush(1));
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

+  const char* io_error_msg = "close dummy error";
   std::atomic<int> close_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::Close", [&](void* arg) {
         if (close_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });
@@ -456,7 +471,9 @@ TEST_F(DBIOFailureTest, CompactionSstCloseError) {
   {
       {"disable_auto_compactions", "false"},
   }));
-  dbfull()->TEST_WaitForCompact();
+  Status s = dbfull()->TEST_WaitForCompact();
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as compaction failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -480,13 +497,14 @@ TEST_F(DBIOFailureTest, FlushSstSyncError) {
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

+  const char* io_error_msg = "sync dummy error";
   std::atomic<int> sync_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::Sync", [&](void* arg) {
         if (sync_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("sync dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });
@@ -495,7 +513,9 @@ TEST_F(DBIOFailureTest, FlushSstSyncError) {
   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo1", "bar1"));
   ASSERT_OK(Put(1, "foo", "bar2"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as flush failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -521,25 +541,25 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) {
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar2"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar3"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush(1));
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

+  const char* io_error_msg = "sync dummy error";
   std::atomic<int> sync_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::Sync", [&](void* arg) {
         if (sync_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });
@@ -548,7 +568,9 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) {
   {
       {"disable_auto_compactions", "false"},
   }));
-  dbfull()->TEST_WaitForCompact();
+  Status s = dbfull()->TEST_WaitForCompact();
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as compaction failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));

db/db_sst_test.cc
@@ -98,7 +98,7 @@ TEST_F(DBSSTTest, SSTsWithLdbSuffixHandling) {
   for (int i = 0; i < 10; ++i) {
     GenerateNewFile(&rnd, &key_id, false);
   }
-  Flush();
+  ASSERT_OK(Flush());
   Close();
   int const num_files = GetSstFileCount(dbname_);
   ASSERT_GT(num_files, 0);
@@ -393,7 +393,7 @@ TEST_F(DBSSTTest, RateLimitedDelete) {
   WriteOptions wo;
   wo.disableWAL = true;

-  ASSERT_OK(TryReopen(options));
+  Reopen(options);
   // Create 4 files in L0
   for (char v = 'a'; v <= 'd'; v++) {
     ASSERT_OK(Put("Key2", DummyString(1024, v), wo));
@@ -540,7 +540,7 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) {
   auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
   sfm->delete_scheduler()->SetMaxTrashDBRatio(3.1);

-  ASSERT_OK(TryReopen(options));
+  Reopen(options);

   // Create 4 files in L0
   for (char v = 'a'; v <= 'd'; v++) {
@@ -567,11 +567,11 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) {
   if (!wal_dir_same_as_dbname_) {
     // Forcibly create some trash log files
     std::unique_ptr<WritableFile> result;
-    env->NewWritableFile(options.wal_dir + "/1000.log.trash", &result,
-                         EnvOptions());
+    ASSERT_OK(env->NewWritableFile(options.wal_dir + "/1000.log.trash",
+                                   &result, EnvOptions()));
     result.reset();
   }
-  env->GetChildren(options.wal_dir, &filenames);
+  ASSERT_OK(env->GetChildren(options.wal_dir, &filenames));
   for (const std::string& fname : filenames) {
     if (fname.find(".log.trash") != std::string::npos) {
       trash_log_count++;
@@ -580,11 +580,11 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) {
   ASSERT_GE(trash_log_count, 1);

   env->set_fake_log_delete(false);
-  ASSERT_OK(TryReopen(options));
+  Reopen(options);

   filenames.clear();
   trash_log_count = 0;
-  env->GetChildren(options.wal_dir, &filenames);
+  ASSERT_OK(env->GetChildren(options.wal_dir, &filenames));
   for (const std::string& fname : filenames) {
     if (fname.find(".log.trash") != std::string::npos) {
       trash_log_count++;
@@ -614,7 +614,7 @@ TEST_F(DBSSTTest, OpenDBWithExistingTrash) {
   ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "003.sst.trash"));

   // Reopen the DB and verify that it deletes existing trash files
-  ASSERT_OK(TryReopen(options));
+  Reopen(options);
   sfm->WaitForEmptyTrash();
   ASSERT_NOK(env_->FileExists(dbname_ + "/" + "001.sst.trash"));
   ASSERT_NOK(env_->FileExists(dbname_ + "/" + "002.sst.trash"));
@@ -872,10 +872,12 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
   ASSERT_OK(Flush());

   // OK, now trigger a manual compaction
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_TRUE(dbfull()
+                  ->CompactRange(CompactRangeOptions(), nullptr, nullptr)
+                  .IsCompactionTooLarge());

   // Wait for manual compaction to get scheduled and finish
-  dbfull()->TEST_WaitForCompact(true);
+  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));

   ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0);
   // Make sure the stat is bumped
@@ -885,10 +887,13 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
   // Now make sure CompactFiles also gets cancelled
   auto l0_files = collector->GetFlushedFiles();
-  dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0);
+  ASSERT_TRUE(
+      dbfull()
+          ->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0)
+          .IsCompactionTooLarge());

   // Wait for manual compaction to get scheduled and finish
-  dbfull()->TEST_WaitForCompact(true);
+  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));

   ASSERT_EQ(dbfull()->immutable_db_options().statistics.get()->getTickerCount(
                 COMPACTION_CANCELLED),
@@ -903,8 +908,9 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
       "CompactFilesImpl:End", [&](void* /*arg*/) { completed_compactions++; });
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

-  dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0);
-  dbfull()->TEST_WaitForCompact(true);
+  ASSERT_OK(dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(),
+                                   l0_files, 0));
+  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));

   ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0);
   ASSERT_GT(completed_compactions, 0);
@@ -1008,7 +1014,7 @@ TEST_F(DBSSTTest, OpenDBWithInfiniteMaxOpenFiles) {
   CompactRangeOptions compact_options;
   compact_options.change_level = true;
   compact_options.target_level = 2;
-  db_->CompactRange(compact_options, nullptr, nullptr);
+  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));

   // Create 12 Files in L0
   for (int i = 0; i < 12; i++) {
@@ -1060,7 +1066,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
       std::string val = "val_file_" + ToString(i);
       ASSERT_OK(Put(Key(j), val));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }
   ASSERT_EQ("5", FilesPerLevel(0));
@@ -1084,6 +1090,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
   // hold current version
   std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
+  ASSERT_OK(iter1->status());

   // Compact 5 files into 1 file in L0
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@@ -1107,12 +1114,13 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
   // hold current version
   std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
+  ASSERT_OK(iter2->status());

   // Delete all keys and compact, this will delete all live files
   for (int i = 0; i < 10; i++) {
     ASSERT_OK(Delete(Key(i)));
   }
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ("", FilesPerLevel(0));
@@ -1126,6 +1134,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
   // Total SST files = 6 (5 original files + compacted file)
   ASSERT_EQ(total_sst_files_size, 6 * single_file_size);

+  ASSERT_OK(iter1->status());
   iter1.reset();
   ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
                                        &total_sst_files_size));
@@ -1133,6 +1142,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
   // Total SST files = 1 (compacted file)
   ASSERT_EQ(total_sst_files_size, 1 * single_file_size);

+  ASSERT_OK(iter2->status());
   iter2.reset();
   ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
                                        &total_sst_files_size));
@@ -1151,7 +1161,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {
   // Generate 5 files in L0
   for (int i = 0; i < 5; i++) {
     ASSERT_OK(Put(Key(i), "val"));
-    Flush();
+    ASSERT_OK(Flush());
   }
   ASSERT_EQ("5", FilesPerLevel(0));
@@ -1176,6 +1186,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {
   // hold current version
   std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
+  ASSERT_OK(iter1->status());

   // Compaction will do trivial move from L0 to L1
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@@ -1199,12 +1210,13 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {
   // hold current version
   std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
+  ASSERT_OK(iter2->status());

   // Delete all keys and compact, this will delete all live files
   for (int i = 0; i < 5; i++) {
     ASSERT_OK(Delete(Key(i)));
   }
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ("", FilesPerLevel(0));
@@ -1218,7 +1230,9 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {
// Total SST files = 5 (used in 2 version) // Total SST files = 5 (used in 2 version)
ASSERT_EQ(total_sst_files_size, 5 * single_file_size); ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
ASSERT_OK(iter1->status());
iter1.reset(); iter1.reset();
ASSERT_OK(iter2->status());
iter2.reset(); iter2.reset();
ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size", ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
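The pattern these test hunks enforce is worth calling out: once the `SstFileManager` cannot reserve enough room, a manual compaction now fails fast with `Status::CompactionTooLarge()` rather than being silently dropped. A minimal caller-side sketch, assuming `db` is an open `rocksdb::DB*` (the function name is illustrative):

```cpp
#include "rocksdb/db.h"

// Minimal sketch, assuming `db` is an open rocksdb::DB* whose
// SstFileManager space limit may be exceeded.
void TryManualCompaction(rocksdb::DB* db) {
  rocksdb::Status s =
      db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
  if (s.IsCompactionTooLarge()) {
    // Rejected up front: free disk space or raise the SstFileManager
    // limit before retrying.
  }
}
```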

db/db_tailing_iter_test.cc

@@ -31,6 +31,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorSingle) {
   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
   iter->SeekToFirst();
   ASSERT_TRUE(!iter->Valid());
+  ASSERT_OK(iter->status());

   // add a record and check that iter can see it
   ASSERT_OK(db_->Put(WriteOptions(), "mirko", "fodor"));
@@ -48,6 +49,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorKeepAdding) {
   read_options.tailing = true;

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   std::string value(1024, 'a');

   const int num_records = 10000;
@@ -70,7 +72,9 @@ TEST_F(DBTestTailingIterator, TailingIteratorSeekToNext) {
   read_options.tailing = true;

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   std::unique_ptr<Iterator> itern(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(itern->status());
   std::string value(1024, 'a');

   const int num_records = 1000;
@@ -138,8 +142,11 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
   Slice keyu(bufe, 20);
   read_options.iterate_upper_bound = &keyu;
   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   std::unique_ptr<Iterator> itern(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(itern->status());
   std::unique_ptr<Iterator> iterh(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iterh->status());
   std::string value(1024, 'a');
   bool file_iters_deleted = false;
   bool file_iters_renewed_null = false;
@@ -225,6 +232,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
   read_options.read_tier = kBlockCacheTier;
   std::unique_ptr<Iterator> iteri(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iteri->status());
   char buf5[32];
   snprintf(buf5, sizeof(buf5), "00a0%016d", (num_records / 2) * 5 - 2);
   Slice target1(buf5, 20);
@@ -236,6 +244,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
   options.table_factory.reset(NewBlockBasedTableFactory());
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
   iter.reset(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   for (int i = 2 * num_records; i > 0; --i) {
     char buf1[32];
     char buf2[32];
@@ -262,6 +271,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorDeletes) {
   read_options.tailing = true;

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());

   // write a single record, read it using the iterator, then delete it
   ASSERT_OK(Put(1, "0test", "test"));
@@ -309,6 +319,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorPrefixSeek) {
   CreateAndReopenWithCF({"pikachu"}, options);

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   ASSERT_OK(Put(1, "0101", "test"));

   ASSERT_OK(Flush(1));
@@ -339,6 +350,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorIncomplete) {
   ASSERT_OK(db_->Put(WriteOptions(), key, value));

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
+  ASSERT_OK(iter->status());
   iter->SeekToFirst();
   // we either see the entry or it's not in cache
   ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete());
@@ -369,6 +381,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorSeekToSame) {
   }

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
+  ASSERT_OK(iter->status());
   // Seek to 00001. We expect to find 00002.
   std::string start_key = "00001";
   iter->Seek(start_key);
@@ -404,6 +417,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
   ASSERT_OK(Put(1, "21", "21"));

   std::unique_ptr<Iterator> it(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(it->status());
   it->Seek("12");
   ASSERT_TRUE(it->Valid());
   ASSERT_EQ("12", it->key().ToString());
@@ -479,6 +493,8 @@ TEST_F(DBTestTailingIterator, TailingIteratorGap) {
   it->Next();
   ASSERT_TRUE(it->Valid());
   ASSERT_EQ("40", it->key().ToString());
+
+  ASSERT_OK(it->status());
 }

 TEST_F(DBTestTailingIterator, SeekWithUpperBoundBug) {
@@ -497,6 +513,7 @@ TEST_F(DBTestTailingIterator, SeekWithUpperBoundBug) {
   ASSERT_OK(Flush());

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
+  ASSERT_OK(iter->status());
   iter->Seek("aa");
   ASSERT_TRUE(iter->Valid());
@@ -519,6 +536,7 @@ TEST_F(DBTestTailingIterator, SeekToFirstWithUpperBoundBug) {
   ASSERT_OK(Flush());

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
+  ASSERT_OK(iter->status());
   iter->SeekToFirst();

   ASSERT_TRUE(iter->Valid());
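Every hunk above applies the same discipline: check `iter->status()` right after creating an iterator, and again once it stops being valid. A hedged sketch of that pattern outside the test harness (`db` and the function name are illustrative):

```cpp
#include <cassert>
#include <memory>

#include "rocksdb/db.h"

// Minimal sketch, assuming `db` is an open rocksdb::DB*.
void ScanAll(rocksdb::DB* db) {
  std::unique_ptr<rocksdb::Iterator> iter(
      db->NewIterator(rocksdb::ReadOptions()));
  assert(iter->status().ok());  // creation itself can carry an error
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    // consume iter->key() / iter->value()
  }
  assert(iter->status().ok());  // distinguishes end-of-data from an error
}
```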

db/db_test.cc

@@ -1337,17 +1337,19 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   SizeApproximationOptions size_approx_options;
   size_approx_options.include_memtabtles = true;
   size_approx_options.include_files = true;
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
   ASSERT_GT(size, 6000);
   ASSERT_LT(size, 204800);
   // Zero if not including mem table
-  db_->GetApproximateSizes(&r, 1, &size);
+  ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size));
   ASSERT_EQ(size, 0);

   start = Key(500);
   end = Key(600);
   r = Range(start, end);
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
   ASSERT_EQ(size, 0);

   for (int i = 0; i < N; i++) {
@@ -1357,13 +1359,15 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   start = Key(500);
   end = Key(600);
   r = Range(start, end);
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
   ASSERT_EQ(size, 0);

   start = Key(100);
   end = Key(1020);
   r = Range(start, end);
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
   ASSERT_GT(size, 6000);

   options.max_write_buffer_number = 8;
@@ -1389,29 +1393,32 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   start = Key(100);
   end = Key(300);
   r = Range(start, end);
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
   ASSERT_EQ(size, 0);

   start = Key(1050);
   end = Key(1080);
   r = Range(start, end);
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
   ASSERT_GT(size, 6000);

   start = Key(2100);
   end = Key(2300);
   r = Range(start, end);
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
   ASSERT_EQ(size, 0);

   start = Key(1050);
   end = Key(1080);
   r = Range(start, end);
   uint64_t size_with_mt, size_without_mt;
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
-                           &size_with_mt);
+  ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
+                                     &size_with_mt));
   ASSERT_GT(size_with_mt, 6000);
-  db_->GetApproximateSizes(&r, 1, &size_without_mt);
+  ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size_without_mt));
   ASSERT_EQ(size_without_mt, 0);

   Flush();
@@ -1423,15 +1430,16 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   start = Key(1050);
   end = Key(1080);
   r = Range(start, end);
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
-                           &size_with_mt);
-  db_->GetApproximateSizes(&r, 1, &size_without_mt);
+  ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
+                                     &size_with_mt));
+  ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size_without_mt));
   ASSERT_GT(size_with_mt, size_without_mt);
   ASSERT_GT(size_without_mt, 6000);

   // Check that include_memtabtles flag works as expected
   size_approx_options.include_memtabtles = false;
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
   ASSERT_EQ(size, size_without_mt);

   // Check that files_size_error_margin works as expected, when the heuristic
@@ -1440,10 +1448,12 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   end = Key(1000 + N - 2);
   r = Range(start, end);
   size_approx_options.files_size_error_margin = -1.0;  // disabled
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));

   uint64_t size2;
   size_approx_options.files_size_error_margin = 0.5;  // enabled, but not used
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
+  ASSERT_OK(
+      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2));
   ASSERT_EQ(size, size2);
 }
@@ -1494,14 +1504,16 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
   // Get the precise size without any approximation heuristic
   uint64_t size;
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+  ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
+                                     &size));
   ASSERT_NE(size, 0);

   // Get the size with an approximation heuristic
   uint64_t size2;
   const double error_margin = 0.2;
   size_approx_options.files_size_error_margin = error_margin;
-  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
+  ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
+                                     &size2));
   ASSERT_LT(size2, size * (1 + error_margin));
   ASSERT_GT(size2, size * (1 - error_margin));
 }
@@ -1517,7 +1529,7 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
     const std::string end = Key(i + 11);  // overlap by 1 key
     const Range r(start, end);
     uint64_t size;
-    db_->GetApproximateSizes(&r, 1, &size);
+    ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size));
     ASSERT_LE(size, 11 * 100);
   }
 }
@@ -1585,9 +1597,12 @@ TEST_F(DBTest, ApproximateSizes) {
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);

-  ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
+  uint64_t size;
+  ASSERT_OK(Size("", "xyz", 1, &size));
+  ASSERT_TRUE(Between(size, 0, 0));
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
+  ASSERT_OK(Size("", "xyz", 1, &size));
+  ASSERT_TRUE(Between(size, 0, 0));

   // Write 8MB (80 values, each 100K)
   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
@@ -1600,7 +1615,8 @@ TEST_F(DBTest, ApproximateSizes) {
   }

   // 0 because GetApproximateSizes() does not account for memtable space
-  ASSERT_TRUE(Between(Size("", Key(50), 1), 0, 0));
+  ASSERT_OK(Size("", Key(50), 1, &size));
+  ASSERT_TRUE(Between(size, 0, 0));

   // Check sizes across recovery by reopening a few times
   for (int run = 0; run < 3; run++) {
@@ -1608,14 +1624,17 @@ TEST_F(DBTest, ApproximateSizes) {
     for (int compact_start = 0; compact_start < N; compact_start += 10) {
       for (int i = 0; i < N; i += 10) {
-        ASSERT_TRUE(Between(Size("", Key(i), 1), S1 * i, S2 * i));
-        ASSERT_TRUE(Between(Size("", Key(i) + ".suffix", 1), S1 * (i + 1),
-                            S2 * (i + 1)));
-        ASSERT_TRUE(Between(Size(Key(i), Key(i + 10), 1), S1 * 10, S2 * 10));
+        ASSERT_OK(Size("", Key(i), 1, &size));
+        ASSERT_TRUE(Between(size, S1 * i, S2 * i));
+        ASSERT_OK(Size("", Key(i) + ".suffix", 1, &size));
+        ASSERT_TRUE(Between(size, S1 * (i + 1), S2 * (i + 1)));
+        ASSERT_OK(Size(Key(i), Key(i + 10), 1, &size));
+        ASSERT_TRUE(Between(size, S1 * 10, S2 * 10));
       }
-      ASSERT_TRUE(Between(Size("", Key(50), 1), S1 * 50, S2 * 50));
-      ASSERT_TRUE(
-          Between(Size("", Key(50) + ".suffix", 1), S1 * 50, S2 * 50));
+      ASSERT_OK(Size("", Key(50), 1, &size));
+      ASSERT_TRUE(Between(size, S1 * 50, S2 * 50));
+      ASSERT_OK(Size("", Key(50) + ".suffix", 1, &size));
+      ASSERT_TRUE(Between(size, S1 * 50, S2 * 50));

       std::string cstart_str = Key(compact_start);
       std::string cend_str = Key(compact_start + 9);
@@ -1650,21 +1669,32 @@ TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
   ASSERT_OK(Put(1, Key(7), rnd.RandomString(10000)));

   // Check sizes across recovery by reopening a few times
+  uint64_t size;
   for (int run = 0; run < 3; run++) {
     ReopenWithColumnFamilies({"default", "pikachu"}, options);
-    ASSERT_TRUE(Between(Size("", Key(0), 1), 0, 0));
-    ASSERT_TRUE(Between(Size("", Key(1), 1), 10000, 11000));
-    ASSERT_TRUE(Between(Size("", Key(2), 1), 20000, 21000));
-    ASSERT_TRUE(Between(Size("", Key(3), 1), 120000, 121000));
-    ASSERT_TRUE(Between(Size("", Key(4), 1), 130000, 131000));
-    ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 232000));
-    ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 242000));
+    ASSERT_OK(Size("", Key(0), 1, &size));
+    ASSERT_TRUE(Between(size, 0, 0));
+    ASSERT_OK(Size("", Key(1), 1, &size));
+    ASSERT_TRUE(Between(size, 10000, 11000));
+    ASSERT_OK(Size("", Key(2), 1, &size));
+    ASSERT_TRUE(Between(size, 20000, 21000));
+    ASSERT_OK(Size("", Key(3), 1, &size));
+    ASSERT_TRUE(Between(size, 120000, 121000));
+    ASSERT_OK(Size("", Key(4), 1, &size));
+    ASSERT_TRUE(Between(size, 130000, 131000));
+    ASSERT_OK(Size("", Key(5), 1, &size));
+    ASSERT_TRUE(Between(size, 230000, 232000));
+    ASSERT_OK(Size("", Key(6), 1, &size));
+    ASSERT_TRUE(Between(size, 240000, 242000));
     // Ensure some overhead is accounted for, even without including all
-    ASSERT_TRUE(Between(Size("", Key(7), 1), 540500, 545000));
-    ASSERT_TRUE(Between(Size("", Key(8), 1), 550500, 555000));
+    ASSERT_OK(Size("", Key(7), 1, &size));
+    ASSERT_TRUE(Between(size, 540500, 545000));
+    ASSERT_OK(Size("", Key(8), 1, &size));
+    ASSERT_TRUE(Between(size, 550500, 555000));

-    ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110100, 111000));
+    ASSERT_OK(Size(Key(3), Key(5), 1, &size));
+    ASSERT_TRUE(Between(size, 110100, 111000));

     dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
   }
@@ -1748,6 +1778,7 @@ TEST_F(DBTest, Snapshot) {
 TEST_F(DBTest, HiddenValuesAreRemoved) {
   anon::OptionsOverride options_override;
   options_override.skip_policy = kSkipNoSnapshot;
+  uint64_t size;
   do {
     Options options = CurrentOptions(options_override);
     CreateAndReopenWithCF({"pikachu"}, options);
@@ -1765,7 +1796,8 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
     ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);

     ASSERT_EQ(big, Get(1, "foo", snapshot));
-    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 50000, 60000));
+    ASSERT_OK(Size("", "pastfoo", 1, &size));
+    ASSERT_TRUE(Between(size, 50000, 60000));
     db_->ReleaseSnapshot(snapshot);
     ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny, " + big + " ]");
     Slice x("x");
@@ -1776,7 +1808,8 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
     dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]);
     ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");

-    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 0, 1000));
+    ASSERT_OK(Size("", "pastfoo", 1, &size));
+    ASSERT_TRUE(Between(size, 0, 1000));
     // ApproximateOffsetOf() is not yet implemented in plain table format,
     // which is used by Size().
   } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |

db/db_test_util.cc

@@ -1128,27 +1128,48 @@ std::string DBTestBase::FilesPerLevel(int cf) {
 #endif  // !ROCKSDB_LITE

 size_t DBTestBase::CountFiles() {
+  size_t count = 0;
   std::vector<std::string> files;
-  EXPECT_OK(env_->GetChildren(dbname_, &files));
+  if (env_->GetChildren(dbname_, &files).ok()) {
+    count += files.size();
+  }

-  std::vector<std::string> logfiles;
   if (dbname_ != last_options_.wal_dir) {
-    Status s = env_->GetChildren(last_options_.wal_dir, &logfiles);
-    EXPECT_TRUE(s.ok() || s.IsNotFound());
+    if (env_->GetChildren(last_options_.wal_dir, &files).ok()) {
+      count += files.size();
+    }
   }

-  return files.size() + logfiles.size();
+  return count;
+};
+
+Status DBTestBase::CountFiles(size_t* count) {
+  std::vector<std::string> files;
+  Status s = env_->GetChildren(dbname_, &files);
+  if (!s.ok()) {
+    return s;
+  }
+  size_t files_count = files.size();
+
+  if (dbname_ != last_options_.wal_dir) {
+    s = env_->GetChildren(last_options_.wal_dir, &files);
+    if (!s.ok()) {
+      return s;
+    }
+    *count = files_count + files.size();
+  }
+
+  return Status::OK();
 }

-uint64_t DBTestBase::Size(const Slice& start, const Slice& limit, int cf) {
+Status DBTestBase::Size(const Slice& start, const Slice& limit, int cf,
+                        uint64_t* size) {
   Range r(start, limit);
-  uint64_t size;
   if (cf == 0) {
-    db_->GetApproximateSizes(&r, 1, &size);
+    return db_->GetApproximateSizes(&r, 1, size);
   } else {
-    db_->GetApproximateSizes(handles_[1], &r, 1, &size);
+    return db_->GetApproximateSizes(handles_[1], &r, 1, size);
   }
-  return size;
 }

 void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit,

db/db_test_util.h

@@ -1070,7 +1070,13 @@ class DBTestBase : public testing::Test {
   size_t CountFiles();
+  Status CountFiles(size_t* count);
+
+  Status Size(const Slice& start, const Slice& limit, uint64_t* size) {
+    return Size(start, limit, 0, size);
+  }

-  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0);
+  Status Size(const Slice& start, const Slice& limit, int cf, uint64_t* size);

   void Compact(int cf, const Slice& start, const Slice& limit,
                uint32_t target_path_id);
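With both helpers now returning `Status`, call sites follow the out-parameter pattern below. This is a hypothetical fragment inside a `DBTestBase`-derived test body (`Key()` and `ASSERT_OK` come from the test harness):

```cpp
// Hypothetical usage inside a DBTestBase-derived test body.
uint64_t size = 0;
ASSERT_OK(Size(Key(0), Key(10), &size));     // default column family overload
ASSERT_OK(Size(Key(0), Key(10), 1, &size));  // explicit column family index
```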

db/db_with_timestamp_basic_test.cc

@@ -270,7 +270,7 @@ TEST_F(DBBasicTestWithTimestamp, GetApproximateSizes) {
   ASSERT_EQ(range_sizes[1], size);

   // Zero if not including mem table
-  db_->GetApproximateSizes(&r, 1, &size);
+  ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size));
   ASSERT_EQ(size, 0);

   start = Key(500);

db/event_helpers.cc

@@ -213,17 +213,16 @@ void EventHelpers::NotifyOnErrorRecoveryCompleted(
     const std::vector<std::shared_ptr<EventListener>>& listeners,
     Status old_bg_error, InstrumentedMutex* db_mutex) {
 #ifndef ROCKSDB_LITE
-  if (listeners.size() == 0U) {
-    return;
-  }
-  db_mutex->AssertHeld();
-  // release lock while notifying events
-  db_mutex->Unlock();
-  for (auto& listener : listeners) {
-    listener->OnErrorRecoveryCompleted(old_bg_error);
-  }
-  old_bg_error.PermitUncheckedError();
-  db_mutex->Lock();
+  if (listeners.size() > 0) {
+    db_mutex->AssertHeld();
+    // release lock while notifying events
+    db_mutex->Unlock();
+    for (auto& listener : listeners) {
+      listener->OnErrorRecoveryCompleted(old_bg_error);
+    }
+    db_mutex->Lock();
+  }
+  old_bg_error.PermitUncheckedError();
 #else
   (void)listeners;
   (void)old_bg_error;
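The `PermitUncheckedError()` call is what keeps ASSERT_STATUS_CHECKED builds quiet here: it marks a `Status` as inspected so its destructor does not abort. A minimal self-contained sketch of that idiom (the function and the status value are illustrative):

```cpp
#include "rocksdb/status.h"

void IgnoreResultExample() {
  // Illustrative status that we deliberately choose not to act on.
  rocksdb::Status s = rocksdb::Status::NoSpace();
  // Mark it as checked so an ASSERT_STATUS_CHECKED build does not abort
  // when `s` goes out of scope unexamined.
  s.PermitUncheckedError();
}
```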

file/delete_scheduler.cc

@@ -98,11 +98,13 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path,
   // Update the total trash size
   uint64_t trash_file_size = 0;
-  Status ignored =
-      fs_->GetFileSize(trash_file, IOOptions(), &trash_file_size, nullptr);
-  ignored.PermitUncheckedError();  //**TODO: What should we do if we failed to
-                                   // get the file size?
-  total_trash_size_.fetch_add(trash_file_size);
+  IOStatus io_s =
+      fs_->GetFileSize(trash_file, IOOptions(), &trash_file_size, nullptr);
+  if (io_s.ok()) {
+    total_trash_size_.fetch_add(trash_file_size);
+  }
+  //**TODO: What should we do if we failed to
+  // get the file size?

   // Add file to delete queue
   {
@@ -199,9 +201,7 @@ Status DeleteScheduler::MarkAsTrash(const std::string& file_path,
     cnt++;
   }
   if (s.ok()) {
-    //**TODO: What should we do if this returns an error?
-    sst_file_manager_->OnMoveFile(file_path, *trash_file)
-        .PermitUncheckedError();
+    s = sst_file_manager_->OnMoveFile(file_path, *trash_file);
   }
   return s;
 }

file/sst_file_manager_impl.cc

@@ -158,7 +158,7 @@ bool SstFileManagerImpl::IsMaxAllowedSpaceReachedIncludingCompactions() {

 bool SstFileManagerImpl::EnoughRoomForCompaction(
     ColumnFamilyData* cfd, const std::vector<CompactionInputFiles>& inputs,
-    Status bg_error) {
+    const Status& bg_error) {
   MutexLock l(&mu_);
   uint64_t size_added_by_compaction = 0;
   // First check if we even have the space to do the compaction
@@ -183,7 +183,7 @@ bool SstFileManagerImpl::EnoughRoomForCompaction(
   // seen a NoSpace() error. This is tin order to contain a single potentially
   // misbehaving DB instance and prevent it from slowing down compactions of
   // other DB instances
-  if (bg_error == Status::NoSpace() && CheckFreeSpace()) {
+  if (bg_error.IsNoSpace() && CheckFreeSpace()) {
     auto fn =
         TableFileName(cfd->ioptions()->cf_paths, inputs[0][0]->fd.GetNumber(),
                       inputs[0][0]->fd.GetPathId());
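The switch from `bg_error == Status::NoSpace()` to `bg_error.IsNoSpace()` is presumably motivated by the same status-checking machinery: the equality form constructs a temporary `Status` that may be destroyed unchecked, while the predicate reads only the error code of the existing object. A small illustrative sketch (the function name is hypothetical):

```cpp
#include "rocksdb/status.h"

void ReactToBackgroundError(const rocksdb::Status& bg_error) {
  // Preferred form: no temporary Status is created, and the check reads
  // only the error code of the existing object.
  if (bg_error.IsNoSpace()) {
    // react to the out-of-space condition, e.g. pause compactions
  }
}
```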

file/sst_file_manager_impl.h

@@ -22,7 +22,7 @@ namespace ROCKSDB_NAMESPACE {
 class Env;
 class Logger;

-// SstFileManager is used to track SST files in the DB and control there
+// SstFileManager is used to track SST files in the DB and control their
 // deletion rate.
 // All SstFileManager public functions are thread-safe.
 class SstFileManagerImpl : public SstFileManager {
@@ -77,7 +77,7 @@ class SstFileManagerImpl : public SstFileManager {
   // the full compaction size).
   bool EnoughRoomForCompaction(ColumnFamilyData* cfd,
                                const std::vector<CompactionInputFiles>& inputs,
-                               Status bg_error);
+                               const Status& bg_error);

   // Bookkeeping so total_file_sizes_ goes back to normal after compaction
   // finishes

include/rocksdb/c.h

@@ -496,13 +496,13 @@ extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf(
 extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes(
     rocksdb_t* db, int num_ranges, const char* const* range_start_key,
     const size_t* range_start_key_len, const char* const* range_limit_key,
-    const size_t* range_limit_key_len, uint64_t* sizes);
+    const size_t* range_limit_key_len, uint64_t* sizes, char** errptr);

 extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf(
     rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
     int num_ranges, const char* const* range_start_key,
     const size_t* range_start_key_len, const char* const* range_limit_key,
-    const size_t* range_limit_key_len, uint64_t* sizes);
+    const size_t* range_limit_key_len, uint64_t* sizes, char** errptr);

 extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db,
                                                       const char* start_key,
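For C API users this is a breaking signature change; errors now surface through the trailing `errptr`, consistent with the rest of the C API. A minimal sketch, assuming `db` is an open `rocksdb_t*` and using a single illustrative range:

```cpp
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "rocksdb/c.h"

void approx_size_example(rocksdb_t* db) {
  const char* start_keys[1] = {"a"};
  const size_t start_lens[1] = {1};
  const char* limit_keys[1] = {"z"};
  const size_t limit_lens[1] = {1};
  uint64_t sizes[1] = {0};
  char* err = NULL;  // the new errptr argument
  rocksdb_approximate_sizes(db, 1, start_keys, start_lens, limit_keys,
                            limit_lens, sizes, &err);
  if (err != NULL) {
    fprintf(stderr, "approximate_sizes failed: %s\n", err);
    free(err);
  } else {
    printf("approximate size of [a, z): %llu\n",
           (unsigned long long)sizes[0]);
  }
}
```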

include/rocksdb/db.h

@@ -1027,20 +1027,22 @@ class DB {
   // Simpler versions of the GetApproximateSizes() method above.
   // The include_flags argumenbt must of type DB::SizeApproximationFlags
   // and can not be NONE.
-  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
-                                   const Range* ranges, int n, uint64_t* sizes,
-                                   uint8_t include_flags = INCLUDE_FILES) {
+  virtual Status GetApproximateSizes(ColumnFamilyHandle* column_family,
+                                     const Range* ranges, int n,
+                                     uint64_t* sizes,
+                                     uint8_t include_flags = INCLUDE_FILES) {
     SizeApproximationOptions options;
     options.include_memtabtles =
         (include_flags & SizeApproximationFlags::INCLUDE_MEMTABLES) != 0;
     options.include_files =
         (include_flags & SizeApproximationFlags::INCLUDE_FILES) != 0;
-    Status s = GetApproximateSizes(options, column_family, ranges, n, sizes);
-    s.PermitUncheckedError();
+    return GetApproximateSizes(options, column_family, ranges, n, sizes);
   }
-  virtual void GetApproximateSizes(const Range* ranges, int n, uint64_t* sizes,
-                                   uint8_t include_flags = INCLUDE_FILES) {
-    GetApproximateSizes(DefaultColumnFamily(), ranges, n, sizes, include_flags);
+  virtual Status GetApproximateSizes(const Range* ranges, int n,
+                                     uint64_t* sizes,
+                                     uint8_t include_flags = INCLUDE_FILES) {
+    return GetApproximateSizes(DefaultColumnFamily(), ranges, n, sizes,
+                               include_flags);
   }

   // The method is similar to GetApproximateSizes, except it
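On the C++ side these overloads now return `Status`, so a failure to compute the sizes can no longer pass silently. A minimal sketch, assuming `db` is an open `rocksdb::DB*` and an illustrative key range:

```cpp
#include <cstdio>

#include "rocksdb/db.h"

void PrintRangeSize(rocksdb::DB* db) {
  rocksdb::Range r("key000", "key999");
  uint64_t size = 0;
  // Previously returned void; the result is now a Status that must be
  // checked (ASSERT_STATUS_CHECKED builds abort on unchecked statuses).
  rocksdb::Status s = db->GetApproximateSizes(&r, 1, &size);
  if (s.ok()) {
    std::printf("approximate size: %llu\n",
                static_cast<unsigned long long>(size));
  } else {
    std::fprintf(stderr, "GetApproximateSizes failed: %s\n",
                 s.ToString().c_str());
  }
}
```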

table/block_based/block_based_table_builder.cc

@@ -1452,7 +1452,8 @@ void BlockBasedTableBuilder::WriteIndexBlock(
     }
   }
   // If there are more index partitions, finish them and write them out
-  Status s = index_builder_status;
+  if (index_builder_status.IsIncomplete()) {
+    Status s = Status::Incomplete();
     while (ok() && s.IsIncomplete()) {
       s = rep_->index_builder->Finish(&index_blocks, *index_block_handle);
       if (!s.ok() && !s.IsIncomplete()) {
@@ -1460,13 +1461,15 @@ void BlockBasedTableBuilder::WriteIndexBlock(
         return;
       }
       if (rep_->table_options.enable_index_compression) {
-        WriteBlock(index_blocks.index_block_contents, index_block_handle, false);
+        WriteBlock(index_blocks.index_block_contents, index_block_handle,
+                   false);
       } else {
         WriteRawBlock(index_blocks.index_block_contents, kNoCompression,
                       index_block_handle);
       }
       // The last index_block_handle will be for the partition index block
     }
+  }
 }

 void BlockBasedTableBuilder::WritePropertiesBlock(

tools/ldb_cmd.cc

@@ -2493,14 +2493,12 @@ void ApproxSizeCommand::DoCommand() {
   Range ranges[1];
   ranges[0] = Range(start_key_, end_key_);
   uint64_t sizes[1];
-  db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes);
-  fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
-  /* Weird that GetApproximateSizes() returns void, although documentation
-   * says that it returns a Status object.
-  if (!st.ok()) {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-  }
-  */
+  Status s = db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes);
+  if (!s.ok()) {
+    exec_state_ = LDBCommandExecuteResult::Failed(s.ToString());
+  } else {
+    fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
+  }
 }

 // ----------------------------------------------------------------------------
