Add more tests to ASSERT_STATUS_CHECKED (#7367)

Summary:
db_options_test
options_file_test
auto_roll_logger_test
options_util_test
persistent_cache_test

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7367

Reviewed By: jay-zhuang

Differential Revision: D23712520

Pulled By: zhichao-cao

fbshipit-source-id: 99b331e357f5d6a6aabee89d1bd933002cbb3908
main
mrambacher 4 years ago committed by Facebook GitHub Bot
parent ec024a86de
commit a08d6f18f0
  1. 5
      Makefile
  2. 11
      db/builder.cc
  3. 19
      db/compaction/compaction_job.cc
  4. 14
      db/db_impl/db_impl.cc
  5. 4
      db/db_impl/db_impl_write.cc
  6. 77
      db/db_options_test.cc
  7. 2
      db/db_test_util.cc
  8. 2
      db/event_helpers.cc
  9. 4
      db/options_file_test.cc
  10. 2
      include/rocksdb/status.h
  11. 9
      logging/auto_roll_logger.cc
  12. 3
      logging/auto_roll_logger.h
  13. 4
      logging/auto_roll_logger_test.cc
  14. 9
      table/persistent_cache_helper.cc
  15. 6
      utilities/options/options_util_test.cc
  16. 2
      utilities/persistent_cache/block_cache_tier.h
  17. 2
      utilities/persistent_cache/persistent_cache_test.cc
  18. 6
      utilities/persistent_cache/persistent_cache_test.h
  19. 6
      utilities/persistent_cache/volatile_tier_impl.cc

@ -581,6 +581,8 @@ ifdef ASSERT_STATUS_CHECKED
coding_test \ coding_test \
crc32c_test \ crc32c_test \
dbformat_test \ dbformat_test \
db_options_test \
options_file_test \
defer_test \ defer_test \
filename_test \ filename_test \
dynamic_bloom_test \ dynamic_bloom_test \
@ -588,6 +590,7 @@ ifdef ASSERT_STATUS_CHECKED
env_test \ env_test \
env_logger_test \ env_logger_test \
event_logger_test \ event_logger_test \
auto_roll_logger_test \
file_indexer_test \ file_indexer_test \
hash_table_test \ hash_table_test \
hash_test \ hash_test \
@ -616,6 +619,8 @@ ifdef ASSERT_STATUS_CHECKED
filelock_test \ filelock_test \
timer_queue_test \ timer_queue_test \
timer_test \ timer_test \
options_util_test \
persistent_cache_test \
util_merge_operators_test \ util_merge_operators_test \
block_cache_trace_analyzer_test \ block_cache_trace_analyzer_test \
block_cache_tracer_test \ block_cache_tracer_test \

@ -98,7 +98,6 @@ Status BuildTable(
const size_t kReportFlushIOStatsEvery = 1048576; const size_t kReportFlushIOStatsEvery = 1048576;
uint64_t paranoid_hash = 0; uint64_t paranoid_hash = 0;
Status s; Status s;
IOStatus io_s;
meta->fd.file_size = 0; meta->fd.file_size = 0;
iter->SeekToFirst(); iter->SeekToFirst();
std::unique_ptr<CompactionRangeDelAggregator> range_del_agg( std::unique_ptr<CompactionRangeDelAggregator> range_del_agg(
@ -131,7 +130,7 @@ Status BuildTable(
bool use_direct_writes = file_options.use_direct_writes; bool use_direct_writes = file_options.use_direct_writes;
TEST_SYNC_POINT_CALLBACK("BuildTable:create_file", &use_direct_writes); TEST_SYNC_POINT_CALLBACK("BuildTable:create_file", &use_direct_writes);
#endif // !NDEBUG #endif // !NDEBUG
io_s = NewWritableFile(fs, fname, &file, file_options); IOStatus io_s = NewWritableFile(fs, fname, &file, file_options);
s = io_s; s = io_s;
if (io_status->ok()) { if (io_status->ok()) {
*io_status = io_s; *io_status = io_s;
@ -229,9 +228,8 @@ Status BuildTable(
} else { } else {
s = builder->Finish(); s = builder->Finish();
} }
io_s = builder->io_status();
if (io_status->ok()) { if (io_status->ok()) {
*io_status = io_s; *io_status = builder->io_status();
} }
if (s.ok() && !empty) { if (s.ok() && !empty) {
@ -313,17 +311,18 @@ Status BuildTable(
if (!s.ok() || meta->fd.GetFileSize() == 0) { if (!s.ok() || meta->fd.GetFileSize() == 0) {
constexpr IODebugContext* dbg = nullptr; constexpr IODebugContext* dbg = nullptr;
fs->DeleteFile(fname, IOOptions(), dbg); Status ignored = fs->DeleteFile(fname, IOOptions(), dbg);
assert(blob_file_additions || blob_file_paths.empty()); assert(blob_file_additions || blob_file_paths.empty());
if (blob_file_additions) { if (blob_file_additions) {
for (const std::string& blob_file_path : blob_file_paths) { for (const std::string& blob_file_path : blob_file_paths) {
fs->DeleteFile(blob_file_path, IOOptions(), dbg); ignored = fs->DeleteFile(blob_file_path, IOOptions(), dbg);
} }
blob_file_additions->clear(); blob_file_additions->clear();
} }
ignored.PermitUncheckedError();
} }
if (meta->fd.GetFileSize() == 0) { if (meta->fd.GetFileSize() == 0) {

@ -978,13 +978,11 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
// going to be 1.2MB and max_output_file_size = 1MB, prefer to have 0.6MB // going to be 1.2MB and max_output_file_size = 1MB, prefer to have 0.6MB
// and 0.6MB instead of 1MB and 0.2MB) // and 0.6MB instead of 1MB and 0.2MB)
bool output_file_ended = false; bool output_file_ended = false;
Status input_status;
if (sub_compact->compaction->output_level() != 0 && if (sub_compact->compaction->output_level() != 0 &&
sub_compact->current_output_file_size >= sub_compact->current_output_file_size >=
sub_compact->compaction->max_output_file_size()) { sub_compact->compaction->max_output_file_size()) {
// (1) this key terminates the file. For historical reasons, the iterator // (1) this key terminates the file. For historical reasons, the iterator
// status before advancing will be given to FinishCompactionOutputFile(). // status before advancing will be given to FinishCompactionOutputFile().
input_status = input->status();
output_file_ended = true; output_file_ended = true;
} }
TEST_SYNC_POINT_CALLBACK( TEST_SYNC_POINT_CALLBACK(
@ -1011,7 +1009,6 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
// (2) this key belongs to the next file. For historical reasons, the // (2) this key belongs to the next file. For historical reasons, the
// iterator status after advancing will be given to // iterator status after advancing will be given to
// FinishCompactionOutputFile(). // FinishCompactionOutputFile().
input_status = input->status();
output_file_ended = true; output_file_ended = true;
} }
} }
@ -1021,9 +1018,9 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
next_key = &c_iter->key(); next_key = &c_iter->key();
} }
CompactionIterationStats range_del_out_stats; CompactionIterationStats range_del_out_stats;
status = status = FinishCompactionOutputFile(input->status(), sub_compact,
FinishCompactionOutputFile(input_status, sub_compact, &range_del_agg, &range_del_agg, &range_del_out_stats,
&range_del_out_stats, next_key); next_key);
RecordDroppedKeys(range_del_out_stats, RecordDroppedKeys(range_del_out_stats,
&sub_compact->compaction_job_stats); &sub_compact->compaction_job_stats);
} }
@ -1079,7 +1076,7 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
CompactionIterationStats range_del_out_stats; CompactionIterationStats range_del_out_stats;
Status s = FinishCompactionOutputFile(status, sub_compact, &range_del_agg, Status s = FinishCompactionOutputFile(status, sub_compact, &range_del_agg,
&range_del_out_stats); &range_del_out_stats);
if (status.ok()) { if (!s.ok() && status.ok()) {
status = s; status = s;
} }
RecordDroppedKeys(range_del_out_stats, &sub_compact->compaction_job_stats); RecordDroppedKeys(range_del_out_stats, &sub_compact->compaction_job_stats);
@ -1374,6 +1371,9 @@ Status CompactionJob::FinishCompactionOutputFile(
} }
if (sub_compact->io_status.ok()) { if (sub_compact->io_status.ok()) {
sub_compact->io_status = io_s; sub_compact->io_status = io_s;
// Since this error is really a copy of the
// "normal" status, it does not also need to be checked
sub_compact->io_status.PermitUncheckedError();
} }
sub_compact->outfile.reset(); sub_compact->outfile.reset();
@ -1432,7 +1432,10 @@ Status CompactionJob::FinishCompactionOutputFile(
auto sfm = auto sfm =
static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get()); static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());
if (sfm && meta != nullptr && meta->fd.GetPathId() == 0) { if (sfm && meta != nullptr && meta->fd.GetPathId() == 0) {
sfm->OnAddFile(fname); Status add_s = sfm->OnAddFile(fname);
if (!add_s.ok() && s.ok()) {
s = add_s;
}
if (sfm->IsMaxAllowedSpaceReached()) { if (sfm->IsMaxAllowedSpaceReached()) {
// TODO(ajkr): should we return OK() if max space was reached by the final // TODO(ajkr): should we return OK() if max space was reached by the final
// compaction output file (similarly to how flush works when full)? // compaction output file (similarly to how flush works when full)?

@ -466,7 +466,8 @@ void DBImpl::CancelAllBackgroundWork(bool wait) {
if (!cfd->IsDropped() && cfd->initialized() && !cfd->mem()->IsEmpty()) { if (!cfd->IsDropped() && cfd->initialized() && !cfd->mem()->IsEmpty()) {
cfd->Ref(); cfd->Ref();
mutex_.Unlock(); mutex_.Unlock();
FlushMemTable(cfd, FlushOptions(), FlushReason::kShutDown); Status s = FlushMemTable(cfd, FlushOptions(), FlushReason::kShutDown);
s.PermitUncheckedError(); //**TODO: What to do on error?
mutex_.Lock(); mutex_.Lock();
cfd->UnrefAndTryDelete(); cfd->UnrefAndTryDelete();
} }
@ -969,8 +970,8 @@ Status DBImpl::SetOptions(
new_options = *cfd->GetLatestMutableCFOptions(); new_options = *cfd->GetLatestMutableCFOptions();
// Append new version to recompute compaction score. // Append new version to recompute compaction score.
VersionEdit dummy_edit; VersionEdit dummy_edit;
versions_->LogAndApply(cfd, new_options, &dummy_edit, &mutex_, s = versions_->LogAndApply(cfd, new_options, &dummy_edit, &mutex_,
directories_.GetDbDir()); directories_.GetDbDir());
// Trigger possible flush/compactions. This has to be before we persist // Trigger possible flush/compactions. This has to be before we persist
// options to file, otherwise there will be a deadlock with writer // options to file, otherwise there will be a deadlock with writer
// thread. // thread.
@ -1020,7 +1021,7 @@ Status DBImpl::SetDBOptions(
MutableDBOptions new_options; MutableDBOptions new_options;
Status s; Status s;
Status persist_options_status; Status persist_options_status = Status::OK();
bool wal_changed = false; bool wal_changed = false;
WriteContext write_context; WriteContext write_context;
{ {
@ -1125,6 +1126,10 @@ Status DBImpl::SetDBOptions(
persist_options_status = WriteOptionsFile( persist_options_status = WriteOptionsFile(
false /*need_mutex_lock*/, false /*need_enter_write_thread*/); false /*need_mutex_lock*/, false /*need_enter_write_thread*/);
write_thread_.ExitUnbatched(&w); write_thread_.ExitUnbatched(&w);
} else {
// To get here, we must have had invalid options and will not attempt to
// persist the options, which means the status is "OK/Uninitialized".
persist_options_status.PermitUncheckedError();
} }
} }
ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions(), inputs:"); ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions(), inputs:");
@ -2480,7 +2485,6 @@ Status DBImpl::CreateColumnFamilyImpl(const ColumnFamilyOptions& cf_options,
const std::string& column_family_name, const std::string& column_family_name,
ColumnFamilyHandle** handle) { ColumnFamilyHandle** handle) {
Status s; Status s;
Status persist_options_status;
*handle = nullptr; *handle = nullptr;
DBOptions db_options = DBOptions db_options =

@ -1805,6 +1805,10 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
NotifyOnMemTableSealed(cfd, memtable_info); NotifyOnMemTableSealed(cfd, memtable_info);
mutex_.Lock(); mutex_.Lock();
#endif // ROCKSDB_LITE #endif // ROCKSDB_LITE
// It is possible that we got here without checking the value of io_s, but
// that is okay. If we did, it most likely means that s was already an error.
// In any case, ignore any unchecked error for io_s here.
io_s.PermitUncheckedError();
return s; return s;
} }

@ -37,9 +37,10 @@ class DBOptionsTest : public DBTestBase {
ConfigOptions config_options; ConfigOptions config_options;
config_options.delimiter = "; "; config_options.delimiter = "; ";
GetStringFromMutableDBOptions(config_options, MutableDBOptions(options), EXPECT_OK(GetStringFromMutableDBOptions(
&options_str); config_options, MutableDBOptions(options), &options_str));
StringToMap(options_str, &mutable_map); EXPECT_OK(StringToMap(options_str, &mutable_map));
return mutable_map; return mutable_map;
} }
@ -50,9 +51,9 @@ class DBOptionsTest : public DBTestBase {
config_options.delimiter = "; "; config_options.delimiter = "; ";
std::unordered_map<std::string, std::string> mutable_map; std::unordered_map<std::string, std::string> mutable_map;
GetStringFromMutableCFOptions(config_options, MutableCFOptions(options), EXPECT_OK(GetStringFromMutableCFOptions(
&options_str); config_options, MutableCFOptions(options), &options_str));
StringToMap(options_str, &mutable_map); EXPECT_OK(StringToMap(options_str, &mutable_map));
return mutable_map; return mutable_map;
} }
@ -134,7 +135,7 @@ TEST_F(DBOptionsTest, SetBytesPerSync) {
WriteOptions write_opts; WriteOptions write_opts;
// should sync approximately 40MB/1MB ~= 40 times. // should sync approximately 40MB/1MB ~= 40 times.
for (i = 0; i < 40; i++) { for (i = 0; i < 40; i++) {
Put(Key(i), kValue, write_opts); ASSERT_OK(Put(Key(i), kValue, write_opts));
} }
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@ -150,7 +151,7 @@ TEST_F(DBOptionsTest, SetBytesPerSync) {
// should sync approximately 40MB*2/8MB ~= 10 times. // should sync approximately 40MB*2/8MB ~= 10 times.
// data will be 40*2MB because of previous Puts too. // data will be 40*2MB because of previous Puts too.
for (i = 0; i < 40; i++) { for (i = 0; i < 40; i++) {
Put(Key(i), kValue, write_opts); ASSERT_OK(Put(Key(i), kValue, write_opts));
} }
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@ -181,7 +182,7 @@ TEST_F(DBOptionsTest, SetWalBytesPerSync) {
const std::string kValue(kValueSize, 'v'); const std::string kValue(kValueSize, 'v');
int i = 0; int i = 0;
for (; i < 10; i++) { for (; i < 10; i++) {
Put(Key(i), kValue); ASSERT_OK(Put(Key(i), kValue));
} }
// Do not flush. If we flush here, SwitchWAL will reuse old WAL file since its // Do not flush. If we flush here, SwitchWAL will reuse old WAL file since its
// empty and will not get the new wal_bytes_per_sync value. // empty and will not get the new wal_bytes_per_sync value.
@ -192,7 +193,7 @@ TEST_F(DBOptionsTest, SetWalBytesPerSync) {
counter = 0; counter = 0;
i = 0; i = 0;
for (; i < 10; i++) { for (; i < 10; i++) {
Put(Key(i), kValue); ASSERT_OK(Put(Key(i), kValue));
} }
ASSERT_GT(counter, 0); ASSERT_GT(counter, 0);
ASSERT_GT(low_bytes_per_sync, 0); ASSERT_GT(low_bytes_per_sync, 0);
@ -227,9 +228,9 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
for (; i < 3; i++) { for (; i < 3; i++) {
ASSERT_OK(Put("foo", ToString(i))); ASSERT_OK(Put("foo", ToString(i)));
ASSERT_OK(Put("bar", ToString(i))); ASSERT_OK(Put("bar", ToString(i)));
Flush(); ASSERT_OK(Flush());
} }
dbfull()->TEST_WaitForCompact(); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(unmatch_cnt, 0); ASSERT_EQ(unmatch_cnt, 0);
ASSERT_GE(match_cnt, 11); ASSERT_GE(match_cnt, 11);
@ -245,9 +246,9 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
for (; i < 3; i++) { for (; i < 3; i++) {
ASSERT_OK(Put("foo", ToString(i))); ASSERT_OK(Put("foo", ToString(i)));
ASSERT_OK(Put("bar", ToString(i))); ASSERT_OK(Put("bar", ToString(i)));
Flush(); ASSERT_OK(Flush());
} }
dbfull()->TEST_WaitForCompact(); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(unmatch_cnt, 0); ASSERT_EQ(unmatch_cnt, 0);
ASSERT_GE(match_cnt, 11); ASSERT_GE(match_cnt, 11);
} }
@ -283,14 +284,14 @@ TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
DestroyAndReopen(options); DestroyAndReopen(options);
int i = 0; int i = 0;
for (; i < 1024; i++) { for (; i < 1024; i++) {
Put(Key(i), kValue); ASSERT_OK(Put(Key(i), kValue));
} }
Flush(); ASSERT_OK(Flush());
for (; i < 1024 * 2; i++) { for (; i < 1024 * 2; i++) {
Put(Key(i), kValue); ASSERT_OK(Put(Key(i), kValue));
} }
Flush(); ASSERT_OK(Flush());
dbfull()->TEST_WaitForFlushMemTable(); ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
ASSERT_EQ(2, NumTableFilesAtLevel(0)); ASSERT_EQ(2, NumTableFilesAtLevel(0));
uint64_t l0_size = SizeAtLevel(0); uint64_t l0_size = SizeAtLevel(0);
@ -312,7 +313,7 @@ TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
break; break;
} }
Reopen(options); Reopen(options);
dbfull()->TEST_WaitForCompact(); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped()); ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay()); ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
@ -359,7 +360,7 @@ TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:3"); TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:3");
// Background compaction executed. // Background compaction executed.
dbfull()->TEST_WaitForCompact(); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped()); ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay()); ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
} }
@ -376,12 +377,12 @@ TEST_F(DBOptionsTest, SetOptionsMayTriggerCompaction) {
// Need to insert two keys to avoid trivial move. // Need to insert two keys to avoid trivial move.
ASSERT_OK(Put("foo", ToString(i))); ASSERT_OK(Put("foo", ToString(i)));
ASSERT_OK(Put("bar", ToString(i))); ASSERT_OK(Put("bar", ToString(i)));
Flush(); ASSERT_OK(Flush());
} }
ASSERT_EQ("3", FilesPerLevel()); ASSERT_EQ("3", FilesPerLevel());
ASSERT_OK( ASSERT_OK(
dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "3"}})); dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "3"}}));
dbfull()->TEST_WaitForCompact(); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,1", FilesPerLevel()); ASSERT_EQ("0,1", FilesPerLevel());
} }
@ -503,7 +504,7 @@ TEST_F(DBOptionsTest, MaxTotalWalSizeChange) {
ASSERT_OK(dbfull()->SetDBOptions({{"max_total_wal_size", "10"}})); ASSERT_OK(dbfull()->SetDBOptions({{"max_total_wal_size", "10"}}));
for (size_t cf = 0; cf < handles_.size(); ++cf) { for (size_t cf = 0; cf < handles_.size(); ++cf) {
dbfull()->TEST_WaitForFlushMemTable(handles_[cf]); ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf]));
ASSERT_EQ("1", FilesPerLevel(static_cast<int>(cf))); ASSERT_EQ("1", FilesPerLevel(static_cast<int>(cf)));
} }
} }
@ -716,13 +717,13 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
// No files should be compacted as ttl is set to 1 hour. // No files should be compacted as ttl is set to 1 hour.
ASSERT_EQ(dbfull()->GetOptions().ttl, 3600); ASSERT_EQ(dbfull()->GetOptions().ttl, 3600);
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_EQ(NumTableFilesAtLevel(0), 10); ASSERT_EQ(NumTableFilesAtLevel(0), 10);
// Set ttl to 1 minute. So all files should get deleted. // Set ttl to 1 minute. So all files should get deleted.
ASSERT_OK(dbfull()->SetOptions({{"ttl", "60"}})); ASSERT_OK(dbfull()->SetOptions({{"ttl", "60"}}));
ASSERT_EQ(dbfull()->GetOptions().ttl, 60); ASSERT_EQ(dbfull()->GetOptions().ttl, 60);
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_EQ(NumTableFilesAtLevel(0), 0);
@ -738,7 +739,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
for (int j = 0; j < 10; j++) { for (int j = 0; j < 10; j++) {
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
} }
Flush(); ASSERT_OK(Flush());
} }
ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 10); ASSERT_EQ(NumTableFilesAtLevel(0), 10);
@ -746,7 +747,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
// No files should be compacted as max_table_files_size is set to 500 KB. // No files should be compacted as max_table_files_size is set to 500 KB.
ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size, ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
500 << 10); 500 << 10);
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_EQ(NumTableFilesAtLevel(0), 10); ASSERT_EQ(NumTableFilesAtLevel(0), 10);
// Set max_table_files_size to 12 KB. So only 1 file should remain now. // Set max_table_files_size to 12 KB. So only 1 file should remain now.
@ -754,7 +755,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
{{"compaction_options_fifo", "{max_table_files_size=12288;}"}})); {{"compaction_options_fifo", "{max_table_files_size=12288;}"}}));
ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size, ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
12 << 10); 12 << 10);
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 1); ASSERT_EQ(NumTableFilesAtLevel(0), 1);
@ -770,7 +771,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
for (int j = 0; j < 10; j++) { for (int j = 0; j < 10; j++) {
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
} }
Flush(); ASSERT_OK(Flush());
} }
ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 10); ASSERT_EQ(NumTableFilesAtLevel(0), 10);
@ -779,7 +780,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
// allow_compaction is false // allow_compaction is false
ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction, ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
false); false);
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_EQ(NumTableFilesAtLevel(0), 10); ASSERT_EQ(NumTableFilesAtLevel(0), 10);
// Set allow_compaction to true. So number of files should be between 1 and 5. // Set allow_compaction to true. So number of files should be between 1 and 5.
@ -787,7 +788,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
{{"compaction_options_fifo", "{allow_compaction=true;}"}})); {{"compaction_options_fifo", "{allow_compaction=true;}"}}));
ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction, ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
true); true);
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_GE(NumTableFilesAtLevel(0), 1); ASSERT_GE(NumTableFilesAtLevel(0), 1);
ASSERT_LE(NumTableFilesAtLevel(0), 5); ASSERT_LE(NumTableFilesAtLevel(0), 5);
@ -808,14 +809,14 @@ TEST_F(DBOptionsTest, CompactionReadaheadSizeChange) {
ASSERT_OK(dbfull()->SetDBOptions({{"compaction_readahead_size", "256"}})); ASSERT_OK(dbfull()->SetDBOptions({{"compaction_readahead_size", "256"}}));
ASSERT_EQ(256, dbfull()->GetDBOptions().compaction_readahead_size); ASSERT_EQ(256, dbfull()->GetDBOptions().compaction_readahead_size);
for (int i = 0; i < 1024; i++) { for (int i = 0; i < 1024; i++) {
Put(Key(i), kValue); ASSERT_OK(Put(Key(i), kValue));
} }
Flush(); ASSERT_OK(Flush());
for (int i = 0; i < 1024 * 2; i++) { for (int i = 0; i < 1024 * 2; i++) {
Put(Key(i), kValue); ASSERT_OK(Put(Key(i), kValue));
} }
Flush(); ASSERT_OK(Flush());
dbfull()->TEST_WaitForCompact(); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(256, env_->compaction_readahead_size_); ASSERT_EQ(256, env_->compaction_readahead_size_);
Close(); Close();
} }
@ -834,7 +835,7 @@ TEST_F(DBOptionsTest, FIFOTtlBackwardCompatible) {
for (int j = 0; j < 10; j++) { for (int j = 0; j < 10; j++) {
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
} }
Flush(); ASSERT_OK(Flush());
} }
ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 10); ASSERT_EQ(NumTableFilesAtLevel(0), 10);

@ -666,7 +666,7 @@ void DBTestBase::Reopen(const Options& options) {
void DBTestBase::Close() { void DBTestBase::Close() {
for (auto h : handles_) { for (auto h : handles_) {
db_->DestroyColumnFamilyHandle(h); EXPECT_OK(db_->DestroyColumnFamilyHandle(h));
} }
handles_.clear(); handles_.clear();
delete db_; delete db_;

@ -162,6 +162,7 @@ void EventHelpers::LogAndNotifyTableFileCreationFinished(
for (auto& listener : listeners) { for (auto& listener : listeners) {
listener->OnTableFileCreated(info); listener->OnTableFileCreated(info);
} }
info.status.PermitUncheckedError();
#else #else
(void)listeners; (void)listeners;
(void)db_name; (void)db_name;
@ -199,6 +200,7 @@ void EventHelpers::LogAndNotifyTableFileDeletion(
for (auto& listener : listeners) { for (auto& listener : listeners) {
listener->OnTableFileDeleted(info); listener->OnTableFileDeleted(info);
} }
info.status.PermitUncheckedError();
#else #else
(void)file_path; (void)file_path;
(void)dbname; (void)dbname;

@ -25,7 +25,7 @@ void UpdateOptionsFiles(DB* db,
std::unordered_set<std::string>* filename_history, std::unordered_set<std::string>* filename_history,
int* options_files_count) { int* options_files_count) {
std::vector<std::string> filenames; std::vector<std::string> filenames;
db->GetEnv()->GetChildren(db->GetName(), &filenames); EXPECT_OK(db->GetEnv()->GetChildren(db->GetName(), &filenames));
uint64_t number; uint64_t number;
FileType type; FileType type;
*options_files_count = 0; *options_files_count = 0;
@ -42,7 +42,7 @@ void VerifyOptionsFileName(
DB* db, const std::unordered_set<std::string>& past_filenames) { DB* db, const std::unordered_set<std::string>& past_filenames) {
std::vector<std::string> filenames; std::vector<std::string> filenames;
std::unordered_set<std::string> current_filenames; std::unordered_set<std::string> current_filenames;
db->GetEnv()->GetChildren(db->GetName(), &filenames); EXPECT_OK(db->GetEnv()->GetChildren(db->GetName(), &filenames));
uint64_t number; uint64_t number;
FileType type; FileType type;
for (auto filename : filenames) { for (auto filename : filenames) {

@ -38,7 +38,7 @@ class Status {
~Status() { ~Status() {
#ifdef ROCKSDB_ASSERT_STATUS_CHECKED #ifdef ROCKSDB_ASSERT_STATUS_CHECKED
if (!checked_) { if (!checked_) {
fprintf(stderr, "Failed to check Status\n"); fprintf(stderr, "Failed to check Status %p\n", this);
port::PrintStack(); port::PrintStack();
abort(); abort();
} }

@ -45,8 +45,8 @@ AutoRollLogger::AutoRollLogger(Env* env, const std::string& dbname,
RollLogFile(); RollLogFile();
} }
GetExistingFiles(); GetExistingFiles();
ResetLogger(); s = ResetLogger();
if (status_.ok()) { if (s.ok() && status_.ok()) {
status_ = TrimOldLogFiles(); status_ = TrimOldLogFiles();
} }
} }
@ -86,7 +86,10 @@ void AutoRollLogger::RollLogFile() {
dbname_, now, db_absolute_path_, db_log_dir_); dbname_, now, db_absolute_path_, db_log_dir_);
now++; now++;
} while (env_->FileExists(old_fname).ok()); } while (env_->FileExists(old_fname).ok());
env_->RenameFile(log_fname_, old_fname); Status s = env_->RenameFile(log_fname_, old_fname);
if (!s.ok()) {
// What should we do on error?
}
old_log_files_.push(old_fname); old_log_files_.push(old_fname);
} }

@ -69,8 +69,9 @@ class AutoRollLogger : public Logger {
virtual ~AutoRollLogger() { virtual ~AutoRollLogger() {
if (logger_ && !closed_) { if (logger_ && !closed_) {
logger_->Close(); logger_->Close().PermitUncheckedError();
} }
status_.PermitUncheckedError();
} }
using Logger::GetInfoLogLevel; using Logger::GetInfoLogLevel;

@ -71,7 +71,7 @@ class AutoRollLoggerTest : public testing::Test {
std::string deleteCmd = "rm -rf " + kTestDir; std::string deleteCmd = "rm -rf " + kTestDir;
#endif #endif
ASSERT_TRUE(system(deleteCmd.c_str()) == 0); ASSERT_TRUE(system(deleteCmd.c_str()) == 0);
Env::Default()->CreateDir(kTestDir); ASSERT_OK(Env::Default()->CreateDir(kTestDir));
} }
void RollLogFileBySizeTest(AutoRollLogger* logger, size_t log_max_size, void RollLogFileBySizeTest(AutoRollLogger* logger, size_t log_max_size,
@ -567,7 +567,7 @@ static std::vector<std::string> GetOldFileNames(const std::string& path) {
const std::string fname = path.substr(path.find_last_of("/") + 1); const std::string fname = path.substr(path.find_last_of("/") + 1);
std::vector<std::string> children; std::vector<std::string> children;
Env::Default()->GetChildren(dirname, &children); EXPECT_OK(Env::Default()->GetChildren(dirname, &children));
// We know that the old log files are named [path]<something> // We know that the old log files are named [path]<something>
// Return all entities that match the pattern // Return all entities that match the pattern

@ -21,7 +21,8 @@ void PersistentCacheHelper::InsertRawPage(
cache_options.key_prefix.size(), cache_options.key_prefix.size(),
handle, cache_key); handle, cache_key);
// insert content to cache // insert content to cache
cache_options.persistent_cache->Insert(key, data, size); cache_options.persistent_cache->Insert(key, data, size)
.PermitUncheckedError();
} }
void PersistentCacheHelper::InsertUncompressedPage( void PersistentCacheHelper::InsertUncompressedPage(
@ -39,8 +40,10 @@ void PersistentCacheHelper::InsertUncompressedPage(
cache_options.key_prefix.size(), cache_options.key_prefix.size(),
handle, cache_key); handle, cache_key);
// insert block contents to page cache // insert block contents to page cache
cache_options.persistent_cache->Insert(key, contents.data.data(), cache_options.persistent_cache
contents.data.size()); ->Insert(key, contents.data.data(), contents.data.size())
.PermitUncheckedError();
;
} }
Status PersistentCacheHelper::LookupRawPage( Status PersistentCacheHelper::LookupRawPage(

@ -58,7 +58,8 @@ TEST_F(OptionsUtilTest, SaveAndLoad) {
} }
const std::string kFileName = "OPTIONS-123456"; const std::string kFileName = "OPTIONS-123456";
PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, fs_.get()); ASSERT_OK(
PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, fs_.get()));
DBOptions loaded_db_opt; DBOptions loaded_db_opt;
std::vector<ColumnFamilyDescriptor> loaded_cf_descs; std::vector<ColumnFamilyDescriptor> loaded_cf_descs;
@ -120,7 +121,8 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) {
cf_names.push_back("cf_plain_table_sample"); cf_names.push_back("cf_plain_table_sample");
// Saving DB in file // Saving DB in file
const std::string kFileName = "OPTIONS-LOAD_CACHE_123456"; const std::string kFileName = "OPTIONS-LOAD_CACHE_123456";
PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, fs_.get()); ASSERT_OK(
PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, fs_.get()));
DBOptions loaded_db_opt; DBOptions loaded_db_opt;
std::vector<ColumnFamilyDescriptor> loaded_cf_descs; std::vector<ColumnFamilyDescriptor> loaded_cf_descs;

@ -53,7 +53,7 @@ class BlockCacheTier : public PersistentCacheTier {
virtual ~BlockCacheTier() { virtual ~BlockCacheTier() {
// Close is re-entrant so we can call close even if it is already closed // Close is re-entrant so we can call close even if it is already closed
Close(); Close().PermitUncheckedError();
assert(!insert_th_.joinable()); assert(!insert_th_.joinable());
} }

@ -433,7 +433,7 @@ void PersistentCacheDBTest::RunTest(
options.create_if_missing = true; options.create_if_missing = true;
DestroyAndReopen(options); DestroyAndReopen(options);
pcache->Close(); ASSERT_OK(pcache->Close());
} }
} }

@ -183,7 +183,7 @@ class PersistentCacheTierTest : public testing::Test {
ASSERT_EQ(stats_verify_hits_, max_keys); ASSERT_EQ(stats_verify_hits_, max_keys);
ASSERT_EQ(stats_verify_missed_, 0); ASSERT_EQ(stats_verify_missed_, 0);
cache_->Close(); ASSERT_OK(cache_->Close());
cache_.reset(); cache_.reset();
} }
@ -194,7 +194,7 @@ class PersistentCacheTierTest : public testing::Test {
ASSERT_LT(stats_verify_hits_, max_keys); ASSERT_LT(stats_verify_hits_, max_keys);
ASSERT_GT(stats_verify_missed_, 0); ASSERT_GT(stats_verify_missed_, 0);
cache_->Close(); ASSERT_OK(cache_->Close());
cache_.reset(); cache_.reset();
} }
@ -206,7 +206,7 @@ class PersistentCacheTierTest : public testing::Test {
ASSERT_GT(stats_verify_hits_, 0); ASSERT_GT(stats_verify_hits_, 0);
ASSERT_GT(stats_verify_missed_, 0); ASSERT_GT(stats_verify_missed_, 0);
cache_->Close(); ASSERT_OK(cache_->Close());
cache_.reset(); cache_.reset();
} }

@ -122,8 +122,10 @@ bool VolatileCacheTier::Evict() {
// push the evicted object to the next level // push the evicted object to the next level
if (next_tier()) { if (next_tier()) {
next_tier()->Insert(Slice(edata->key), edata->value.c_str(), // TODO: Should the insert error be ignored?
edata->value.size()); Status s = next_tier()->Insert(Slice(edata->key), edata->value.c_str(),
edata->value.size());
s.PermitUncheckedError();
} }
// adjust size and destroy data // adjust size and destroy data

Loading…
Cancel
Save