Fixed a file-not-found issue when a log file is moved to archive.

Summary:
Fixed a file-not-found issue when a log file is moved to archive
by adding a missing retry.

Test Plan:
make db_test
export ROCKSDB_TESTS=TransactionLogIteratorRace
./db_test

Reviewers: sdong, haobo

Reviewed By: sdong

CC: igor, leveldb

Differential Revision: https://reviews.facebook.net/D18669
main
Yueh-Hsuan Chiang 11 years ago
parent d14581f936
commit 1c7799d8aa
  1. 11
      db/db_impl.cc
  2. 90
      db/db_test.cc
  3. 1
      db/transaction_log_impl.cc

@ -935,8 +935,19 @@ Status DBImpl::GetSortedWalsOfType(const std::string& path,
continue; continue;
} }
// Reproduce the race condition where a log file is moved
// to archived dir, between these two sync points, used in
// (DBTest,TransactionLogIteratorRace)
TEST_SYNC_POINT("DBImpl::GetSortedWalsOfType:1");
TEST_SYNC_POINT("DBImpl::GetSortedWalsOfType:2");
uint64_t size_bytes; uint64_t size_bytes;
s = env_->GetFileSize(LogFileName(path, number), &size_bytes); s = env_->GetFileSize(LogFileName(path, number), &size_bytes);
// re-try in case the alive log file has been moved to archive.
if (!s.ok() && log_type == kAliveLogFile &&
env_->FileExists(ArchivedLogFileName(path, number))) {
s = env_->GetFileSize(ArchivedLogFileName(path, number), &size_bytes);
}
if (!s.ok()) { if (!s.ok()) {
return s; return s;
} }

@ -5609,48 +5609,56 @@ TEST(DBTest, TransactionLogIterator) {
#ifndef NDEBUG // sync point is not included with DNDEBUG build #ifndef NDEBUG // sync point is not included with DNDEBUG build
TEST(DBTest, TransactionLogIteratorRace) { TEST(DBTest, TransactionLogIteratorRace) {
// Setup sync point dependency to reproduce the race condition of static const int LOG_ITERATOR_RACE_TEST_COUNT = 2;
// a log file moved to archived dir, in the middle of GetSortedWalFiles static const char* sync_points[LOG_ITERATOR_RACE_TEST_COUNT][4] =
rocksdb::SyncPoint::GetInstance()->LoadDependency( { { "DBImpl::GetSortedWalFiles:1", "DBImpl::PurgeObsoleteFiles:1",
{ { "DBImpl::GetSortedWalFiles:1", "DBImpl::PurgeObsoleteFiles:1" }, "DBImpl::PurgeObsoleteFiles:2", "DBImpl::GetSortedWalFiles:2" },
{ "DBImpl::PurgeObsoleteFiles:2", "DBImpl::GetSortedWalFiles:2" }, { "DBImpl::GetSortedWalsOfType:1", "DBImpl::PurgeObsoleteFiles:1",
}); "DBImpl::PurgeObsoleteFiles:2", "DBImpl::GetSortedWalsOfType:2" }};
for (int test = 0; test < LOG_ITERATOR_RACE_TEST_COUNT; ++test) {
do { // Setup sync point dependency to reproduce the race condition of
rocksdb::SyncPoint::GetInstance()->ClearTrace(); // a log file moved to archived dir, in the middle of GetSortedWalFiles
rocksdb::SyncPoint::GetInstance()->DisableProcessing(); rocksdb::SyncPoint::GetInstance()->LoadDependency(
Options options = OptionsForLogIterTest(); { { sync_points[test][0], sync_points[test][1] },
DestroyAndReopen(&options); { sync_points[test][2], sync_points[test][3] },
Put("key1", DummyString(1024)); });
dbfull()->Flush(FlushOptions());
Put("key2", DummyString(1024)); do {
dbfull()->Flush(FlushOptions()); rocksdb::SyncPoint::GetInstance()->ClearTrace();
Put("key3", DummyString(1024)); rocksdb::SyncPoint::GetInstance()->DisableProcessing();
dbfull()->Flush(FlushOptions()); Options options = OptionsForLogIterTest();
Put("key4", DummyString(1024)); DestroyAndReopen(&options);
ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4U); Put("key1", DummyString(1024));
dbfull()->Flush(FlushOptions());
{ Put("key2", DummyString(1024));
auto iter = OpenTransactionLogIter(0); dbfull()->Flush(FlushOptions());
ExpectRecords(4, iter); Put("key3", DummyString(1024));
} dbfull()->Flush(FlushOptions());
Put("key4", DummyString(1024));
rocksdb::SyncPoint::GetInstance()->EnableProcessing(); ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4U);
// trigger async flush, and log move. Well, log move will
// wait until the GetSortedWalFiles:1 to reproduce the race {
// condition auto iter = OpenTransactionLogIter(0);
FlushOptions flush_options; ExpectRecords(4, iter);
flush_options.wait = false; }
dbfull()->Flush(flush_options);
// "key5" would be written in a new memtable and log rocksdb::SyncPoint::GetInstance()->EnableProcessing();
Put("key5", DummyString(1024)); // trigger async flush, and log move. Well, log move will
{ // wait until the GetSortedWalFiles:1 to reproduce the race
// this iter would miss "key4" if not fixed // condition
auto iter = OpenTransactionLogIter(0); FlushOptions flush_options;
ExpectRecords(5, iter); flush_options.wait = false;
} dbfull()->Flush(flush_options);
} while (ChangeCompactOptions());
// "key5" would be written in a new memtable and log
Put("key5", DummyString(1024));
{
// this iter would miss "key4" if not fixed
auto iter = OpenTransactionLogIter(0);
ExpectRecords(5, iter);
}
} while (ChangeCompactOptions());
}
} }
#endif #endif

@ -93,6 +93,7 @@ void TransactionLogIteratorImpl::SeekToStartSequence(
Status s = OpenLogReader(files_->at(startFileIndex).get()); Status s = OpenLogReader(files_->at(startFileIndex).get());
if (!s.ok()) { if (!s.ok()) {
currentStatus_ = s; currentStatus_ = s;
reporter_.Info(currentStatus_.ToString().c_str());
return; return;
} }
while (RestrictedRead(&record, &scratch)) { while (RestrictedRead(&record, &scratch)) {

Loading…
Cancel
Save