From 7e37a5918cbe18e6aca0751d9ff01c0e1dc3b055 Mon Sep 17 00:00:00 2001 From: Akanksha Mahajan Date: Fri, 24 Jul 2020 14:45:45 -0700 Subject: [PATCH] Fix for flaky test BackupableDBTest.RateLimiting (#7167) Summary: BackupableDBTest.RateLimiting test is failing due to timing out on our test server. It might be because of nested loops running sequentially that test different types of combinations of parameters. This patch converts the test into a parameterized test so that all combinations can be tested out. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7167 Test Plan: make check -j64 Reviewed By: zhichao-cao Differential Revision: D22709531 Pulled By: akankshamahajan15 fbshipit-source-id: 95518153e87b3b5311a6c1960a191bca58898786 --- utilities/backupable/backupable_db_test.cc | 116 ++++++++++++--------- 1 file changed, 67 insertions(+), 49 deletions(-) diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index 47499f33d..81e4fd456 100644 --- a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -1832,61 +1832,79 @@ TEST_F(BackupableDBTest, KeepLogFiles) { AssertBackupConsistency(0, 0, 500, 600, true); } -TEST_F(BackupableDBTest, RateLimiting) { - size_t const kMicrosPerSec = 1000 * 1000LL; - uint64_t const MB = 1024 * 1024; +class BackupableDBRateLimitingTestWithParam + : public BackupableDBTest, + public testing::WithParamInterface< + std::tuple /* limits */>> { + public: + BackupableDBRateLimitingTestWithParam() {} +}; - const std::vector> limits( - {{1 * MB, 5 * MB}, {2 * MB, 3 * MB}}); +uint64_t const MB = 1024 * 1024; + +INSTANTIATE_TEST_CASE_P( + RateLimiting, BackupableDBRateLimitingTestWithParam, + ::testing::Values(std::make_tuple(false, 0, std::make_pair(1 * MB, 5 * MB)), + std::make_tuple(false, 0, std::make_pair(2 * MB, 3 * MB)), + std::make_tuple(false, 1, std::make_pair(1 * MB, 5 * MB)), + std::make_tuple(false, 1, std::make_pair(2 * MB, 3 * MB)), + 
std::make_tuple(true, 0, std::make_pair(1 * MB, 5 * MB)), + std::make_tuple(true, 0, std::make_pair(2 * MB, 3 * MB)), + std::make_tuple(true, 1, std::make_pair(1 * MB, 5 * MB)), + std::make_tuple(true, 1, + std::make_pair(2 * MB, 3 * MB)))); + +TEST_P(BackupableDBRateLimitingTestWithParam, RateLimiting) { + size_t const kMicrosPerSec = 1000 * 1000LL; std::shared_ptr backupThrottler(NewGenericRateLimiter(1)); std::shared_ptr restoreThrottler(NewGenericRateLimiter(1)); - for (bool makeThrottler : {false, true}) { - if (makeThrottler) { - backupable_options_->backup_rate_limiter = backupThrottler; - backupable_options_->restore_rate_limiter = restoreThrottler; - } - // iter 0 -- single threaded - // iter 1 -- multi threaded - for (int iter = 0; iter < 2; ++iter) { - for (const auto& limit : limits) { - // destroy old data - DestroyDB(dbname_, Options()); - if (makeThrottler) { - backupThrottler->SetBytesPerSecond(limit.first); - restoreThrottler->SetBytesPerSecond(limit.second); - } else { - backupable_options_->backup_rate_limit = limit.first; - backupable_options_->restore_rate_limit = limit.second; - } - backupable_options_->max_background_operations = (iter == 0) ? 
1 : 10; - options_.compression = kNoCompression; - OpenDBAndBackupEngine(true); - size_t bytes_written = FillDB(db_.get(), 0, 100000); - - auto start_backup = db_chroot_env_->NowMicros(); - ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false)); - auto backup_time = db_chroot_env_->NowMicros() - start_backup; - auto rate_limited_backup_time = - (bytes_written * kMicrosPerSec) / limit.first; - ASSERT_GT(backup_time, 0.8 * rate_limited_backup_time); - - CloseDBAndBackupEngine(); - - OpenBackupEngine(); - auto start_restore = db_chroot_env_->NowMicros(); - ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_)); - auto restore_time = db_chroot_env_->NowMicros() - start_restore; - CloseBackupEngine(); - auto rate_limited_restore_time = - (bytes_written * kMicrosPerSec) / limit.second; - ASSERT_GT(restore_time, 0.8 * rate_limited_restore_time); - - AssertBackupConsistency(0, 0, 100000, 100010); - } - } + bool makeThrottler = std::get<0>(GetParam()); + if (makeThrottler) { + backupable_options_->backup_rate_limiter = backupThrottler; + backupable_options_->restore_rate_limiter = restoreThrottler; } + + // iter 0 -- single threaded + // iter 1 -- multi threaded + int iter = std::get<1>(GetParam()); + const std::pair limit = std::get<2>(GetParam()); + + // destroy old data + DestroyDB(dbname_, Options()); + if (makeThrottler) { + backupThrottler->SetBytesPerSecond(limit.first); + restoreThrottler->SetBytesPerSecond(limit.second); + } else { + backupable_options_->backup_rate_limit = limit.first; + backupable_options_->restore_rate_limit = limit.second; + } + backupable_options_->max_background_operations = (iter == 0) ? 
1 : 10; + options_.compression = kNoCompression; + OpenDBAndBackupEngine(true); + size_t bytes_written = FillDB(db_.get(), 0, 100000); + + auto start_backup = db_chroot_env_->NowMicros(); + ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false)); + auto backup_time = db_chroot_env_->NowMicros() - start_backup; + auto rate_limited_backup_time = (bytes_written * kMicrosPerSec) / limit.first; + ASSERT_GT(backup_time, 0.8 * rate_limited_backup_time); + + CloseDBAndBackupEngine(); + + OpenBackupEngine(); + auto start_restore = db_chroot_env_->NowMicros(); + ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_)); + auto restore_time = db_chroot_env_->NowMicros() - start_restore; + CloseBackupEngine(); + auto rate_limited_restore_time = + (bytes_written * kMicrosPerSec) / limit.second; + ASSERT_GT(restore_time, 0.8 * rate_limited_restore_time); + + AssertBackupConsistency(0, 0, 100000, 100010); } TEST_F(BackupableDBTest, ReadOnlyBackupEngine) {