Fix delete_scheduler_test valgrind error

Summary: Use shared_ptr instead of deleting in destructor

Test Plan: DISABLE_JEMALLOC=1 make delete_scheduler_test -j64 && valgrind --error-exitcode=2 --leak-check=full ./delete_scheduler_test

Reviewers: yhchiang, sdong

Reviewed By: sdong

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D43659
main
Islam AbdelRahman 9 years ago
parent 6a4aaadcd7
commit 40f893f4a9
  1 changed file with 40 changed lines:
      util/delete_scheduler_test.cc

@@ -36,14 +36,10 @@ class DeleteSchedulerTest : public testing::Test {
rocksdb::SyncPoint::GetInstance()->LoadDependency({});
rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
DestroyDir(dummy_files_dir_);
if (delete_scheduler_ != nullptr) {
delete delete_scheduler_;
delete_scheduler_ = nullptr;
}
}
void WaitForEmptyTrash() {
reinterpret_cast<DeleteSchedulerImpl*>(delete_scheduler_)
reinterpret_cast<DeleteSchedulerImpl*>(delete_scheduler_.get())
->TEST_WaitForEmptyTrash();
}
@@ -88,7 +84,7 @@ class DeleteSchedulerTest : public testing::Test {
std::string dummy_files_dir_;
std::string trash_dir_;
int64_t rate_bytes_per_sec_;
DeleteScheduler* delete_scheduler_;
std::shared_ptr<DeleteScheduler> delete_scheduler_;
};
// Test the basic functionality of DeleteScheduler (Rate Limiting).
@@ -119,8 +115,8 @@ TEST_F(DeleteSchedulerTest, BasicRateLimiting) {
penalties.clear();
DestroyAndCreateDir(dummy_files_dir_);
rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
delete_scheduler_ =
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
delete_scheduler_.reset(
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Create 100 dummy files, every file is 1 Kb
std::vector<std::string> generated_files;
@@ -185,8 +181,8 @@ TEST_F(DeleteSchedulerTest, RateLimitingMultiThreaded) {
penalties.clear();
DestroyAndCreateDir(dummy_files_dir_);
rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
delete_scheduler_ =
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
delete_scheduler_.reset(
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Create 100 dummy files, every file is 1 Kb
std::vector<std::string> generated_files;
@@ -250,7 +246,7 @@ TEST_F(DeleteSchedulerTest, DisableRateLimiting) {
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
delete_scheduler_ = NewDeleteScheduler(env_, "", 0);
delete_scheduler_.reset(NewDeleteScheduler(env_, "", 0));
for (int i = 0; i < 10; i++) {
// Every file we delete will be deleted immediately
@@ -280,7 +276,8 @@ TEST_F(DeleteSchedulerTest, ConflictNames) {
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1024 * 1024; // 1 Mb/sec
delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
delete_scheduler_.reset(
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Create "conflict.data" and move it to trash 10 times
for (int i = 0; i < 10; i++) {
@@ -316,7 +313,8 @@ TEST_F(DeleteSchedulerTest, BackgroundError) {
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1024 * 1024; // 1 Mb/sec
delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
delete_scheduler_.reset(
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Generate 10 dummy files and move them to trash
for (int i = 0; i < 10; i++) {
@@ -358,8 +356,8 @@ TEST_F(DeleteSchedulerTest, TrashWithExistingFiles) {
Status s;
rate_bytes_per_sec_ = 1024 * 1024; // 1 Mb/sec
delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_,
nullptr, true, &s);
delete_scheduler_.reset(NewDeleteScheduler(
env_, trash_dir_, rate_bytes_per_sec_, nullptr, true, &s));
ASSERT_OK(s);
WaitForEmptyTrash();
@@ -382,7 +380,8 @@ TEST_F(DeleteSchedulerTest, StartBGEmptyTrashMultipleTimes) {
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1024 * 1024; // 1 MB / sec
delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
delete_scheduler_.reset(
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Move files to trash, wait for empty trash, start again
for (int run = 1; run <= 5; run++) {
@@ -417,7 +416,8 @@ TEST_F(DeleteSchedulerTest, DestructorWithNonEmptyQueue) {
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1; // 1 Byte / sec
delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
delete_scheduler_.reset(
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
for (int i = 0; i < 100; i++) {
std::string file_name = "data_" + ToString(i) + ".data";
@@ -426,8 +426,7 @@ TEST_F(DeleteSchedulerTest, DestructorWithNonEmptyQueue) {
// Deleting 100 files will need >28 hours to delete
// we will delete the DeleteScheduler while delete queue is not empty
delete delete_scheduler_;
delete_scheduler_ = nullptr;
delete_scheduler_.reset();
ASSERT_LT(bg_delete_file, 100);
ASSERT_GT(CountFilesInDir(trash_dir_), 0);
@@ -447,7 +446,8 @@ TEST_F(DeleteSchedulerTest, MoveToTrashError) {
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1024; // 1 Kb / sec
delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
delete_scheduler_.reset(
NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// We will delete the trash directory, that mean that DeleteScheduler wont
// be able to move files to trash and will delete files them immediately.

Loading…
Cancel
Save