rocksdb::SyncPoint::GetInstance()->LoadDependency({});
rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
DestroyDir(dummy_files_dir_);
- if (delete_scheduler_ != nullptr) {
- delete delete_scheduler_;
- delete_scheduler_ = nullptr;
- }
}
void WaitForEmptyTrash() {
- reinterpret_cast<DeleteSchedulerImpl*>(delete_scheduler_)
+ reinterpret_cast<DeleteSchedulerImpl*>(delete_scheduler_.get())
->TEST_WaitForEmptyTrash();
}
std::string dummy_files_dir_;
std::string trash_dir_;
int64_t rate_bytes_per_sec_;
- DeleteScheduler* delete_scheduler_;
+ std::shared_ptr<DeleteScheduler> delete_scheduler_;
};
// Test the basic functionality of DeleteScheduler (Rate Limiting).
penalties.clear();
DestroyAndCreateDir(dummy_files_dir_);
rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
- delete_scheduler_ =
- NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
+ delete_scheduler_.reset(
+ NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Create 100 dummy files, every file is 1 Kb
std::vector<std::string> generated_files;
penalties.clear();
DestroyAndCreateDir(dummy_files_dir_);
rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
- delete_scheduler_ =
- NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
+ delete_scheduler_.reset(
+ NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Create 100 dummy files, every file is 1 Kb
std::vector<std::string> generated_files;
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
- delete_scheduler_ = NewDeleteScheduler(env_, "", 0);
+ delete_scheduler_.reset(NewDeleteScheduler(env_, "", 0));
for (int i = 0; i < 10; i++) {
// Every file we delete will be deleted immediately
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1024 * 1024; // 1 Mb/sec
- delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
+ delete_scheduler_.reset(
+ NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Create "conflict.data" and move it to trash 10 times
for (int i = 0; i < 10; i++) {
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1024 * 1024; // 1 Mb/sec
- delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
+ delete_scheduler_.reset(
+ NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Generate 10 dummy files and move them to trash
for (int i = 0; i < 10; i++) {
Status s;
rate_bytes_per_sec_ = 1024 * 1024; // 1 Mb/sec
- delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_,
- nullptr, true, &s);
+ delete_scheduler_.reset(NewDeleteScheduler(
+ env_, trash_dir_, rate_bytes_per_sec_, nullptr, true, &s));
ASSERT_OK(s);
WaitForEmptyTrash();
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1024 * 1024; // 1 MB / sec
- delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
+ delete_scheduler_.reset(
+ NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// Move files to trash, wait for empty trash, start again
for (int run = 1; run <= 5; run++) {
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1; // 1 Byte / sec
- delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
+ delete_scheduler_.reset(
+ NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
for (int i = 0; i < 100; i++) {
std::string file_name = "data_" + ToString(i) + ".data";
// Deleting 100 files will need >28 hours to delete
// we will delete the DeleteScheduler while delete queue is not empty
- delete delete_scheduler_;
- delete_scheduler_ = nullptr;
+ delete_scheduler_.reset();
ASSERT_LT(bg_delete_file, 100);
ASSERT_GT(CountFilesInDir(trash_dir_), 0);
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rate_bytes_per_sec_ = 1024; // 1 Kb / sec
- delete_scheduler_ = NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_);
+ delete_scheduler_.reset(
+ NewDeleteScheduler(env_, trash_dir_, rate_bytes_per_sec_));
// We will delete the trash directory, which means that DeleteScheduler won't
// be able to move files to trash and will delete them immediately.