Drop the ThreadPool handle parameter from _scan_rollback_obs: it is
unnecessary, since the number of objects scanned at a time is already
hard-capped to the range [osd_scrub_chunk_min, osd_scrub_chunk_max].
Signed-off-by: xie xingguo <xie.xingguo@zte.com.cn>
}
}
-void PG::_scan_rollback_obs(
- const vector<ghobject_t> &rollback_obs,
- ThreadPool::TPHandle &handle)
+void PG::_scan_rollback_obs(const vector<ghobject_t> &rollback_obs)
{
ObjectStore::Transaction t;
eversion_t trimmed_to = last_rollback_info_trimmed_to_applied;
if (pos.ls.empty()) {
break;
}
- _scan_rollback_obs(rollback_obs, handle);
+ _scan_rollback_obs(rollback_obs);
pos.pos = 0;
return -EINPROGRESS;
}
void scrub_clear_state();
void _scan_snaps(ScrubMap &map);
void _repair_oinfo_oid(ScrubMap &map);
- void _scan_rollback_obs(
- const vector<ghobject_t> &rollback_obs,
- ThreadPool::TPHandle &handle);
+ void _scan_rollback_obs(const vector<ghobject_t> &rollback_obs);
void _request_scrub_map(pg_shard_t replica, eversion_t version,
hobject_t start, hobject_t end, bool deep,
bool allow_preemption);