}
}
-void PG::do_delete_work(ceph::os::Transaction &t)
+ghobject_t PG::do_delete_work(ceph::os::Transaction &t,
+ ghobject_t _next)
{
// TODO
shard_services.dec_pg_num();
void on_removal(ceph::os::Transaction &t) final {
// TODO
}
- void do_delete_work(ceph::os::Transaction &t) final;
+ ghobject_t do_delete_work(ceph::os::Transaction &t,
+ ghobject_t _next) final;
// merge/split not ready
void clear_ready_to_merge() final {}
delete this;
}
-void PG::do_delete_work(ObjectStore::Transaction &t)
+ghobject_t PG::do_delete_work(ObjectStore::Transaction &t,
+ ghobject_t _next)
{
dout(10) << __func__ << dendl;
osd->sleep_timer.add_event_at(delete_schedule_time,
delete_requeue_callback);
dout(20) << __func__ << " Delete scheduled at " << delete_schedule_time << dendl;
- return;
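+ // removal is being throttled; return the cursor untouched so the
+ // rescheduled DeleteSome pass resumes from the same offset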
+ return _next;
}
}
delete_needs_sleep = true;
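+ // cursor filled in by collection_list(); handed back to the caller
+ // so the next round of deletion can resume where this one stops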
+ ghobject_t next;
+
vector<ghobject_t> olist;
int max = std::min(osd->store->get_ideal_list_max(),
(int)cct->_conf->osd_target_transaction_size);
- ghobject_t next;
+
osd->store->collection_list(
ch,
- next,
+ _next,
ghobject_t::get_max(),
max,
&olist,
&next);
dout(20) << __func__ << " " << olist << dendl;
+ // make sure we've removed everything: if the listing that started
+ // at _next came back empty, do one more listing from the beginning
+ if (_next != ghobject_t() && olist.empty()) {
+ next = ghobject_t();
+ osd->store->collection_list(
+ ch,
+ next,
+ ghobject_t::get_max(),
+ max,
+ &olist,
+ &next);
+ if (!olist.empty()) {
+ dout(0) << __func__ << " additional unexpected onode list"
+ <<" (new onodes has appeared since PG removal started"
+ << olist << dendl;
+ }
+ }
+
OSDriver::OSTransaction _t(osdriver.get_transaction(&t));
int64_t num = 0;
for (auto& oid : olist) {
Context *fin = new C_DeleteMore(this, get_osdmap_epoch());
t.register_on_commit(fin);
} else {
- dout(20) << __func__ << " finished" << dendl;
if (cct->_conf->osd_inject_failure_on_pg_removal) {
_exit(1);
}
osd->logger->dec(l_osd_pg_removing);
}
}
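+ // report where to resume on the next DeleteSome event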
+ return next;
}
int PG::pg_stat_adjust(osd_stat_t *ns)
return std::make_unique<PG::PGLogEntryHandler>(this, &t);
}
- void do_delete_work(ObjectStore::Transaction &t) override;
+ ghobject_t do_delete_work(ObjectStore::Transaction &t,
+ ghobject_t _next) override;
void clear_ready_to_merge() override;
void set_not_ready_to_merge_target(pg_t pgid, pg_t src) override;
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/ToDelete/Deleting")
{
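+ // record when deletion started; exit() logs the total elapsed time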
+ start = ceph::mono_clock::now();
+
context< PeeringMachine >().log_enter(state_name);
+
DECLARE_LOCALS;
ps->deleting = true;
ObjectStore::Transaction &t = context<PeeringMachine>().get_cur_transaction();
const DeleteSome& evt)
{
DECLARE_LOCALS;
- pl->do_delete_work(context<PeeringMachine>().get_cur_transaction());
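+ // keep the returned cursor so the next DeleteSome event continues from it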
+ next = pl->do_delete_work(context<PeeringMachine>().get_cur_transaction(),
+ next);
return discard_event();
}
DECLARE_LOCALS;
ps->deleting = false;
pl->cancel_local_background_io_reservation();
+ psdout(20) << "Deleting::" << __func__ << this <<" finished in "
+ << ceph::mono_clock::now() - start
+ << dendl;
}
/*--------GetInfo---------*/
/// Notification of removal complete, t must be populated to complete removal
virtual void on_removal(ObjectStore::Transaction &t) = 0;
/// Perform incremental removal work
- virtual void do_delete_work(ObjectStore::Transaction &t) = 0;
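+ /// @param _next offset to resume the object listing from
+ /// @return offset at which the next invocation should resume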
+ virtual ghobject_t do_delete_work(ObjectStore::Transaction &t,
+ ghobject_t _next) = 0;
// ======================= PG Merge =========================
virtual void clear_ready_to_merge() = 0;
boost::statechart::custom_reaction< DeleteSome >,
boost::statechart::transition<DeleteInterrupted, WaitDeleteReserved>
> reactions;
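+ // cursor into the PG's object listing, carried between DeleteSome events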
+ ghobject_t next;
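+ // time the Deleting state was entered; used to log total deletion time on exit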
+ ceph::mono_clock::time_point start;
explicit Deleting(my_context ctx);
boost::statechart::result react(const DeleteSome &evt);
void exit();