* - waiting_for_active
*   - !is_active()
*   - only starts blocking on interval change; never restarts
+ * - waiting_for_readable
+ *   - now > readable_until
+ *   - unblocks when we get fresh(er) osd_pings
* - waiting_for_scrub
*   - starts and stops blocking for varying intervals during scrub
* - waiting_for_unreadable_object
// ops waiting on peered
list<OpRequestRef> waiting_for_peered;
+ /// ops waiting on readable
+ list<OpRequestRef> waiting_for_readable;
+
// ops waiting on active (require peered as well)
list<OpRequestRef> waiting_for_active;
list<OpRequestRef> waiting_for_flush;
bool is_recovering() const { return recovery_state.is_recovering(); }
bool is_premerge() const { return recovery_state.is_premerge(); }
bool is_repair() const { return recovery_state.is_repair(); }
+ bool is_laggy() const { return state_test(PG_STATE_LAGGY); }
bool is_empty() const { return recovery_state.is_empty(); }
maybe_kick_recovery(soid);
}
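+// If the PG's readable lease has expired (mnow > readable_until), mark the
+// PG laggy, queue the op on waiting_for_readable, and return false.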
+bool PrimaryLogPG::check_laggy(OpRequestRef& op)
+{
+ if (!state_test(PG_STATE_LAGGY)) {
+ auto mnow = osd->get_mnow();
+ auto ru = recovery_state.get_readable_until();
+ if (mnow <= ru) {
+ // not laggy
+ return true;
+ }
+ dout(10) << __func__
+ << " mnow " << mnow
+ << " > readable_until " << ru << dendl;
+
+ // go to laggy state
+ state_set(PG_STATE_LAGGY);
+ publish_stats_to_osd();
+ }
+ dout(10) << __func__ << " not readable" << dendl;
+ waiting_for_readable.push_back(op);
+ op->mark_delayed("waiting for readable");
+ return false;
+}
+
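+// Requeue op at the front of waiting_for_readable if the PG is still marked
+// laggy; returns false if the op was queued.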
+bool PrimaryLogPG::check_laggy_requeue(OpRequestRef& op)
+{
+ if (!state_test(PG_STATE_LAGGY)) {
+ return true; // not laggy
+ }
+ dout(10) << __func__ << " not readable" << dendl;
+ waiting_for_readable.push_front(op);
+ op->mark_delayed("waiting for readable");
+ return false;
+}
+
bool PrimaryLogPG::pgls_filter(const PGLSFilter& filter, const hobject_t& sobj)
{
bufferlist bl;
}
}
+ if (!check_laggy(op)) {
+ return;
+ }
+
if (!op_has_sufficient_caps(op)) {
osd->reply_op_error(op, -EPERM);
return;
op->mark_delayed("waiting for scrub");
return;
}
+ if (!check_laggy_requeue(op)) {
+ return;
+ }
// blocked on snap?
if (auto blocked_iter = objects_blocked_on_degraded_snap.find(head);
op->mark_delayed("waiting for scrub");
return cache_result_t::BLOCKED_RECOVERY;
}
+ if (!check_laggy_requeue(op)) {
+ return cache_result_t::BLOCKED_RECOVERY;
+ }
for (auto& p : obc->obs.oi.manifest.chunk_map) {
if (p.second.is_missing()) {
}
return;
}
+ if (op && !check_laggy_requeue(op)) {
+ return;
+ }
if (!obc) { // we need to create an ObjectContext
ceph_assert(missing_oid != hobject_t());
obc = get_object_context(missing_oid, true);
requeue_ops(waiting_for_peered);
requeue_ops(waiting_for_flush);
requeue_ops(waiting_for_active);
+ requeue_ops(waiting_for_readable);
clear_scrub_reserved();
requeue_op(op);
requeue_ops(waiting_for_flush);
requeue_ops(waiting_for_active);
+ requeue_ops(waiting_for_readable);
requeue_ops(waiting_for_scrub);
requeue_ops(waiting_for_cache_not_full);
objects_blocked_on_cache_full.clear();
p.second,
p.second.begin(),
p.second.end());
+ } else if (is_laggy()) {
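+ // lease has expired; hold these ops until the PG becomes readable again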
+ for (auto& op : p.second) {
+ op->mark_delayed("waiting for readable");
+ }
+ waiting_for_readable.splice(
+ waiting_for_readable.begin(),
+ p.second,
+ p.second.begin(),
+ p.second.end());
} else {
requeue_ops(p.second);
}
void wait_for_unreadable_object(const hobject_t& oid, OpRequestRef op);
void wait_for_all_missing(OpRequestRef op);
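+ /// queue op on waiting_for_readable and return false if the readable lease has expired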
+ bool check_laggy(OpRequestRef& op);
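+ /// requeue op at the front of waiting_for_readable and return false if still laggy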
+ bool check_laggy_requeue(OpRequestRef& op);
+
bool is_backfill_target(pg_shard_t osd) const {
return recovery_state.is_backfill_target(osd);
}