psdout(10) << __func__ << " prior_readable_until_ub "
<< prior_readable_until_ub << " (mnow " << mnow << " + "
<< info.history.prior_readable_until_ub << ")" << dendl;
+ prior_readable_down_osds.clear(); // we populate this when we build the priorset
+
readable_until =
readable_until_ub =
readable_until_ub_sent =
ceph_assert(ps->blocked_by.empty());
prior_set = ps->build_prior();
+ ps->prior_readable_down_osds = prior_set.down;
ps->reset_min_peer_features();
get_infos();
if (old_start < ps->info.history.last_epoch_started) {
psdout(10) << " last_epoch_started moved forward, rebuilding prior" << dendl;
prior_set = ps->build_prior();
+ ps->prior_readable_down_osds = prior_set.down;
// filter out any osds that got dropped from the probe set from
// peer_info_requested. this is less expensive than restarting
out << " NOTIFY";
if (ps.prior_readable_until_ub != ceph::signedspan::zero()) {
- out << " pruub " << ps.prior_readable_until_ub;
+ out << " pruub " << ps.prior_readable_until_ub
+ << "@" << ps.get_prior_readable_down_osds();
}
return out;
}
/// upper bound on any acting OSDs' readable_until in this interval
ceph::signedspan readable_until_ub = ceph::signedspan::zero();
+
/// upper bound from prior interval(s)
ceph::signedspan prior_readable_until_ub = ceph::signedspan::zero();
+ /// osds that went down in prior interval(s) and may still host pg
+ /// instances that are readable; populated from prior_set.down when the
+ /// prior set is built, and cleared together with prior_readable_until_ub
+ set<int> prior_readable_down_osds;
+
/// [replica] upper bound we got from the primary (primary's clock)
ceph::signedspan readable_until_ub_from_primary = ceph::signedspan::zero();
return prior_readable_until_ub;
}
+ /// Get prior intervals' readable_until down OSDs of note
+ /// (the set is filled from prior_set.down when the prior set is built,
+ /// and emptied by clear_prior_readable_until_ub). Returns a non-owning
+ /// const reference; do not hold it across a rebuild of the prior set.
+ const set<int>& get_prior_readable_down_osds() const {
+ return prior_readable_down_osds;
+ }
+
/// Reset prior intervals' readable_until upper bound (e.g., bc it passed)
void clear_prior_readable_until_ub() {
prior_readable_until_ub = ceph::signedspan::zero();
+ prior_readable_down_osds.clear(); // keep the osd set in sync with the ub
}
void renew_lease(ceph::signedspan now) {