// Serialize pg_history_t.
// struct_v 10 appends prior_readable_until_ub; compat floor stays at 4.
// NOTE(review): this chunk appears to show only a subset of the encoded
// fields (extraction residue) — confirm the full field list against the
// complete file before relying on this ordering.
void pg_history_t::encode(ceph::buffer::list &bl) const
{
  ENCODE_START(10, 4, bl);  // v10 adds prior_readable_until_ub
  encode(epoch_created, bl);
  encode(last_epoch_started, bl);
  encode(last_epoch_clean, bl);
  encode(last_interval_started, bl);
  encode(last_interval_clean, bl);
  encode(epoch_pool_created, bl);
  // new in v10: upper bound on prior interval's readable window
  encode(prior_readable_until_ub, bl);
  ENCODE_FINISH(bl);
}
// Deserialize pg_history_t (legacy-compatible back to struct_v 4).
// NOTE(review): this block is unresolved diff/extraction residue — the
// leading '-'/'+' lines are patch markers, and the body is truncated:
// `if (struct_v >= 3)` has no body before `} else {`, so lines decoding
// intermediate fields are missing.  Reconstruct from the full file; do
// not compile as-is.
void pg_history_t::decode(ceph::buffer::list::const_iterator &bl)
{
- DECODE_START_LEGACY_COMPAT_LEN(9, 4, 4, bl);
+ DECODE_START_LEGACY_COMPAT_LEN(10, 4, 4, bl);  // v10 adds prior_readable_until_ub
decode(epoch_created, bl);
decode(last_epoch_started, bl);
if (struct_v >= 3)
} else {
epoch_pool_created = epoch_created;  // older encodings lacked epoch_pool_created; fall back
}
+ if (struct_v >= 10) {
+ decode(prior_readable_until_ub, bl);
+ }
DECODE_FINISH(bl);
}
// (tail of a dump(Formatter *f) body — the function header is outside this
// chunk; '+' prefixes below are unresolved patch markers)
f->dump_stream("last_deep_scrub") << last_deep_scrub;
f->dump_stream("last_deep_scrub_stamp") << last_deep_scrub_stamp;
f->dump_stream("last_clean_scrub_stamp") << last_clean_scrub_stamp;
// new v10 field, dumped as fractional seconds (duration<double> default
// period is seconds)
+ f->dump_float(
+ "prior_readable_until_ub",
+ std::chrono::duration<double>(prior_readable_until_ub).count());
}
// Populate sample instances for encode/decode round-trip unit tests.
// NOTE(review): body is truncated in this chunk (no braces visible);
// '+' prefix below is an unresolved patch marker.
void pg_history_t::generate_test_instances(list<pg_history_t*>& o)
o.back()->last_epoch_clean = 3;
o.back()->last_interval_clean = 2;
o.back()->last_epoch_split = 4;
// exercise the new v10 field with a non-zero value
+ o.back()->prior_readable_until_ub = make_timespan(3.1415);
o.back()->same_up_since = 5;
o.back()->same_interval_since = 6;
o.back()->same_primary_since = 7;
epoch_t last_interval_clean = 0;; // first epoch of last_epoch_clean interval
epoch_t last_epoch_split = 0;; // as parent or child
epoch_t last_epoch_marked_full = 0;; // pool or cluster
-
+
/**
* In the event of a map discontinuity, same_*_since may reflect the first
* map the osd has seen in the new map sequence rather than the actual start
utime_t last_deep_scrub_stamp;
utime_t last_clean_scrub_stamp;
+ /// upper bound on how long prior interval readable (relative to encode time)
+ ceph::timespan prior_readable_until_ub = ceph::timespan::zero();
+
/// Field-wise equality.  Includes the new v10 field prior_readable_until_ub
/// so it participates in comparisons.
/// NOTE(review): this chunk shows only a subset of the members — comparisons
/// appear elided between epoch_created and last_deep_scrub; the full file
/// should compare every field.
friend bool operator==(const pg_history_t& l, const pg_history_t& r) {
  return
    l.epoch_created == r.epoch_created &&
    l.last_deep_scrub == r.last_deep_scrub &&
    l.last_scrub_stamp == r.last_scrub_stamp &&
    l.last_deep_scrub_stamp == r.last_deep_scrub_stamp &&
    l.last_clean_scrub_stamp == r.last_clean_scrub_stamp &&
    l.prior_readable_until_ub == r.prior_readable_until_ub;
}
// Default ctor: all members carry in-class default initializers above.
pg_history_t() {}
// NOTE(review): brace below likely closes the enclosing struct — chunk
// boundary artifact; verify against the full file.
}
// (statement fragment of a history-merge routine — enclosing function is
// outside this chunk.  Unresolved '+' patch markers resolved; comment typo
// "than" -> "then" fixed.)
if (last_interval_started < other.last_interval_started) {
  last_interval_started = other.last_interval_started;
  // if we are learning about a newer *started* interval, our
  // readable_until_ub is obsolete
  prior_readable_until_ub = other.prior_readable_until_ub;
  modified = true;
} else if (other.last_interval_started == last_interval_started &&
           other.prior_readable_until_ub < prior_readable_until_ub) {
  // if other is the *same* interval, then pull our upper bound in
  // if they have a tighter bound.
  prior_readable_until_ub = other.prior_readable_until_ub;
  modified = true;
}
if (last_epoch_clean < other.last_epoch_clean) {
WRITE_CLASS_ENCODER(pg_history_t)
inline std::ostream& operator<<(std::ostream& out, const pg_history_t& h) {
- return out << "ec=" << h.epoch_created << "/" << h.epoch_pool_created
- << " lis/c=" << h.last_interval_started
- << "/" << h.last_interval_clean
- << " les/c/f=" << h.last_epoch_started << "/" << h.last_epoch_clean
- << "/" << h.last_epoch_marked_full
- << " sis=" << h.same_interval_since;
+ out << "ec=" << h.epoch_created << "/" << h.epoch_pool_created
+ << " lis/c=" << h.last_interval_started
+ << "/" << h.last_interval_clean
+ << " les/c/f=" << h.last_epoch_started << "/" << h.last_epoch_clean
+ << "/" << h.last_epoch_marked_full
+ << " sis=" << h.same_interval_since;
+ if (h.prior_readable_until_ub != ceph::timespan::zero()) {
+ out << " pruub=" << h.prior_readable_until_ub;
+ }
+ return out;
}