psdout(10) << " got osd." << from << " " << oinfo << dendl;
ceph_assert(is_primary());
peer_info[from] = oinfo;
+
update_peer_info(from, oinfo);
might_have_unfound.insert(from);
&olog);
}
+ std::optional<bool> head_is_partial_write;
bool invalidate_stats = false;
// For partial writes we may be able to keep some of the divergent entries
auto p = pg_log.get_log().log.end();
while (p != pg_log.get_log().log.begin()) {
--p;
- if (p->version <= olog.head) {
+ if (p->version == olog.head) {
+ head_is_partial_write = !p->written_shards.empty();
+ break;
+ } else if (p->version <= olog.head) {
break;
}
}
}
rollbacker.get()->partial_write(&info, previous_version, *p);
olog.head = p->version;
+ head_is_partial_write = !p->written_shards.empty();
// Process the next entry
++p;
}
}
+
+ // Find the version we want to roll forwards to
+ // Iterate over all shards and see if any have a last_update equal to where we want to roll to
+ // Copy the stats for this shard into oinfo
+ // Set invalidate_stats to false again if we do copy these stats
+ // Verify that this reintroduces the bug (which is intended for stage 2)
+ if (invalidate_stats) {
+ for (const auto& [shard, my_info] : peer_info) {
+ if (invalidate_stats && my_info.stats.version == olog.head &&
+ (!pool.info.is_nonprimary_shard(shard.shard) ||
+ (head_is_partial_write.has_value() && !head_is_partial_write.value()))) {
+ oinfo.stats = my_info.stats;
+ invalidate_stats = false;
+ psdout(10) << "keeping stats for " << shard
+ << " (wanted last update: " << olog.head
+ << ", stats version: " << my_info.stats.version
+ << ", shard last update: " << my_info.last_update << ")."
+ << " Stats: ";
+
+ JSONFormatter f;
+ oinfo.stats.dump(&f);
+ f.flush(*_dout);
+
+ *_dout << dendl;
+ } else {
+ psdout(20) << "not using stats for " << shard
+ << " (wanted last update: " << olog.head
+ << ", stats version: " << my_info.stats.version
+ << ", shard last update: " << my_info.last_update << ")."
+ << " Stats: ";
+
+ JSONFormatter f;
+ my_info.stats.dump(&f);
+ f.flush(*_dout);
+
+ *_dout << dendl;
+ }
+ }
+ }
+
// merge log into our own log to build master log. no need to
// make any adjustments to their missing map; we are taking their
// log to be authoritative (i.e., their entries are by definition
// non-divergent).
merge_log(t, oinfo, std::move(olog), from);
if (info.last_backfill.is_max() &&
- pool.info.is_nonprimary_shard(from.shard)) {
+ (pool.info.is_nonprimary_shard(from.shard) &&
+ (!head_is_partial_write.has_value() || head_is_partial_write.value()))){
invalidate_stats = true;
}
+
info.stats.stats_invalid |= invalidate_stats;
increment_stats_invalidations_counter(invalidate_stats);
+ if (invalidate_stats)
+ {
+ psdout(10) << "invalidating stats for " << pg_whoami << dendl;
+ }
peer_info[from] = oinfo;
psdout(10) << " peer osd." << from << " now " << oinfo
<< " " << omissing << dendl;