return false;
}
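+// Decides whether the real transaction should be shipped to a replica.
+// A backfill target that has not backfilled hoid yet (hoid is past the
+// peer's last_backfill and past last_backfill_started) is sent an empty
+// transaction instead, so it still records the log entries for the update
+// without touching an object it does not have.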
+bool PG::should_send_op(
+  pg_shard_t peer,
+  const hobject_t &hoid) const
+{
+  if (peer == get_primary())
+    return true;
+  bool should_send =
+    (hoid.pool != (int64_t)get_info().pgid.pool() ||
+     (has_backfill_state() && hoid <= get_last_backfill_started()) ||
+     hoid <= peering_state.get_peer_info(peer).last_backfill);
+  if (!should_send) {
+    ceph_assert(is_backfill_target(peer));
+    logger().debug("{} issue_repop shipping empty opt to osd."
+                   "{}, object {} beyond std::max(last_backfill_started, "
+                   "peer_info[peer].last_backfill {})",
+                   __func__, peer, hoid,
+                   peering_state.get_peer_info(peer).last_backfill);
+  }
+  // TODO: consider async recovery cases in the future; they are not yet
+  // supported by crimson
+  return should_send;
+}
+
PG::interruptible_future<std::optional<PG::complete_op_t>>
PG::already_complete(const osd_reqid_t& reqid)
{
bool get_need_up_thru() const {
return peering_state.get_need_up_thru();
}
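+  // false if peer is a backfill target that has not backfilled hoid yet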
+  bool should_send_op(pg_shard_t peer, const hobject_t &hoid) const;
epoch_t get_same_interval_since() const {
return get_info().history.same_interval_since;
}
PeeringState& get_peering_state() final {
return peering_state;
}
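+  // backfill_state only exists once backfill has started; check
+  // has_backfill_state() before using the accessors below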
+  bool has_backfill_state() const {
+    return (bool)(recovery_handler->backfill_state);
+  }
+  const BackfillState& get_backfill_state() const {
+    return *recovery_handler->backfill_state;
+  }
+  hobject_t get_last_backfill_started() const {
+    return get_backfill_state().get_last_backfill_started();
+  }
bool has_reset_since(epoch_t epoch) const final {
return peering_state.pg_has_reset_since(epoch);
}
min_epoch,
tid,
osd_op_p.at_version);
-  m->set_data(encoded_txn);
+  if (pg.should_send_op(pg_shard, hoid)) {
+    m->set_data(encoded_txn);
+  } else {
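+    // the peer has not backfilled this object yet: ship an empty transaction
+    // so it still records the log entries for this update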
+    ceph::os::Transaction t;
+    bufferlist bl;
+    encode(t, bl);
+    m->set_data(bl);
+  }
pending_txn->second.acked_peers.push_back({pg_shard, eversion_t{}});
encode(log_entries, m->logbl);
m->pg_trim_to = osd_op_p.pg_trim_to;