crimson::common::system_shutdown_exception());
}
+ logger().debug("{}: {}", __func__, *req);
if (can_discard_replica_op(*req)) {
return seastar::now();
}
auto p = req->logbl.cbegin();
std::vector<pg_log_entry_t> log_entries;
decode(log_entries, p);
- peering_state.append_log(std::move(log_entries), req->pg_trim_to,
- req->version, req->min_last_complete_ondisk, txn, !txn.empty(), false);
+ log_operation(std::move(log_entries),
+ req->pg_trim_to,
+ req->version,
+ req->min_last_complete_ondisk,
+ !txn.empty(),
+ txn,
+ false);
logger().debug("PG::handle_rep_op: do_transaction...");
return interruptor::make_interruptible(shard_services.get_store().do_transaction(
coll_ref, std::move(txn))).then_interruptible(
});
}
+// Append op log entries to the PG log via PeeringState, encoding the
+// change into txn. Consumes logv.
+//   trim_to                 - log trim bound (must not pass last_update_ondisk
+//                             on the primary, asserted below)
+//   roll_forward_to         - version to roll the log forward to
+//   min_last_complete_ondisk- lower bound forwarded to append_log
+//   transaction_applied     - whether txn has been applied to the store
+//                             (forwarded to append_log)
+//   async                   - forwarded to append_log
+void PG::log_operation(
+  std::vector<pg_log_entry_t>&& logv,
+  const eversion_t &trim_to,
+  const eversion_t &roll_forward_to,
+  const eversion_t &min_last_complete_ondisk,
+  bool transaction_applied,
+  ObjectStore::Transaction &txn,
+  bool async) {
+  logger().debug("{}", __func__);
+  if (is_primary()) {
+    ceph_assert(trim_to <= peering_state.get_last_update_ondisk());
+  }
+  /* TODO: when we add snap mapper and projected log support,
+   * we'll likely want to update them here.
+   *
+   * See src/osd/PrimaryLogPG.h:log_operation for how classic
+   * handles these cases.
+   */
+#if 0
+  if (transaction_applied) {
+    //TODO:
+    //update_snap_map(logv, t);
+  }
+  auto last = logv.rbegin();
+  if (is_primary() && last != logv.rend()) {
+    projected_log.skip_can_rollback_to_to_head();
+    projected_log.trim(cct, last->version, nullptr, nullptr, nullptr);
+  }
+#endif
+  if (!is_primary()) { // && !is_ec_pg()
+    // Replicas drop cached object contexts for the affected objects.
+    replica_clear_repop_obc(logv);
+  }
+  // Forward the caller-supplied flags instead of re-deriving them:
+  // previously transaction_applied/async were accepted but ignored and
+  // !txn.empty()/false were passed, which only coincidentally matched
+  // what the current call site supplies.
+  peering_state.append_log(std::move(logv),
+			   trim_to,
+			   roll_forward_to,
+			   min_last_complete_ondisk,
+			   txn,
+			   transaction_applied,
+			   async);
+}
+
+// Drop cached object contexts for every object touched by these log
+// entries. For each entry the whole range from the object boundary up to
+// the head is cleared, since clones share a snapset with the head.
+void PG::replica_clear_repop_obc(
+  const std::vector<pg_log_entry_t> &logv) {
+  logger().debug("{} clearing {} entries", __func__, logv.size());
+  for (const auto &entry : logv) {
+    const auto from = entry.soid.get_object_boundary();
+    const auto to = entry.soid.get_head();
+    logger().debug(" {} get_object_boundary(from): {} "
+		   " head version(to): {}",
+		   entry.soid, from, to);
+    /* Have to blast all clones, they share a snapset */
+    obc_registry.clear_range(from, to);
+  }
+}
+
void PG::handle_rep_op_reply(const MOSDRepOpReply& m)
{
if (!can_discard_replica_op(m)) {
with_obc_func_t&& f);
interruptible_future<> handle_rep_op(Ref<MOSDRepOp> m);
+ void log_operation(
+ std::vector<pg_log_entry_t>&& logv,
+ const eversion_t &trim_to,
+ const eversion_t &roll_forward_to,
+ const eversion_t &min_last_complete_ondisk,
+ bool transaction_applied,
+ ObjectStore::Transaction &txn,
+ bool async = false);
+ void replica_clear_repop_obc(
+ const std::vector<pg_log_entry_t> &logv);
void handle_rep_op_reply(const MOSDRepOpReply& m);
interruptible_future<> do_update_log_missing(Ref<MOSDPGUpdateLogMissing> m);
interruptible_future<> do_update_log_missing_reply(