From: Yan, Zheng
Date: Thu, 16 Aug 2018 08:14:40 +0000 (+0800)
Subject: msg: define MFoo::create helper
X-Git-Tag: v14.0.1~575^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=c0d3e5b3da8ebb81266b6470d0c15c13dae8238c;p=ceph.git

msg: define MFoo::create helper

Signed-off-by: "Yan, Zheng"
---

diff --git a/src/client/Client.cc b/src/client/Client.cc
index dfbed39f01eb6..f9fa8bcdf23fa 100644
--- a/src/client/Client.cc
+++ b/src/client/Client.cc
@@ -2016,7 +2016,7 @@ MetaSession *Client::_open_mds_session(mds_rank_t mds)
     }
   }

-  auto m = MClientSession::factory::build(CEPH_SESSION_REQUEST_OPEN);
+  auto m = MClientSession::create(CEPH_SESSION_REQUEST_OPEN);
   m->metadata = metadata;
   m->supported_features = feature_bitset_t(CEPHFS_FEATURES_CLIENT_SUPPORTED);
   session->con->send_message2(m);
@@ -2027,7 +2027,7 @@ void Client::_close_mds_session(MetaSession *s)
 {
   ldout(cct, 2) << __func__ << " mds." << s->mds_num << " seq " << s->seq << dendl;
   s->state = MetaSession::STATE_CLOSING;
-  s->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_CLOSE, s->seq));
+  s->con->send_message2(MClientSession::create(CEPH_SESSION_REQUEST_CLOSE, s->seq));
 }

 void Client::_closed_mds_session(MetaSession *s)
@@ -2104,7 +2104,7 @@ void Client::handle_client_session(MClientSession *m)
     break;

   case CEPH_SESSION_FLUSHMSG:
-    session->con->send_message2(MClientSession::factory::build(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
+    session->con->send_message2(MClientSession::create(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
     break;

   case CEPH_SESSION_FORCE_RO:
@@ -2209,7 +2209,7 @@ void Client::send_request(MetaRequest *request, MetaSession *session,
 MClientRequest* Client::build_client_request(MetaRequest *request)
 {
-  auto req = MClientRequest::factory::build(request->get_op());
+  auto req = MClientRequest::create(request->get_op());
   req->set_tid(request->tid);
   req->set_stamp(request->op_stamp);
   memcpy(&req->head, &request->head, sizeof(ceph_mds_request_head));
@@ -2883,7 +2883,7 @@ void Client::got_mds_push(MetaSession *s)
   s->seq++;
   ldout(cct, 10) << " mds." << s->mds_num << " seq now " << s->seq << dendl;
   if (s->state == MetaSession::STATE_CLOSING) {
-    s->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_CLOSE, s->seq));
+    s->con->send_message2(MClientSession::create(CEPH_SESSION_REQUEST_CLOSE, s->seq));
   }
 }
@@ -2924,7 +2924,7 @@ void Client::handle_lease(MClientLease *m)

 revoke:
   {
-    auto reply = MClientLease::factory::build(CEPH_MDS_LEASE_RELEASE, seq, m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname);
+    auto reply = MClientLease::create(CEPH_MDS_LEASE_RELEASE, seq, m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname);
     m->get_connection()->send_message2(reply);
   }
   m->put();
@@ -3281,7 +3281,7 @@ void Client::send_cap(Inode *in, MetaSession *session, Cap *cap,
   if (flush)
     follows = in->snaprealm->get_snap_context().seq;

-  auto m = MClientCaps::factory::build(op,
+  auto m = MClientCaps::create(op,
       in->ino,
       0,
       cap->cap_id, cap->seq,
@@ -3644,7 +3644,7 @@ void Client::flush_snaps(Inode *in, bool all_again)
       session->flushing_caps_tids.insert(capsnap.flush_tid);
     }

-    auto m = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq,
+    auto m = MClientCaps::create(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq,
        cap_epoch_barrier);
     m->caller_uid = capsnap.cap_dirtier_uid;
     m->caller_gid = capsnap.cap_dirtier_gid;
@@ -5845,7 +5845,7 @@ void Client::flush_mdlog(MetaSession *session)
   // will crash if they see an unknown CEPH_SESSION_* value in this msg.
   const uint64_t features = session->con->get_features();
   if (HAVE_FEATURE(features, SERVER_LUMINOUS)) {
-    auto m = MClientSession::factory::build(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
+    auto m = MClientSession::create(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
     session->con->send_message2(m);
   }
 }
@@ -6113,7 +6113,7 @@ void Client::renew_caps(MetaSession *session)
   ldout(cct, 10) << "renew_caps mds." << session->mds_num << dendl;
   session->last_cap_renew_request = ceph_clock_now();
   uint64_t seq = ++session->cap_renew_seq;
-  session->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
+  session->con->send_message2(MClientSession::create(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
 }

diff --git a/src/mds/Beacon.cc b/src/mds/Beacon.cc
index 18562976b80a1..a7e0a9fc8fa7e 100644
--- a/src/mds/Beacon.cc
+++ b/src/mds/Beacon.cc
@@ -208,7 +208,7 @@ void Beacon::_send()

   assert(want_state != MDSMap::STATE_NULL);

-  auto beacon = MMDSBeacon::factory::build(
+  auto beacon = MMDSBeacon::create(
       monc->get_fsid(), mds_gid_t(monc->get_global_id()),
       name,
       epoch,

diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc
index 84eaa75311858..653e97df09df4 100644
--- a/src/mds/Locker.cc
+++ b/src/mds/Locker.cc
@@ -136,7 +136,7 @@ void Locker::send_lock_message(SimpleLock *lock, int msg)
     if (mds->is_cluster_degraded() &&
        mds->mdsmap->get_state(it.first) < MDSMap::STATE_REJOIN)
       continue;
-    auto m = MLock::factory::build(lock, msg, mds->get_nodeid());
+    auto m = MLock::create(lock, msg, mds->get_nodeid());
     mds->send_message_mds(m, it.first);
   }
 }
@@ -147,7 +147,7 @@ void Locker::send_lock_message(SimpleLock *lock, int msg, const bufferlist &data
     if (mds->is_cluster_degraded() &&
        mds->mdsmap->get_state(it.first) < MDSMap::STATE_REJOIN)
       continue;
-    auto m = MLock::factory::build(lock, msg, mds->get_nodeid());
+    auto m = MLock::create(lock, msg, mds->get_nodeid());
     m->set_data(data);
     mds->send_message_mds(m, it.first);
   }
@@ -449,7 +449,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
       return false;
     }

-    auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPIN);
+    auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPIN);
     for (set::iterator q = p->second.begin();
         q != p->second.end();
         ++q) {
@@ -731,7 +731,7 @@ void Locker::_drop_non_rdlocks(MutationImpl *mut, set *pneed_issue)
       if (!mds->is_cluster_degraded() ||
          mds->mdsmap->get_state(*p) >= MDSMap::STATE_REJOIN) {
        dout(10) << "_drop_non_rdlocks dropping remote locks on mds." << *p << dendl;
-       auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_DROPLOCKS);
+       auto slavereq = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_DROPLOCKS);
        mds->send_message_mds(slavereq, *p);
       }
     }
@@ -888,12 +888,12 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInt
        mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
      switch (lock->get_state()) {
      case LOCK_SYNC_LOCK:
-       mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_LOCKACK, mds->get_nodeid()), auth);
+       mds->send_message_mds(MLock::create(lock, LOCK_AC_LOCKACK, mds->get_nodeid()), auth);
        break;

      case LOCK_MIX_SYNC:
        {
-         auto reply = MLock::factory::build(lock, LOCK_AC_SYNCACK, mds->get_nodeid());
+         auto reply = MLock::create(lock, LOCK_AC_SYNCACK, mds->get_nodeid());
          lock->encode_locked_state(reply->get_data());
          mds->send_message_mds(reply, auth);
          next = LOCK_MIX_SYNC2;
@@ -911,7 +911,7 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInt
      case LOCK_SYNC_MIX:
        {
-         auto reply = MLock::factory::build(lock, LOCK_AC_MIXACK, mds->get_nodeid());
+         auto reply = MLock::create(lock, LOCK_AC_MIXACK, mds->get_nodeid());
          mds->send_message_mds(reply, auth);
          next = LOCK_SYNC_MIX2;
        }
@@ -921,7 +921,7 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInt
        {
          bufferlist data;
          lock->encode_locked_state(data);
-         mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_LOCKACK, mds->get_nodeid(), data), auth);
+         mds->send_message_mds(MLock::create(lock, LOCK_AC_LOCKACK, mds->get_nodeid(), data), auth);
          (static_cast(lock))->start_flush();
          // we'll get an AC_LOCKFLUSHED to complete
        }
@@ -1284,7 +1284,7 @@ bool Locker::_rdlock_kick(SimpleLock *lock, bool as_anon)
        mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
      dout(10) << "requesting rdlock from auth on "
               << *lock << " on " << *lock->get_parent() << dendl;
-     mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_REQRDLOCK, mds->get_nodeid()), auth);
+     mds->send_message_mds(MLock::create(lock, LOCK_AC_REQRDLOCK, mds->get_nodeid()), auth);
    }
    return false;
  }
@@ -1516,7 +1516,7 @@ bool Locker::wrlock_start(SimpleLock *lock, MDRequestRef& mut, bool nowait)
        mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
      dout(10) << "requesting scatter from auth on "
               << *lock << " on " << *lock->get_parent() << dendl;
-     mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_REQSCATTER, mds->get_nodeid()), auth);
+     mds->send_message_mds(MLock::create(lock, LOCK_AC_REQSCATTER, mds->get_nodeid()), auth);
    }
    break;
  }
@@ -1572,7 +1572,7 @@ void Locker::remote_wrlock_start(SimpleLock *lock, mds_rank_t target, MDRequestR
   // send lock request
   mut->start_locking(lock, target);
   mut->more()->slaves.insert(target);
-  auto r = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK);
+  auto r = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK);
   r->set_lock_type(lock->get_type());
   lock->get_parent()->set_object_info(r->get_object_info());
   mds->send_message_mds(r, target);
@@ -1593,7 +1593,7 @@ void Locker::remote_wrlock_finish(SimpleLock *lock, mds_rank_t target,
          << " " << *lock->get_parent() << dendl;
   if (!mds->is_cluster_degraded() ||
       mds->mdsmap->get_state(target) >= MDSMap::STATE_REJOIN) {
-    auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK);
+    auto slavereq = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK);
     slavereq->set_lock_type(lock->get_type());
     lock->get_parent()->set_object_info(slavereq->get_object_info());
     mds->send_message_mds(slavereq, target);
@@ -1674,7 +1674,7 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequestRef& mut)
     // send lock request
     mut->more()->slaves.insert(auth);
     mut->start_locking(lock, auth);
-    auto r = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK);
+    auto r = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK);
     r->set_lock_type(lock->get_type());
     lock->get_parent()->set_object_info(r->get_object_info());
     mds->send_message_mds(r, auth);
@@ -1740,7 +1740,7 @@ void Locker::xlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue
    mds_rank_t auth = lock->get_parent()->authority().first;
    if (!mds->is_cluster_degraded() ||
        mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
-     auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK);
+     auto slavereq = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK);
      slavereq->set_lock_type(lock->get_type());
      lock->get_parent()->set_object_info(slavereq->get_object_info());
      mds->send_message_mds(slavereq, auth);
@@ -2075,7 +2075,7 @@ bool Locker::issue_caps(CInode *in, Capability *only_cap)
       cap->reset_num_revoke_warnings();
     }

-    auto m = MClientCaps::factory::build(op, in->ino(),
+    auto m = MClientCaps::create(op, in->ino(),
        in->find_snaprealm()->inode->ino(),
        cap->get_cap_id(),
        cap->get_last_seq(),
@@ -2101,7 +2101,7 @@ void Locker::issue_truncate(CInode *in)
   for (auto &p : in->client_caps) {
     Capability *cap = &p.second;
-    auto m = MClientCaps::factory::build(CEPH_CAP_OP_TRUNC,
+    auto m = MClientCaps::create(CEPH_CAP_OP_TRUNC,
        in->ino(),
        in->find_snaprealm()->inode->ino(),
        cap->get_cap_id(), cap->get_last_seq(),
@@ -2241,7 +2241,7 @@ void Locker::request_inode_file_caps(CInode *in)

    if (!mds->is_cluster_degraded() ||
        mds->mdsmap->is_clientreplay_or_active_or_stopping(auth))
-     mds->send_message_mds(MInodeFileCaps::factory::build(in->ino(), in->replica_caps_wanted), auth);
+     mds->send_message_mds(MInodeFileCaps::create(in->ino(), in->replica_caps_wanted), auth);
  }
 }
@@ -2476,7 +2476,7 @@ void Locker::share_inode_max_size(CInode *in, Capability *only_cap)
    if (cap->pending() & (CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER)) {
      dout(10) << "share_inode_max_size with client." << client << dendl;
      cap->inc_last_seq();
-     auto m = MClientCaps::factory::build(CEPH_CAP_OP_GRANT,
+     auto m = MClientCaps::create(CEPH_CAP_OP_GRANT,
         in->ino(),
         in->find_snaprealm()->inode->ino(),
         cap->get_cap_id(),
@@ -2672,9 +2672,9 @@ void Locker::handle_client_caps(const MClientCaps::const_ref &m)
           << " for client." << client << dendl;
    MClientCaps::ref ack;
    if (op == CEPH_CAP_OP_FLUSHSNAP) {
-     ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
+     ack = MClientCaps::create(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
    } else {
-     ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
+     ack = MClientCaps::create(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
    }
    ack->set_snap_follows(follows);
    ack->set_client_tid(m->get_client_tid());
@@ -2796,7 +2796,7 @@ void Locker::handle_client_caps(const MClientCaps::const_ref &m)
      // case we get a dup response, so whatever.)
      MClientCaps::ref ack;
      if (dirty) {
-       ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP_ACK, in->ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
+       ack = MClientCaps::create(CEPH_CAP_OP_FLUSHSNAP_ACK, in->ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
        ack->set_snap_follows(follows);
        ack->set_client_tid(m->get_client_tid());
        ack->set_oldest_flush_tid(m->get_oldest_flush_tid());
@@ -2879,7 +2879,7 @@ void Locker::handle_client_caps(const MClientCaps::const_ref &m)
    if (dirty && in->is_auth()) {
      dout(7) << " flush client." << client << " dirty " << ccap_string(dirty)
              << " seq " << m->get_seq() << " on " << *in << dendl;
-     ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSH_ACK, in->ino(), 0, cap->get_cap_id(), m->get_seq(),
+     ack = MClientCaps::create(CEPH_CAP_OP_FLUSH_ACK, in->ino(), 0, cap->get_cap_id(), m->get_seq(),
         m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
      ack->set_client_tid(m->get_client_tid());
      ack->set_oldest_flush_tid(m->get_oldest_flush_tid());
@@ -3704,7 +3704,7 @@ void Locker::handle_client_lease(const MClientLease::const_ref &m)
    dout(7) << "handle_client_lease client." << client << " renew on " << *dn
            << (!dn->lock.can_lease(client)?", revoking lease":"") << dendl;
    if (dn->lock.can_lease(client)) {
-     auto reply = MClientLease::factory::build(*m);
+     auto reply = MClientLease::create(*m);
      int pool = 1;   // fixme.. do something smart!
      reply->h.duration_ms = (int)(1000 * mdcache->client_lease_durations[pool]);
      reply->h.seq = ++l->seq;
@@ -3775,7 +3775,7 @@ void Locker::revoke_client_leases(SimpleLock *lock)

      // i should also revoke the dir ICONTENT lease, if they have it!
      CInode *diri = dn->get_dir()->get_inode();
-     auto lease = MClientLease::factory::build(CEPH_MDS_LEASE_REVOKE, l->seq, mask, diri->ino(), diri->first, CEPH_NOSNAP, dn->get_name());
+     auto lease = MClientLease::create(CEPH_MDS_LEASE_REVOKE, l->seq, mask, diri->ino(), diri->first, CEPH_NOSNAP, dn->get_name());
      mds->send_message_client_counted(lease, l->client);
    }
  }
@@ -4646,7 +4646,7 @@ void Locker::scatter_nudge(ScatterLock *lock, MDSInternalContextBase *c, bool fo
      // request unscatter?
      mds_rank_t auth = lock->get_parent()->authority().first;
      if (!mds->is_cluster_degraded() ||
          mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
-       mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_NUDGE, mds->get_nodeid()), auth);
+       mds->send_message_mds(MLock::create(lock, LOCK_AC_NUDGE, mds->get_nodeid()), auth);
      }

      // wait...
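The create() helper itself is defined in the message headers, which are not part of this excerpt. Judging from the MFoo::ref usage and the factory::build call sites above, it is presumably a static variadic factory along these lines (a minimal sketch; the boost::intrusive_ptr ref typedef and the refcount hand-off are assumptions, not shown by this diff):

    #include <utility>
    #include <boost/intrusive_ptr.hpp>

    // Hypothetical sketch of the per-message helper this commit introduces.
    class MFoo : public Message {
    public:
      using ref = boost::intrusive_ptr<MFoo>;

      template<typename... Args>
      static ref create(Args&&... args) {
        // Take ownership of the constructor's initial reference
        // (add_ref = false) rather than bumping the refcount again.
        return ref(new MFoo(std::forward<Args>(args)...), false);
      }
    };

With such a helper in place, every call site shrinks from MFoo::factory::build(...) to MFoo::create(...), which is the entire mechanical change in the hunks above and below.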
diff --git a/src/mds/MDBalancer.cc b/src/mds/MDBalancer.cc
index 66ddbd69fe9c6..5a20b5449ca95 100644
--- a/src/mds/MDBalancer.cc
+++ b/src/mds/MDBalancer.cc
@@ -393,7 +393,7 @@ void MDBalancer::send_heartbeat()
   for (const auto& r : up) {
     if (r == mds->get_nodeid())
       continue;
-    auto hb = MHeartbeat::factory::build(load, beat_epoch);
+    auto hb = MHeartbeat::create(load, beat_epoch);
     hb->get_import_map() = import_map;
     mds->send_message_mds(hb, r);
   }

diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc
index d7a44f5f11f97..ff06e7a614809 100644
--- a/src/mds/MDCache.cc
+++ b/src/mds/MDCache.cc
@@ -2039,14 +2039,14 @@ update:
     cap->last_rsize = i->rstat.rsize();
     cap->last_rbytes = i->rstat.rbytes;

-    auto msg = MClientQuota::factory::build();
+    auto msg = MClientQuota::create();
     msg->ino = in->ino();
     msg->rstat = i->rstat;
     msg->quota = i->quota;
     mds->send_message_client_counted(msg, session->get_connection());
   }
   for (const auto &it : in->get_replicas()) {
-    auto msg = MGatherCaps::factory::build();
+    auto msg = MGatherCaps::create();
     msg->ino = in->ino();
     mds->send_message_mds(msg, it.first);
   }
@@ -2476,7 +2476,7 @@ void MDCache::_logged_slave_commit(mds_rank_t from, metareqid_t reqid)
   dout(10) << "_logged_slave_commit from mds." << from << " " << reqid << dendl;

   // send a message
-  auto req = MMDSSlaveRequest::factory::build(reqid, 0, MMDSSlaveRequest::OP_COMMITTED);
+  auto req = MMDSSlaveRequest::create(reqid, 0, MMDSSlaveRequest::OP_COMMITTED);
   mds->send_message_mds(req, from);
 }
@@ -2737,7 +2737,7 @@ void MDCache::send_slave_resolves()
     for (map >::iterator p = uncommitted_slave_updates.begin();
         p != uncommitted_slave_updates.end();
         ++p) {
-      resolves[p->first] = MMDSResolve::factory::build();
+      resolves[p->first] = MMDSResolve::create();
       for (map::iterator q = p->second.begin();
           q != p->second.end();
           ++q) {
@@ -2761,7 +2761,7 @@ void MDCache::send_slave_resolves()
       if (resolve_set.count(master) ||
          is_ambiguous_slave_update(p->first, master)) {
        dout(10) << " including uncommitted " << *mdr << dendl;
        if (!resolves.count(master))
-         resolves[master] = MMDSResolve::factory::build();
+         resolves[master] = MMDSResolve::create();
        if (!mdr->committing &&
            mdr->has_more() && mdr->more()->is_inode_exporter) {
          // re-send cap exports
@@ -2805,7 +2805,7 @@ void MDCache::send_subtree_resolves()
     if (*p == mds->get_nodeid())
       continue;
     if (mds->is_resolve() || mds->mdsmap->is_resolve(*p))
-      resolves[*p] = MMDSResolve::factory::build();
+      resolves[*p] = MMDSResolve::create();
   }

   map > my_subtrees;
@@ -3185,7 +3185,7 @@ void MDCache::handle_resolve(const MMDSResolve::const_ref &m)
     }
   }

-  auto ack = MMDSResolveAck::factory::build();
+  auto ack = MMDSResolveAck::create();
   for (const auto &p : m->slave_requests) {
     if (uncommitted_masters.count(p.first)) {  //mds->sessionmap.have_completed_request(p.first)) {
       // COMMIT
@@ -3990,9 +3990,9 @@ void MDCache::rejoin_send_rejoins()
     if (*p == mds->get_nodeid())
       continue;  // nothing to myself!
     if (rejoin_sent.count(*p))
       continue;  // already sent a rejoin to this node!
     if (mds->is_rejoin())
-      rejoins[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_WEAK);
+      rejoins[*p] = MMDSCacheRejoin::create(MMDSCacheRejoin::OP_WEAK);
     else if (mds->mdsmap->is_rejoin(*p))
-      rejoins[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_STRONG);
+      rejoins[*p] = MMDSCacheRejoin::create(MMDSCacheRejoin::OP_STRONG);
   }

   if (mds->is_rejoin()) {
@@ -4342,7 +4342,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak)
   if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) {
     survivor = true;
     dout(10) << "i am a surivivor, and will ack immediately" << dendl;
-    ack = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_ACK);
+    ack = MMDSCacheRejoin::create(MMDSCacheRejoin::OP_ACK);

     map > imported_caps;
@@ -5126,7 +5126,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack)
       }

       // mark client caps stale.
-      auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, p->first, 0,
+      auto m = MClientCaps::create(CEPH_CAP_OP_EXPORT, p->first, 0,
          r->second.capinfo.cap_id, 0,
          mds->get_osd_epoch_barrier());
       m->set_cap_peer(q->second.cap_id, q->second.issue_seq, q->second.mseq,
@@ -5564,7 +5564,7 @@ void MDCache::prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t i
     snap = it->second;
     snap->head.op = CEPH_SNAP_OP_SPLIT;
   } else {
-    snap = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
+    snap = MClientSnap::create(CEPH_SNAP_OP_SPLIT);
     splits.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple(snap));
     snap->head.split = realm->inode->ino();
     snap->bl = realm->get_snap_trace();
@@ -5596,7 +5596,7 @@ void MDCache::prepare_realm_merge(SnapRealm *realm, SnapRealm *parent_realm,
   assert(!p.second->empty());
   auto em = splits.emplace(std::piecewise_construct, std::forward_as_tuple(p.first), std::forward_as_tuple());
   if (em.second) {
-    auto update = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
+    auto update = MClientSnap::create(CEPH_SNAP_OP_SPLIT);
     update->head.split = parent_realm->inode->ino();
     update->split_inos = split_inos;
     update->split_realms = split_realms;
@@ -5712,7 +5712,7 @@ void MDCache::export_remaining_imported_caps()
       Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(q->first.v));
       if (session) {
        // mark client caps stale.
-       auto stale = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, p->first, 0, 0, 0, mds->get_osd_epoch_barrier());
+       auto stale = MClientCaps::create(CEPH_CAP_OP_EXPORT, p->first, 0, 0, 0, mds->get_osd_epoch_barrier());
        stale->set_cap_peer(0, 0, 0, -1, 0);
        mds->send_message_client_counted(stale, q->first);
       }
@@ -5787,7 +5787,7 @@ void MDCache::do_cap_import(Session *session, CInode *in, Capability *cap,
     cap->set_last_issue();
     cap->set_last_issue_stamp(ceph_clock_now());
     cap->clear_new();
-    auto reap = MClientCaps::factory::build(CEPH_CAP_OP_IMPORT, in->ino(), realm->inode->ino(), cap->get_cap_id(), cap->get_last_seq(), cap->pending(), cap->wanted(), 0, cap->get_mseq(), mds->get_osd_epoch_barrier());
+    auto reap = MClientCaps::create(CEPH_CAP_OP_IMPORT, in->ino(), realm->inode->ino(), cap->get_cap_id(), cap->get_last_seq(), cap->pending(), cap->wanted(), 0, cap->get_mseq(), mds->get_osd_epoch_barrier());
     in->encode_cap_message(reap, cap);
     reap->snapbl = realm->get_snap_trace();
     reap->set_cap_peer(p_cap_id, p_seq, p_mseq, peer, p_flags);
@@ -5983,7 +5983,7 @@ void MDCache::finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snap
   if (seq < realm->get_newest_seq()) {
     dout(10) << "finish_snaprealm_reconnect client." << client << " has old seq " << seq << " < "
             << realm->get_newest_seq() << " on " << *realm << dendl;
-    auto snap = MClientSnap::factory::build(CEPH_SNAP_OP_UPDATE);
+    auto snap = MClientSnap::create(CEPH_SNAP_OP_UPDATE);
     snap->bl = realm->get_snap_trace();
     for (const auto& child : realm->open_children)
       snap->split_realms.push_back(child->inode->ino());
@@ -6038,7 +6038,7 @@ void MDCache::rejoin_send_acks()
        ++p) {
     if (rejoin_ack_sent.count(*p))
       continue;
-    acks[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_ACK);
+    acks[*p] = MMDSCacheRejoin::create(MMDSCacheRejoin::OP_ACK);
   }

   rejoin_ack_sent = recovery_set;
@@ -6660,7 +6660,7 @@ bool MDCache::trim(uint64_t count)
       auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple());
       if (em.second) {
-       em.first->second = MCacheExpire::factory::build(mds->get_nodeid());
+       em.first->second = MCacheExpire::create(mds->get_nodeid());
       }
       dout(20) << __func__ << ": try expiring " << *mdsdir_in << " for stopping mds." << mds << dendl;
@@ -6785,7 +6785,7 @@ bool MDCache::trim_dentry(CDentry *dn, expiremap& expiremap)
     assert(a != mds->get_nodeid());
     auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
     if (em.second)
-      em.first->second = MCacheExpire::factory::build(mds->get_nodeid());
+      em.first->second = MCacheExpire::create(mds->get_nodeid());
     em.first->second->add_dentry(con->dirfrag(), dir->dirfrag(), dn->get_name(), dn->last, dn->get_replica_nonce());
   }
 }
@@ -6841,7 +6841,7 @@ void MDCache::trim_dirfrag(CDir *dir, CDir *con, expiremap& expiremap)
     assert(a != mds->get_nodeid());
     auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
     if (em.second)
-      em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */
+      em.first->second = MCacheExpire::create(mds->get_nodeid()); /* new */
     em.first->second->add_dir(condf, dir->dirfrag(), dir->replica_nonce);
   }
 }
@@ -6910,7 +6910,7 @@ bool MDCache::trim_inode(CDentry *dn, CInode *in, CDir *con, expiremap& expirema
       assert(a != mds->get_nodeid());
       auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
       if (em.second)
-       em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */
+       em.first->second = MCacheExpire::create(mds->get_nodeid()); /* new */
       em.first->second->add_inode(df, in->vino(), in->get_replica_nonce());
     }
   }
@@ -7279,7 +7279,7 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m)
       auto em = delayed_expire[parent_dir].emplace(std::piecewise_construct, std::forward_as_tuple(from), std::forward_as_tuple());
       if (em.second)
-       em.first->second = MCacheExpire::factory::build(from); /* new */
+       em.first->second = MCacheExpire::create(from); /* new */

       // merge these expires into it
       em.first->second->add_realm(p.first, p.second);
@@ -8769,7 +8769,7 @@ void MDCache::do_open_ino_peer(inodeno_t ino, open_ino_info_t& info)
     // got backtrace from peer or backtrace just fetched
     if (info.discover || !info.fetch_backtrace)
       pa = &info.ancestors;
-    mds->send_message_mds(MMDSOpenIno::factory::build(info.tid, ino, pa), peer);
+    mds->send_message_mds(MMDSOpenIno::create(info.tid, ino, pa), peer);
     if (mds->logger)
       mds->logger->inc(l_mds_openino_peer_discover);
   }
@@ -8789,7 +8789,7 @@ void MDCache::handle_open_ino(const MMDSOpenIno::const_ref &m, int err)
   CInode *in = get_inode(ino);
   if (in) {
     dout(10) << " have " << *in << dendl;
-    reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, mds_rank_t(0));
+    reply = MMDSOpenInoReply::create(m->get_tid(), ino, mds_rank_t(0));
     if (in->is_auth()) {
       touch_inode(in);
       while (1) {
@@ -8805,13 +8805,13 @@ void MDCache::handle_open_ino(const MMDSOpenIno::const_ref &m, int err)
       reply->hint = in->authority().first;
     }
   } else if (err < 0) {
-    reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, MDS_RANK_NONE, err);
+    reply = MMDSOpenInoReply::create(m->get_tid(), ino, MDS_RANK_NONE, err);
   } else {
     mds_rank_t hint = MDS_RANK_NONE;
     int ret = open_ino_traverse_dir(ino, m, m->ancestors, false, false, &hint);
     if (ret > 0)
       return;
-    reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, hint, ret);
+    reply = MMDSOpenInoReply::create(m->get_tid(), ino, hint, ret);
   }
   m->get_connection()->send_message2(reply); /* FIXME, why not send_client? */
 }
@@ -8985,7 +8985,7 @@ void MDCache::_do_find_ino_peer(find_ino_peer_info_t& fip)
     }
   } else {
     fip.checking = m;
-    mds->send_message_mds(MMDSFindIno::factory::build(fip.tid, fip.ino), m);
+    mds->send_message_mds(MMDSFindIno::create(fip.tid, fip.ino), m);
   }
 }
@@ -8996,7 +8996,7 @@ void MDCache::handle_find_ino(const MMDSFindIno::const_ref &m)
   }

   dout(10) << "handle_find_ino " << *m << dendl;
-  auto r = MMDSFindInoReply::factory::build(m->tid);
+  auto r = MMDSFindInoReply::create(m->tid);
   CInode *in = get_inode(m->ino);
   if (in) {
     in->make_path(r->path);
@@ -9270,7 +9270,7 @@ void MDCache::request_drop_foreign_locks(MDRequestRef& mdr)
   for (set::iterator p = mdr->more()->slaves.begin();
        p != mdr->more()->slaves.end();
        ++p) {
-    auto r = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt,
+    auto r = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt,
        MMDSSlaveRequest::OP_FINISH);

     if (mdr->killed && !mdr->committing) {
@@ -9450,7 +9450,7 @@ void MDCache::do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool
       auto em = updates.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple());
       if (em.second) {
-       auto update = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
+       auto update = MClientSnap::create(CEPH_SNAP_OP_SPLIT);
        update->head.split = in->ino();
        update->split_inos = split_inos;
        update->split_realms = split_realms;
@@ -9534,7 +9534,7 @@ void MDCache::send_snap_update(CInode *in, version_t stid, int snap_op)
     in->encode_snap(snap_blob);

   for (auto p : mds_set) {
-    auto m = MMDSSnapUpdate::factory::build(in->ino(), stid, snap_op);
+    auto m = MMDSSnapUpdate::create(in->ino(), stid, snap_op);
     m->snap_blob = snap_blob;
     mds->send_message_mds(m, p);
   }
@@ -9592,7 +9592,7 @@ void MDCache::notify_global_snaprealm_update(int snap_op)
   for (auto &session : sessions) {
     if (!session->is_open() && !session->is_stale())
       continue;
-    auto update = MClientSnap::factory::build(snap_op);
+    auto update = MClientSnap::create(snap_op);
     update->head.split = global_snaprealm->inode->ino();
     update->bl = global_snaprealm->get_snap_trace();
     mds->send_message_client_counted(update, session);
@@ -9668,7 +9668,7 @@ void MDCache::fetch_backtrace(inodeno_t ino, int64_t pool, bufferlist& bl, Conte

 void MDCache::_send_discover(discover_info_t& d)
 {
-  auto dis = MDiscover::factory::build(d.ino, d.frag, d.snap, d.want_path, d.want_base_dir, d.want_xlocked);
+  auto dis = MDiscover::create(d.ino, d.frag, d.snap, d.want_path, d.want_base_dir, d.want_xlocked);
   dis->set_tid(d.tid);
   mds->send_message_mds(dis, d.mds);
 }
@@ -9860,7 +9860,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis)

   CInode *cur = 0;
-  auto reply = MDiscoverReply::factory::build(*dis);
+  auto reply = MDiscoverReply::create(*dis);

   snapid_t snapid = dis->get_snapid();
@@ -10469,7 +10469,7 @@ int MDCache::send_dir_updates(CDir *dir, bool bcast)
     for (const auto &r : dir->dir_rep_by) {
       s.insert(r);
     }
-    mds->send_message_mds(MDirUpdate::factory::build(mds->get_nodeid(), dir->dirfrag(), dir->dir_rep, s, path, bcast), *it);
+    mds->send_message_mds(MDirUpdate::create(mds->get_nodeid(), dir->dirfrag(), dir->dir_rep, s, path, bcast), *it);
   }

   return 0;
@@ -10538,7 +10538,7 @@ void MDCache::send_dentry_link(CDentry *dn, MDRequestRef& mdr)
         rejoin_gather.count(p.first)))
       continue;
     CDentry::linkage_t *dnl = dn->get_linkage();
-    auto m = MDentryLink::factory::build(subtree->dirfrag(), dn->get_dir()->dirfrag(), dn->get_name(), dnl->is_primary());
+    auto m = MDentryLink::create(subtree->dirfrag(), dn->get_dir()->dirfrag(), dn->get_name(), dnl->is_primary());
     if (dnl->is_primary()) {
       dout(10) << "  primary " << *dnl->get_inode() << dendl;
       replicate_inode(dnl->get_inode(), p.first, m->bl,
@@ -10625,7 +10625,7 @@ void MDCache::send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& md
         rejoin_gather.count(*it)))
       continue;

-    auto unlink = MDentryUnlink::factory::build(dn->get_dir()->dirfrag(), dn->get_name());
+    auto unlink = MDentryUnlink::create(dn->get_dir()->dirfrag(), dn->get_name());
     if (straydn) {
       replicate_stray(straydn, *it, unlink->straybl);
       unlink->snapbl = snapbl;
@@ -11477,7 +11477,7 @@ void MDCache::_fragment_stored(MDRequestRef& mdr)
         rejoin_gather.count(p.first)))
       continue;

-    auto notify = MMDSFragmentNotify::factory::build(basedirfrag, info.bits);
+    auto notify = MMDSFragmentNotify::create(basedirfrag, info.bits);

     // freshly replicate new dirs to peers
     for (list::iterator q = info.resultfrags.begin();

diff --git a/src/mds/MDSDaemon.cc b/src/mds/MDSDaemon.cc
index 763b3b2d03308..390ac14817122 100644
--- a/src/mds/MDSDaemon.cc
+++ b/src/mds/MDSDaemon.cc
@@ -578,7 +578,7 @@ void MDSDaemon::send_command_reply(const MCommand::const_ref &m, MDSRank *mds_ra
   }
   priv.reset();

-  auto reply = MCommandReply::factory::build(r, outs);
+  auto reply = MCommandReply::create(r, outs);
   reply->set_tid(m->get_tid());
   reply->set_data(outbl);
   m->get_connection()->send_message2(reply);

diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc
index 5563d33245afb..8b4cd4d8e7bfa 100644
--- a/src/mds/MDSRank.cc
+++ b/src/mds/MDSRank.cc
@@ -218,7 +218,7 @@ void MDSRank::update_targets()
   if (send) {
     dout(15) << "updating export_targets, now " << new_map_targets.size()
             << " ranks are targets" << dendl;
-    auto m = MMDSLoadTargets::factory::build(mds_gid_t(monc->get_global_id()), new_map_targets);
+    auto m = MMDSLoadTargets::create(mds_gid_t(monc->get_global_id()), new_map_targets);
     monc->send_mon_message(m.detach());
   }
 }
@@ -940,7 +940,7 @@ void MDSRank::send_message_mds(const Message::ref& m, mds_rank_t mds)

   // send mdsmap first?
   if (mds != whoami && peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
-    auto _m = MMDSMap::factory::build(monc->get_fsid(), *mdsmap);
+    auto _m = MMDSMap::create(monc->get_fsid(), *mdsmap);
     messenger->send_to_mds(_m.detach(), mdsmap->get_addrs(mds));
     peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
   }
@@ -963,7 +963,7 @@ void MDSRank::forward_message_mds(const MClientRequest::const_ref& m, mds_rank_t
   bool client_must_resend = true;  //!creq->can_forward();

   // tell the client where it should go
-  auto f = MClientRequestForward::factory::build(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend);
+  auto f = MClientRequestForward::create(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend);
   messenger->send_message(f.detach(), m->get_source_inst());
 }
@@ -2983,7 +2983,7 @@ void MDSRank::bcast_mds_map()
   set clients;
   sessionmap.get_client_session_set(clients);
   for (const auto &session : clients) {
-    auto m = MMDSMap::factory::build(monc->get_fsid(), *mdsmap);
+    auto m = MMDSMap::create(monc->get_fsid(), *mdsmap);
     session->get_connection()->send_message2(std::move(m));
   }
   last_client_mdsmap_bcast = mdsmap->get_epoch();

diff --git a/src/mds/MDSTableClient.cc b/src/mds/MDSTableClient.cc
index 7381c87915846..e633ec346c62a 100644
--- a/src/mds/MDSTableClient.cc
+++ b/src/mds/MDSTableClient.cc
@@ -102,7 +102,7 @@ void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m)
       dout(10) << "stray agree on " << reqid << " tid " << tid
               << ", sending ROLLBACK" << dendl;
       assert(!server_ready);
-      auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ROLLBACK, 0, tid);
+      auto req = MMDSTableRequest::create(table, TABLESERVER_OP_ROLLBACK, 0, tid);
       mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
     }
     break;
@@ -174,7 +174,7 @@ void MDSTableClient::_prepare(bufferlist& mutation, version_t *ptid, bufferlist
   if (server_ready) {
     // send message
-    auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_PREPARE, reqid);
+    auto req = MMDSTableRequest::create(table, TABLESERVER_OP_PREPARE, reqid);
     req->bl = mutation;
     mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
   } else
@@ -198,7 +198,7 @@ void MDSTableClient::commit(version_t tid, LogSegment *ls)
   if (server_ready) {
     // send message
-    auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_COMMIT, 0, tid);
+    auto req = MMDSTableRequest::create(table, TABLESERVER_OP_COMMIT, 0, tid);
     mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
   } else
     dout(10) << "tableserver is not ready yet, deferring request" << dendl;
@@ -232,7 +232,7 @@ void MDSTableClient::resend_commits()
        p != pending_commit.end();
        ++p) {
     dout(10) << "resending commit on " << p->first << dendl;
-    auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_COMMIT, 0, p->first);
+    auto req = MMDSTableRequest::create(table, TABLESERVER_OP_COMMIT, 0, p->first);
     mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
   }
 }
@@ -248,7 +248,7 @@ void MDSTableClient::resend_prepares()
        p != pending_prepare.end();
        ++p) {
     dout(10) << "resending prepare on " << p->first << dendl;
-    auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_PREPARE, p->first);
+    auto req = MMDSTableRequest::create(table, TABLESERVER_OP_PREPARE, p->first);
     req->bl = p->second.mutation;
     mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
   }

diff --git a/src/mds/MDSTableServer.cc b/src/mds/MDSTableServer.cc
index 07aa20a213ea6..684801cd9b535 100644
--- a/src/mds/MDSTableServer.cc
+++ b/src/mds/MDSTableServer.cc
@@ -80,7 +80,7 @@ void MDSTableServer::_prepare_logged(const MMDSTableRequest::const_ref &req, ver
   _prepare(req->bl, req->reqid, from, out);
   assert(version == tid);

-  auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, req->reqid, tid);
+  auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_AGREE, req->reqid, tid);
   reply->bl = std::move(out);

   if (_notify_prep(tid)) {
@@ -153,7 +153,7 @@ void MDSTableServer::handle_commit(const MMDSTableRequest::const_ref &req)
   else if (tid <= version) {
     dout(0) << "got commit for tid " << tid << " <= " << version
            << ", already committed, sending ack." << dendl;
-    auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ACK, req->reqid, tid);
+    auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_ACK, req->reqid, tid);
     mds->send_message(reply, req->get_connection());
   }
   else {
@@ -176,7 +176,7 @@ void MDSTableServer::_commit_logged(const MMDSTableRequest::const_ref &req)
   _commit(tid, req);
   _note_commit(tid);
-  auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ACK, req->reqid, req->get_tid());
+  auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_ACK, req->reqid, req->get_tid());
   mds->send_message_mds(reply, mds_rank_t(req->get_source().num()));
 }
@@ -281,13 +281,13 @@ void MDSTableServer::_do_server_recovery()
       next_reqids[who] = p.second.reqid + 1;

     version_t tid = p.second.tid;
-    auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, p.second.reqid, tid);
+    auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_AGREE, p.second.reqid, tid);
     _get_reply_buffer(tid, &reply->bl);
     mds->send_message_mds(reply, who);
   }

   for (auto p : active_clients) {
-    auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_SERVER_READY, next_reqids[p]);
+    auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_SERVER_READY, next_reqids[p]);
     mds->send_message_mds(reply, p);
   }
   recovered = true;
@@ -331,12 +331,12 @@ void MDSTableServer::handle_mds_recovery(mds_rank_t who)
     if (p->second.reqid >= next_reqid)
       next_reqid = p->second.reqid + 1;

-    auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, p->second.reqid, p->second.tid);
+    auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_AGREE, p->second.reqid, p->second.tid);
     _get_reply_buffer(p->second.tid, &reply->bl);
     mds->send_message_mds(reply, who);
   }

-  auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_SERVER_READY, next_reqid);
+  auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_SERVER_READY, next_reqid);
   mds->send_message_mds(reply, who);
 }

diff --git a/src/mds/Migrator.cc b/src/mds/Migrator.cc
index d529645c3381d..4c93f5ce08f20 100644
--- a/src/mds/Migrator.cc
+++ b/src/mds/Migrator.cc
@@ -273,7 +273,7 @@ void Migrator::export_try_cancel(CDir *dir, bool notify_peer)
     if (notify_peer &&
        (!mds->is_cluster_degraded() ||
         mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
-      mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
+      mds->send_message_mds(MExportDirCancel::create(dir->dirfrag(), it->second.tid), it->second.peer);
     break;

   case EXPORT_FREEZING:
@@ -285,7 +285,7 @@ void Migrator::export_try_cancel(CDir *dir, bool notify_peer)
     if (notify_peer &&
        (!mds->is_cluster_degraded() ||
         mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
-      mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
+      mds->send_message_mds(MExportDirCancel::create(dir->dirfrag(), it->second.tid), it->second.peer);
     break;

   // NOTE: state order reversal, warning comes after prepping
@@ -327,7 +327,7 @@ void Migrator::export_try_cancel(CDir *dir, bool notify_peer)
     if (notify_peer &&
        (!mds->is_cluster_degraded() ||
         mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
-      mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
+      mds->send_message_mds(MExportDirCancel::create(dir->dirfrag(), it->second.tid), it->second.peer);
     break;

   case EXPORT_EXPORTING:
@@ -935,7 +935,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count)
   // send ExportDirDiscover (ask target)
   filepath path;
   dir->inode->make_path(path);
-  auto discover = MExportDirDiscover::factory::build(dir->dirfrag(), path, mds->get_nodeid(), it->second.tid);
+  auto discover = MExportDirDiscover::create(dir->dirfrag(), path, mds->get_nodeid(), it->second.tid);
   mds->send_message_mds(discover, dest);
   assert(g_conf()->mds_kill_export_at != 2);
@@ -1078,7 +1078,7 @@ void Migrator::export_frozen(CDir *dir, uint64_t tid)
   cache->get_subtree_bounds(dir, bounds);

   // generate prep message, log entry.
-  auto prep = MExportDirPrep::factory::build(dir->dirfrag(), it->second.tid);
+  auto prep = MExportDirPrep::create(dir->dirfrag(), it->second.tid);

   // include list of bystanders
   for (const auto &p : dir->get_replicas()) {
@@ -1311,7 +1311,7 @@ void Migrator::handle_export_prep_ack(const MExportDirPrepAck::const_ref &m)
     it->second.warning_ack_waiting.insert(p.first);
     it->second.notify_ack_waiting.insert(p.first);  // we'll eventually get a notifyack, too!

-    auto notify = MExportDirNotify::factory::build(dir->dirfrag(), it->second.tid, true,
+    auto notify = MExportDirNotify::create(dir->dirfrag(), it->second.tid, true,
        mds_authority_t(mds->get_nodeid(),CDIR_AUTH_UNKNOWN),
        mds_authority_t(mds->get_nodeid(),it->second.peer));
     for (auto &cdir : bounds) {
@@ -1384,7 +1384,7 @@ void Migrator::export_go_synced(CDir *dir, uint64_t tid)
   mds->balancer->subtract_export(dir);

   // fill export message with cache data
-  auto req = MExportDir::factory::build(dir->dirfrag(), it->second.tid);
+  auto req = MExportDir::create(dir->dirfrag(), it->second.tid);
   map exported_client_map;
   map exported_client_metadata_map;
   uint64_t num_exported_inodes = encode_export_dir(req->export_data,
@@ -1485,7 +1485,7 @@ void Migrator::finish_export_inode_caps(CInode *in, mds_rank_t peer,
     const Capability *cap = &p.second;
     dout(7) << "finish_export_inode_caps telling client." << p.first
            << " exported caps on " << *in << dendl;
-    auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, in->ino(), 0,
+    auto m = MClientCaps::create(CEPH_CAP_OP_EXPORT, in->ino(), 0,
        cap->get_cap_id(), cap->get_mseq(), mds->get_osd_epoch_barrier());

     map::iterator q = peer_imported.find(p.first);
@@ -1765,7 +1765,7 @@ void Migrator::export_notify_abort(CDir *dir, export_state_t& stat, set&
   for (set::iterator p = stat.notify_ack_waiting.begin();
        p != stat.notify_ack_waiting.end();
        ++p) {
-    auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true,
+    auto notify = MExportDirNotify::create(dir->dirfrag(), stat.tid, true,
        pair(mds->get_nodeid(), stat.peer),
        pair(mds->get_nodeid(), CDIR_AUTH_UNKNOWN));
     for (set::iterator i = bounds.begin(); i != bounds.end(); ++i)
@@ -1871,7 +1871,7 @@ void Migrator::export_logged_finish(CDir *dir)
   for (set::iterator p = stat.notify_ack_waiting.begin();
        p != stat.notify_ack_waiting.end();
        ++p) {
-    auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true,
+    auto notify = MExportDirNotify::create(dir->dirfrag(), stat.tid, true,
        pair(mds->get_nodeid(), stat.peer),
        pair(stat.peer, CDIR_AUTH_UNKNOWN));
@@ -1892,7 +1892,7 @@ void Migrator::export_logged_finish(CDir *dir)
     // notify peer to send cap import messages to clients
     if (!mds->is_cluster_degraded() ||
        mds->mdsmap->is_clientreplay_or_active_or_stopping(stat.peer)) {
-      mds->send_message_mds(MExportDirFinish::factory::build(dir->dirfrag(), false, stat.tid), stat.peer);
+      mds->send_message_mds(MExportDirFinish::create(dir->dirfrag(), false, stat.tid), stat.peer);
     } else {
       dout(7) << "not sending MExportDirFinish, dest has failed" << dendl;
     }
@@ -1975,7 +1975,7 @@ void Migrator::export_finish(CDir *dir)
   // send finish/commit to new auth
   if (!mds->is_cluster_degraded() ||
       mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer)) {
-    mds->send_message_mds(MExportDirFinish::factory::build(dir->dirfrag(), true, it->second.tid), it->second.peer);
+    mds->send_message_mds(MExportDirFinish::create(dir->dirfrag(), true, it->second.tid), it->second.peer);
   } else {
     dout(7) << "not sending MExportDirFinish last, dest has failed" << dendl;
   }
@@ -2091,7 +2091,7 @@ void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bo
   if (!mds->is_active()) {
     dout(7) << " not active, send NACK " << dendl;
-    mds->send_message_mds(MExportDirDiscoverAck::factory::build(df, m->get_tid(), false), from);
+    mds->send_message_mds(MExportDirDiscoverAck::create(df, m->get_tid(), false), from);
     return;
   }
@@ -2153,7 +2153,7 @@ void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bo
   // reply
   dout(7) << " sending export_discover_ack on " << *in << dendl;
-  mds->send_message_mds(MExportDirDiscoverAck::factory::build(df, m->get_tid()), p_state->peer);
+  mds->send_message_mds(MExportDirDiscoverAck::create(df, m->get_tid()), p_state->peer);
   assert (g_conf()->mds_kill_import_at != 2);
 }
@@ -2421,7 +2421,7 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a
   // ok!
dout(7) << " sending export_prep_ack on " << *dir << dendl; - mds->send_message(MExportDirPrepAck::factory::build(dir->dirfrag(), success, m->get_tid()), m->get_connection()); + mds->send_message(MExportDirPrepAck::create(dir->dirfrag(), success, m->get_tid()), m->get_connection()); assert(g_conf()->mds_kill_import_at != 4); } @@ -2721,7 +2721,7 @@ void Migrator::import_notify_finish(CDir *dir, set& bounds) for (set::iterator p = stat.bystanders.begin(); p != stat.bystanders.end(); ++p) { - auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, false, + auto notify = MExportDirNotify::create(dir->dirfrag(), stat.tid, false, pair(stat.peer, mds->get_nodeid()), pair(mds->get_nodeid(), CDIR_AUTH_UNKNOWN)); for (set::iterator i = bounds.begin(); i != bounds.end(); ++i) @@ -2743,7 +2743,7 @@ void Migrator::import_notify_abort(CDir *dir, set& bounds) stat.bystanders.erase(p++); continue; } - auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true, + auto notify = MExportDirNotify::create(dir->dirfrag(), stat.tid, true, mds_authority_t(stat.peer, mds->get_nodeid()), mds_authority_t(stat.peer, CDIR_AUTH_UNKNOWN)); for (set::iterator i = bounds.begin(); i != bounds.end(); ++i) @@ -2834,7 +2834,7 @@ void Migrator::import_logged_start(dirfrag_t df, CDir *dir, mds_rank_t from, // test surviving observer of a failed migration that did not complete //assert(dir->replica_map.size() < 2 || mds->get_nodeid() != 0); - auto ack = MExportDirAck::factory::build(dir->dirfrag(), it->second.tid); + auto ack = MExportDirAck::create(dir->dirfrag(), it->second.tid); encode(imported_caps, ack->imported_caps); mds->send_message_mds(ack, from); @@ -3262,7 +3262,7 @@ void Migrator::handle_export_notify(const MExportDirNotify::const_ref &m) // send ack if (m->wants_ack()) { - mds->send_message_mds(MExportDirNotifyAck::factory::build(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from); + mds->send_message_mds(MExportDirNotifyAck::create(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from); } else { // aborted. no ack. dout(7) << "handle_export_notify no ack requested" << dendl; @@ -3280,7 +3280,7 @@ void Migrator::export_caps(CInode *in) assert(!in->is_ambiguous_auth()); assert(!in->state_test(CInode::STATE_EXPORTINGCAPS)); - auto ex = MExportCaps::factory::build(); + auto ex = MExportCaps::create(); ex->ino = in->ino(); encode_export_inode_caps(in, false, ex->cap_bl, ex->client_map, ex->client_metadata_map); @@ -3311,7 +3311,7 @@ void Migrator::handle_export_caps_ack(const MExportCapsAck::const_ref &ack) dout(7) << __func__ << " telling client." 
<< it.first << " exported caps on " << *in << dendl; - auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, in->ino(), 0, + auto m = MClientCaps::create(CEPH_CAP_OP_EXPORT, in->ino(), 0, cap->get_cap_id(), cap->get_mseq(), mds->get_osd_epoch_barrier()); m->set_cap_peer(it.second.cap_id, it.second.issue_seq, it.second.mseq, from, 0); @@ -3411,7 +3411,7 @@ void Migrator::logged_import_caps(CInode *in, mds->locker->eval(in, CEPH_CAP_LOCKS, true); if (!imported_caps.empty()) { - auto ack = MExportCapsAck::factory::build(in->ino()); + auto ack = MExportCapsAck::create(in->ino()); map peer_caps_ids; for (auto &p : imported_caps ) peer_caps_ids[p.first] = it->second.at(p.first).cap_id; diff --git a/src/mds/Server.cc b/src/mds/Server.cc index 008a1ebacc87b..f1077a78ace9b 100644 --- a/src/mds/Server.cc +++ b/src/mds/Server.cc @@ -335,7 +335,7 @@ void Server::handle_client_session(const MClientSession::const_ref &m) { auto send_reject_message = [this, session](std::string_view err_str) { - auto m = MClientSession::factory::build(CEPH_SESSION_REJECT); + auto m = MClientSession::create(CEPH_SESSION_REJECT); if (session->info.has_feature(CEPHFS_FEATURE_MIMIC)) m->metadata["error_string"] = err_str; mds->send_message_client(m, session); @@ -427,7 +427,7 @@ void Server::handle_client_session(const MClientSession::const_ref &m) mds->locker->resume_stale_caps(session); mds->sessionmap.touch_session(session); } - m->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_RENEWCAPS, m->get_seq())); + m->get_connection()->send_message2(MClientSession::create(CEPH_SESSION_RENEWCAPS, m->get_seq())); } else { dout(10) << "ignoring renewcaps on non open|stale session (" << session->get_state_name() << ")" << dendl; } @@ -491,7 +491,7 @@ void Server::flush_client_sessions(set& client_set, MDSGatherBuilder& !session->get_connection()->has_feature(CEPH_FEATURE_EXPORT_PEER)) continue; version_t seq = session->wait_for_flush(gather.new_sub()); - mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FLUSHMSG, seq), session); + mds->send_message_client(MClientSession::create(CEPH_SESSION_FLUSHMSG, seq), session); } } @@ -528,12 +528,12 @@ void Server::_session_logged(Session *session, uint64_t state_seq, bool open, ve mds->sessionmap.set_state(session, Session::STATE_OPEN); mds->sessionmap.touch_session(session); assert(session->get_connection()); - auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN); + auto reply = MClientSession::create(CEPH_SESSION_OPEN); if (session->info.has_feature(CEPHFS_FEATURE_MIMIC)) reply->supported_features = supported_features; session->get_connection()->send_message2(reply); if (mdcache->is_readonly()) - session->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_FORCE_RO)); + session->get_connection()->send_message2(MClientSession::create(CEPH_SESSION_FORCE_RO)); } else if (session->is_closing() || session->is_killing()) { // kill any lingering capabilities, leases, requests @@ -573,7 +573,7 @@ void Server::_session_logged(Session *session, uint64_t state_seq, bool open, ve } // reset session - mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_CLOSE), session); + mds->send_message_client(MClientSession::create(CEPH_SESSION_CLOSE), session); mds->sessionmap.set_state(session, Session::STATE_CLOSED); session->clear(); mds->sessionmap.remove_session(session); @@ -670,13 +670,13 @@ void Server::finish_force_open_sessions(const mapsessionmap.set_state(session, Session::STATE_OPEN); 
        mds->sessionmap.touch_session(session);
-       auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN);
+       auto reply = MClientSession::create(CEPH_SESSION_OPEN);
        if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
          reply->supported_features = supported_features;
        mds->send_message_client(reply, session);

        if (mdcache->is_readonly())
-         mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FORCE_RO), session);
+         mds->send_message_client(MClientSession::create(CEPH_SESSION_FORCE_RO), session);
      }
    } else {
      dout(10) << "force_open_sessions skipping already-open " << session->info.inst << dendl;
@@ -751,7 +751,7 @@ void Server::find_idle_sessions()
     mds->sessionmap.set_state(session, Session::STATE_STALE);
     mds->locker->revoke_stale_caps(session);
     mds->locker->remove_stale_leases(session);
-    mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_STALE, session->get_push_seq()), session);
+    mds->send_message_client(MClientSession::create(CEPH_SESSION_STALE, session->get_push_seq()), session);
     finish_flush_session(session, session->get_push_seq());
   }
@@ -980,7 +980,7 @@ void Server::handle_client_reconnect(const MClientReconnect::const_ref &m)
   }

   if (deny) {
-    m->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_CLOSE));
+    m->get_connection()->send_message2(MClientSession::create(CEPH_SESSION_CLOSE));
     if (session->is_open())
       kill_session(session, nullptr);
     return;
   }
@@ -994,7 +994,7 @@ void Server::handle_client_reconnect(const MClientReconnect::const_ref &m)
   }

   // notify client of success with an OPEN
-  auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN);
+  auto reply = MClientSession::create(CEPH_SESSION_OPEN);
   if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
     reply->supported_features = supported_features;
   m->get_connection()->send_message2(reply);
@@ -1263,7 +1263,7 @@ void Server::recall_client_state(void)
     uint64_t newlim = std::max(std::min((session->caps.size() * ratio), max_caps_per_client), min_caps_per_client);
     if (session->caps.size() > newlim) {
-      auto m = MClientSession::factory::build(CEPH_SESSION_RECALL_STATE);
+      auto m = MClientSession::create(CEPH_SESSION_RECALL_STATE);
       m->head.max_caps = newlim;
       mds->send_message_client(m, session);
       session->notify_recall_sent(newlim);
@@ -1283,7 +1283,7 @@ void Server::force_clients_readonly()
     if (!session->info.inst.name.is_client() ||
        !(session->is_open() || session->is_stale()))
       continue;
-    mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FORCE_RO), session);
+    mds->send_message_client(MClientSession::create(CEPH_SESSION_FORCE_RO), session);
   }
 }
@@ -1339,7 +1339,7 @@ void Server::submit_mdlog_entry(LogEvent *le, MDSLogContextBase *fin, MDRequestR
 void Server::respond_to_request(MDRequestRef& mdr, int r)
 {
   if (mdr->client_request) {
-    reply_client_request(mdr, MClientReply::factory::build(*mdr->client_request, r));
+    reply_client_request(mdr, MClientReply::create(*mdr->client_request, r));
   } else if (mdr->internal_op > -1) {
     dout(10) << "respond_to_request on internal request " << mdr << dendl;
     if (!mdr->internal_op_finish)
@@ -1474,7 +1474,7 @@ void Server::early_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn)
   }

-  auto reply = MClientReply::factory::build(*req, 0);
+  auto reply = MClientReply::create(*req, 0);
   reply->set_unsafe();

   // mark xlocks "done", indicating that we are exposing uncommitted changes.
@@ -1760,7 +1760,7 @@ void Server::handle_client_request(const MClientRequest::const_ref &req)
          req->get_op() != CEPH_MDS_OP_OPEN &&
          req->get_op() != CEPH_MDS_OP_CREATE)) {
       dout(5) << "already completed " << req->get_reqid() << dendl;
-      auto reply = MClientReply::factory::build(*req, 0);
+      auto reply = MClientReply::create(*req, 0);
       if (created != inodeno_t()) {
        bufferlist extra;
        encode(created, extra);
@@ -2029,7 +2029,7 @@ void Server::handle_slave_request(const MMDSSlaveRequest::const_ref &m)
   // the purpose of rename notify is enforcing causal message ordering. making sure
   // bystanders have received all messages from rename srcdn's auth MDS.
   if (m->get_op() == MMDSSlaveRequest::OP_RENAMENOTIFY) {
-    auto reply = MMDSSlaveRequest::factory::build(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK);
+    auto reply = MMDSSlaveRequest::create(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK);
     mds->send_message(reply, m->get_connection());
     return;
   }
@@ -2254,7 +2254,7 @@ void Server::dispatch_slave_request(MDRequestRef& mdr)
        return;

       // ack
-      auto r = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, replycode);
+      auto r = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, replycode);
       r->set_lock_type(lock->get_type());
       lock->get_parent()->set_object_info(r->get_object_info());
       if (replycode == MMDSSlaveRequest::OP_XLOCKACK)
@@ -2420,7 +2420,7 @@ void Server::handle_slave_auth_pin(MDRequestRef& mdr)
   }

   // ack!
-  auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK);
+  auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK);

   // return list of my auth_pins (if any)
   for (set<MDSCacheObject*>::iterator p = mdr->auth_pins.begin();
@@ -4733,7 +4733,7 @@ void Server::create_quota_realm(CInode *in)
 {
   dout(10) << __func__ << " " << *in << dendl;

-  auto req = MClientRequest::factory::build(CEPH_MDS_OP_SETXATTR);
+  auto req = MClientRequest::create(CEPH_MDS_OP_SETXATTR);
   req->set_filepath(filepath(in->ino()));
   req->set_string2("ceph.quota");
   // empty vxattr value
@@ -5687,7 +5687,7 @@ void Server::_link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targ
       op = MMDSSlaveRequest::OP_LINKPREP;
     else
       op = MMDSSlaveRequest::OP_UNLINKPREP;
-    auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, op);
+    auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, op);
     targeti->set_object_info(req->get_object_info());
     req->op_stamp = mdr->get_op_stamp();
     if (auto& desti_srnode = mdr->more()->desti_srnode)
@@ -5935,7 +5935,7 @@ void Server::_logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_
   // ack
   if (!mdr->aborted) {
-    auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK);
+    auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK);
     mds->send_message_mds(reply, mdr->slave_to_mds);
   } else {
     dout(10) << " abort flag set, finishing" << dendl;
@@ -5980,7 +5980,7 @@ void Server::_committed_slave(MDRequestRef& mdr)
   assert(g_conf()->mds_kill_link_at != 8);

-  auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED);
+  auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED);
   mds->send_message_mds(req, mdr->slave_to_mds);
   mdcache->request_finish(mdr);
 }
@@ -6491,7 +6491,7 @@ bool Server::_rmdir_prepare_witness(MDRequestRef& mdr, mds_rank_t who, vector<CDentry*>& trace, CDentry *straydn)
-  auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt,
-                                              MMDSSlaveRequest::OP_RMDIRPREP);
+  auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP);
   req->srcdnpath = filepath(trace.front()->get_dir()->ino());
   for (auto dn : trace)
     req->srcdnpath.push_dentry(dn->get_name());
@@ -6640,7 +6640,7 @@ void Server::_logged_slave_rmdir(MDRequestRef& mdr, CDentry *dn, CDentry *strayd
   mdr->straydn = 0;

   if (!mdr->aborted) {
-    auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK);
+    auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK);
     if (!mdr->more()->slave_update_journaled)
       reply->mark_not_journaled();
     mds->send_message_mds(reply, mdr->slave_to_mds);
@@ -7412,7 +7412,7 @@ bool Server::_rename_prepare_witness(MDRequestRef& mdr, mds_rank_t who, set<mds_rank_t> &witnesse
-  auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP);
+  auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP);

   req->srcdnpath = filepath(srctrace.front()->get_dir()->ino());
   for (auto dn : srctrace)
@@ -8081,7 +8081,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
   if (mdr->slave_request->is_interrupted()) {
     dout(10) << " slave request interrupted, sending noop reply" << dendl;
-    auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+    auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
     reply->mark_interrupted();
     mds->send_message_mds(reply, mdr->slave_to_mds);
     mdr->slave_request = 0;
@@ -8185,7 +8185,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
          (mds->is_cluster_degraded() &&
           !mds->mdsmap->is_clientreplay_or_active_or_stopping(*p)))
        continue;
-      auto notify = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY);
+      auto notify = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY);
       mds->send_message_mds(notify, *p);
       mdr->more()->waiting_on_slave.insert(*p);
     }
@@ -8214,7 +8214,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
     if (reply_witness) {
       assert(!srcdnrep.empty());
-      auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+      auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
       reply->witnesses.swap(srcdnrep);
       mds->send_message_mds(reply, mdr->slave_to_mds);
       mdr->slave_request = 0;
@@ -8317,7 +8317,7 @@ void Server::_logged_slave_rename(MDRequestRef& mdr,
   // prepare ack
   MMDSSlaveRequest::ref reply;
   if (!mdr->aborted) {
-    reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+    reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
     if (!mdr->more()->slave_update_journaled)
       reply->mark_not_journaled();
   }
diff --git a/src/mds/SnapClient.cc b/src/mds/SnapClient.cc
index 928df8b8e88d8..d226ba3937d9a 100644
--- a/src/mds/SnapClient.cc
+++ b/src/mds/SnapClient.cc
@@ -115,7 +115,7 @@ void SnapClient::handle_notify_prep(const MMDSTableRequest::const_ref &m)
 {
   dout(10) << __func__ << " " << *m << dendl;
   handle_query_result(m);
-  auto ack = MMDSTableRequest::factory::build(table, TABLESERVER_OP_NOTIFY_ACK, 0, m->get_tid());
+  auto ack = MMDSTableRequest::create(table, TABLESERVER_OP_NOTIFY_ACK, 0, m->get_tid());
   mds->send_message(ack, m->get_connection());
 }
@@ -153,7 +153,7 @@ void SnapClient::refresh(version_t want, MDSInternalContextBase *onfinish)
     return;

   mds_rank_t ts = mds->mdsmap->get_tableserver();
-  auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_QUERY, ++last_reqid, 0);
+  auto req = MMDSTableRequest::create(table, TABLESERVER_OP_QUERY, ++last_reqid, 0);
   using ceph::encode;
   char op = 'F';
   encode(op, req->bl);
diff --git a/src/mds/SnapServer.cc b/src/mds/SnapServer.cc
index 67e08b2343168..7da7fbe0d7b52 100644
--- a/src/mds/SnapServer.cc
+++ b/src/mds/SnapServer.cc
@@ -262,7 +262,7 @@ bool SnapServer::_notify_prep(version_t tid)
   assert(version == tid);

   for (auto &p : active_clients) {
-    auto m = MMDSTableRequest::factory::build(table, TABLESERVER_OP_NOTIFY_PREP, 0, version);
+    auto m = MMDSTableRequest::create(table, TABLESERVER_OP_NOTIFY_PREP, 0, version);
     m->bl = bl;
     mds->send_message_mds(m, p);
   }
@@ -277,7 +277,7 @@ void SnapServer::handle_query(const MMDSTableRequest::const_ref &req)
   auto p = req->bl.cbegin();
   decode(op, p);

-  auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_QUERY_REPLY, req->reqid, version);
+  auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_QUERY_REPLY, req->reqid, version);

   switch (op) {
   case 'F': // full
@@ -349,7 +349,7 @@ void SnapServer::check_osd_map(bool force)

   if (!all_purge.empty()) {
     dout(10) << "requesting removal of " << all_purge << dendl;
-    auto m = MRemoveSnaps::factory::build(all_purge);
+    auto m = MRemoveSnaps::create(all_purge);
     mon_client->send_mon_message(m.detach());
   }
diff --git a/src/mds/StrayManager.cc b/src/mds/StrayManager.cc
index 053b72ef3e5d0..6b0bfe4648b99 100644
--- a/src/mds/StrayManager.cc
+++ b/src/mds/StrayManager.cc
@@ -663,7 +663,7 @@ void StrayManager::reintegrate_stray(CDentry *straydn, CDentry *rdn)
   filepath dst;
   rdn->make_path(dst);

-  auto req = MClientRequest::factory::build(CEPH_MDS_OP_RENAME);
+  auto req = MClientRequest::create(CEPH_MDS_OP_RENAME);
   req->set_filepath(dst);
   req->set_filepath2(src);
   req->set_tid(mds->issue_tid());
@@ -692,7 +692,7 @@ void StrayManager::migrate_stray(CDentry *dn, mds_rank_t to)
   dst.push_dentry(src[0]);
   dst.push_dentry(src[1]);

-  auto req = MClientRequest::factory::build(CEPH_MDS_OP_RENAME);
+  auto req = MClientRequest::create(CEPH_MDS_OP_RENAME);
   req->set_filepath(dst);
   req->set_filepath2(src);
   req->set_tid(mds->issue_tid());
diff --git a/src/mon/MDSMonitor.cc b/src/mon/MDSMonitor.cc
index df64b3a14059e..cd502e2dfb126 100644
--- a/src/mon/MDSMonitor.cc
+++ b/src/mon/MDSMonitor.cc
@@ -376,7 +376,7 @@ bool MDSMonitor::preprocess_beacon(MonOpRequestRef op)
       MDSMap null_map;
       null_map.epoch = fsmap.epoch;
       null_map.compat = fsmap.compat;
-      auto m = MMDSMap::factory::build(mon->monmap->fsid, null_map);
+      auto m = MMDSMap::create(mon->monmap->fsid, null_map);
       mon->send_reply(op, m.detach());
       return true;
     } else {
@@ -452,7 +452,7 @@ bool MDSMonitor::preprocess_beacon(MonOpRequestRef op)
     assert(effective_epoch > 0);
     _note_beacon(m);
     {
-      auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+      auto beacon = MMDSBeacon::create(mon->monmap->fsid,
          m->get_global_id(), m->get_name(), effective_epoch,
          state, seq, CEPH_FEATURES_SUPPORTED_DEFAULT);
       mon->send_reply(op, beacon.detach());
@@ -721,7 +721,7 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op)
       last_beacon.erase(gid);

       // Respond to MDS, so that it knows it can continue to shut down
-      auto beacon = MMDSBeacon::factory::build(
+      auto beacon = MMDSBeacon::create(
          mon->monmap->fsid, m->get_global_id(),
          m->get_name(), pending.get_epoch(), state, seq,
          CEPH_FEATURES_SUPPORTED_DEFAULT);
@@ -739,7 +739,7 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op)
       request_proposal(mon->osdmon());

       // Respond to MDS, so that it knows it can continue to shut down
-      auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+      auto beacon = MMDSBeacon::create(mon->monmap->fsid,
          m->get_global_id(), m->get_name(), pending.get_epoch(),
          state, seq, CEPH_FEATURES_SUPPORTED_DEFAULT);
       mon->send_reply(op, beacon.detach());
@@ -828,10 +828,10 @@ void MDSMonitor::_updated(MonOpRequestRef op)
     MDSMap null_map;
     null_map.epoch = fsmap.epoch;
     null_map.compat = fsmap.compat;
-    auto m = MMDSMap::factory::build(mon->monmap->fsid, null_map);
+    auto m = MMDSMap::create(mon->monmap->fsid, null_map);
     mon->send_reply(op, m.detach());
   } else {
-    auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+    auto beacon = MMDSBeacon::create(mon->monmap->fsid,
        m->get_global_id(), m->get_name(), fsmap.get_epoch(),
        m->get_state(), m->get_seq(), CEPH_FEATURES_SUPPORTED_DEFAULT);
     mon->send_reply(op, beacon.detach());
@@ -1570,7 +1570,7 @@ void MDSMonitor::check_sub(Subscription *sub)
   if (sub->next > mds_map->epoch) {
     return;
   }
-  auto msg = MMDSMap::factory::build(mon->monmap->fsid, *mds_map);
+  auto msg = MMDSMap::create(mon->monmap->fsid, *mds_map);
   sub->session->con->send_message(msg.detach());

   if (sub->onetime) {
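A side note on the monitor hunks above: send_message2() accepts the message ref directly, but mon->send_reply() and send_mon_message() still take ownership of a raw Message*, hence the m.detach() calls. Assuming the ref type is a boost::intrusive_ptr (as elsewhere in this series), detach() releases the smart pointer without touching the refcount, so exactly one reference is handed off to the legacy consumer, which put()s it when done. A self-contained sketch of that ownership transfer; Msg and legacy_send are hypothetical stand-ins, not Ceph types:

#include <boost/intrusive_ptr.hpp>
#include <cassert>

struct Msg {
  int nref = 0;
};
inline void intrusive_ptr_add_ref(Msg *m) { ++m->nref; }
inline void intrusive_ptr_release(Msg *m) { if (--m->nref == 0) delete m; }

// A legacy consumer that takes ownership of one reference and releases it,
// the way a raw-pointer send path would put() the message after queueing it.
void legacy_send(Msg *m)
{
  intrusive_ptr_release(m);
}

int main()
{
  boost::intrusive_ptr<Msg> ref(new Msg); // refcount is now 1
  legacy_send(ref.detach());              // hand that one reference over; ref is left empty
  assert(!ref);
  return 0;
}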
diff --git a/src/msg/Message.cc b/src/msg/Message.cc
index 60c93aaa5367c..27caab5e3ccfe 100644
--- a/src/msg/Message.cc
+++ b/src/msg/Message.cc
@@ -335,502 +335,502 @@ Message *decode_message(CephContext *cct, int crcflags,
   // -- with payload --
   case MSG_PGSTATS:
-    m = MPGStats::factory::build();
+    m = MPGStats::create();
     break;
   case MSG_PGSTATSACK:
-    m = MPGStatsAck::factory::build();
+    m = MPGStatsAck::create();
     break;
   case CEPH_MSG_STATFS:
-    m = MStatfs::factory::build();
+    m = MStatfs::create();
     break;
   case CEPH_MSG_STATFS_REPLY:
-    m = MStatfsReply::factory::build();
+    m = MStatfsReply::create();
     break;
   case MSG_GETPOOLSTATS:
-    m = MGetPoolStats::factory::build();
+    m = MGetPoolStats::create();
     break;
   case MSG_GETPOOLSTATSREPLY:
-    m = MGetPoolStatsReply::factory::build();
+    m = MGetPoolStatsReply::create();
     break;
   case CEPH_MSG_POOLOP:
-    m = MPoolOp::factory::build();
+    m = MPoolOp::create();
     break;
   case CEPH_MSG_POOLOP_REPLY:
-    m = MPoolOpReply::factory::build();
+    m = MPoolOpReply::create();
     break;
   case MSG_MON_COMMAND:
-    m = MMonCommand::factory::build();
+    m = MMonCommand::create();
     break;
   case MSG_MON_COMMAND_ACK:
-    m = MMonCommandAck::factory::build();
+    m = MMonCommandAck::create();
     break;
   case MSG_MON_PAXOS:
-    m = MMonPaxos::factory::build();
+    m = MMonPaxos::create();
     break;
   case MSG_CONFIG:
-    m = MConfig::factory::build();
+    m = MConfig::create();
     break;
   case MSG_GET_CONFIG:
-    m = MGetConfig::factory::build();
+    m = MGetConfig::create();
     break;
   case MSG_MON_PROBE:
-    m = MMonProbe::factory::build();
+    m = MMonProbe::create();
     break;
   case MSG_MON_JOIN:
-    m = MMonJoin::factory::build();
+    m = MMonJoin::create();
     break;
   case MSG_MON_ELECTION:
-    m = MMonElection::factory::build();
+    m = MMonElection::create();
     break;
   case MSG_MON_SYNC:
-    m = MMonSync::factory::build();
+    m = MMonSync::create();
     break;
   case MSG_MON_SCRUB:
-    m = MMonScrub::factory::build();
+    m = MMonScrub::create();
     break;
   case MSG_LOG:
-    m = MLog::factory::build();
+    m = MLog::create();
     break;
   case MSG_LOGACK:
-    m = MLogAck::factory::build();
+    m = MLogAck::create();
     break;
   case CEPH_MSG_PING:
-    m = MPing::factory::build();
+    m = MPing::create();
     break;
   case MSG_COMMAND:
-    m = MCommand::factory::build();
+    m = MCommand::create();
     break;
   case MSG_COMMAND_REPLY:
-    m = MCommandReply::factory::build();
+    m = MCommandReply::create();
     break;
   case MSG_OSD_BACKFILL_RESERVE:
-    m = MBackfillReserve::factory::build();
+    m = MBackfillReserve::create();
     break;
   case MSG_OSD_RECOVERY_RESERVE:
-    m = MRecoveryReserve::factory::build();
+    m = MRecoveryReserve::create();
     break;
   case MSG_OSD_FORCE_RECOVERY:
-    m = MOSDForceRecovery::factory::build();
+    m = MOSDForceRecovery::create();
     break;
   case MSG_ROUTE:
-    m = MRoute::factory::build();
+    m = MRoute::create();
     break;
   case MSG_FORWARD:
-    m = MForward::factory::build();
+    m = MForward::create();
     break;
   case CEPH_MSG_MON_MAP:
-    m = MMonMap::factory::build();
+    m = MMonMap::create();
     break;
   case CEPH_MSG_MON_GET_MAP:
-    m = MMonGetMap::factory::build();
+    m = MMonGetMap::create();
     break;
   case CEPH_MSG_MON_GET_OSDMAP:
-    m = MMonGetOSDMap::factory::build();
+    m = MMonGetOSDMap::create();
     break;
   case CEPH_MSG_MON_GET_VERSION:
-    m = MMonGetVersion::factory::build();
+    m = MMonGetVersion::create();
     break;
   case CEPH_MSG_MON_GET_VERSION_REPLY:
-    m = MMonGetVersionReply::factory::build();
+    m = MMonGetVersionReply::create();
     break;
   case CEPH_MSG_MON_METADATA:
-    m = MMonMetadata::factory::build();
+    m = MMonMetadata::create();
     break;
   case MSG_OSD_BOOT:
-    m = MOSDBoot::factory::build();
+    m = MOSDBoot::create();
     break;
   case MSG_OSD_ALIVE:
-    m = MOSDAlive::factory::build();
+    m = MOSDAlive::create();
     break;
   case MSG_OSD_BEACON:
-    m = MOSDBeacon::factory::build();
+    m = MOSDBeacon::create();
     break;
   case MSG_OSD_PGTEMP:
-    m = MOSDPGTemp::factory::build();
+    m = MOSDPGTemp::create();
     break;
   case MSG_OSD_FAILURE:
-    m = MOSDFailure::factory::build();
+    m = MOSDFailure::create();
     break;
   case MSG_OSD_MARK_ME_DOWN:
-    m = MOSDMarkMeDown::factory::build();
+    m = MOSDMarkMeDown::create();
     break;
   case MSG_OSD_FULL:
-    m = MOSDFull::factory::build();
+    m = MOSDFull::create();
     break;
   case MSG_OSD_PING:
-    m = MOSDPing::factory::build();
+    m = MOSDPing::create();
     break;
   case CEPH_MSG_OSD_OP:
-    m = MOSDOp::factory::build();
+    m = MOSDOp::create();
     break;
   case CEPH_MSG_OSD_OPREPLY:
-    m = MOSDOpReply::factory::build();
+    m = MOSDOpReply::create();
     break;
   case MSG_OSD_REPOP:
-    m = MOSDRepOp::factory::build();
+    m = MOSDRepOp::create();
     break;
   case MSG_OSD_REPOPREPLY:
-    m = MOSDRepOpReply::factory::build();
+    m = MOSDRepOpReply::create();
     break;
   case MSG_OSD_PG_CREATED:
-    m = MOSDPGCreated::factory::build();
+    m = MOSDPGCreated::create();
     break;
   case MSG_OSD_PG_UPDATE_LOG_MISSING:
-    m = MOSDPGUpdateLogMissing::factory::build();
+    m = MOSDPGUpdateLogMissing::create();
     break;
   case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
-    m = MOSDPGUpdateLogMissingReply::factory::build();
+    m = MOSDPGUpdateLogMissingReply::create();
     break;
   case CEPH_MSG_OSD_BACKOFF:
-    m = MOSDBackoff::factory::build();
+    m = MOSDBackoff::create();
     break;
   case CEPH_MSG_OSD_MAP:
-    m = MOSDMap::factory::build();
+    m = MOSDMap::create();
     break;
   case CEPH_MSG_WATCH_NOTIFY:
-    m = MWatchNotify::factory::build();
+    m = MWatchNotify::create();
     break;
   case MSG_OSD_PG_NOTIFY:
-    m = MOSDPGNotify::factory::build();
+    m = MOSDPGNotify::create();
     break;
   case MSG_OSD_PG_QUERY:
-    m = MOSDPGQuery::factory::build();
+    m = MOSDPGQuery::create();
     break;
   case MSG_OSD_PG_LOG:
-    m = MOSDPGLog::factory::build();
+    m = MOSDPGLog::create();
     break;
   case MSG_OSD_PG_REMOVE:
-    m = MOSDPGRemove::factory::build();
+    m = MOSDPGRemove::create();
     break;
   case MSG_OSD_PG_INFO:
-    m = MOSDPGInfo::factory::build();
+    m = MOSDPGInfo::create();
     break;
   case MSG_OSD_PG_CREATE:
-    m = MOSDPGCreate::factory::build();
+    m = MOSDPGCreate::create();
     break;
   case MSG_OSD_PG_CREATE2:
-    m = MOSDPGCreate2::factory::build();
+    m = MOSDPGCreate2::create();
     break;
   case MSG_OSD_PG_TRIM:
-    m = MOSDPGTrim::factory::build();
+    m = MOSDPGTrim::create();
     break;
   case MSG_OSD_SCRUB:
-    m = MOSDScrub::factory::build();
+    m = MOSDScrub::create();
     break;
   case MSG_OSD_SCRUB2:
-    m = MOSDScrub2::factory::build();
+    m = MOSDScrub2::create();
     break;
   case MSG_OSD_SCRUB_RESERVE:
-    m = MOSDScrubReserve::factory::build();
+    m = MOSDScrubReserve::create();
     break;
   case MSG_REMOVE_SNAPS:
-    m = MRemoveSnaps::factory::build();
+    m = MRemoveSnaps::create();
     break;
   case MSG_OSD_REP_SCRUB:
-    m = MOSDRepScrub::factory::build();
+    m = MOSDRepScrub::create();
     break;
   case MSG_OSD_REP_SCRUBMAP:
-    m = MOSDRepScrubMap::factory::build();
+    m = MOSDRepScrubMap::create();
     break;
   case MSG_OSD_PG_SCAN:
-    m = MOSDPGScan::factory::build();
+    m = MOSDPGScan::create();
     break;
   case MSG_OSD_PG_BACKFILL:
-    m = MOSDPGBackfill::factory::build();
+    m = MOSDPGBackfill::create();
     break;
   case MSG_OSD_PG_BACKFILL_REMOVE:
-    m = MOSDPGBackfillRemove::factory::build();
+    m = MOSDPGBackfillRemove::create();
     break;
   case MSG_OSD_PG_PUSH:
-    m = MOSDPGPush::factory::build();
+    m = MOSDPGPush::create();
     break;
   case MSG_OSD_PG_PULL:
-    m = MOSDPGPull::factory::build();
+    m = MOSDPGPull::create();
     break;
   case MSG_OSD_PG_PUSH_REPLY:
-    m = MOSDPGPushReply::factory::build();
+    m = MOSDPGPushReply::create();
     break;
   case MSG_OSD_PG_RECOVERY_DELETE:
-    m = MOSDPGRecoveryDelete::factory::build();
+    m = MOSDPGRecoveryDelete::create();
     break;
   case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
-    m = MOSDPGRecoveryDeleteReply::factory::build();
+    m = MOSDPGRecoveryDeleteReply::create();
     break;
   case MSG_OSD_EC_WRITE:
-    m = MOSDECSubOpWrite::factory::build();
+    m = MOSDECSubOpWrite::create();
     break;
   case MSG_OSD_EC_WRITE_REPLY:
-    m = MOSDECSubOpWriteReply::factory::build();
+    m = MOSDECSubOpWriteReply::create();
     break;
   case MSG_OSD_EC_READ:
-    m = MOSDECSubOpRead::factory::build();
+    m = MOSDECSubOpRead::create();
     break;
   case MSG_OSD_EC_READ_REPLY:
-    m = MOSDECSubOpReadReply::factory::build();
+    m = MOSDECSubOpReadReply::create();
     break;
   // auth
   case CEPH_MSG_AUTH:
-    m = MAuth::factory::build();
+    m = MAuth::create();
     break;
   case CEPH_MSG_AUTH_REPLY:
-    m = MAuthReply::factory::build();
+    m = MAuthReply::create();
     break;
   case MSG_MON_GLOBAL_ID:
-    m = MMonGlobalID::factory::build();
+    m = MMonGlobalID::create();
     break;
   // clients
   case CEPH_MSG_MON_SUBSCRIBE:
-    m = MMonSubscribe::factory::build();
+    m = MMonSubscribe::create();
     break;
   case CEPH_MSG_MON_SUBSCRIBE_ACK:
-    m = MMonSubscribeAck::factory::build();
+    m = MMonSubscribeAck::create();
     break;
   case CEPH_MSG_CLIENT_SESSION:
-    m = MClientSession::factory::build();
+    m = MClientSession::create();
     break;
   case CEPH_MSG_CLIENT_RECONNECT:
-    m = MClientReconnect::factory::build();
+    m = MClientReconnect::create();
     break;
   case CEPH_MSG_CLIENT_REQUEST:
-    m = MClientRequest::factory::build();
+    m = MClientRequest::create();
     break;
   case CEPH_MSG_CLIENT_REQUEST_FORWARD:
-    m = MClientRequestForward::factory::build();
+    m = MClientRequestForward::create();
     break;
   case CEPH_MSG_CLIENT_REPLY:
-    m = MClientReply::factory::build();
+    m = MClientReply::create();
     break;
   case CEPH_MSG_CLIENT_CAPS:
-    m = MClientCaps::factory::build();
+    m = MClientCaps::create();
     break;
   case CEPH_MSG_CLIENT_CAPRELEASE:
-    m = MClientCapRelease::factory::build();
+    m = MClientCapRelease::create();
     break;
   case CEPH_MSG_CLIENT_LEASE:
-    m = MClientLease::factory::build();
+    m = MClientLease::create();
     break;
   case CEPH_MSG_CLIENT_SNAP:
-    m = MClientSnap::factory::build();
+    m = MClientSnap::create();
     break;
   case CEPH_MSG_CLIENT_QUOTA:
-    m = MClientQuota::factory::build();
+    m = MClientQuota::create();
     break;
   // mds
   case MSG_MDS_SLAVE_REQUEST:
-    m = MMDSSlaveRequest::factory::build();
+    m = MMDSSlaveRequest::create();
     break;
   case CEPH_MSG_MDS_MAP:
-    m = MMDSMap::factory::build();
+    m = MMDSMap::create();
     break;
   case CEPH_MSG_FS_MAP:
-    m = MFSMap::factory::build();
+    m = MFSMap::create();
     break;
   case CEPH_MSG_FS_MAP_USER:
-    m = MFSMapUser::factory::build();
+    m = MFSMapUser::create();
     break;
   case MSG_MDS_BEACON:
-    m = MMDSBeacon::factory::build();
+    m = MMDSBeacon::create();
     break;
   case MSG_MDS_OFFLOAD_TARGETS:
-    m = MMDSLoadTargets::factory::build();
+    m = MMDSLoadTargets::create();
     break;
   case MSG_MDS_RESOLVE:
-    m = MMDSResolve::factory::build();
+    m = MMDSResolve::create();
     break;
   case MSG_MDS_RESOLVEACK:
-    m = MMDSResolveAck::factory::build();
+    m = MMDSResolveAck::create();
     break;
   case MSG_MDS_CACHEREJOIN:
-    m = MMDSCacheRejoin::factory::build();
+    m = MMDSCacheRejoin::create();
     break;
   case MSG_MDS_DIRUPDATE:
-    m = MDirUpdate::factory::build();
+    m = MDirUpdate::create();
     break;
   case MSG_MDS_DISCOVER:
-    m = MDiscover::factory::build();
+    m = MDiscover::create();
     break;
   case MSG_MDS_DISCOVERREPLY:
-    m = MDiscoverReply::factory::build();
+    m = MDiscoverReply::create();
     break;
   case MSG_MDS_FINDINO:
-    m = MMDSFindIno::factory::build();
+    m = MMDSFindIno::create();
     break;
   case MSG_MDS_FINDINOREPLY:
-    m = MMDSFindInoReply::factory::build();
+    m = MMDSFindInoReply::create();
     break;
   case MSG_MDS_OPENINO:
-    m = MMDSOpenIno::factory::build();
+    m = MMDSOpenIno::create();
     break;
   case MSG_MDS_OPENINOREPLY:
-    m = MMDSOpenInoReply::factory::build();
+    m = MMDSOpenInoReply::create();
     break;
   case MSG_MDS_SNAPUPDATE:
-    m = MMDSSnapUpdate::factory::build();
+    m = MMDSSnapUpdate::create();
     break;
   case MSG_MDS_FRAGMENTNOTIFY:
-    m = MMDSFragmentNotify::factory::build();
+    m = MMDSFragmentNotify::create();
     break;
   case MSG_MDS_EXPORTDIRDISCOVER:
-    m = MExportDirDiscover::factory::build();
+    m = MExportDirDiscover::create();
     break;
   case MSG_MDS_EXPORTDIRDISCOVERACK:
-    m = MExportDirDiscoverAck::factory::build();
+    m = MExportDirDiscoverAck::create();
     break;
   case MSG_MDS_EXPORTDIRCANCEL:
-    m = MExportDirCancel::factory::build();
+    m = MExportDirCancel::create();
     break;
   case MSG_MDS_EXPORTDIR:
-    m = MExportDir::factory::build();
+    m = MExportDir::create();
     break;
   case MSG_MDS_EXPORTDIRACK:
-    m = MExportDirAck::factory::build();
+    m = MExportDirAck::create();
     break;
   case MSG_MDS_EXPORTDIRFINISH:
-    m = MExportDirFinish::factory::build();
+    m = MExportDirFinish::create();
     break;
   case MSG_MDS_EXPORTDIRNOTIFY:
-    m = MExportDirNotify::factory::build();
+    m = MExportDirNotify::create();
     break;
   case MSG_MDS_EXPORTDIRNOTIFYACK:
-    m = MExportDirNotifyAck::factory::build();
+    m = MExportDirNotifyAck::create();
     break;
   case MSG_MDS_EXPORTDIRPREP:
-    m = MExportDirPrep::factory::build();
+    m = MExportDirPrep::create();
     break;
   case MSG_MDS_EXPORTDIRPREPACK:
-    m = MExportDirPrepAck::factory::build();
+    m = MExportDirPrepAck::create();
     break;
   case MSG_MDS_EXPORTCAPS:
-    m = MExportCaps::factory::build();
+    m = MExportCaps::create();
     break;
   case MSG_MDS_EXPORTCAPSACK:
-    m = MExportCapsAck::factory::build();
+    m = MExportCapsAck::create();
     break;
   case MSG_MDS_GATHERCAPS:
-    m = MGatherCaps::factory::build();
+    m = MGatherCaps::create();
     break;
   case MSG_MDS_DENTRYUNLINK:
-    m = MDentryUnlink::factory::build();
+    m = MDentryUnlink::create();
     break;
   case MSG_MDS_DENTRYLINK:
-    m = MDentryLink::factory::build();
+    m = MDentryLink::create();
     break;
   case MSG_MDS_HEARTBEAT:
-    m = MHeartbeat::factory::build();
+    m = MHeartbeat::create();
     break;
   case MSG_MDS_CACHEEXPIRE:
-    m = MCacheExpire::factory::build();
+    m = MCacheExpire::create();
     break;
   case MSG_MDS_TABLE_REQUEST:
-    m = MMDSTableRequest::factory::build();
+    m = MMDSTableRequest::create();
     break;
 /*  case MSG_MDS_INODEUPDATE:
-    m = MInodeUpdate::factory::build();
+    m = MInodeUpdate::create();
     break;
 */
   case MSG_MDS_INODEFILECAPS:
-    m = MInodeFileCaps::factory::build();
+    m = MInodeFileCaps::create();
     break;
   case MSG_MDS_LOCK:
-    m = MLock::factory::build();
+    m = MLock::create();
     break;
   case MSG_MGR_BEACON:
-    m = MMgrBeacon::factory::build();
+    m = MMgrBeacon::create();
     break;
   case MSG_MON_MGR_REPORT:
-    m = MMonMgrReport::factory::build();
+    m = MMonMgrReport::create();
     break;
   case MSG_SERVICE_MAP:
-    m = MServiceMap::factory::build();
+    m = MServiceMap::create();
     break;
   case MSG_MGR_MAP:
-    m = MMgrMap::factory::build();
+    m = MMgrMap::create();
     break;
   case MSG_MGR_DIGEST:
-    m = MMgrDigest::factory::build();
+    m = MMgrDigest::create();
     break;
   case MSG_MGR_OPEN:
-    m = MMgrOpen::factory::build();
+    m = MMgrOpen::create();
     break;
   case MSG_MGR_CLOSE:
-    m = MMgrClose::factory::build();
+    m = MMgrClose::create();
     break;
   case MSG_MGR_REPORT:
-    m = MMgrReport::factory::build();
+    m = MMgrReport::create();
     break;
   case MSG_MGR_CONFIGURE:
-    m = MMgrConfigure::factory::build();
+    m = MMgrConfigure::create();
     break;
   case MSG_TIMECHECK:
-    m = MTimeCheck::factory::build();
+    m = MTimeCheck::create();
     break;
   case MSG_TIMECHECK2:
-    m = MTimeCheck2::factory::build();
+    m = MTimeCheck2::create();
     break;
   case MSG_MON_HEALTH:
-    m = MMonHealth::factory::build();
+    m = MMonHealth::create();
     break;
   case MSG_MON_HEALTH_CHECKS:
-    m = MMonHealthChecks::factory::build();
+    m = MMonHealthChecks::create();
     break;
 #if defined(HAVE_XIO)
   case MSG_DATA_PING:
-    m = MDataPing::factory::build();
+    m = MDataPing::create();
     break;
 #endif
   // -- simple messages without payload --
   case CEPH_MSG_SHUTDOWN:
-    m = MGenericMessage::factory::build(type);
+    m = MGenericMessage::create(type);
     break;
   default:
diff --git a/src/msg/Message.h b/src/msg/Message.h
index 008723c7557ed..d298738ca18cb 100644
--- a/src/msg/Message.h
+++ b/src/msg/Message.h
@@ -553,6 +553,10 @@ class MessageInstance : public MessageSubType {
 public:
   using factory = MessageFactory;

+  template<typename... Args>
+  static auto create(Args&&... args) {
+    return MessageFactory::build(std::forward<Args>(args)...);
+  }
   static auto msgref_cast(typename Message::ref const& m) {
     return boost::static_pointer_cast::type::element_type>(m);
   }
diff --git a/src/tools/ceph-dencoder/ceph_dencoder.cc b/src/tools/ceph-dencoder/ceph_dencoder.cc
index e069f1fdde630..4abb93644d8f7 100644
--- a/src/tools/ceph-dencoder/ceph_dencoder.cc
+++ b/src/tools/ceph-dencoder/ceph_dencoder.cc
@@ -218,7 +218,7 @@ class MessageDencoderImpl : public Dencoder {
   list m_list;

 public:
-  MessageDencoderImpl() : m_object(T::factory::build()) {}
+  MessageDencoderImpl() : m_object(T::create()) {}
   ~MessageDencoderImpl() override {}

   string decode(bufferlist bl, uint64_t seek) override {
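For readers skimming the mechanical renames: the Message.h hunk above is the only substantive change. It gives every message type a uniform MFoo::create(...) that perfect-forwards its arguments to the existing per-type factory, which is what lets every call site shorten from MFoo::factory::build(...) to MFoo::create(...). A self-contained sketch of that pattern follows; Factory and MFoo are illustrative stand-ins (using std::shared_ptr rather than Ceph's intrusive message refs), not the real Ceph types:

#include <iostream>
#include <memory>
#include <string>
#include <utility>

// Stand-in for the per-message-type factory that already existed.
template <class T>
struct Factory {
  template <typename... Args>
  static std::shared_ptr<T> build(Args&&... args) {
    return std::make_shared<T>(std::forward<Args>(args)...);
  }
};

struct MFoo {
  std::string op;
  explicit MFoo(std::string o) : op(std::move(o)) {}

  // The helper this commit defines: a static create() that forwards
  // everything to the factory, so callers never name the factory directly.
  template <typename... Args>
  static auto create(Args&&... args) {
    return Factory<MFoo>::build(std::forward<Args>(args)...);
  }
};

int main()
{
  // Was: MFoo::factory::build("session_open")
  auto m = MFoo::create("session_open");
  std::cout << m->op << "\n";
  return 0;
}

Because create() deduces and forwards any argument list, the same one helper covers zero-argument decode-side construction (e.g. MClientSession::create() in decode_message) and the many-argument send-side constructors (e.g. MClientCaps::create(op, ino, ...)) without per-type boilerplate.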