From 6632131da1f0f6f623dd370f2cae48ef45124632 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly <pdonnell@redhat.com>
Date: Mon, 30 Jul 2018 10:23:28 -0700
Subject: [PATCH] mds: use message factory to avoid leaks

Fixes: http://tracker.ceph.com/issues/24306
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
---
 src/client/Client.cc                 | 36 +++++------
 src/mds/Beacon.cc                    |  4 +-
 src/mds/Locker.cc                    | 62 +++++++++----------
 src/mds/MDBalancer.cc                |  2 +-
 src/mds/MDCache.cc                   | 92 ++++++++++++----------
 src/mds/MDSDaemon.cc                 |  2 +-
 src/mds/MDSRank.cc                   | 11 ++--
 src/mds/MDSRank.h                    | 28 --------
 src/mds/MDSTableClient.cc            | 10 +--
 src/mds/MDSTableServer.cc            | 14 ++---
 src/mds/Migrator.cc                  | 70 ++++++++-----------
 src/mds/Server.cc                    | 64 +++++++++----------
 src/mds/Server.h                     |  1 -
 src/mds/SnapClient.cc                |  4 +-
 src/mds/SnapRealm.cc                 |  2 -
 src/mds/SnapServer.cc                |  8 +--
 src/mds/StrayManager.cc              |  4 +-
 src/messages/MCacheExpire.h          |  2 +-
 src/messages/MClientCaps.h           |  5 +-
 src/messages/MClientLease.h          |  2 +-
 src/messages/MClientQuota.h          |  2 +-
 src/messages/MClientReply.h          |  2 +-
 src/messages/MClientRequest.h        |  3 +-
 src/messages/MClientRequestForward.h |  3 +-
 src/messages/MClientSession.h        |  2 +-
 src/messages/MClientSnap.h           |  2 +-
 src/messages/MDentryLink.h           |  2 +-
 src/messages/MDentryUnlink.h         |  2 +-
 src/messages/MDirUpdate.h            | 23 ++++---
 src/messages/MDiscover.h             |  2 +-
 src/messages/MDiscoverReply.h        |  3 +-
 src/messages/MExportCaps.h           |  2 +-
 src/messages/MExportCapsAck.h        |  2 +-
 src/messages/MExportDir.h            |  2 +-
 src/messages/MExportDirAck.h         |  2 +-
 src/messages/MExportDirCancel.h      |  2 +-
 src/messages/MExportDirDiscover.h    |  2 +-
 src/messages/MExportDirDiscoverAck.h |  2 +-
 src/messages/MExportDirFinish.h      |  2 +-
 src/messages/MExportDirNotify.h      |  2 +-
 src/messages/MExportDirNotifyAck.h   |  2 +-
 src/messages/MExportDirPrep.h        |  2 +-
 src/messages/MExportDirPrepAck.h     |  2 +-
 src/messages/MGatherCaps.h           |  2 +-
 src/messages/MHeartbeat.h            |  2 +-
 src/messages/MInodeFileCaps.h        |  2 +-
 src/messages/MLock.h                 |  2 +-
 src/messages/MMDSBeacon.h            |  3 +-
 src/messages/MMDSCacheRejoin.h       |  2 +-
 src/messages/MMDSFindIno.h           |  3 +
 src/messages/MMDSFindInoReply.h      |  3 +
 src/messages/MMDSFragmentNotify.h    |  2 +-
 src/messages/MMDSLoadTargets.h       |  3 +-
 src/messages/MMDSMap.h               |  2 +-
 src/messages/MMDSOpenIno.h           |  3 +
 src/messages/MMDSOpenInoReply.h      |  3 +
 src/messages/MMDSResolve.h           |  2 +-
 src/messages/MMDSResolveAck.h        |  2 +-
 src/messages/MMDSSlaveRequest.h      |  4 +-
 src/messages/MMDSSnapUpdate.h        |  2 +-
 src/messages/MMDSTableRequest.h      |  2 +-
 src/messages/MRemoveSnaps.h          |  2 +-
 src/mon/MDSMonitor.cc                | 46 +++++------
 63 files changed, 269 insertions(+), 316 deletions(-)

diff --git a/src/client/Client.cc b/src/client/Client.cc
index 7f8041d8391c7..dfbed39f01eb6 100644
--- a/src/client/Client.cc
+++ b/src/client/Client.cc
@@ -2016,10 +2016,10 @@ MetaSession *Client::_open_mds_session(mds_rank_t mds)
     }
   }

-  MClientSession *m = new MClientSession(CEPH_SESSION_REQUEST_OPEN);
+  auto m = MClientSession::factory::build(CEPH_SESSION_REQUEST_OPEN);
   m->metadata = metadata;
   m->supported_features = feature_bitset_t(CEPHFS_FEATURES_CLIENT_SUPPORTED);
-  session->con->send_message(m);
+  session->con->send_message2(m);
   return session;
 }
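The change pattern in the hunk above recurs through the whole patch: a bare `new MFoo(...)`, whose ownership was easy to drop on error paths, becomes `MFoo::factory::build(...)`, which returns an owning, reference-counted handle (`MFoo::ref`), and connection sends go through `send_message2()`, which accepts that handle. A minimal, self-contained sketch of the idea follows; `Message`, `MRef`, `factory`, and `MClientSessionLike` are stand-ins assumed for illustration, not Ceph's real implementation:

    #include <atomic>
    #include <cstdio>
    #include <utility>

    struct Message {
      std::atomic<int> nref{1};            // a message starts with one reference
      virtual ~Message() = default;
      void get() { ++nref; }
      void put() { if (--nref == 0) delete this; }
    };

    template <typename M>
    class MRef {                           // stands in for M::ref
      M* m = nullptr;
    public:
      MRef() = default;
      explicit MRef(M* p) : m(p) {}        // adopts the initial reference
      MRef(const MRef& o) : m(o.m) { if (m) m->get(); }
      MRef(MRef&& o) noexcept : m(std::exchange(o.m, nullptr)) {}
      ~MRef() { if (m) m->put(); }
      M* operator->() const { return m; }
      M* detach() { return std::exchange(m, nullptr); }  // hand off to legacy APIs
    };

    template <typename M>
    struct factory {                       // stands in for M::factory
      template <typename... Args>
      static MRef<M> build(Args&&... args) {
        return MRef<M>(new M(std::forward<Args>(args)...));  // adopted exactly once
      }
    };

    struct MClientSessionLike : Message {
      int op;
      explicit MClientSessionLike(int o) : op(o) {}
    };

    int main() {
      // Before the patch, `new MClientSession(...)` leaked if no send path
      // consumed it. With the factory, the handle frees the message itself.
      auto m = factory<MClientSessionLike>::build(42);
      std::printf("op=%d\n", m->op);
    }   // m goes out of scope; message freed even if it was never sent

The point is that no call site ever holds an unowned `Message*`: the reference from `new` is adopted exactly once, so an early return no longer leaks the message.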
<< s->mds_num << " seq " << s->seq << dendl; s->state = MetaSession::STATE_CLOSING; - s->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_CLOSE, s->seq)); + s->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_CLOSE, s->seq)); } void Client::_closed_mds_session(MetaSession *s) @@ -2104,7 +2104,7 @@ void Client::handle_client_session(MClientSession *m) break; case CEPH_SESSION_FLUSHMSG: - session->con->send_message(new MClientSession(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq())); + session->con->send_message2(MClientSession::factory::build(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq())); break; case CEPH_SESSION_FORCE_RO: @@ -2209,7 +2209,7 @@ void Client::send_request(MetaRequest *request, MetaSession *session, MClientRequest* Client::build_client_request(MetaRequest *request) { - MClientRequest *req = new MClientRequest(request->get_op()); + auto req = MClientRequest::factory::build(request->get_op()); req->set_tid(request->tid); req->set_stamp(request->op_stamp); memcpy(&req->head, &request->head, sizeof(ceph_mds_request_head)); @@ -2242,7 +2242,7 @@ MClientRequest* Client::build_client_request(MetaRequest *request) const gid_t *_gids; int gid_count = request->perms.get_gids(&_gids); req->set_gid_list(gid_count, _gids); - return req; + return req.detach(); } @@ -2883,7 +2883,7 @@ void Client::got_mds_push(MetaSession *s) s->seq++; ldout(cct, 10) << " mds." << s->mds_num << " seq now " << s->seq << dendl; if (s->state == MetaSession::STATE_CLOSING) { - s->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_CLOSE, s->seq)); + s->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_CLOSE, s->seq)); } } @@ -2923,10 +2923,10 @@ void Client::handle_lease(MClientLease *m) } revoke: - m->get_connection()->send_message( - new MClientLease( - CEPH_MDS_LEASE_RELEASE, seq, - m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname)); + { + auto reply = MClientLease::factory::build(CEPH_MDS_LEASE_RELEASE, seq, m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname); + m->get_connection()->send_message2(reply); + } m->put(); } @@ -3281,7 +3281,7 @@ void Client::send_cap(Inode *in, MetaSession *session, Cap *cap, if (flush) follows = in->snaprealm->get_snap_context().seq; - MClientCaps *m = new MClientCaps(op, + auto m = MClientCaps::factory::build(op, in->ino, 0, cap->cap_id, cap->seq, @@ -3339,7 +3339,7 @@ void Client::send_cap(Inode *in, MetaSession *session, Cap *cap, if (!session->flushing_caps_tids.empty()) m->set_oldest_flush_tid(*session->flushing_caps_tids.begin()); - session->con->send_message(m); + session->con->send_message2(m); } static bool is_max_size_approaching(Inode *in) @@ -3644,7 +3644,7 @@ void Client::flush_snaps(Inode *in, bool all_again) session->flushing_caps_tids.insert(capsnap.flush_tid); } - MClientCaps *m = new MClientCaps(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq, + auto m = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq, cap_epoch_barrier); m->caller_uid = capsnap.cap_dirtier_uid; m->caller_gid = capsnap.cap_dirtier_gid; @@ -3680,7 +3680,7 @@ void Client::flush_snaps(Inode *in, bool all_again) assert(!session->flushing_caps_tids.empty()); m->set_oldest_flush_tid(*session->flushing_caps_tids.begin()); - session->con->send_message(m); + session->con->send_message2(m); } } @@ -5845,8 +5845,8 @@ void Client::flush_mdlog(MetaSession *session) // will crash if they see an unknown CEPH_SESSION_* value in this msg. 
@@ -5845,8 +5845,8 @@ void Client::flush_mdlog(MetaSession *session)
   // will crash if they see an unknown CEPH_SESSION_* value in this msg.
   const uint64_t features = session->con->get_features();
   if (HAVE_FEATURE(features, SERVER_LUMINOUS)) {
-    MClientSession *m = new MClientSession(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
-    session->con->send_message(m);
+    auto m = MClientSession::factory::build(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
+    session->con->send_message2(m);
   }
 }

@@ -6113,7 +6113,7 @@ void Client::renew_caps(MetaSession *session)
   ldout(cct, 10) << "renew_caps mds." << session->mds_num << dendl;
   session->last_cap_renew_request = ceph_clock_now();
   uint64_t seq = ++session->cap_renew_seq;
-  session->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
+  session->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
 }

diff --git a/src/mds/Beacon.cc b/src/mds/Beacon.cc
index c9dc3d8af13c6..6603ca3eb6ba6 100644
--- a/src/mds/Beacon.cc
+++ b/src/mds/Beacon.cc
@@ -208,13 +208,13 @@ void Beacon::_send()

   assert(want_state != MDSMap::STATE_NULL);

-  MMDSBeacon::ref beacon(new MMDSBeacon(
+  auto beacon = MMDSBeacon::factory::build(
       monc->get_fsid(), mds_gid_t(monc->get_global_id()),
       name,
       epoch,
       want_state,
       last_seq,
-      CEPH_FEATURES_SUPPORTED_DEFAULT), false);
+      CEPH_FEATURES_SUPPORTED_DEFAULT);

   beacon->set_standby_for_rank(standby_for_rank);
   beacon->set_standby_for_name(standby_for_name);
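The Beacon hunk retires the `MMDSBeacon::ref beacon(new MMDSBeacon(...), false)` spelling, where the trailing `false` meant "adopt the reference produced by `new`, do not add another". Passing `true` by mistake double-counted; forgetting to wrap at all leaked. A small sketch of that idiom under assumed stand-in types, showing what `factory::build` takes off every call site:

    #include <atomic>
    #include <cassert>

    // Illustrative types only, not Ceph's.
    struct Msg {
      std::atomic<int> nref{1};
      void get() { ++nref; }
      bool put() { return --nref == 0; }   // true when the last ref is gone
    };

    struct Ref {
      Msg* m;
      Ref(Msg* p, bool add_ref) : m(p) { if (add_ref) m->get(); }
      ~Ref() { if (m && m->put()) delete m; }
    };

    int main() {
      // `false` = adopt the reference from new. The factory makes this
      // decision once, centrally, instead of at every call site.
      Ref beacon(new Msg, false);
      assert(beacon.m->nref == 1);
    }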
diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc
index 1d521472f6846..97f92c1938b69 100644
--- a/src/mds/Locker.cc
+++ b/src/mds/Locker.cc
@@ -136,7 +136,7 @@ void Locker::send_lock_message(SimpleLock *lock, int msg)
     if (mds->is_cluster_degraded() &&
         mds->mdsmap->get_state(it.first) < MDSMap::STATE_REJOIN)
       continue;
-    MLock::ref m(new MLock(lock, msg, mds->get_nodeid()), false);
+    auto m = MLock::factory::build(lock, msg, mds->get_nodeid());
     mds->send_message_mds(m, it.first);
   }
 }
@@ -147,7 +147,7 @@ void Locker::send_lock_message(SimpleLock *lock, int msg, const bufferlist &data
     if (mds->is_cluster_degraded() &&
         mds->mdsmap->get_state(it.first) < MDSMap::STATE_REJOIN)
       continue;
-    MLock::ref m(new MLock(lock, msg, mds->get_nodeid()), false);
+    auto m = MLock::factory::build(lock, msg, mds->get_nodeid());
     m->set_data(data);
     mds->send_message_mds(m, it.first);
   }
@@ -449,7 +449,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
       return false;
     }

-    MMDSSlaveRequest::ref req(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPIN), false);
+    auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPIN);
     for (set<MDSCacheObject*>::iterator q = p->second.begin();
          q != p->second.end();
          ++q) {
@@ -731,7 +731,7 @@ void Locker::_drop_non_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue)
       if (!mds->is_cluster_degraded() ||
           mds->mdsmap->get_state(*p) >= MDSMap::STATE_REJOIN) {
         dout(10) << "_drop_non_rdlocks dropping remote locks on mds."
                  << *p << dendl;
-        MMDSSlaveRequest::ref slavereq(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_DROPLOCKS), false);
+        auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_DROPLOCKS);
         mds->send_message_mds(slavereq, *p);
       }
     }
@@ -888,12 +888,12 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInt
          mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
        switch (lock->get_state()) {
        case LOCK_SYNC_LOCK:
-         mds->send_message_mds(MLock::ref(new MLock(lock, LOCK_AC_LOCKACK, mds->get_nodeid()), false), auth);
+         mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_LOCKACK, mds->get_nodeid()), auth);
          break;

        case LOCK_MIX_SYNC:
          {
-           MLock::ref reply(new MLock(lock, LOCK_AC_SYNCACK, mds->get_nodeid()), false);
+           auto reply = MLock::factory::build(lock, LOCK_AC_SYNCACK, mds->get_nodeid());
            lock->encode_locked_state(reply->get_data());
            mds->send_message_mds(reply, auth);
            next = LOCK_MIX_SYNC2;
@@ -911,7 +911,7 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInt

        case LOCK_SYNC_MIX:
          {
-           MLock::ref reply(new MLock(lock, LOCK_AC_MIXACK, mds->get_nodeid()), false);
+           auto reply = MLock::factory::build(lock, LOCK_AC_MIXACK, mds->get_nodeid());
            mds->send_message_mds(reply, auth);
            next = LOCK_SYNC_MIX2;
          }
@@ -921,7 +921,7 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInt
          {
            bufferlist data;
            lock->encode_locked_state(data);
-           mds->send_message_mds(MLock::ref(new MLock(lock, LOCK_AC_LOCKACK, mds->get_nodeid(), data), false), auth);
+           mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_LOCKACK, mds->get_nodeid(), data), auth);
            (static_cast<ScatterLock *>(lock))->start_flush();
            // we'll get an AC_LOCKFLUSHED to complete
          }
@@ -1284,7 +1284,7 @@ bool Locker::_rdlock_kick(SimpleLock *lock, bool as_anon)
         mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
       dout(10) << "requesting rdlock from auth on "
                << *lock << " on " << *lock->get_parent() << dendl;
-      mds->send_message_mds(MLock::ref(new MLock(lock, LOCK_AC_REQRDLOCK, mds->get_nodeid()), false), auth);
+      mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_REQRDLOCK, mds->get_nodeid()), auth);
     }
     return false;
   }
@@ -1516,7 +1516,7 @@ bool Locker::wrlock_start(SimpleLock *lock, MDRequestRef& mut, bool nowait)
           mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
        dout(10) << "requesting scatter from auth on "
                 << *lock << " on " << *lock->get_parent() << dendl;
-       mds->send_message_mds(MLock::ref(new MLock(lock, LOCK_AC_REQSCATTER, mds->get_nodeid()), false), auth);
+       mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_REQSCATTER, mds->get_nodeid()), auth);
      }
      break;
    }
@@ -1572,7 +1572,7 @@ void Locker::remote_wrlock_start(SimpleLock *lock, mds_rank_t target, MDRequestR
   // send lock request
   mut->start_locking(lock, target);
   mut->more()->slaves.insert(target);
-  MMDSSlaveRequest::ref r(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK), false);
+  auto r = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK);
   r->set_lock_type(lock->get_type());
   lock->get_parent()->set_object_info(r->get_object_info());
   mds->send_message_mds(r, target);
@@ -1593,7 +1593,7 @@ void Locker::remote_wrlock_finish(SimpleLock *lock, mds_rank_t target,
                << " " << *lock->get_parent() << dendl;
   if (!mds->is_cluster_degraded() ||
       mds->mdsmap->get_state(target) >= MDSMap::STATE_REJOIN) {
-    MMDSSlaveRequest::ref slavereq(new MMDSSlaveRequest(mut->reqid, mut->attempt,
-                                                        MMDSSlaveRequest::OP_UNWRLOCK), false);
+    auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK);
     slavereq->set_lock_type(lock->get_type());
     lock->get_parent()->set_object_info(slavereq->get_object_info());
     mds->send_message_mds(slavereq, target);
@@ -1674,7 +1674,7 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequestRef& mut)
       // send lock request
       mut->more()->slaves.insert(auth);
       mut->start_locking(lock, auth);
-      MMDSSlaveRequest::ref r(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK), false);
+      auto r = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK);
       r->set_lock_type(lock->get_type());
       lock->get_parent()->set_object_info(r->get_object_info());
       mds->send_message_mds(r, auth);
@@ -1740,7 +1740,7 @@ void Locker::xlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue
     mds_rank_t auth = lock->get_parent()->authority().first;
     if (!mds->is_cluster_degraded() ||
         mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
-      MMDSSlaveRequest::ref slavereq(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK), false);
+      auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK);
       slavereq->set_lock_type(lock->get_type());
       lock->get_parent()->set_object_info(slavereq->get_object_info());
       mds->send_message_mds(slavereq, auth);
@@ -2075,14 +2075,13 @@ bool Locker::issue_caps(CInode *in, Capability *only_cap)
       cap->reset_num_revoke_warnings();
     }

-    MClientCaps::ref m(new MClientCaps(op, in->ino(),
+    auto m = MClientCaps::factory::build(op, in->ino(),
                                        in->find_snaprealm()->inode->ino(),
                                        cap->get_cap_id(), cap->get_last_seq(),
                                        after, wanted, 0,
                                        cap->get_mseq(),
-                                       mds->get_osd_epoch_barrier())
-                      , false);
+                                       mds->get_osd_epoch_barrier());
     in->encode_cap_message(m, cap);

     mds->send_message_client_counted(m, it->first);
@@ -2102,14 +2101,13 @@ void Locker::issue_truncate(CInode *in)
   for (auto &p : in->client_caps) {
     Capability *cap = &p.second;
-    MClientCaps::ref m(new MClientCaps(CEPH_CAP_OP_TRUNC,
+    auto m = MClientCaps::factory::build(CEPH_CAP_OP_TRUNC,
                                        in->ino(),
                                        in->find_snaprealm()->inode->ino(),
                                        cap->get_cap_id(), cap->get_last_seq(),
                                        cap->pending(), cap->wanted(), 0,
                                        cap->get_mseq(),
-                                       mds->get_osd_epoch_barrier())
-                      , false);
+                                       mds->get_osd_epoch_barrier());
     in->encode_cap_message(m, cap);
     mds->send_message_client_counted(m, p.first);
   }
@@ -2243,7 +2241,7 @@ void Locker::request_inode_file_caps(CInode *in)
     if (!mds->is_cluster_degraded() ||
         mds->mdsmap->is_clientreplay_or_active_or_stopping(auth))
-      mds->send_message_mds(MInodeFileCaps::ref(new MInodeFileCaps(in->ino(), in->replica_caps_wanted), false), auth);
+      mds->send_message_mds(MInodeFileCaps::factory::build(in->ino(), in->replica_caps_wanted), auth);
   }
 }
@@ -2478,7 +2476,7 @@ void Locker::share_inode_max_size(CInode *in, Capability *only_cap)
     if (cap->pending() & (CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER)) {
       dout(10) << "share_inode_max_size with client."
                << client << dendl;
       cap->inc_last_seq();
-      MClientCaps::ref m(new MClientCaps(CEPH_CAP_OP_GRANT,
+      auto m = MClientCaps::factory::build(CEPH_CAP_OP_GRANT,
                                          in->ino(),
                                          in->find_snaprealm()->inode->ino(),
                                          cap->get_cap_id(),
@@ -2486,8 +2484,7 @@ void Locker::share_inode_max_size(CInode *in, Capability *only_cap)
                                          cap->pending(), cap->wanted(), 0,
                                          cap->get_mseq(),
-                                         mds->get_osd_epoch_barrier())
-                        , false);
+                                         mds->get_osd_epoch_barrier());
       in->encode_cap_message(m, cap);
       mds->send_message_client_counted(m, client);
     }
@@ -2675,9 +2672,9 @@ void Locker::handle_client_caps(const MClientCaps::const_ref &m)
             << " for client." << client << dendl;
     MClientCaps::ref ack;
     if (op == CEPH_CAP_OP_FLUSHSNAP) {
-      ack.reset(new MClientCaps(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier()), false);
+      ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
     } else {
-      ack.reset(new MClientCaps(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier()), false);
+      ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
     }
     ack->set_snap_follows(follows);
     ack->set_client_tid(m->get_client_tid());
@@ -2799,7 +2796,7 @@ void Locker::handle_client_caps(const MClientCaps::const_ref &m)
     // case we get a dup response, so whatever.)
     MClientCaps::ref ack;
     if (dirty) {
-      ack.reset(new MClientCaps(CEPH_CAP_OP_FLUSHSNAP_ACK, in->ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier()), false);
+      ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP_ACK, in->ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
       ack->set_snap_follows(follows);
       ack->set_client_tid(m->get_client_tid());
       ack->set_oldest_flush_tid(m->get_oldest_flush_tid());
@@ -2882,8 +2879,8 @@ void Locker::handle_client_caps(const MClientCaps::const_ref &m)
     if (dirty && in->is_auth()) {
       dout(7) << " flush client." << client << " dirty " << ccap_string(dirty)
               << " seq " << m->get_seq() << " on " << *in << dendl;
-      ack.reset(new MClientCaps(CEPH_CAP_OP_FLUSH_ACK, in->ino(), 0, cap->get_cap_id(), m->get_seq(),
-          m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier()), false);
+      ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSH_ACK, in->ino(), 0, cap->get_cap_id(), m->get_seq(),
+          m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
       ack->set_client_tid(m->get_client_tid());
       ack->set_oldest_flush_tid(m->get_oldest_flush_tid());
@@ -3707,7 +3704,7 @@ void Locker::handle_client_lease(const MClientLease::const_ref &m)
     dout(7) << "handle_client_lease client." << client << " renew on " << *dn
             << (!dn->lock.can_lease(client)?", revoking lease":"") << dendl;
     if (dn->lock.can_lease(client)) {
-      MClientLease::ref reply(new MClientLease(*m));
+      auto reply = MClientLease::factory::build(*m);
       int pool = 1;   // fixme.. do something smart!
       reply->h.duration_ms = (int)(1000 * mdcache->client_lease_durations[pool]);
       reply->h.seq = ++l->seq;
@@ -3778,7 +3775,7 @@ void Locker::revoke_client_leases(SimpleLock *lock)
     // i should also revoke the dir ICONTENT lease, if they have it!
     CInode *diri = dn->get_dir()->get_inode();
-    MClientLease::ref lease(new MClientLease(CEPH_MDS_LEASE_REVOKE, l->seq, mask, diri->ino(), diri->first, CEPH_NOSNAP, dn->get_name()), false);
+    auto lease = MClientLease::factory::build(CEPH_MDS_LEASE_REVOKE, l->seq, mask, diri->ino(), diri->first, CEPH_NOSNAP, dn->get_name());
     mds->send_message_client_counted(lease, l->client);
   }
 }
@@ -4649,8 +4646,7 @@ void Locker::scatter_nudge(ScatterLock *lock, MDSInternalContextBase *c, bool fo
       // request unscatter?
       mds_rank_t auth = lock->get_parent()->authority().first;
       if (!mds->is_cluster_degraded() ||
           mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
-        MLock::ref msg(new MLock(lock, LOCK_AC_NUDGE, mds->get_nodeid()), false);
-        mds->send_message_mds(msg, auth);
+        mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_NUDGE, mds->get_nodeid()), auth);
       }

       // wait...
diff --git a/src/mds/MDBalancer.cc b/src/mds/MDBalancer.cc
index 744756c5332e8..c5374999b3229 100644
--- a/src/mds/MDBalancer.cc
+++ b/src/mds/MDBalancer.cc
@@ -393,7 +393,7 @@ void MDBalancer::send_heartbeat()
   for (const auto& r : up) {
     if (r == mds->get_nodeid())
       continue;
-    MHeartbeat::ref hb(new MHeartbeat(load, beat_epoch), false);
+    auto hb = MHeartbeat::factory::build(load, beat_epoch);
     hb->get_import_map() = import_map;
     mds->send_message_mds(hb, r);
   }
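Note the shape of the `send_heartbeat()` loop above: one freshly built message per peer, because each send consumes the sender's reference; sharing a single message across recipients would be unsound under this ownership model. A simplified sketch of the loop with hypothetical stand-in types:

    #include <cstdio>
    #include <memory>
    #include <vector>

    // Illustrative stand-ins, not the Ceph API.
    struct HeartbeatLike { int load, epoch; };

    void send_to_mds(std::unique_ptr<HeartbeatLike> hb, int rank) {
      std::printf("rank %d gets load=%d epoch=%d\n", rank, hb->load, hb->epoch);
    } // the message's owning handle dies here, as a real send would consume it

    void send_heartbeat(int load, int epoch, const std::vector<int>& up, int self) {
      for (int r : up) {
        if (r == self) continue;
        auto hb = std::make_unique<HeartbeatLike>(HeartbeatLike{load, epoch});
        send_to_mds(std::move(hb), r);   // build fresh per peer; never reuse one
      }
    }

    int main() { send_heartbeat(3, 7, {0, 1, 2}, 1); }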
<< from << " " << reqid << dendl; // send a message - MMDSSlaveRequest *req = new MMDSSlaveRequest(reqid, 0, MMDSSlaveRequest::OP_COMMITTED); + auto req = MMDSSlaveRequest::factory::build(reqid, 0, MMDSSlaveRequest::OP_COMMITTED); mds->send_message_mds(req, from); } @@ -2737,7 +2737,7 @@ void MDCache::send_slave_resolves() for (map >::iterator p = uncommitted_slave_updates.begin(); p != uncommitted_slave_updates.end(); ++p) { - resolves[p->first].reset(new MMDSResolve, false); + resolves[p->first] = MMDSResolve::factory::build(); for (map::iterator q = p->second.begin(); q != p->second.end(); ++q) { @@ -2761,7 +2761,7 @@ void MDCache::send_slave_resolves() if (resolve_set.count(master) || is_ambiguous_slave_update(p->first, master)) { dout(10) << " including uncommitted " << *mdr << dendl; if (!resolves.count(master)) - resolves[master].reset(new MMDSResolve, false); + resolves[master] = MMDSResolve::factory::build(); if (!mdr->committing && mdr->has_more() && mdr->more()->is_inode_exporter) { // re-send cap exports @@ -2805,7 +2805,7 @@ void MDCache::send_subtree_resolves() if (*p == mds->get_nodeid()) continue; if (mds->is_resolve() || mds->mdsmap->is_resolve(*p)) - resolves[*p].reset(new MMDSResolve, false); + resolves[*p] = MMDSResolve::factory::build(); } map > my_subtrees; @@ -3185,7 +3185,7 @@ void MDCache::handle_resolve(const MMDSResolve::const_ref &m) } } - MMDSResolveAck::ref ack(new MMDSResolveAck, false); + auto ack = MMDSResolveAck::factory::build(); for (const auto &p : m->slave_requests) { if (uncommitted_masters.count(p.first)) { //mds->sessionmap.have_completed_request(p.first)) { // COMMIT @@ -3990,9 +3990,9 @@ void MDCache::rejoin_send_rejoins() if (*p == mds->get_nodeid()) continue; // nothing to myself! if (rejoin_sent.count(*p)) continue; // already sent a rejoin to this node! if (mds->is_rejoin()) - rejoins[*p].reset(new MMDSCacheRejoin(MMDSCacheRejoin::OP_WEAK), false); + rejoins[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_WEAK); else if (mds->mdsmap->is_rejoin(*p)) - rejoins[*p].reset(new MMDSCacheRejoin(MMDSCacheRejoin::OP_STRONG), false); + rejoins[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_STRONG); } if (mds->is_rejoin()) { @@ -4342,7 +4342,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) { survivor = true; dout(10) << "i am a surivivor, and will ack immediately" << dendl; - ack.reset(new MMDSCacheRejoin(MMDSCacheRejoin::OP_ACK), false); + ack = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_ACK); map > imported_caps; @@ -5126,7 +5126,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) } // mark client caps stale. 
-      MClientCaps *m = new MClientCaps(CEPH_CAP_OP_EXPORT, p->first, 0,
+      auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, p->first, 0,
                                        r->second.capinfo.cap_id, 0,
                                        mds->get_osd_epoch_barrier());
       m->set_cap_peer(q->second.cap_id, q->second.issue_seq, q->second.mseq,
@@ -5564,7 +5564,7 @@ void MDCache::prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t i
     snap = it->second;
     snap->head.op = CEPH_SNAP_OP_SPLIT;
   } else {
-    snap.reset(new MClientSnap(CEPH_SNAP_OP_SPLIT), false);
+    snap = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
     splits.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple(snap));
     snap->head.split = realm->inode->ino();
     snap->bl = realm->get_snap_trace();
@@ -5596,7 +5596,7 @@ void MDCache::prepare_realm_merge(SnapRealm *realm, SnapRealm *parent_realm,
     assert(!p.second->empty());
     auto em = splits.emplace(std::piecewise_construct, std::forward_as_tuple(p.first), std::forward_as_tuple());
     if (em.second) {
-      MClientSnap::ref update(new MClientSnap(CEPH_SNAP_OP_SPLIT), false);
+      auto update = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
       update->head.split = parent_realm->inode->ino();
       update->split_inos = split_inos;
       update->split_realms = split_realms;
@@ -5712,7 +5712,7 @@ void MDCache::export_remaining_imported_caps()
       Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(q->first.v));
       if (session) {
         // mark client caps stale.
-        MClientCaps *stale = new MClientCaps(CEPH_CAP_OP_EXPORT, p->first, 0, 0, 0, mds->get_osd_epoch_barrier());
+        auto stale = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, p->first, 0, 0, 0, mds->get_osd_epoch_barrier());
         stale->set_cap_peer(0, 0, 0, -1, 0);
         mds->send_message_client_counted(stale, q->first);
       }
@@ -5787,12 +5787,7 @@ void MDCache::do_cap_import(Session *session, CInode *in, Capability *cap,
     cap->set_last_issue();
     cap->set_last_issue_stamp(ceph_clock_now());
     cap->clear_new();
-    MClientCaps *reap = new MClientCaps(CEPH_CAP_OP_IMPORT,
-                                        in->ino(),
-                                        realm->inode->ino(),
-                                        cap->get_cap_id(), cap->get_last_seq(),
-                                        cap->pending(), cap->wanted(), 0,
-                                        cap->get_mseq(), mds->get_osd_epoch_barrier());
+    auto reap = MClientCaps::factory::build(CEPH_CAP_OP_IMPORT, in->ino(), realm->inode->ino(), cap->get_cap_id(), cap->get_last_seq(), cap->pending(), cap->wanted(), 0, cap->get_mseq(), mds->get_osd_epoch_barrier());
     in->encode_cap_message(reap, cap);
     reap->snapbl = realm->get_snap_trace();
     reap->set_cap_peer(p_cap_id, p_seq, p_mseq, peer, p_flags);
@@ -5988,7 +5983,7 @@ void MDCache::finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snap
   if (seq < realm->get_newest_seq()) {
     dout(10) << "finish_snaprealm_reconnect client."
<< client << " has old seq " << seq << " < " << realm->get_newest_seq() << " on " << *realm << dendl; - MClientSnap *snap = new MClientSnap(CEPH_SNAP_OP_UPDATE); + auto snap = MClientSnap::factory::build(CEPH_SNAP_OP_UPDATE); snap->bl = realm->get_snap_trace(); for (const auto& child : realm->open_children) snap->split_realms.push_back(child->inode->ino()); @@ -6037,13 +6032,13 @@ void MDCache::rejoin_send_acks() rejoin_unlinked_inodes.clear(); // send acks to everyone in the recovery set - map acks; + map acks; for (set::iterator p = recovery_set.begin(); p != recovery_set.end(); ++p) { if (rejoin_ack_sent.count(*p)) continue; - acks[*p] = new MMDSCacheRejoin(MMDSCacheRejoin::OP_ACK); + acks[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_ACK); } rejoin_ack_sent = recovery_set; @@ -6665,7 +6660,7 @@ bool MDCache::trim(uint64_t count) auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple()); if (em.second) { - em.first->second.reset(new MCacheExpire(mds->get_nodeid()), false); + em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); } dout(20) << __func__ << ": try expiring " << *mdsdir_in << " for stopping mds." << mds << dendl; @@ -6790,7 +6785,7 @@ bool MDCache::trim_dentry(CDentry *dn, expiremap& expiremap) assert(a != mds->get_nodeid()); auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple()); if (em.second) - em.first->second.reset(new MCacheExpire(mds->get_nodeid()), false); /* new */ + em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); em.first->second->add_dentry(con->dirfrag(), dir->dirfrag(), dn->get_name(), dn->last, dn->get_replica_nonce()); } } @@ -6846,7 +6841,7 @@ void MDCache::trim_dirfrag(CDir *dir, CDir *con, expiremap& expiremap) assert(a != mds->get_nodeid()); auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple()); if (em.second) - em.first->second.reset(new MCacheExpire(mds->get_nodeid()), false); /* new */ + em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */ em.first->second->add_dir(condf, dir->dirfrag(), dir->replica_nonce); } } @@ -6915,7 +6910,7 @@ bool MDCache::trim_inode(CDentry *dn, CInode *in, CDir *con, expiremap& expirema assert(a != mds->get_nodeid()); auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple()); if (em.second) - em.first->second.reset(new MCacheExpire(mds->get_nodeid()), false); /* new */ + em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */ em.first->second->add_inode(df, in->vino(), in->get_replica_nonce()); } } @@ -7284,7 +7279,7 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) auto em = delayed_expire[parent_dir].emplace(std::piecewise_construct, std::forward_as_tuple(from), std::forward_as_tuple()); if (em.second) - em.first->second.reset(new MCacheExpire(from), false); /* new */ + em.first->second = MCacheExpire::factory::build(from); /* new */ // merge these expires into it em.first->second->add_realm(p.first, p.second); @@ -8774,7 +8769,7 @@ void MDCache::do_open_ino_peer(inodeno_t ino, open_ino_info_t& info) // got backtrace from peer or backtrace just fetched if (info.discover || !info.fetch_backtrace) pa = &info.ancestors; - mds->send_message_mds(new MMDSOpenIno(info.tid, ino, pa), peer); + mds->send_message_mds(MMDSOpenIno::factory::build(info.tid, ino, pa), peer); if (mds->logger) 
       mds->logger->inc(l_mds_openino_peer_discover);
 }
@@ -8794,7 +8789,7 @@ void MDCache::handle_open_ino(const MMDSOpenIno::const_ref &m, int err)
   CInode *in = get_inode(ino);
   if (in) {
     dout(10) << " have " << *in << dendl;
-    reply.reset(new MMDSOpenInoReply(m->get_tid(), ino, mds_rank_t(0)), false);
+    reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, mds_rank_t(0));
     if (in->is_auth()) {
       touch_inode(in);
       while (1) {
@@ -8810,13 +8805,13 @@ void MDCache::handle_open_ino(const MMDSOpenIno::const_ref &m, int err)
       reply->hint = in->authority().first;
     }
   } else if (err < 0) {
-    reply.reset(new MMDSOpenInoReply(m->get_tid(), ino, MDS_RANK_NONE, err), false);
+    reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, MDS_RANK_NONE, err);
   } else {
     mds_rank_t hint = MDS_RANK_NONE;
     int ret = open_ino_traverse_dir(ino, m, m->ancestors, false, false, &hint);
     if (ret > 0)
       return;
-    reply.reset(new MMDSOpenInoReply(m->get_tid(), ino, hint, ret), false);
+    reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, hint, ret);
   }
   m->get_connection()->send_message2(reply); /* FIXME, why not send_client? */
 }
@@ -8990,7 +8985,7 @@ void MDCache::_do_find_ino_peer(find_ino_peer_info_t& fip)
     }
   } else {
     fip.checking = m;
-    mds->send_message_mds(new MMDSFindIno(fip.tid, fip.ino), m);
+    mds->send_message_mds(MMDSFindIno::factory::build(fip.tid, fip.ino), m);
   }
 }
@@ -9001,13 +8996,13 @@ void MDCache::handle_find_ino(const MMDSFindIno::const_ref &m)
   }

   dout(10) << "handle_find_ino " << *m << dendl;
-  MMDSFindInoReply *r = new MMDSFindInoReply(m->tid);
+  auto r = MMDSFindInoReply::factory::build(m->tid);
   CInode *in = get_inode(m->ino);
   if (in) {
     in->make_path(r->path);
     dout(10) << " have " << r->path << " " << *in << dendl;
   }
-  m->get_connection()->send_message(r);
+  m->get_connection()->send_message2(r);
 }
@@ -9275,7 +9270,7 @@ void MDCache::request_drop_foreign_locks(MDRequestRef& mdr)
   for (set<mds_rank_t>::iterator p = mdr->more()->slaves.begin();
        p != mdr->more()->slaves.end();
        ++p) {
-    MMDSSlaveRequest *r = new MMDSSlaveRequest(mdr->reqid, mdr->attempt,
-                                               MMDSSlaveRequest::OP_FINISH);
+    auto r = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt,
                                               MMDSSlaveRequest::OP_FINISH);

     if (mdr->killed && !mdr->committing) {
@@ -9455,7 +9450,7 @@ void MDCache::do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool
     auto em = updates.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple());
     if (em.second) {
-      MClientSnap::ref update(new MClientSnap(CEPH_SNAP_OP_SPLIT), false);
+      auto update = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
       update->head.split = in->ino();
       update->split_inos = split_inos;
       update->split_realms = split_realms;
@@ -9539,7 +9534,7 @@ void MDCache::send_snap_update(CInode *in, version_t stid, int snap_op)
   in->encode_snap(snap_blob);

   for (auto p : mds_set) {
-    MMDSSnapUpdate::ref m(new MMDSSnapUpdate(in->ino(), stid, snap_op), false);
+    auto m = MMDSSnapUpdate::factory::build(in->ino(), stid, snap_op);
     m->snap_blob = snap_blob;
     mds->send_message_mds(m, p);
   }
@@ -9597,7 +9592,7 @@ void MDCache::notify_global_snaprealm_update(int snap_op)
   for (auto &session : sessions) {
     if (!session->is_open() && !session->is_stale())
       continue;
-    MClientSnap *update = new MClientSnap(snap_op);
+    auto update = MClientSnap::factory::build(snap_op);
     update->head.split = global_snaprealm->inode->ino();
     update->bl = global_snaprealm->get_snap_trace();
     mds->send_message_client_counted(update, session);
@@ -9673,7 +9668,7 @@ void MDCache::fetch_backtrace(inodeno_t ino, int64_t pool, bufferlist& bl, Conte
 void MDCache::_send_discover(discover_info_t& d)
 {
-  MDiscover::ref dis(new MDiscover(d.ino, d.frag, d.snap, d.want_path, d.want_base_dir, d.want_xlocked), false);
+  auto dis = MDiscover::factory::build(d.ino, d.frag, d.snap, d.want_path, d.want_base_dir, d.want_xlocked);
   dis->set_tid(d.tid);
   mds->send_message_mds(dis, d.mds);
 }
@@ -9865,7 +9860,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis)

   CInode *cur = 0;

-  MDiscoverReply::ref reply(new MDiscoverReply(*dis), false);
+  auto reply = MDiscoverReply::factory::build(*dis);

   snapid_t snapid = dis->get_snapid();
@@ -10474,13 +10469,7 @@ int MDCache::send_dir_updates(CDir *dir, bool bcast)
     for (const auto &r : dir->dir_rep_by) {
       s.insert(r);
     }
-    mds->send_message_mds(new MDirUpdate(mds->get_nodeid(),
-                                         dir->dirfrag(),
-                                         dir->dir_rep,
-                                         s,
-                                         path,
-                                         bcast),
-                          *it);
+    mds->send_message_mds(MDirUpdate::factory::build(mds->get_nodeid(), dir->dirfrag(), dir->dir_rep, s, path, bcast), *it);
   }

   return 0;
@@ -10549,8 +10538,7 @@ void MDCache::send_dentry_link(CDentry *dn, MDRequestRef& mdr)
                  rejoin_gather.count(p.first)))
       continue;
     CDentry::linkage_t *dnl = dn->get_linkage();
-    MDentryLink *m = new MDentryLink(subtree->dirfrag(), dn->get_dir()->dirfrag(),
-                                     dn->get_name(), dnl->is_primary());
+    auto m = MDentryLink::factory::build(subtree->dirfrag(), dn->get_dir()->dirfrag(), dn->get_name(), dnl->is_primary());
     if (dnl->is_primary()) {
       dout(10) << " primary " << *dnl->get_inode() << dendl;
       replicate_inode(dnl->get_inode(), p.first, m->bl,
@@ -10637,7 +10625,7 @@ void MDCache::send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& md
                  rejoin_gather.count(*it)))
       continue;

-    MDentryUnlink *unlink = new MDentryUnlink(dn->get_dir()->dirfrag(), dn->get_name());
+    auto unlink = MDentryUnlink::factory::build(dn->get_dir()->dirfrag(), dn->get_name());
     if (straydn) {
       replicate_stray(straydn, *it, unlink->straybl);
       unlink->snapbl = snapbl;
@@ -11489,7 +11477,7 @@ void MDCache::_fragment_stored(MDRequestRef& mdr)
                  rejoin_gather.count(p.first)))
       continue;

-    MMDSFragmentNotify *notify = new MMDSFragmentNotify(basedirfrag, info.bits);
+    auto notify = MMDSFragmentNotify::factory::build(basedirfrag, info.bits);

     // freshly replicate new dirs to peers
     for (list<CDir*>::iterator q = info.resultfrags.begin();
diff --git a/src/mds/MDSDaemon.cc b/src/mds/MDSDaemon.cc
index 4204f53c42072..9422e165e200e 100644
--- a/src/mds/MDSDaemon.cc
+++ b/src/mds/MDSDaemon.cc
@@ -578,7 +578,7 @@ void MDSDaemon::send_command_reply(const MCommand::const_ref &m, MDSRank *mds_ra
   }
   priv.reset();

-  MCommandReply::ref reply(new MCommandReply(r, outs), false);
+  auto reply = MCommandReply::factory::build(r, outs);
   reply->set_tid(m->get_tid());
   reply->set_data(outbl);
   m->get_connection()->send_message2(reply);
diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc
index cc78d3c3d0bd3..97fcef51b177e 100644
--- a/src/mds/MDSRank.cc
+++ b/src/mds/MDSRank.cc
@@ -20,7 +20,6 @@
 #include "messages/MClientRequestForward.h"
 #include "messages/MMDSLoadTargets.h"
 #include "messages/MMDSTableRequest.h"
-#include "messages/MCommandReply.h"

 #include "MDSDaemon.h"
 #include "MDSMap.h"
@@ -219,8 +218,8 @@ void MDSRank::update_targets()
   if (send) {
     dout(15) << "updating export_targets, now " << new_map_targets.size()
              << " ranks are targets" << dendl;
-    MMDSLoadTargets* m = new MMDSLoadTargets(mds_gid_t(monc->get_global_id()), new_map_targets);
-    monc->send_mon_message(m);
+    auto m = MMDSLoadTargets::factory::build(mds_gid_t(monc->get_global_id()), new_map_targets);
+    monc->send_mon_message(m.detach());
   }
 }

@@ -941,7 +940,7 @@ void MDSRank::send_message_mds(const Message::ref& m, mds_rank_t mds)

   // send mdsmap first?
   if (mds != whoami && peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
-    Message::ref _m = MMDSMap::ref(new MMDSMap(monc->get_fsid(), *mdsmap), false);
+    auto _m = MMDSMap::factory::build(monc->get_fsid(), *mdsmap);
     messenger->send_to_mds(_m.detach(), mdsmap->get_addrs(mds));
     peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
   }
@@ -964,7 +963,7 @@ void MDSRank::forward_message_mds(const MClientRequest::const_ref& m, mds_rank_t
   bool client_must_resend = true;  //!creq->can_forward();

   // tell the client where it should go
-  MClientRequestForward::ref f(new MClientRequestForward(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend), false);
+  auto f = MClientRequestForward::factory::build(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend);
   messenger->send_message(f.detach(), m->get_source_inst());
 }

@@ -2984,7 +2983,7 @@ void MDSRank::bcast_mds_map()
   set<Session*> clients;
   sessionmap.get_client_session_set(clients);
   for (const auto &session : clients) {
-    MMDSMap::ref m(new MMDSMap(monc->get_fsid(), *mdsmap), false);
+    auto m = MMDSMap::factory::build(monc->get_fsid(), *mdsmap);
     session->get_connection()->send_message2(std::move(m));
   }
   last_client_mdsmap_bcast = mdsmap->get_epoch();
diff --git a/src/mds/MDSRank.h b/src/mds/MDSRank.h
index 49f05ea77f0b8..b15ccb99ac05a 100644
--- a/src/mds/MDSRank.h
+++ b/src/mds/MDSRank.h
@@ -371,40 +371,12 @@ class MDSRank {
     double get_dispatch_queue_max_age(utime_t now) const;

     void send_message_mds(const Message::ref& m, mds_rank_t mds);
-    void send_message_mds(Message* m, mds_rank_t mds) {
-      Message::ref mr(m, false);
-      send_message_mds(mr, mds);
-    }
     void forward_message_mds(const MClientRequest::const_ref& req, mds_rank_t mds);

     void send_message_client_counted(const Message::ref& m, client_t client);
-    void send_message_client_counted(Message* m, client_t client) {
-      Message::ref mr(m, false);
-      send_message_client_counted(mr, client);
-    }
     void send_message_client_counted(const Message::ref& m, Session* session);
-    void send_message_client_counted(Message* m, Session* session) {
-      Message::ref mr(m, false);
-      send_message_client_counted(mr, session);
-    }
     void send_message_client_counted(const Message::ref& m, const ConnectionRef& connection);
-    void send_message_client_counted(Message* m, const ConnectionRef& connection) {
-      Message::ref mr(m, false);
-      send_message_client_counted(mr, connection);
-    }
     void send_message_client(const Message::ref& m, Session* session);
-    void send_message_client(Message* m, Session* session) {
-      Message::ref mr(m, false);
-      send_message_client(mr, session);
-    }
     void send_message(const Message::ref& m, const ConnectionRef& c);
-    void send_message(Message* m, const ConnectionRef& c) {
-      send_message(m, c);
-    }

     void wait_for_active_peer(mds_rank_t who, MDSInternalContextBase *c) {
       waiting_for_active_peer[who].push_back(c);
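The MDSRank.h hunk above deletes the raw-pointer convenience overloads that silently adopted ownership via `Message::ref mr(m, false)`. With only the ref-taking overloads remaining, the leak-prone call form fails to compile instead of failing at runtime. A compact illustration of the same API-narrowing idea, with `std::unique_ptr` standing in for `Message::ref` (assumed names, not the real signatures):

    #include <memory>

    struct Msg {};
    using MsgRef = std::unique_ptr<Msg>;

    // Kept: the owning-handle overload; ownership transfer is explicit.
    void send_message_mds(MsgRef m, int rank) { (void)m; (void)rank; /* route */ }

    // The removed raw-pointer adapter, made explicit as a deleted overload.
    void send_message_mds(Msg* m, int rank) = delete;

    int main() {
      send_message_mds(std::make_unique<Msg>(), 0);  // ok
      // send_message_mds(new Msg, 0);               // error: overload deleted
    }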
diff --git a/src/mds/MDSTableClient.cc b/src/mds/MDSTableClient.cc
index 4a3a8b0777674..7381c87915846 100644
--- a/src/mds/MDSTableClient.cc
+++ b/src/mds/MDSTableClient.cc
@@ -102,7 +102,7 @@ void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m)
       dout(10) << "stray agree on " << reqid << " tid " << tid
                << ", sending ROLLBACK" << dendl;
       assert(!server_ready);
-      MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_ROLLBACK, 0, tid), false);
+      auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ROLLBACK, 0, tid);
       mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
     }
     break;
@@ -174,7 +174,7 @@ void MDSTableClient::_prepare(bufferlist& mutation, version_t *ptid, bufferlist
   if (server_ready) {
     // send message
-    MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_PREPARE, reqid), false);
+    auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_PREPARE, reqid);
     req->bl = mutation;
     mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
   } else
@@ -198,7 +198,7 @@ void MDSTableClient::commit(version_t tid, LogSegment *ls)

   if (server_ready) {
     // send message
-    MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_COMMIT, 0, tid), false);
+    auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_COMMIT, 0, tid);
     mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
   } else
     dout(10) << "tableserver is not ready yet, deferring request" << dendl;
@@ -232,7 +232,7 @@ void MDSTableClient::resend_commits()
        p != pending_commit.end();
        ++p) {
     dout(10) << "resending commit on " << p->first << dendl;
-    MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_COMMIT, 0, p->first), false);
+    auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_COMMIT, 0, p->first);
     mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
   }
 }
@@ -248,7 +248,7 @@ void MDSTableClient::resend_prepares()
        p != pending_prepare.end();
        ++p) {
     dout(10) << "resending prepare on " << p->first << dendl;
-    MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_PREPARE, p->first), false);
+    auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_PREPARE, p->first);
     req->bl = p->second.mutation;
     mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
   }
diff --git a/src/mds/MDSTableServer.cc b/src/mds/MDSTableServer.cc
index 250e3405602e4..07aa20a213ea6 100644
--- a/src/mds/MDSTableServer.cc
+++ b/src/mds/MDSTableServer.cc
@@ -80,7 +80,7 @@ void MDSTableServer::_prepare_logged(const MMDSTableRequest::const_ref &req, ver
   _prepare(req->bl, req->reqid, from, out);
   assert(version == tid);

-  MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_AGREE, req->reqid, tid), false);
+  auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, req->reqid, tid);
   reply->bl = std::move(out);

   if (_notify_prep(tid)) {
@@ -153,7 +153,7 @@ void MDSTableServer::handle_commit(const MMDSTableRequest::const_ref &req)
   else if (tid <= version) {
     dout(0) << "got commit for tid " << tid << " <= " << version
            << ", already committed, sending ack."
            << dendl;
-    MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_ACK, req->reqid, tid), false);
+    auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ACK, req->reqid, tid);
     mds->send_message(reply, req->get_connection());
   }
   else {
@@ -176,7 +176,7 @@ void MDSTableServer::_commit_logged(const MMDSTableRequest::const_ref &req)
   _commit(tid, req);
   _note_commit(tid);

-  MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_ACK, req->reqid, req->get_tid()), false);
+  auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ACK, req->reqid, req->get_tid());
   mds->send_message_mds(reply, mds_rank_t(req->get_source().num()));
 }
@@ -281,13 +281,13 @@ void MDSTableServer::_do_server_recovery()
       next_reqids[who] = p.second.reqid + 1;

     version_t tid = p.second.tid;
-    MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_AGREE, p.second.reqid, tid), false);
+    auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, p.second.reqid, tid);
     _get_reply_buffer(tid, &reply->bl);
     mds->send_message_mds(reply, who);
   }

   for (auto p : active_clients) {
-    MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_SERVER_READY, next_reqids[p]), false);
+    auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_SERVER_READY, next_reqids[p]);
     mds->send_message_mds(reply, p);
   }
   recovered = true;
@@ -331,12 +331,12 @@ void MDSTableServer::handle_mds_recovery(mds_rank_t who)
     if (p->second.reqid >= next_reqid)
       next_reqid = p->second.reqid + 1;

-    MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_AGREE, p->second.reqid, p->second.tid), false);
+    auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, p->second.reqid, p->second.tid);
     _get_reply_buffer(p->second.tid, &reply->bl);
     mds->send_message_mds(reply, who);
   }

-  MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_SERVER_READY, next_reqid), false);
+  auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_SERVER_READY, next_reqid);
   mds->send_message_mds(reply, who);
 }
diff --git a/src/mds/Migrator.cc b/src/mds/Migrator.cc
index c3ed9b63e013f..1150ff31b0d81 100644
--- a/src/mds/Migrator.cc
+++ b/src/mds/Migrator.cc
@@ -273,7 +273,7 @@ void Migrator::export_try_cancel(CDir *dir, bool notify_peer)
     if (notify_peer &&
        (!mds->is_cluster_degraded() ||
         mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer)))  // tell them.
-      mds->send_message_mds(new MExportDirCancel(dir->dirfrag(), it->second.tid), it->second.peer);
+      mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
     break;

   case EXPORT_FREEZING:
@@ -285,7 +285,7 @@ void Migrator::export_try_cancel(CDir *dir, bool notify_peer)
     if (notify_peer &&
        (!mds->is_cluster_degraded() ||
         mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer)))  // tell them.
-      mds->send_message_mds(new MExportDirCancel(dir->dirfrag(), it->second.tid), it->second.peer);
+      mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
     break;

     // NOTE: state order reversal, warning comes after prepping
@@ -327,7 +327,7 @@ void Migrator::export_try_cancel(CDir *dir, bool notify_peer)
     if (notify_peer &&
        (!mds->is_cluster_degraded() ||
         mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer)))  // tell them.
-      mds->send_message_mds(new MExportDirCancel(dir->dirfrag(), it->second.tid), it->second.peer);
+      mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
     break;

   case EXPORT_EXPORTING:
@@ -935,9 +935,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count)
   // send ExportDirDiscover (ask target)
   filepath path;
   dir->inode->make_path(path);
-  MExportDirDiscover *discover = new MExportDirDiscover(dir->dirfrag(), path,
-                                                        mds->get_nodeid(),
-                                                        it->second.tid);
+  auto discover = MExportDirDiscover::factory::build(dir->dirfrag(), path, mds->get_nodeid(), it->second.tid);
   mds->send_message_mds(discover, dest);
   assert(g_conf()->mds_kill_export_at != 2);
@@ -1052,7 +1050,7 @@ void Migrator::export_frozen(CDir *dir, uint64_t tid)
     dir->unfreeze_tree();
     cache->try_subtree_merge(dir);

-    mds->send_message_mds(new MExportDirCancel(dir->dirfrag(), it->second.tid), it->second.peer);
+    mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
     export_state.erase(it);

     dir->clear_exporting();
@@ -1080,7 +1078,7 @@ void Migrator::export_frozen(CDir *dir, uint64_t tid)
   cache->get_subtree_bounds(dir, bounds);

   // generate prep message, log entry.
-  MExportDirPrep *prep = new MExportDirPrep(dir->dirfrag(), it->second.tid);
+  auto prep = MExportDirPrep::factory::build(dir->dirfrag(), it->second.tid);

   // include list of bystanders
   for (const auto &p : dir->get_replicas()) {
@@ -1313,9 +1311,9 @@ void Migrator::handle_export_prep_ack(const MExportDirPrepAck::const_ref &m)
     it->second.warning_ack_waiting.insert(p.first);
     it->second.notify_ack_waiting.insert(p.first);  // we'll eventually get a notifyack, too!

-    MExportDirNotify *notify = new MExportDirNotify(dir->dirfrag(), it->second.tid, true,
-                                                    mds_authority_t(mds->get_nodeid(),CDIR_AUTH_UNKNOWN),
-                                                    mds_authority_t(mds->get_nodeid(),it->second.peer));
+    auto notify = MExportDirNotify::factory::build(dir->dirfrag(), it->second.tid, true,
                                                   mds_authority_t(mds->get_nodeid(),CDIR_AUTH_UNKNOWN),
                                                   mds_authority_t(mds->get_nodeid(),it->second.peer));
     for (auto &cdir : bounds) {
       notify->get_bounds().push_back(cdir->dirfrag());
     }
@@ -1386,7 +1384,7 @@ void Migrator::export_go_synced(CDir *dir, uint64_t tid)
   mds->balancer->subtract_export(dir);

   // fill export message with cache data
-  MExportDir *req = new MExportDir(dir->dirfrag(), it->second.tid);
+  auto req = MExportDir::factory::build(dir->dirfrag(), it->second.tid);
   map<client_t,entity_inst_t> exported_client_map;
   map<client_t,client_metadata_t> exported_client_metadata_map;
   uint64_t num_exported_inodes = encode_export_dir(req->export_data,
@@ -1487,7 +1485,7 @@ void Migrator::finish_export_inode_caps(CInode *in, mds_rank_t peer,
     const Capability *cap = &p.second;
     dout(7) << "finish_export_inode_caps telling client."
<< p.first << " exported caps on " << *in << dendl; - MClientCaps *m = new MClientCaps(CEPH_CAP_OP_EXPORT, in->ino(), 0, + auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, in->ino(), 0, cap->get_cap_id(), cap->get_mseq(), mds->get_osd_epoch_barrier()); map::iterator q = peer_imported.find(p.first); @@ -1767,9 +1765,9 @@ void Migrator::export_notify_abort(CDir *dir, export_state_t& stat, set& for (set::iterator p = stat.notify_ack_waiting.begin(); p != stat.notify_ack_waiting.end(); ++p) { - MExportDirNotify *notify = new MExportDirNotify(dir->dirfrag(), stat.tid, true, - pair(mds->get_nodeid(), stat.peer), - pair(mds->get_nodeid(), CDIR_AUTH_UNKNOWN)); + auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true, + pair(mds->get_nodeid(), stat.peer), + pair(mds->get_nodeid(), CDIR_AUTH_UNKNOWN)); for (set::iterator i = bounds.begin(); i != bounds.end(); ++i) notify->get_bounds().push_back((*i)->dirfrag()); mds->send_message_mds(notify, *p); @@ -1873,9 +1871,9 @@ void Migrator::export_logged_finish(CDir *dir) for (set::iterator p = stat.notify_ack_waiting.begin(); p != stat.notify_ack_waiting.end(); ++p) { - MExportDirNotify *notify = new MExportDirNotify(dir->dirfrag(), stat.tid, true, - pair(mds->get_nodeid(), stat.peer), - pair(stat.peer, CDIR_AUTH_UNKNOWN)); + auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true, + pair(mds->get_nodeid(), stat.peer), + pair(stat.peer, CDIR_AUTH_UNKNOWN)); for (set::iterator i = bounds.begin(); i != bounds.end(); ++i) notify->get_bounds().push_back((*i)->dirfrag()); @@ -1894,7 +1892,7 @@ void Migrator::export_logged_finish(CDir *dir) // notify peer to send cap import messages to clients if (!mds->is_cluster_degraded() || mds->mdsmap->is_clientreplay_or_active_or_stopping(stat.peer)) { - mds->send_message_mds(new MExportDirFinish(dir->dirfrag(), false, stat.tid), stat.peer); + mds->send_message_mds(MExportDirFinish::factory::build(dir->dirfrag(), false, stat.tid), stat.peer); } else { dout(7) << "not sending MExportDirFinish, dest has failed" << dendl; } @@ -1977,7 +1975,7 @@ void Migrator::export_finish(CDir *dir) // send finish/commit to new auth if (!mds->is_cluster_degraded() || mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer)) { - mds->send_message_mds(new MExportDirFinish(dir->dirfrag(), true, it->second.tid), it->second.peer); + mds->send_message_mds(MExportDirFinish::factory::build(dir->dirfrag(), true, it->second.tid), it->second.peer); } else { dout(7) << "not sending MExportDirFinish last, dest has failed" << dendl; } @@ -2093,7 +2091,7 @@ void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bo if (!mds->is_active()) { dout(7) << " not active, send NACK " << dendl; - mds->send_message_mds(new MExportDirDiscoverAck(df, m->get_tid(), false), from); + mds->send_message_mds(MExportDirDiscoverAck::factory::build(df, m->get_tid(), false), from); return; } @@ -2155,7 +2153,7 @@ void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bo // reply dout(7) << " sending export_discover_ack on " << *in << dendl; - mds->send_message_mds(new MExportDirDiscoverAck(df, m->get_tid()), p_state->peer); + mds->send_message_mds(MExportDirDiscoverAck::factory::build(df, m->get_tid()), p_state->peer); assert (g_conf()->mds_kill_import_at != 2); } @@ -2423,7 +2421,7 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a // ok! 
dout(7) << " sending export_prep_ack on " << *dir << dendl; - mds->send_message(new MExportDirPrepAck(dir->dirfrag(), success, m->get_tid()), m->get_connection()); + mds->send_message(MExportDirPrepAck::factory::build(dir->dirfrag(), success, m->get_tid()), m->get_connection()); assert(g_conf()->mds_kill_import_at != 4); } @@ -2723,10 +2721,9 @@ void Migrator::import_notify_finish(CDir *dir, set& bounds) for (set::iterator p = stat.bystanders.begin(); p != stat.bystanders.end(); ++p) { - MExportDirNotify *notify = - new MExportDirNotify(dir->dirfrag(), stat.tid, false, - pair(stat.peer, mds->get_nodeid()), - pair(mds->get_nodeid(), CDIR_AUTH_UNKNOWN)); + auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, false, + pair(stat.peer, mds->get_nodeid()), + pair(mds->get_nodeid(), CDIR_AUTH_UNKNOWN)); for (set::iterator i = bounds.begin(); i != bounds.end(); ++i) notify->get_bounds().push_back((*i)->dirfrag()); mds->send_message_mds(notify, *p); @@ -2746,10 +2743,9 @@ void Migrator::import_notify_abort(CDir *dir, set& bounds) stat.bystanders.erase(p++); continue; } - MExportDirNotify *notify = - new MExportDirNotify(dir->dirfrag(), stat.tid, true, - mds_authority_t(stat.peer, mds->get_nodeid()), - mds_authority_t(stat.peer, CDIR_AUTH_UNKNOWN)); + auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true, + mds_authority_t(stat.peer, mds->get_nodeid()), + mds_authority_t(stat.peer, CDIR_AUTH_UNKNOWN)); for (set::iterator i = bounds.begin(); i != bounds.end(); ++i) notify->get_bounds().push_back((*i)->dirfrag()); mds->send_message_mds(notify, *p); @@ -2838,7 +2834,7 @@ void Migrator::import_logged_start(dirfrag_t df, CDir *dir, mds_rank_t from, // test surviving observer of a failed migration that did not complete //assert(dir->replica_map.size() < 2 || mds->get_nodeid() != 0); - MExportDirAck *ack = new MExportDirAck(dir->dirfrag(), it->second.tid); + auto ack = MExportDirAck::factory::build(dir->dirfrag(), it->second.tid); encode(imported_caps, ack->imported_caps); mds->send_message_mds(ack, from); @@ -3266,7 +3262,7 @@ void Migrator::handle_export_notify(const MExportDirNotify::const_ref &m) // send ack if (m->wants_ack()) { - mds->send_message_mds(new MExportDirNotifyAck(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from); + mds->send_message_mds(MExportDirNotifyAck::factory::build(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from); } else { // aborted. no ack. dout(7) << "handle_export_notify no ack requested" << dendl; @@ -3284,7 +3280,7 @@ void Migrator::export_caps(CInode *in) assert(!in->is_ambiguous_auth()); assert(!in->state_test(CInode::STATE_EXPORTINGCAPS)); - MExportCaps *ex = new MExportCaps; + auto ex = MExportCaps::factory::build(); ex->ino = in->ino(); encode_export_inode_caps(in, false, ex->cap_bl, ex->client_map, ex->client_metadata_map); @@ -3315,7 +3311,7 @@ void Migrator::handle_export_caps_ack(const MExportCapsAck::const_ref &ack) dout(7) << __func__ << " telling client." 
<< it.first << " exported caps on " << *in << dendl; - MClientCaps *m = new MClientCaps(CEPH_CAP_OP_EXPORT, in->ino(), 0, + auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, in->ino(), 0, cap->get_cap_id(), cap->get_mseq(), mds->get_osd_epoch_barrier()); m->set_cap_peer(it.second.cap_id, it.second.issue_seq, it.second.mseq, from, 0); @@ -3415,7 +3411,7 @@ void Migrator::logged_import_caps(CInode *in, mds->locker->eval(in, CEPH_CAP_LOCKS, true); if (!imported_caps.empty()) { - MExportCapsAck *ack = new MExportCapsAck(in->ino()); + auto ack = MExportCapsAck::factory::build(in->ino()); map peer_caps_ids; for (auto &p : imported_caps ) peer_caps_ids[p.first] = it->second.at(p.first).cap_id; diff --git a/src/mds/Server.cc b/src/mds/Server.cc index 6e4b83727f814..e9ff4a5278b96 100644 --- a/src/mds/Server.cc +++ b/src/mds/Server.cc @@ -335,7 +335,7 @@ void Server::handle_client_session(const MClientSession::const_ref &m) { auto send_reject_message = [this, session](std::string_view err_str) { - MClientSession *m = new MClientSession(CEPH_SESSION_REJECT); + auto m = MClientSession::factory::build(CEPH_SESSION_REJECT); if (session->info.has_feature(CEPHFS_FEATURE_MIMIC)) m->metadata["error_string"] = err_str; mds->send_message_client(m, session); @@ -427,7 +427,7 @@ void Server::handle_client_session(const MClientSession::const_ref &m) mds->locker->resume_stale_caps(session); mds->sessionmap.touch_session(session); } - m->get_connection()->send_message(new MClientSession(CEPH_SESSION_RENEWCAPS, m->get_seq())); + m->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_RENEWCAPS, m->get_seq())); } else { dout(10) << "ignoring renewcaps on non open|stale session (" << session->get_state_name() << ")" << dendl; } @@ -491,7 +491,7 @@ void Server::flush_client_sessions(set& client_set, MDSGatherBuilder& !session->get_connection()->has_feature(CEPH_FEATURE_EXPORT_PEER)) continue; version_t seq = session->wait_for_flush(gather.new_sub()); - mds->send_message_client(new MClientSession(CEPH_SESSION_FLUSHMSG, seq), session); + mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FLUSHMSG, seq), session); } } @@ -528,12 +528,12 @@ void Server::_session_logged(Session *session, uint64_t state_seq, bool open, ve mds->sessionmap.set_state(session, Session::STATE_OPEN); mds->sessionmap.touch_session(session); assert(session->get_connection()); - MClientSession *reply = new MClientSession(CEPH_SESSION_OPEN); + auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN); if (session->info.has_feature(CEPHFS_FEATURE_MIMIC)) reply->supported_features = supported_features; - session->get_connection()->send_message(reply); + session->get_connection()->send_message2(reply); if (mdcache->is_readonly()) - session->get_connection()->send_message(new MClientSession(CEPH_SESSION_FORCE_RO)); + session->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_FORCE_RO)); } else if (session->is_closing() || session->is_killing()) { // kill any lingering capabilities, leases, requests @@ -573,7 +573,7 @@ void Server::_session_logged(Session *session, uint64_t state_seq, bool open, ve } // reset session - mds->send_message_client(new MClientSession(CEPH_SESSION_CLOSE), session); + mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_CLOSE), session); mds->sessionmap.set_state(session, Session::STATE_CLOSED); session->clear(); mds->sessionmap.remove_session(session); @@ -670,13 +670,13 @@ void Server::finish_force_open_sessions(const 
mapsessionmap.set_state(session, Session::STATE_OPEN); mds->sessionmap.touch_session(session); - MClientSession *reply = new MClientSession(CEPH_SESSION_OPEN); + auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN); if (session->info.has_feature(CEPHFS_FEATURE_MIMIC)) reply->supported_features = supported_features; mds->send_message_client(reply, session); if (mdcache->is_readonly()) - mds->send_message_client(new MClientSession(CEPH_SESSION_FORCE_RO), session); + mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FORCE_RO), session); } } else { dout(10) << "force_open_sessions skipping already-open " << session->info.inst << dendl; @@ -751,7 +751,7 @@ void Server::find_idle_sessions() mds->sessionmap.set_state(session, Session::STATE_STALE); mds->locker->revoke_stale_caps(session); mds->locker->remove_stale_leases(session); - mds->send_message_client(new MClientSession(CEPH_SESSION_STALE, session->get_push_seq()), session); + mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_STALE, session->get_push_seq()), session); finish_flush_session(session, session->get_push_seq()); } @@ -980,7 +980,7 @@ void Server::handle_client_reconnect(const MClientReconnect::const_ref &m) } if (deny) { - m->get_connection()->send_message(new MClientSession(CEPH_SESSION_CLOSE)); + m->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_CLOSE)); if (session->is_open()) kill_session(session, nullptr); return; @@ -994,10 +994,10 @@ void Server::handle_client_reconnect(const MClientReconnect::const_ref &m) } // notify client of success with an OPEN - MClientSession *reply = new MClientSession(CEPH_SESSION_OPEN); + auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN); if (session->info.has_feature(CEPHFS_FEATURE_MIMIC)) reply->supported_features = supported_features; - m->get_connection()->send_message(reply); + m->get_connection()->send_message2(reply); session->last_cap_renew = ceph_clock_now(); mds->clog->debug() << "reconnect by " << session->info.inst << " after " << delay; @@ -1263,7 +1263,7 @@ void Server::recall_client_state(void) uint64_t newlim = std::max(std::min((session->caps.size() * ratio), max_caps_per_client), min_caps_per_client); if (session->caps.size() > newlim) { - MClientSession *m = new MClientSession(CEPH_SESSION_RECALL_STATE); + auto m = MClientSession::factory::build(CEPH_SESSION_RECALL_STATE); m->head.max_caps = newlim; mds->send_message_client(m, session); session->notify_recall_sent(newlim); @@ -1283,7 +1283,7 @@ void Server::force_clients_readonly() if (!session->info.inst.name.is_client() || !(session->is_open() || session->is_stale())) continue; - mds->send_message_client(new MClientSession(CEPH_SESSION_FORCE_RO), session); + mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FORCE_RO), session); } } @@ -1339,7 +1339,7 @@ void Server::submit_mdlog_entry(LogEvent *le, MDSLogContextBase *fin, MDRequestR void Server::respond_to_request(MDRequestRef& mdr, int r) { if (mdr->client_request) { - reply_client_request(mdr, MClientReply::ref(new MClientReply(*mdr->client_request, r), false)); + reply_client_request(mdr, MClientReply::factory::build(*mdr->client_request, r)); } else if (mdr->internal_op > -1) { dout(10) << "respond_to_request on internal request " << mdr << dendl; if (!mdr->internal_op_finish) @@ -1474,7 +1474,7 @@ void Server::early_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn) } - MClientReply::ref reply(new MClientReply(*req, 0), false); + auto reply = 
MClientReply::factory::build(*req, 0); reply->set_unsafe(); // mark xlocks "done", indicating that we are exposing uncommitted changes. @@ -1760,7 +1760,7 @@ void Server::handle_client_request(const MClientRequest::const_ref &req) req->get_op() != CEPH_MDS_OP_OPEN && req->get_op() != CEPH_MDS_OP_CREATE)) { dout(5) << "already completed " << req->get_reqid() << dendl; - MClientReply::ref reply(new MClientReply(*req, 0), false); + auto reply = MClientReply::factory::build(*req, 0); if (created != inodeno_t()) { bufferlist extra; encode(created, extra); @@ -2029,7 +2029,7 @@ void Server::handle_slave_request(const MMDSSlaveRequest::const_ref &m) // the purpose of rename notify is enforcing causal message ordering. making sure // bystanders have received all messages from rename srcdn's auth MDS. if (m->get_op() == MMDSSlaveRequest::OP_RENAMENOTIFY) { - MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK), false); + auto reply = MMDSSlaveRequest::factory::build(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK); mds->send_message(reply, m->get_connection()); return; } @@ -2254,7 +2254,7 @@ void Server::dispatch_slave_request(MDRequestRef& mdr) return; // ack - MMDSSlaveRequest::ref r(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, replycode), false); + auto r = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, replycode); r->set_lock_type(lock->get_type()); lock->get_parent()->set_object_info(r->get_object_info()); if (replycode == MMDSSlaveRequest::OP_XLOCKACK) @@ -2420,7 +2420,7 @@ void Server::handle_slave_auth_pin(MDRequestRef& mdr) } // ack! - MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK), false); + auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK); // return list of my auth_pins (if any) for (set::iterator p = mdr->auth_pins.begin(); @@ -4733,7 +4733,7 @@ void Server::create_quota_realm(CInode *in) { dout(10) << __func__ << " " << *in << dendl; - MClientRequest::ref req(new MClientRequest(CEPH_MDS_OP_SETXATTR), false); + auto req = MClientRequest::factory::build(CEPH_MDS_OP_SETXATTR); req->set_filepath(filepath(in->ino())); req->set_string2("ceph.quota"); // empty vxattr value @@ -5687,7 +5687,7 @@ void Server::_link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targ op = MMDSSlaveRequest::OP_LINKPREP; else op = MMDSSlaveRequest::OP_UNLINKPREP; - MMDSSlaveRequest::ref req(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, op), false); + auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, op); targeti->set_object_info(req->get_object_info()); req->op_stamp = mdr->get_op_stamp(); if (auto& desti_srnode = mdr->more()->desti_srnode) @@ -5935,7 +5935,7 @@ void Server::_logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_ // ack if (!mdr->aborted) { - MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK)); + auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK); mds->send_message_mds(reply, mdr->slave_to_mds); } else { dout(10) << " abort flag set, finishing" << dendl; @@ -5980,7 +5980,7 @@ void Server::_committed_slave(MDRequestRef& mdr) assert(g_conf()->mds_kill_link_at != 8); - MMDSSlaveRequest::ref req(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED), false); + auto req = 
MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED); mds->send_message_mds(req, mdr->slave_to_mds); mdcache->request_finish(mdr); } @@ -6491,7 +6491,7 @@ bool Server::_rmdir_prepare_witness(MDRequestRef& mdr, mds_rank_t who, vectorreqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP), false); + auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP); req->srcdnpath = filepath(trace.front()->get_dir()->ino()); for (auto dn : trace) req->srcdnpath.push_dentry(dn->get_name()); @@ -6640,7 +6640,7 @@ void Server::_logged_slave_rmdir(MDRequestRef& mdr, CDentry *dn, CDentry *strayd mdr->straydn = 0; if (!mdr->aborted) { - MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK), false); + auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK); if (!mdr->more()->slave_update_journaled) reply->mark_not_journaled(); mds->send_message_mds(reply, mdr->slave_to_mds); @@ -7412,7 +7412,7 @@ bool Server::_rename_prepare_witness(MDRequestRef& mdr, mds_rank_t who, setreqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP), false); + auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP); req->srcdnpath = filepath(srctrace.front()->get_dir()->ino()); for (auto dn : srctrace) @@ -8081,7 +8081,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr) if (mdr->slave_request->is_interrupted()) { dout(10) << " slave request interrupted, sending noop reply" << dendl; - MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK), false); + auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK); reply->mark_interrupted(); mds->send_message_mds(reply, mdr->slave_to_mds); mdr->slave_request = 0; @@ -8185,7 +8185,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr) (mds->is_cluster_degraded() && !mds->mdsmap->is_clientreplay_or_active_or_stopping(*p))) continue; - MMDSSlaveRequest::ref notify(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY), false); + auto notify = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY); mds->send_message_mds(notify, *p); mdr->more()->waiting_on_slave.insert(*p); } @@ -8214,7 +8214,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr) if (reply_witness) { assert(!srcdnrep.empty()); - MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK), false); + auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK); reply->witnesses.swap(srcdnrep); mds->send_message_mds(reply, mdr->slave_to_mds); mdr->slave_request = 0; @@ -8317,7 +8317,7 @@ void Server::_logged_slave_rename(MDRequestRef& mdr, // prepare ack MMDSSlaveRequest::ref reply; if (!mdr->aborted) { - reply.reset(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK), false); + reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK); if (!mdr->more()->slave_update_journaled) reply->mark_not_journaled(); } diff --git a/src/mds/Server.h b/src/mds/Server.h index bdbba34081ade..d848bd6cab270 100644 --- a/src/mds/Server.h +++ b/src/mds/Server.h @@ -17,7 +17,6 @@ #include -#include "messages/MClientCaps.h" #include 
"messages/MClientReconnect.h" #include "messages/MClientReply.h" #include "messages/MClientRequest.h" diff --git a/src/mds/SnapClient.cc b/src/mds/SnapClient.cc index b4fea91a6bfcf..928df8b8e88d8 100644 --- a/src/mds/SnapClient.cc +++ b/src/mds/SnapClient.cc @@ -115,7 +115,7 @@ void SnapClient::handle_notify_prep(const MMDSTableRequest::const_ref &m) { dout(10) << __func__ << " " << *m << dendl; handle_query_result(m); - MMDSTableRequest::ref ack(new MMDSTableRequest(table, TABLESERVER_OP_NOTIFY_ACK, 0, m->get_tid()), false); + auto ack = MMDSTableRequest::factory::build(table, TABLESERVER_OP_NOTIFY_ACK, 0, m->get_tid()); mds->send_message(ack, m->get_connection()); } @@ -153,7 +153,7 @@ void SnapClient::refresh(version_t want, MDSInternalContextBase *onfinish) return; mds_rank_t ts = mds->mdsmap->get_tableserver(); - MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_QUERY, ++last_reqid, 0), false); + auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_QUERY, ++last_reqid, 0); using ceph::encode; char op = 'F'; encode(op, req->bl); diff --git a/src/mds/SnapRealm.cc b/src/mds/SnapRealm.cc index ff3a309681121..7b0302665b56b 100644 --- a/src/mds/SnapRealm.cc +++ b/src/mds/SnapRealm.cc @@ -19,8 +19,6 @@ #include -#include "messages/MClientSnap.h" - /* * SnapRealm diff --git a/src/mds/SnapServer.cc b/src/mds/SnapServer.cc index 78c5bc67ac00e..67e08b2343168 100644 --- a/src/mds/SnapServer.cc +++ b/src/mds/SnapServer.cc @@ -262,7 +262,7 @@ bool SnapServer::_notify_prep(version_t tid) assert(version == tid); for (auto &p : active_clients) { - MMDSTableRequest::ref m(new MMDSTableRequest(table, TABLESERVER_OP_NOTIFY_PREP, 0, version), false); + auto m = MMDSTableRequest::factory::build(table, TABLESERVER_OP_NOTIFY_PREP, 0, version); m->bl = bl; mds->send_message_mds(m, p); } @@ -277,7 +277,7 @@ void SnapServer::handle_query(const MMDSTableRequest::const_ref &req) auto p = req->bl.cbegin(); decode(op, p); - MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_QUERY_REPLY, req->reqid, version), false); + auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_QUERY_REPLY, req->reqid, version); switch (op) { case 'F': // full @@ -349,8 +349,8 @@ void SnapServer::check_osd_map(bool force) if (!all_purge.empty()) { dout(10) << "requesting removal of " << all_purge << dendl; - MRemoveSnaps *m = new MRemoveSnaps(all_purge); - mon_client->send_mon_message(m); + auto m = MRemoveSnaps::factory::build(all_purge); + mon_client->send_mon_message(m.detach()); } last_checked_osdmap = version; diff --git a/src/mds/StrayManager.cc b/src/mds/StrayManager.cc index 08c64f61c2042..053b72ef3e5d0 100644 --- a/src/mds/StrayManager.cc +++ b/src/mds/StrayManager.cc @@ -663,7 +663,7 @@ void StrayManager::reintegrate_stray(CDentry *straydn, CDentry *rdn) filepath dst; rdn->make_path(dst); - MClientRequest *req = new MClientRequest(CEPH_MDS_OP_RENAME); + auto req = MClientRequest::factory::build(CEPH_MDS_OP_RENAME); req->set_filepath(dst); req->set_filepath2(src); req->set_tid(mds->issue_tid()); @@ -692,7 +692,7 @@ void StrayManager::migrate_stray(CDentry *dn, mds_rank_t to) dst.push_dentry(src[0]); dst.push_dentry(src[1]); - MClientRequest *req = new MClientRequest(CEPH_MDS_OP_RENAME); + auto req = MClientRequest::factory::build(CEPH_MDS_OP_RENAME); req->set_filepath(dst); req->set_filepath2(src); req->set_tid(mds->issue_tid()); diff --git a/src/messages/MCacheExpire.h b/src/messages/MCacheExpire.h index 3f0f953c53971..1fd699b8ef4aa 100644 --- 
a/src/messages/MCacheExpire.h +++ b/src/messages/MCacheExpire.h @@ -70,11 +70,11 @@ public: int get_from() const { return from; } +protected: MCacheExpire() : Message(MSG_MDS_CACHEEXPIRE), from(-1) {} MCacheExpire(int f) : Message(MSG_MDS_CACHEEXPIRE), from(f) { } -private: ~MCacheExpire() override {} public: diff --git a/src/messages/MClientCaps.h b/src/messages/MClientCaps.h index b3f089610679c..323634261d9bb 100644 --- a/src/messages/MClientCaps.h +++ b/src/messages/MClientCaps.h @@ -131,6 +131,7 @@ private: void clear_dirty() { head.dirty = 0; } +protected: MClientCaps() : Message(CEPH_MSG_CLIENT_CAPS, HEAD_VERSION, COMPAT_VERSION) {} MClientCaps(int op, @@ -170,11 +171,11 @@ private: head.migrate_seq = mseq; memset(&peer, 0, sizeof(peer)); } + ~MClientCaps() override {} + private: file_layout_t layout; - ~MClientCaps() override {} - public: const char *get_type_name() const override { return "Cfcap";} void print(ostream& out) const override { diff --git a/src/messages/MClientLease.h b/src/messages/MClientLease.h index 4dc8b5247df64..25237c649ae21 100644 --- a/src/messages/MClientLease.h +++ b/src/messages/MClientLease.h @@ -37,6 +37,7 @@ public: snapid_t get_first() const { return snapid_t(h.first); } snapid_t get_last() const { return snapid_t(h.last); } +protected: MClientLease() : Message(CEPH_MSG_CLIENT_LEASE) {} MClientLease(const MClientLease& m) : Message(CEPH_MSG_CLIENT_LEASE), @@ -63,7 +64,6 @@ public: h.last = sl; h.duration_ms = 0; } -private: ~MClientLease() override {} public: diff --git a/src/messages/MClientQuota.h b/src/messages/MClientQuota.h index 701f2155cf2da..cd58a9ed6f0fd 100644 --- a/src/messages/MClientQuota.h +++ b/src/messages/MClientQuota.h @@ -14,11 +14,11 @@ public: nest_info_t rstat; quota_info_t quota; +protected: MClientQuota() : Message(CEPH_MSG_CLIENT_QUOTA), ino(0) {} -private: ~MClientQuota() override {} public: diff --git a/src/messages/MClientReply.h b/src/messages/MClientReply.h index 0892c13caa834..1a4765521b175 100644 --- a/src/messages/MClientReply.h +++ b/src/messages/MClientReply.h @@ -281,6 +281,7 @@ public: bool is_safe() const { return head.safe; } +protected: MClientReply() : Message(CEPH_MSG_CLIENT_REPLY) {} MClientReply(const MClientRequest &req, int result = 0) : Message(CEPH_MSG_CLIENT_REPLY) { @@ -290,7 +291,6 @@ public: head.result = result; head.safe = 1; } -private: ~MClientReply() override {} public: diff --git a/src/messages/MClientRequest.h b/src/messages/MClientRequest.h index 98533d3622405..12a28dafc1bda 100644 --- a/src/messages/MClientRequest.h +++ b/src/messages/MClientRequest.h @@ -91,7 +91,7 @@ public: /* XXX HACK */ mutable bool queued_for_replay = false; - public: +protected: // cons MClientRequest() : Message(CEPH_MSG_CLIENT_REQUEST, HEAD_VERSION, COMPAT_VERSION) {} @@ -100,7 +100,6 @@ public: memset(&head, 0, sizeof(head)); head.op = op; } -private: ~MClientRequest() override {} public: diff --git a/src/messages/MClientRequestForward.h b/src/messages/MClientRequestForward.h index 79d62989bcd73..b303a0c765bef 100644 --- a/src/messages/MClientRequestForward.h +++ b/src/messages/MClientRequestForward.h @@ -29,7 +29,7 @@ private: int32_t num_fwd; bool client_must_resend; - public: +protected: MClientRequestForward() : Message(CEPH_MSG_CLIENT_REQUEST_FORWARD), dest_mds(-1), num_fwd(-1), client_must_resend(false) {} @@ -39,7 +39,6 @@ private: assert(client_must_resend); header.tid = t; } -private: ~MClientRequestForward() override {} public: diff --git a/src/messages/MClientSession.h b/src/messages/MClientSession.h index 
816a8f83b3048..3f2d1b9d193df 100644 --- a/src/messages/MClientSession.h +++ b/src/messages/MClientSession.h @@ -40,6 +40,7 @@ public: int get_max_caps() const { return head.max_caps; } int get_max_leases() const { return head.max_leases; } +protected: MClientSession() : Message(CEPH_MSG_CLIENT_SESSION, HEAD_VERSION, COMPAT_VERSION) { } MClientSession(int o, version_t s=0) : Message(CEPH_MSG_CLIENT_SESSION, HEAD_VERSION, COMPAT_VERSION) { @@ -54,7 +55,6 @@ public: head.seq = 0; st.encode_timeval(&head.stamp); } -private: ~MClientSession() override {} public: diff --git a/src/messages/MClientSnap.h b/src/messages/MClientSnap.h index 920a8b6ba21a4..21649cf9ea8d6 100644 --- a/src/messages/MClientSnap.h +++ b/src/messages/MClientSnap.h @@ -31,12 +31,12 @@ public: vector split_inos; vector split_realms; +protected: MClientSnap(int o=0) : Message(CEPH_MSG_CLIENT_SNAP) { memset(&head, 0, sizeof(head)); head.op = o; } -private: ~MClientSnap() override {} public: diff --git a/src/messages/MDentryLink.h b/src/messages/MDentryLink.h index 018e2b3b90da9..8b483c881855d 100644 --- a/src/messages/MDentryLink.h +++ b/src/messages/MDentryLink.h @@ -40,6 +40,7 @@ private: bufferlist bl; +protected: MDentryLink() : Message(MSG_MDS_DENTRYLINK) { } MDentryLink(dirfrag_t r, dirfrag_t df, std::string_view n, bool p) : @@ -48,7 +49,6 @@ private: dirfrag(df), dn(n), is_primary(p) {} -private: ~MDentryLink() override {} public: diff --git a/src/messages/MDentryUnlink.h b/src/messages/MDentryUnlink.h index ef2faa5804238..e30827bb9f80f 100644 --- a/src/messages/MDentryUnlink.h +++ b/src/messages/MDentryUnlink.h @@ -38,13 +38,13 @@ private: bufferlist straybl; bufferlist snapbl; +protected: MDentryUnlink() : Message(MSG_MDS_DENTRYUNLINK) { } MDentryUnlink(dirfrag_t df, std::string_view n) : Message(MSG_MDS_DENTRYUNLINK), dirfrag(df), dn(n) {} -private: ~MDentryUnlink() override {} public: diff --git a/src/messages/MDirUpdate.h b/src/messages/MDirUpdate.h index 4ecb79d649d22..c96ccaa1f2747 100644 --- a/src/messages/MDirUpdate.h +++ b/src/messages/MDirUpdate.h @@ -25,18 +25,6 @@ public: using factory = MessageFactory; friend factory; - MDirUpdate() : Message(MSG_MDS_DIRUPDATE) {} - MDirUpdate(mds_rank_t f, - dirfrag_t dirfrag, - int dir_rep, - const std::set& dir_rep_by, - filepath& path, - bool discover = false) : - Message(MSG_MDS_DIRUPDATE), from_mds(f), dirfrag(dirfrag), - dir_rep(dir_rep), dir_rep_by(dir_rep_by), path(path) { - this->discover = discover ? 5 : 0; - } - mds_rank_t get_source_mds() const { return from_mds; } dirfrag_t get_dirfrag() const { return dirfrag; } int get_dir_rep() const { return dir_rep; } @@ -74,6 +62,17 @@ public: protected: ~MDirUpdate() {} + MDirUpdate() : Message(MSG_MDS_DIRUPDATE) {} + MDirUpdate(mds_rank_t f, + dirfrag_t dirfrag, + int dir_rep, + const std::set& dir_rep_by, + filepath& path, + bool discover = false) : + Message(MSG_MDS_DIRUPDATE), from_mds(f), dirfrag(dirfrag), + dir_rep(dir_rep), dir_rep_by(dir_rep_by), path(path) { + this->discover = discover ? 
5 : 0; + } MDirUpdate(const MDirUpdate& m) : Message(MSG_MDS_DIRUPDATE), from_mds(m.from_mds), diff --git a/src/messages/MDiscover.h b/src/messages/MDiscover.h index d035253c44def..a55f50075d757 100644 --- a/src/messages/MDiscover.h +++ b/src/messages/MDiscover.h @@ -52,6 +52,7 @@ private: void set_base_dir_frag(frag_t f) { base_dir_frag = f; } +protected: MDiscover() : Message(MSG_MDS_DISCOVER) { } MDiscover(inodeno_t base_ino_, frag_t base_frag_, @@ -66,7 +67,6 @@ private: want(want_path_), want_base_dir(want_base_dir_), want_xlocked(discover_xlocks_) { } -private: ~MDiscover() override {} public: diff --git a/src/messages/MDiscoverReply.h b/src/messages/MDiscoverReply.h index bc15d7da750cc..c8ed8cbcf4b31 100644 --- a/src/messages/MDiscoverReply.h +++ b/src/messages/MDiscoverReply.h @@ -113,7 +113,7 @@ private: void set_base_dir_frag(frag_t df) { base_dir_frag = df; } - // cons +protected: MDiscoverReply() : Message(MSG_MDS_DISCOVERREPLY, HEAD_VERSION) { } MDiscoverReply(const MDiscover &dis) : Message(MSG_MDS_DISCOVERREPLY, HEAD_VERSION), @@ -145,7 +145,6 @@ private: { header.tid = 0; } -private: ~MDiscoverReply() override {} public: diff --git a/src/messages/MExportCaps.h b/src/messages/MExportCaps.h index f538dab256d65..437ec785df0d6 100644 --- a/src/messages/MExportCaps.h +++ b/src/messages/MExportCaps.h @@ -34,9 +34,9 @@ private: map client_map; map client_metadata_map; +protected: MExportCaps() : Message(MSG_MDS_EXPORTCAPS, HEAD_VERSION, COMPAT_VERSION) {} -private: ~MExportCaps() override {} public: diff --git a/src/messages/MExportCapsAck.h b/src/messages/MExportCapsAck.h index a6b800043130e..4479df5a9ddab 100644 --- a/src/messages/MExportCapsAck.h +++ b/src/messages/MExportCapsAck.h @@ -29,11 +29,11 @@ public: inodeno_t ino; bufferlist cap_bl; +protected: MExportCapsAck() : Message(MSG_MDS_EXPORTCAPSACK) {} MExportCapsAck(inodeno_t i) : Message(MSG_MDS_EXPORTCAPSACK), ino(i) {} -private: ~MExportCapsAck() override {} public: diff --git a/src/messages/MExportDir.h b/src/messages/MExportDir.h index 90d58cd1cc203..8c5e3284b3097 100644 --- a/src/messages/MExportDir.h +++ b/src/messages/MExportDir.h @@ -30,12 +30,12 @@ public: vector bounds; bufferlist client_map; +protected: MExportDir() : Message(MSG_MDS_EXPORTDIR) {} MExportDir(dirfrag_t df, uint64_t tid) : Message(MSG_MDS_EXPORTDIR), dirfrag(df) { set_tid(tid); } -private: ~MExportDir() override {} public: diff --git a/src/messages/MExportDirAck.h b/src/messages/MExportDirAck.h index 9aa49f516ec9e..e88df8494efef 100644 --- a/src/messages/MExportDirAck.h +++ b/src/messages/MExportDirAck.h @@ -30,12 +30,12 @@ public: dirfrag_t get_dirfrag() const { return dirfrag; } +protected: MExportDirAck() : Message(MSG_MDS_EXPORTDIRACK) {} MExportDirAck(dirfrag_t df, uint64_t tid) : Message(MSG_MDS_EXPORTDIRACK), dirfrag(df) { set_tid(tid); } -private: ~MExportDirAck() override {} public: diff --git a/src/messages/MExportDirCancel.h b/src/messages/MExportDirCancel.h index c63b4e9d8a7cd..635abfec825f6 100644 --- a/src/messages/MExportDirCancel.h +++ b/src/messages/MExportDirCancel.h @@ -30,12 +30,12 @@ private: public: dirfrag_t get_dirfrag() const { return dirfrag; } +protected: MExportDirCancel() : Message(MSG_MDS_EXPORTDIRCANCEL) {} MExportDirCancel(dirfrag_t df, uint64_t tid) : Message(MSG_MDS_EXPORTDIRCANCEL), dirfrag(df) { set_tid(tid); } -private: ~MExportDirCancel() override {} public: diff --git a/src/messages/MExportDirDiscover.h b/src/messages/MExportDirDiscover.h index 1b6f41815c093..bcd3f1f345042 100644 --- 
a/src/messages/MExportDirDiscover.h +++ b/src/messages/MExportDirDiscover.h @@ -37,6 +37,7 @@ private: bool started; +protected: MExportDirDiscover() : Message(MSG_MDS_EXPORTDIRDISCOVER), started(false) { } @@ -45,7 +46,6 @@ private: from(f), dirfrag(df), path(p), started(false) { set_tid(tid); } -private: ~MExportDirDiscover() override {} public: diff --git a/src/messages/MExportDirDiscoverAck.h b/src/messages/MExportDirDiscoverAck.h index ab76e7a37e805..a663506156fe1 100644 --- a/src/messages/MExportDirDiscoverAck.h +++ b/src/messages/MExportDirDiscoverAck.h @@ -33,13 +33,13 @@ private: dirfrag_t get_dirfrag() const { return dirfrag; } bool is_success() const { return success; } +protected: MExportDirDiscoverAck() : Message(MSG_MDS_EXPORTDIRDISCOVERACK) {} MExportDirDiscoverAck(dirfrag_t df, uint64_t tid, bool s=true) : Message(MSG_MDS_EXPORTDIRDISCOVERACK), dirfrag(df), success(s) { set_tid(tid); } -private: ~MExportDirDiscoverAck() override {} public: diff --git a/src/messages/MExportDirFinish.h b/src/messages/MExportDirFinish.h index 7c6f21d30cabb..8c19ba55b6406 100644 --- a/src/messages/MExportDirFinish.h +++ b/src/messages/MExportDirFinish.h @@ -31,12 +31,12 @@ private: dirfrag_t get_dirfrag() const { return dirfrag; } bool is_last() const { return last; } +protected: MExportDirFinish() : last(false) {} MExportDirFinish(dirfrag_t df, bool l, uint64_t tid) : Message(MSG_MDS_EXPORTDIRFINISH), dirfrag(df), last(l) { set_tid(tid); } -private: ~MExportDirFinish() override {} public: diff --git a/src/messages/MExportDirNotify.h b/src/messages/MExportDirNotify.h index 7d4b7da06ca17..3004f3665f910 100644 --- a/src/messages/MExportDirNotify.h +++ b/src/messages/MExportDirNotify.h @@ -37,13 +37,13 @@ private: const list& get_bounds() const { return bounds; } list& get_bounds() { return bounds; } +protected: MExportDirNotify() {} MExportDirNotify(dirfrag_t i, uint64_t tid, bool a, pair<__s32,__s32> oa, pair<__s32,__s32> na) : Message(MSG_MDS_EXPORTDIRNOTIFY), base(i), ack(a), old_auth(oa), new_auth(na) { set_tid(tid); } -private: ~MExportDirNotify() override {} public: diff --git a/src/messages/MExportDirNotifyAck.h b/src/messages/MExportDirNotifyAck.h index eea1ca5a60382..844d9ec77a096 100644 --- a/src/messages/MExportDirNotifyAck.h +++ b/src/messages/MExportDirNotifyAck.h @@ -31,12 +31,12 @@ private: dirfrag_t get_dirfrag() const { return dirfrag; } pair<__s32,__s32> get_new_auth() const { return new_auth; } +protected: MExportDirNotifyAck() {} MExportDirNotifyAck(dirfrag_t df, uint64_t tid, pair<__s32,__s32> na) : Message(MSG_MDS_EXPORTDIRNOTIFYACK), dirfrag(df), new_auth(na) { set_tid(tid); } -private: ~MExportDirNotifyAck() override {} public: diff --git a/src/messages/MExportDirPrep.h b/src/messages/MExportDirPrep.h index 9c2ea6f9598a2..01d5b244f7d13 100644 --- a/src/messages/MExportDirPrep.h +++ b/src/messages/MExportDirPrep.h @@ -43,6 +43,7 @@ public: bool did_assim() const { return b_did_assim; } void mark_assim() { b_did_assim = true; } +protected: MExportDirPrep() { b_did_assim = false; } @@ -51,7 +52,6 @@ public: dirfrag(df), b_did_assim(false) { set_tid(tid); } -private: ~MExportDirPrep() override {} public: diff --git a/src/messages/MExportDirPrepAck.h b/src/messages/MExportDirPrepAck.h index 7e8c9d4643232..130ce65ea38ce 100644 --- a/src/messages/MExportDirPrepAck.h +++ b/src/messages/MExportDirPrepAck.h @@ -31,12 +31,12 @@ private: public: dirfrag_t get_dirfrag() const { return dirfrag; } +protected: MExportDirPrepAck() {} MExportDirPrepAck(dirfrag_t df, bool s, uint64_t tid) : 
Message(MSG_MDS_EXPORTDIRPREPACK), dirfrag(df), success(s) { set_tid(tid); } -private: ~MExportDirPrepAck() override {} public: diff --git a/src/messages/MGatherCaps.h b/src/messages/MGatherCaps.h index 57d30871fbc41..8555f89551d80 100644 --- a/src/messages/MGatherCaps.h +++ b/src/messages/MGatherCaps.h @@ -14,9 +14,9 @@ public: inodeno_t ino; +protected: MGatherCaps() : Message(MSG_MDS_GATHERCAPS) {} -private: ~MGatherCaps() override {} public: diff --git a/src/messages/MHeartbeat.h b/src/messages/MHeartbeat.h index 1b7293e8fb4e2..4d3c2001cf82c 100644 --- a/src/messages/MHeartbeat.h +++ b/src/messages/MHeartbeat.h @@ -38,13 +38,13 @@ private: const map& get_import_map() const { return import_map; } map& get_import_map() { return import_map; } +protected: MHeartbeat() : Message(MSG_MDS_HEARTBEAT), load(DecayRate()) {} MHeartbeat(mds_load_t& load, int beat) : Message(MSG_MDS_HEARTBEAT), load(load) { this->beat = beat; } -private: ~MHeartbeat() override {} public: diff --git a/src/messages/MInodeFileCaps.h b/src/messages/MInodeFileCaps.h index e2fc76c72b84e..a09189ec15db4 100644 --- a/src/messages/MInodeFileCaps.h +++ b/src/messages/MInodeFileCaps.h @@ -33,13 +33,13 @@ private: inodeno_t get_ino() const { return ino; } int get_caps() const { return caps; } +protected: MInodeFileCaps() : Message(MSG_MDS_INODEFILECAPS) {} MInodeFileCaps(inodeno_t ino, int caps) : Message(MSG_MDS_INODEFILECAPS) { this->ino = ino; this->caps = caps; } -private: ~MInodeFileCaps() override {} public: diff --git a/src/messages/MLock.h b/src/messages/MLock.h index 002cd98f9a041..6e1cdccf3fc53 100644 --- a/src/messages/MLock.h +++ b/src/messages/MLock.h @@ -47,6 +47,7 @@ public: const MDSCacheObjectInfo &get_object_info() const { return object_info; } MDSCacheObjectInfo &get_object_info() { return object_info; } +protected: MLock() : Message(MSG_MDS_LOCK) {} MLock(int ac, mds_rank_t as) : Message(MSG_MDS_LOCK), @@ -64,7 +65,6 @@ public: lock->get_parent()->set_object_info(object_info); lockdata.claim(bl); } -private: ~MLock() override {} public: diff --git a/src/messages/MMDSBeacon.h b/src/messages/MMDSBeacon.h index a306371e5ba16..00ef1a7decf8e 100644 --- a/src/messages/MMDSBeacon.h +++ b/src/messages/MMDSBeacon.h @@ -210,7 +210,7 @@ private: uint64_t mds_features; - public: +protected: MMDSBeacon() : PaxosServiceMessage(MSG_MDS_BEACON, 0, HEAD_VERSION, COMPAT_VERSION), global_id(0), state(MDSMap::STATE_NULL), standby_for_rank(MDS_RANK_NONE), @@ -225,7 +225,6 @@ private: standby_replay(false), mds_features(feat) { set_priority(CEPH_MSG_PRIO_HIGH); } -private: ~MMDSBeacon() override {} public: diff --git a/src/messages/MMDSCacheRejoin.h b/src/messages/MMDSCacheRejoin.h index 7d53829c2cd4e..2929b9ca3975e 100644 --- a/src/messages/MMDSCacheRejoin.h +++ b/src/messages/MMDSCacheRejoin.h @@ -217,13 +217,13 @@ private: map > > authpinned_dentries; map > xlocked_dentries; +protected: MMDSCacheRejoin() : Message(MSG_MDS_CACHEREJOIN, HEAD_VERSION, COMPAT_VERSION), op(0) {} MMDSCacheRejoin(int o) : Message(MSG_MDS_CACHEREJOIN, HEAD_VERSION, COMPAT_VERSION), op(o) {} -private: ~MMDSCacheRejoin() override {} public: diff --git a/src/messages/MMDSFindIno.h b/src/messages/MMDSFindIno.h index bfba7c5ef152c..31e4184b1877c 100644 --- a/src/messages/MMDSFindIno.h +++ b/src/messages/MMDSFindIno.h @@ -28,9 +28,12 @@ public: ceph_tid_t tid {0}; inodeno_t ino; +protected: MMDSFindIno() : Message(MSG_MDS_FINDINO) {} MMDSFindIno(ceph_tid_t t, inodeno_t i) : Message(MSG_MDS_FINDINO), tid(t), ino(i) {} + ~MMDSFindIno() override {} +public: 
const char *get_type_name() const override { return "findino"; } void print(ostream &out) const override { out << "findino(" << tid << " " << ino << ")"; diff --git a/src/messages/MMDSFindInoReply.h b/src/messages/MMDSFindInoReply.h index dc61ba8d06a75..162b830fc14ea 100644 --- a/src/messages/MMDSFindInoReply.h +++ b/src/messages/MMDSFindInoReply.h @@ -28,9 +28,12 @@ public: ceph_tid_t tid = 0; filepath path; +protected: MMDSFindInoReply() : Message(MSG_MDS_FINDINOREPLY) {} MMDSFindInoReply(ceph_tid_t t) : Message(MSG_MDS_FINDINOREPLY), tid(t) {} + ~MMDSFindInoReply() override {} +public: const char *get_type_name() const override { return "findinoreply"; } void print(ostream &out) const override { out << "findinoreply(" << tid << " " << path << ")"; diff --git a/src/messages/MMDSFragmentNotify.h b/src/messages/MMDSFragmentNotify.h index 2618d9755f1c8..7be292a3a334b 100644 --- a/src/messages/MMDSFragmentNotify.h +++ b/src/messages/MMDSFragmentNotify.h @@ -35,11 +35,11 @@ private: bufferlist basebl; +protected: MMDSFragmentNotify() : Message(MSG_MDS_FRAGMENTNOTIFY) {} MMDSFragmentNotify(dirfrag_t df, int b) : Message(MSG_MDS_FRAGMENTNOTIFY), ino(df.ino), basefrag(df.frag), bits(b) { } -private: ~MMDSFragmentNotify() override {} public: diff --git a/src/messages/MMDSLoadTargets.h b/src/messages/MMDSLoadTargets.h index 687775263bfed..feda44e5fb9a1 100644 --- a/src/messages/MMDSLoadTargets.h +++ b/src/messages/MMDSLoadTargets.h @@ -33,12 +33,11 @@ public: mds_gid_t global_id; set targets; +protected: MMDSLoadTargets() : PaxosServiceMessage(MSG_MDS_OFFLOAD_TARGETS, 0) {} - MMDSLoadTargets(mds_gid_t g, set& mds_targets) : PaxosServiceMessage(MSG_MDS_OFFLOAD_TARGETS, 0), global_id(g), targets(mds_targets) {} -private: ~MMDSLoadTargets() override {} public: diff --git a/src/messages/MMDSMap.h b/src/messages/MMDSMap.h index 03fb08791d457..6ef9b36812490 100644 --- a/src/messages/MMDSMap.h +++ b/src/messages/MMDSMap.h @@ -38,6 +38,7 @@ public: version_t get_epoch() const { return epoch; } const bufferlist& get_encoded() const { return encoded; } +protected: MMDSMap() : Message(CEPH_MSG_MDS_MAP, HEAD_VERSION, COMPAT_VERSION) {} MMDSMap(const uuid_d &f, const MDSMap &mm) : @@ -46,7 +47,6 @@ public: epoch = mm.get_epoch(); mm.encode(encoded, -1); // we will reencode with fewer features as necessary } -private: ~MMDSMap() override {} public: diff --git a/src/messages/MMDSOpenIno.h b/src/messages/MMDSOpenIno.h index 6f62c136d36fa..1860f473ae910 100644 --- a/src/messages/MMDSOpenIno.h +++ b/src/messages/MMDSOpenIno.h @@ -27,6 +27,7 @@ public: inodeno_t ino; vector ancestors; +protected: MMDSOpenIno() : Message(MSG_MDS_OPENINO) {} MMDSOpenIno(ceph_tid_t t, inodeno_t i, vector* pa) : Message(MSG_MDS_OPENINO), ino(i) { @@ -34,7 +35,9 @@ public: if (pa) ancestors = *pa; } + ~MMDSOpenIno() override {} +public: const char *get_type_name() const override { return "openino"; } void print(ostream &out) const override { out << "openino(" << header.tid << " " << ino << " " << ancestors << ")"; diff --git a/src/messages/MMDSOpenInoReply.h b/src/messages/MMDSOpenInoReply.h index 77f1777e6cb55..2dc9c1ae10b94 100644 --- a/src/messages/MMDSOpenInoReply.h +++ b/src/messages/MMDSOpenInoReply.h @@ -29,12 +29,15 @@ public: mds_rank_t hint; int32_t error; +protected: MMDSOpenInoReply() : Message(MSG_MDS_OPENINOREPLY), error(0) {} MMDSOpenInoReply(ceph_tid_t t, inodeno_t i, mds_rank_t h=MDS_RANK_NONE, int e=0) : Message(MSG_MDS_OPENINOREPLY), ino(i), hint(h), error(e) { header.tid = t; } + +public: const char *get_type_name() 
const override { return "openinoreply"; } void print(ostream &out) const override { out << "openinoreply(" << header.tid << " " diff --git a/src/messages/MMDSResolve.h b/src/messages/MMDSResolve.h index 4316c981a0a13..ca1dd9bdf7b93 100644 --- a/src/messages/MMDSResolve.h +++ b/src/messages/MMDSResolve.h @@ -70,8 +70,8 @@ public: list table_clients; +protected: MMDSResolve() : Message(MSG_MDS_RESOLVE) {} -private: ~MMDSResolve() override {} public: diff --git a/src/messages/MMDSResolveAck.h b/src/messages/MMDSResolveAck.h index cdc0fd3c97f37..97e26469ba965 100644 --- a/src/messages/MMDSResolveAck.h +++ b/src/messages/MMDSResolveAck.h @@ -30,8 +30,8 @@ public: map commit; vector abort; +protected: MMDSResolveAck() : Message(MSG_MDS_RESOLVEACK) {} -private: ~MMDSResolveAck() override {} public: diff --git a/src/messages/MMDSSlaveRequest.h b/src/messages/MMDSSlaveRequest.h index a2b4c25e30994..c449a79042c9d 100644 --- a/src/messages/MMDSSlaveRequest.h +++ b/src/messages/MMDSSlaveRequest.h @@ -160,14 +160,12 @@ public: const bufferlist& get_lock_data() const { return inode_export; } bufferlist& get_lock_data() { return inode_export; } - - // ---- +protected: MMDSSlaveRequest() : Message(MSG_MDS_SLAVE_REQUEST) { } MMDSSlaveRequest(metareqid_t ri, __u32 att, int o) : Message(MSG_MDS_SLAVE_REQUEST), reqid(ri), attempt(att), op(o), flags(0), lock_type(0), inode_export_v(0), srcdn_auth(MDS_RANK_NONE) { } -private: ~MMDSSlaveRequest() override {} public: diff --git a/src/messages/MMDSSnapUpdate.h b/src/messages/MMDSSnapUpdate.h index ae36b314773b6..b9063a58e03dd 100644 --- a/src/messages/MMDSSnapUpdate.h +++ b/src/messages/MMDSSnapUpdate.h @@ -34,12 +34,12 @@ public: bufferlist snap_blob; +protected: MMDSSnapUpdate() : Message(MSG_MDS_SNAPUPDATE) {} MMDSSnapUpdate(inodeno_t i, version_t tid, int op) : Message(MSG_MDS_SNAPUPDATE), ino(i), snap_op(op) { set_tid(tid); } -private: ~MMDSSnapUpdate() override {} public: diff --git a/src/messages/MMDSTableRequest.h b/src/messages/MMDSTableRequest.h index bc8165a8c42ed..8504584f80c26 100644 --- a/src/messages/MMDSTableRequest.h +++ b/src/messages/MMDSTableRequest.h @@ -31,13 +31,13 @@ public: uint64_t reqid = 0; bufferlist bl; +protected: MMDSTableRequest() : Message(MSG_MDS_TABLE_REQUEST) {} MMDSTableRequest(int tab, int o, uint64_t r, version_t v=0) : Message(MSG_MDS_TABLE_REQUEST), table(tab), op(o), reqid(r) { set_tid(v); } -private: ~MMDSTableRequest() override {} public: diff --git a/src/messages/MRemoveSnaps.h b/src/messages/MRemoveSnaps.h index 332b5c9f48e3d..c0f83bfc52686 100644 --- a/src/messages/MRemoveSnaps.h +++ b/src/messages/MRemoveSnaps.h @@ -26,13 +26,13 @@ public: map > snaps; +protected: MRemoveSnaps() : PaxosServiceMessage(MSG_REMOVE_SNAPS, 0) { } MRemoveSnaps(map >& s) : PaxosServiceMessage(MSG_REMOVE_SNAPS, 0) { snaps.swap(s); } -private: ~MRemoveSnaps() override {} public: diff --git a/src/mon/MDSMonitor.cc b/src/mon/MDSMonitor.cc index 768dffbb92d8f..df64b3a14059e 100644 --- a/src/mon/MDSMonitor.cc +++ b/src/mon/MDSMonitor.cc @@ -376,7 +376,8 @@ bool MDSMonitor::preprocess_beacon(MonOpRequestRef op) MDSMap null_map; null_map.epoch = fsmap.epoch; null_map.compat = fsmap.compat; - mon->send_reply(op, new MMDSMap(mon->monmap->fsid, null_map)); + auto m = MMDSMap::factory::build(mon->monmap->fsid, null_map); + mon->send_reply(op, m.detach()); return true; } else { return false; // not booted yet. 
@@ -450,10 +451,12 @@ bool MDSMonitor::preprocess_beacon(MonOpRequestRef op)
     // note time and reply
     assert(effective_epoch > 0);
     _note_beacon(m);
-    mon->send_reply(op,
-		    new MMDSBeacon(mon->monmap->fsid, m->get_global_id(), m->get_name(),
-				   effective_epoch, state, seq,
-				   CEPH_FEATURES_SUPPORTED_DEFAULT));
+    {
+      auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+          m->get_global_id(), m->get_name(), effective_epoch,
+          state, seq, CEPH_FEATURES_SUPPORTED_DEFAULT);
+      mon->send_reply(op, beacon.detach());
+    }
     return true;

  ignore:
@@ -718,11 +721,11 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op)
       last_beacon.erase(gid);

       // Respond to MDS, so that it knows it can continue to shut down
-      mon->send_reply(op,
-		      new MMDSBeacon(
+      auto beacon = MMDSBeacon::factory::build(
 			mon->monmap->fsid, m->get_global_id(),
 			m->get_name(), pending.get_epoch(), state, seq,
-			CEPH_FEATURES_SUPPORTED_DEFAULT));
+			CEPH_FEATURES_SUPPORTED_DEFAULT);
+      mon->send_reply(op, beacon.detach());
     } else if (state == MDSMap::STATE_DNE) {
       if (!mon->osdmon()->is_writeable()) {
	dout(1) << __func__ << ": DNE from rank " << info.rank
@@ -736,11 +739,10 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op)
       request_proposal(mon->osdmon());

       // Respond to MDS, so that it knows it can continue to shut down
-      mon->send_reply(op,
-		      new MMDSBeacon(
-			mon->monmap->fsid, m->get_global_id(),
-			m->get_name(), pending.get_epoch(), state, seq,
-			CEPH_FEATURES_SUPPORTED_DEFAULT));
+      auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+          m->get_global_id(), m->get_name(), pending.get_epoch(), state, seq,
+          CEPH_FEATURES_SUPPORTED_DEFAULT);
+      mon->send_reply(op, beacon.detach());
     } else if (info.state == MDSMap::STATE_STANDBY && state != info.state) {
       // Standby daemons should never modify their own
       // state. Reject any attempts to do so.
@@ -826,15 +828,13 @@ void MDSMonitor::_updated(MonOpRequestRef op)
     MDSMap null_map;
     null_map.epoch = fsmap.epoch;
     null_map.compat = fsmap.compat;
-    mon->send_reply(op, new MMDSMap(mon->monmap->fsid, null_map));
+    auto m = MMDSMap::factory::build(mon->monmap->fsid, null_map);
+    mon->send_reply(op, m.detach());
   } else {
-    mon->send_reply(op, new MMDSBeacon(mon->monmap->fsid,
-				       m->get_global_id(),
-				       m->get_name(),
-				       fsmap.get_epoch(),
-				       m->get_state(),
-				       m->get_seq(),
-				       CEPH_FEATURES_SUPPORTED_DEFAULT));
+    auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+        m->get_global_id(), m->get_name(), fsmap.get_epoch(),
+        m->get_state(), m->get_seq(), CEPH_FEATURES_SUPPORTED_DEFAULT);
+    mon->send_reply(op, beacon.detach());
   }
 }

@@ -1570,9 +1570,9 @@ void MDSMonitor::check_sub(Subscription *sub)
     if (sub->next > mds_map->epoch) {
       return;
     }
-    auto msg = new MMDSMap(mon->monmap->fsid, *mds_map);
+    auto msg = MMDSMap::factory::build(mon->monmap->fsid, *mds_map);

-    sub->session->con->send_message(msg);
+    sub->session->con->send_message(msg.detach());
     if (sub->onetime) {
       mon->session_map.remove_sub(sub);
     } else {
-- 
2.39.5
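Editor's note: the patch is mechanical, so the intent is easy to lose in the noise. Below is a minimal, self-contained sketch of the ownership model the series moves to. It is an illustration, not code from this patch or from the Ceph tree: RefCountedMsg, MsgRef, MClientSessionish, and Connectionish are hypothetical stand-ins for Ceph's Message, Message::ref (an intrusive pointer), the concrete message types, and Connection; only the MessageFactory/`using factory = ...; friend factory;` shape mirrors what the hunks above actually introduce.

    #include <cassert>
    #include <utility>

    class RefCountedMsg {
    public:
      void get() { ++nref; }                        // take an extra reference
      void put() { if (--nref == 0) delete this; }  // drop one; free at zero
    protected:
      RefCountedMsg() = default;
      virtual ~RefCountedMsg() = default;   // protected: `delete msg` won't compile
    private:
      int nref = 1;                         // factory::build() hands out this ref
    };

    template <typename M>
    class MsgRef {                          // stand-in for Message::ref
    public:
      MsgRef() = default;
      explicit MsgRef(M *m) : p(m) {}       // adopts the initial reference
      MsgRef(const MsgRef &o) : p(o.p) { if (p) p->get(); }
      MsgRef(MsgRef &&o) noexcept : p(std::exchange(o.p, nullptr)) {}
      ~MsgRef() { if (p) p->put(); }
      M *operator->() const { return p; }
      // detach(): hand our reference to a legacy API that takes a raw
      // pointer and consumes it, as at the send_reply()/send_mon_message()
      // call sites in this patch.
      M *detach() { return std::exchange(p, nullptr); }
    private:
      M *p = nullptr;
    };

    template <typename M>
    struct MessageFactory {                 // target of `using factory = ...`
      template <typename... Args>
      static MsgRef<M> build(Args&&... args) {
        // Only the factory may `new` a message; everyone else gets a MsgRef.
        return MsgRef<M>(new M(std::forward<Args>(args)...));
      }
    };

    class MClientSessionish : public RefCountedMsg {
    public:
      using factory = MessageFactory<MClientSessionish>;
      friend factory;                       // grants build() ctor access
      int get_op() const { return op; }
    protected:                              // was public before this patch
      explicit MClientSessionish(int o) : op(o) {}
      ~MClientSessionish() override = default;
    private:
      int op;
    };

    struct Connectionish {
      // New-style sender: shares ownership via the ref, like send_message2().
      void send_message2(MsgRef<MClientSessionish> m) { assert(m->get_op() >= 0); }
      // Legacy sender: takes a raw pointer and consumes one reference.
      void send_message(MClientSessionish *m) { m->put(); }
    };

    int main() {
      Connectionish con;
      auto a = MClientSessionish::factory::build(1);
      con.send_message2(a);          // `a` still holds a ref; freed at scope exit
      auto b = MClientSessionish::factory::build(2);
      con.send_message(b.detach());  // ownership transferred; no leak, no double-put
      // MClientSessionish bad(3);   // error: constructor is protected
      // delete a.detach();          // error: destructor is protected
      return 0;
    }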
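Design note, as read from the call sites above: the ref-counted path (send_message2(), and the send_message_client()/send_message_mds() calls that now take the factory-built ref) is the end state, while detach() is the escape hatch for interfaces that still expect an owning raw Message* — mon->send_reply() and send_mon_message() in these hunks, and build_client_request()'s return value earlier in the series. Routing every construction through factory::build() means the first reference is always owned by a smart pointer, and each raw-pointer consumer receives exactly one reference via detach(); that accounting is what open-coded `new` made easy to get wrong, which is the leak class the tracker issue (24306) describes. Moving the constructors and destructors to protected, with the factory as a friend, turns any remaining `new MFoo(...)` or `delete` outside the factory into a compile error rather than a code-review catch.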