}
}
- MClientSession *m = new MClientSession(CEPH_SESSION_REQUEST_OPEN);
+ auto m = MClientSession::factory::build(CEPH_SESSION_REQUEST_OPEN);
m->metadata = metadata;
m->supported_features = feature_bitset_t(CEPHFS_FEATURES_CLIENT_SUPPORTED);
- session->con->send_message(m);
+ session->con->send_message2(m);
return session;
}
{
ldout(cct, 2) << __func__ << " mds." << s->mds_num << " seq " << s->seq << dendl;
s->state = MetaSession::STATE_CLOSING;
- s->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_CLOSE, s->seq));
+ s->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_CLOSE, s->seq));
}
void Client::_closed_mds_session(MetaSession *s)
break;
case CEPH_SESSION_FLUSHMSG:
- session->con->send_message(new MClientSession(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
+ session->con->send_message2(MClientSession::factory::build(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
break;
case CEPH_SESSION_FORCE_RO:
MClientRequest* Client::build_client_request(MetaRequest *request)
{
- MClientRequest *req = new MClientRequest(request->get_op());
+ auto req = MClientRequest::factory::build(request->get_op());
req->set_tid(request->tid);
req->set_stamp(request->op_stamp);
memcpy(&req->head, &request->head, sizeof(ceph_mds_request_head));
const gid_t *_gids;
int gid_count = request->perms.get_gids(&_gids);
req->set_gid_list(gid_count, _gids);
- return req;
+ return req.detach();
}
s->seq++;
ldout(cct, 10) << " mds." << s->mds_num << " seq now " << s->seq << dendl;
if (s->state == MetaSession::STATE_CLOSING) {
- s->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_CLOSE, s->seq));
+ s->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_CLOSE, s->seq));
}
}
}
revoke:
- m->get_connection()->send_message(
- new MClientLease(
- CEPH_MDS_LEASE_RELEASE, seq,
- m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname));
+ {
+ auto reply = MClientLease::factory::build(CEPH_MDS_LEASE_RELEASE, seq, m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname);
+ m->get_connection()->send_message2(reply);
+ }
m->put();
}
if (flush)
follows = in->snaprealm->get_snap_context().seq;
- MClientCaps *m = new MClientCaps(op,
+ auto m = MClientCaps::factory::build(op,
in->ino,
0,
cap->cap_id, cap->seq,
if (!session->flushing_caps_tids.empty())
m->set_oldest_flush_tid(*session->flushing_caps_tids.begin());
- session->con->send_message(m);
+ session->con->send_message2(m);
}
static bool is_max_size_approaching(Inode *in)
session->flushing_caps_tids.insert(capsnap.flush_tid);
}
- MClientCaps *m = new MClientCaps(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq,
+ auto m = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq,
cap_epoch_barrier);
m->caller_uid = capsnap.cap_dirtier_uid;
m->caller_gid = capsnap.cap_dirtier_gid;
assert(!session->flushing_caps_tids.empty());
m->set_oldest_flush_tid(*session->flushing_caps_tids.begin());
- session->con->send_message(m);
+ session->con->send_message2(m);
}
}
// will crash if they see an unknown CEPH_SESSION_* value in this msg.
const uint64_t features = session->con->get_features();
if (HAVE_FEATURE(features, SERVER_LUMINOUS)) {
- MClientSession *m = new MClientSession(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
- session->con->send_message(m);
+ auto m = MClientSession::factory::build(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
+ session->con->send_message2(m);
}
}
ldout(cct, 10) << "renew_caps mds." << session->mds_num << dendl;
session->last_cap_renew_request = ceph_clock_now();
uint64_t seq = ++session->cap_renew_seq;
- session->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
+ session->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
}
assert(want_state != MDSMap::STATE_NULL);
- MMDSBeacon::ref beacon(new MMDSBeacon(
+ auto beacon = MMDSBeacon::factory::build(
monc->get_fsid(), mds_gid_t(monc->get_global_id()),
name,
epoch,
want_state,
last_seq,
- CEPH_FEATURES_SUPPORTED_DEFAULT), false);
+ CEPH_FEATURES_SUPPORTED_DEFAULT);
beacon->set_standby_for_rank(standby_for_rank);
beacon->set_standby_for_name(standby_for_name);
if (mds->is_cluster_degraded() &&
mds->mdsmap->get_state(it.first) < MDSMap::STATE_REJOIN)
continue;
- MLock::ref m(new MLock(lock, msg, mds->get_nodeid()), false);
+ auto m = MLock::factory::build(lock, msg, mds->get_nodeid());
mds->send_message_mds(m, it.first);
}
}
if (mds->is_cluster_degraded() &&
mds->mdsmap->get_state(it.first) < MDSMap::STATE_REJOIN)
continue;
- MLock::ref m(new MLock(lock, msg, mds->get_nodeid()), false);
+ auto m = MLock::factory::build(lock, msg, mds->get_nodeid());
m->set_data(data);
mds->send_message_mds(m, it.first);
}
return false;
}
- MMDSSlaveRequest::ref req(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPIN), false);
+ auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPIN);
for (set<MDSCacheObject*>::iterator q = p->second.begin();
q != p->second.end();
++q) {
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(*p) >= MDSMap::STATE_REJOIN) {
dout(10) << "_drop_non_rdlocks dropping remote locks on mds." << *p << dendl;
- MMDSSlaveRequest::ref slavereq(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_DROPLOCKS), false);
+ auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_DROPLOCKS);
mds->send_message_mds(slavereq, *p);
}
}
mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
switch (lock->get_state()) {
case LOCK_SYNC_LOCK:
- mds->send_message_mds(MLock::ref(new MLock(lock, LOCK_AC_LOCKACK, mds->get_nodeid()), false), auth);
+ mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_LOCKACK, mds->get_nodeid()), auth);
break;
case LOCK_MIX_SYNC:
{
- MLock::ref reply(new MLock(lock, LOCK_AC_SYNCACK, mds->get_nodeid()), false);
+ auto reply = MLock::factory::build(lock, LOCK_AC_SYNCACK, mds->get_nodeid());
lock->encode_locked_state(reply->get_data());
mds->send_message_mds(reply, auth);
next = LOCK_MIX_SYNC2;
case LOCK_SYNC_MIX:
{
- MLock::ref reply(new MLock(lock, LOCK_AC_MIXACK, mds->get_nodeid()), false);
+ auto reply = MLock::factory::build(lock, LOCK_AC_MIXACK, mds->get_nodeid());
mds->send_message_mds(reply, auth);
next = LOCK_SYNC_MIX2;
}
{
bufferlist data;
lock->encode_locked_state(data);
- mds->send_message_mds(MLock::ref(new MLock(lock, LOCK_AC_LOCKACK, mds->get_nodeid(), data), false), auth);
+ mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_LOCKACK, mds->get_nodeid(), data), auth);
(static_cast<ScatterLock *>(lock))->start_flush();
// we'll get an AC_LOCKFLUSHED to complete
}
mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
dout(10) << "requesting rdlock from auth on "
<< *lock << " on " << *lock->get_parent() << dendl;
- mds->send_message_mds(MLock::ref(new MLock(lock, LOCK_AC_REQRDLOCK, mds->get_nodeid()), false), auth);
+ mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_REQRDLOCK, mds->get_nodeid()), auth);
}
return false;
}
mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
dout(10) << "requesting scatter from auth on "
<< *lock << " on " << *lock->get_parent() << dendl;
- mds->send_message_mds(MLock::ref(new MLock(lock, LOCK_AC_REQSCATTER, mds->get_nodeid()), false), auth);
+ mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_REQSCATTER, mds->get_nodeid()), auth);
}
break;
}
// send lock request
mut->start_locking(lock, target);
mut->more()->slaves.insert(target);
- MMDSSlaveRequest::ref r(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK), false);
+ auto r = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, target);
<< " " << *lock->get_parent() << dendl;
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(target) >= MDSMap::STATE_REJOIN) {
- MMDSSlaveRequest::ref slavereq(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK), false);
+ auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK);
slavereq->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(slavereq->get_object_info());
mds->send_message_mds(slavereq, target);
// send lock request
mut->more()->slaves.insert(auth);
mut->start_locking(lock, auth);
- MMDSSlaveRequest::ref r(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK), false);
+ auto r = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, auth);
mds_rank_t auth = lock->get_parent()->authority().first;
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
- MMDSSlaveRequest::ref slavereq(new MMDSSlaveRequest(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK), false);
+ auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK);
slavereq->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(slavereq->get_object_info());
mds->send_message_mds(slavereq, auth);
cap->reset_num_revoke_warnings();
}
- MClientCaps::ref m(new MClientCaps(op, in->ino(),
+ auto m = MClientCaps::factory::build(op, in->ino(),
in->find_snaprealm()->inode->ino(),
cap->get_cap_id(),
cap->get_last_seq(),
after, wanted, 0,
cap->get_mseq(),
- mds->get_osd_epoch_barrier())
- , false);
+ mds->get_osd_epoch_barrier());
in->encode_cap_message(m, cap);
mds->send_message_client_counted(m, it->first);
for (auto &p : in->client_caps) {
Capability *cap = &p.second;
- MClientCaps::ref m(new MClientCaps(CEPH_CAP_OP_TRUNC,
+ auto m = MClientCaps::factory::build(CEPH_CAP_OP_TRUNC,
in->ino(),
in->find_snaprealm()->inode->ino(),
cap->get_cap_id(), cap->get_last_seq(),
cap->pending(), cap->wanted(), 0,
cap->get_mseq(),
- mds->get_osd_epoch_barrier())
- , false);
+ mds->get_osd_epoch_barrier());
in->encode_cap_message(m, cap);
mds->send_message_client_counted(m, p.first);
}
if (!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(auth))
- mds->send_message_mds(MInodeFileCaps::ref(new MInodeFileCaps(in->ino(), in->replica_caps_wanted), false), auth);
+ mds->send_message_mds(MInodeFileCaps::factory::build(in->ino(), in->replica_caps_wanted), auth);
}
}
if (cap->pending() & (CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER)) {
dout(10) << "share_inode_max_size with client." << client << dendl;
cap->inc_last_seq();
- MClientCaps::ref m(new MClientCaps(CEPH_CAP_OP_GRANT,
+ auto m = MClientCaps::factory::build(CEPH_CAP_OP_GRANT,
in->ino(),
in->find_snaprealm()->inode->ino(),
cap->get_cap_id(),
cap->pending(),
cap->wanted(), 0,
cap->get_mseq(),
- mds->get_osd_epoch_barrier())
- , false);
+ mds->get_osd_epoch_barrier());
in->encode_cap_message(m, cap);
mds->send_message_client_counted(m, client);
}
<< " for client." << client << dendl;
MClientCaps::ref ack;
if (op == CEPH_CAP_OP_FLUSHSNAP) {
- ack.reset(new MClientCaps(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier()), false);
+ ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
} else {
- ack.reset(new MClientCaps(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier()), false);
+ ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
}
ack->set_snap_follows(follows);
ack->set_client_tid(m->get_client_tid());
// case we get a dup response, so whatever.)
MClientCaps::ref ack;
if (dirty) {
- ack.reset(new MClientCaps(CEPH_CAP_OP_FLUSHSNAP_ACK, in->ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier()), false);
+ ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP_ACK, in->ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
ack->set_snap_follows(follows);
ack->set_client_tid(m->get_client_tid());
ack->set_oldest_flush_tid(m->get_oldest_flush_tid());
if (dirty && in->is_auth()) {
dout(7) << " flush client." << client << " dirty " << ccap_string(dirty)
<< " seq " << m->get_seq() << " on " << *in << dendl;
- ack.reset(new MClientCaps(CEPH_CAP_OP_FLUSH_ACK, in->ino(), 0, cap->get_cap_id(), m->get_seq(),
- m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier()), false);
+ ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSH_ACK, in->ino(), 0, cap->get_cap_id(), m->get_seq(),
+ m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
ack->set_client_tid(m->get_client_tid());
ack->set_oldest_flush_tid(m->get_oldest_flush_tid());
dout(7) << "handle_client_lease client." << client << " renew on " << *dn
<< (!dn->lock.can_lease(client)?", revoking lease":"") << dendl;
if (dn->lock.can_lease(client)) {
- MClientLease::ref reply(new MClientLease(*m));
+ auto reply = MClientLease::factory::build(*m);
int pool = 1; // fixme.. do something smart!
reply->h.duration_ms = (int)(1000 * mdcache->client_lease_durations[pool]);
reply->h.seq = ++l->seq;
// i should also revoke the dir ICONTENT lease, if they have it!
CInode *diri = dn->get_dir()->get_inode();
- MClientLease::ref lease(new MClientLease(CEPH_MDS_LEASE_REVOKE, l->seq, mask, diri->ino(), diri->first, CEPH_NOSNAP, dn->get_name()), false);
+ auto lease = MClientLease::factory::build(CEPH_MDS_LEASE_REVOKE, l->seq, mask, diri->ino(), diri->first, CEPH_NOSNAP, dn->get_name());
mds->send_message_client_counted(lease, l->client);
}
}
// request unscatter?
mds_rank_t auth = lock->get_parent()->authority().first;
if (!mds->is_cluster_degraded() || mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
- MLock::ref msg(new MLock(lock, LOCK_AC_NUDGE, mds->get_nodeid()), false);
- mds->send_message_mds(msg, auth);
+ mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_NUDGE, mds->get_nodeid()), auth);
}
// wait...
for (const auto& r : up) {
if (r == mds->get_nodeid())
continue;
- MHeartbeat::ref hb(new MHeartbeat(load, beat_epoch), false);
+ auto hb = MHeartbeat::factory::build(load, beat_epoch);
hb->get_import_map() = import_map;
mds->send_message_mds(hb, r);
}
cap->last_rsize = i->rstat.rsize();
cap->last_rbytes = i->rstat.rbytes;
- MClientQuota *msg = new MClientQuota();
+ auto msg = MClientQuota::factory::build();
msg->ino = in->ino();
msg->rstat = i->rstat;
msg->quota = i->quota;
mds->send_message_client_counted(msg, session->get_connection());
}
for (const auto &it : in->get_replicas()) {
- MGatherCaps *msg = new MGatherCaps;
+ auto msg = MGatherCaps::factory::build();
msg->ino = in->ino();
mds->send_message_mds(msg, it.first);
}
dout(10) << "_logged_slave_commit from mds." << from << " " << reqid << dendl;
// send a message
- MMDSSlaveRequest *req = new MMDSSlaveRequest(reqid, 0, MMDSSlaveRequest::OP_COMMITTED);
+ auto req = MMDSSlaveRequest::factory::build(reqid, 0, MMDSSlaveRequest::OP_COMMITTED);
mds->send_message_mds(req, from);
}
for (map<mds_rank_t, map<metareqid_t, MDSlaveUpdate*> >::iterator p = uncommitted_slave_updates.begin();
p != uncommitted_slave_updates.end();
++p) {
- resolves[p->first].reset(new MMDSResolve, false);
+ resolves[p->first] = MMDSResolve::factory::build();
for (map<metareqid_t, MDSlaveUpdate*>::iterator q = p->second.begin();
q != p->second.end();
++q) {
if (resolve_set.count(master) || is_ambiguous_slave_update(p->first, master)) {
dout(10) << " including uncommitted " << *mdr << dendl;
if (!resolves.count(master))
- resolves[master].reset(new MMDSResolve, false);
+ resolves[master] = MMDSResolve::factory::build();
if (!mdr->committing &&
mdr->has_more() && mdr->more()->is_inode_exporter) {
// re-send cap exports
if (*p == mds->get_nodeid())
continue;
if (mds->is_resolve() || mds->mdsmap->is_resolve(*p))
- resolves[*p].reset(new MMDSResolve, false);
+ resolves[*p] = MMDSResolve::factory::build();
}
map<dirfrag_t, vector<dirfrag_t> > my_subtrees;
}
}
- MMDSResolveAck::ref ack(new MMDSResolveAck, false);
+ auto ack = MMDSResolveAck::factory::build();
for (const auto &p : m->slave_requests) {
if (uncommitted_masters.count(p.first)) { //mds->sessionmap.have_completed_request(p.first)) {
// COMMIT
if (*p == mds->get_nodeid()) continue; // nothing to myself!
if (rejoin_sent.count(*p)) continue; // already sent a rejoin to this node!
if (mds->is_rejoin())
- rejoins[*p].reset(new MMDSCacheRejoin(MMDSCacheRejoin::OP_WEAK), false);
+ rejoins[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_WEAK);
else if (mds->mdsmap->is_rejoin(*p))
- rejoins[*p].reset(new MMDSCacheRejoin(MMDSCacheRejoin::OP_STRONG), false);
+ rejoins[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_STRONG);
}
if (mds->is_rejoin()) {
if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) {
survivor = true;
dout(10) << "i am a surivivor, and will ack immediately" << dendl;
- ack.reset(new MMDSCacheRejoin(MMDSCacheRejoin::OP_ACK), false);
+ ack = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_ACK);
map<inodeno_t,map<client_t,Capability::Import> > imported_caps;
}
// mark client caps stale.
- MClientCaps *m = new MClientCaps(CEPH_CAP_OP_EXPORT, p->first, 0,
+ auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, p->first, 0,
r->second.capinfo.cap_id, 0,
mds->get_osd_epoch_barrier());
m->set_cap_peer(q->second.cap_id, q->second.issue_seq, q->second.mseq,
snap = it->second;
snap->head.op = CEPH_SNAP_OP_SPLIT;
} else {
- snap.reset(new MClientSnap(CEPH_SNAP_OP_SPLIT), false);
+ snap = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
splits.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple(snap));
snap->head.split = realm->inode->ino();
snap->bl = realm->get_snap_trace();
assert(!p.second->empty());
auto em = splits.emplace(std::piecewise_construct, std::forward_as_tuple(p.first), std::forward_as_tuple());
if (em.second) {
- MClientSnap::ref update(new MClientSnap(CEPH_SNAP_OP_SPLIT), false);
+ auto update = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
update->head.split = parent_realm->inode->ino();
update->split_inos = split_inos;
update->split_realms = split_realms;
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(q->first.v));
if (session) {
// mark client caps stale.
- MClientCaps *stale = new MClientCaps(CEPH_CAP_OP_EXPORT, p->first, 0, 0, 0, mds->get_osd_epoch_barrier());
+ auto stale = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, p->first, 0, 0, 0, mds->get_osd_epoch_barrier());
stale->set_cap_peer(0, 0, 0, -1, 0);
mds->send_message_client_counted(stale, q->first);
}
cap->set_last_issue();
cap->set_last_issue_stamp(ceph_clock_now());
cap->clear_new();
- MClientCaps *reap = new MClientCaps(CEPH_CAP_OP_IMPORT,
- in->ino(),
- realm->inode->ino(),
- cap->get_cap_id(), cap->get_last_seq(),
- cap->pending(), cap->wanted(), 0,
- cap->get_mseq(), mds->get_osd_epoch_barrier());
+ auto reap = MClientCaps::factory::build(CEPH_CAP_OP_IMPORT, in->ino(), realm->inode->ino(), cap->get_cap_id(), cap->get_last_seq(), cap->pending(), cap->wanted(), 0, cap->get_mseq(), mds->get_osd_epoch_barrier());
in->encode_cap_message(reap, cap);
reap->snapbl = realm->get_snap_trace();
reap->set_cap_peer(p_cap_id, p_seq, p_mseq, peer, p_flags);
if (seq < realm->get_newest_seq()) {
dout(10) << "finish_snaprealm_reconnect client." << client << " has old seq " << seq << " < "
<< realm->get_newest_seq() << " on " << *realm << dendl;
- MClientSnap *snap = new MClientSnap(CEPH_SNAP_OP_UPDATE);
+ auto snap = MClientSnap::factory::build(CEPH_SNAP_OP_UPDATE);
snap->bl = realm->get_snap_trace();
for (const auto& child : realm->open_children)
snap->split_realms.push_back(child->inode->ino());
rejoin_unlinked_inodes.clear();
// send acks to everyone in the recovery set
- map<mds_rank_t,MMDSCacheRejoin*> acks;
+ map<mds_rank_t,MMDSCacheRejoin::ref> acks;
for (set<mds_rank_t>::iterator p = recovery_set.begin();
p != recovery_set.end();
++p) {
if (rejoin_ack_sent.count(*p))
continue;
- acks[*p] = new MMDSCacheRejoin(MMDSCacheRejoin::OP_ACK);
+ acks[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_ACK);
}
rejoin_ack_sent = recovery_set;
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple());
if (em.second) {
- em.first->second.reset(new MCacheExpire(mds->get_nodeid()), false);
+ em.first->second = MCacheExpire::factory::build(mds->get_nodeid());
}
dout(20) << __func__ << ": try expiring " << *mdsdir_in << " for stopping mds." << mds << dendl;
assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
- em.first->second.reset(new MCacheExpire(mds->get_nodeid()), false); /* new */
+ em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */
em.first->second->add_dentry(con->dirfrag(), dir->dirfrag(), dn->get_name(), dn->last, dn->get_replica_nonce());
}
}
assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
- em.first->second.reset(new MCacheExpire(mds->get_nodeid()), false); /* new */
+ em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */
em.first->second->add_dir(condf, dir->dirfrag(), dir->replica_nonce);
}
}
assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
- em.first->second.reset(new MCacheExpire(mds->get_nodeid()), false); /* new */
+ em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */
em.first->second->add_inode(df, in->vino(), in->get_replica_nonce());
}
}
auto em = delayed_expire[parent_dir].emplace(std::piecewise_construct, std::forward_as_tuple(from), std::forward_as_tuple());
if (em.second)
- em.first->second.reset(new MCacheExpire(from), false); /* new */
+ em.first->second = MCacheExpire::factory::build(from); /* new */
// merge these expires into it
em.first->second->add_realm(p.first, p.second);
// got backtrace from peer or backtrace just fetched
if (info.discover || !info.fetch_backtrace)
pa = &info.ancestors;
- mds->send_message_mds(new MMDSOpenIno(info.tid, ino, pa), peer);
+ mds->send_message_mds(MMDSOpenIno::factory::build(info.tid, ino, pa), peer);
if (mds->logger)
mds->logger->inc(l_mds_openino_peer_discover);
}
CInode *in = get_inode(ino);
if (in) {
dout(10) << " have " << *in << dendl;
- reply.reset(new MMDSOpenInoReply(m->get_tid(), ino, mds_rank_t(0)), false);
+ reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, mds_rank_t(0));
if (in->is_auth()) {
touch_inode(in);
while (1) {
reply->hint = in->authority().first;
}
} else if (err < 0) {
- reply.reset(new MMDSOpenInoReply(m->get_tid(), ino, MDS_RANK_NONE, err), false);
+ reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, MDS_RANK_NONE, err);
} else {
mds_rank_t hint = MDS_RANK_NONE;
int ret = open_ino_traverse_dir(ino, m, m->ancestors, false, false, &hint);
if (ret > 0)
return;
- reply.reset(new MMDSOpenInoReply(m->get_tid(), ino, hint, ret), false);
+ reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, hint, ret);
}
m->get_connection()->send_message2(reply); /* FIXME, why not send_client? */
}
}
} else {
fip.checking = m;
- mds->send_message_mds(new MMDSFindIno(fip.tid, fip.ino), m);
+ mds->send_message_mds(MMDSFindIno::factory::build(fip.tid, fip.ino), m);
}
}
}
dout(10) << "handle_find_ino " << *m << dendl;
- MMDSFindInoReply *r = new MMDSFindInoReply(m->tid);
+ auto r = MMDSFindInoReply::factory::build(m->tid);
CInode *in = get_inode(m->ino);
if (in) {
in->make_path(r->path);
dout(10) << " have " << r->path << " " << *in << dendl;
}
- m->get_connection()->send_message(r);
+ m->get_connection()->send_message2(r);
}
for (set<mds_rank_t>::iterator p = mdr->more()->slaves.begin();
p != mdr->more()->slaves.end();
++p) {
- MMDSSlaveRequest *r = new MMDSSlaveRequest(mdr->reqid, mdr->attempt,
+ auto r = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt,
MMDSSlaveRequest::OP_FINISH);
if (mdr->killed && !mdr->committing) {
auto em = updates.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple());
if (em.second) {
- MClientSnap::ref update(new MClientSnap(CEPH_SNAP_OP_SPLIT), false);
+ auto update = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
update->head.split = in->ino();
update->split_inos = split_inos;
update->split_realms = split_realms;
in->encode_snap(snap_blob);
for (auto p : mds_set) {
- MMDSSnapUpdate::ref m(new MMDSSnapUpdate(in->ino(), stid, snap_op), false);
+ auto m = MMDSSnapUpdate::factory::build(in->ino(), stid, snap_op);
m->snap_blob = snap_blob;
mds->send_message_mds(m, p);
}
for (auto &session : sessions) {
if (!session->is_open() && !session->is_stale())
continue;
- MClientSnap *update = new MClientSnap(snap_op);
+ auto update = MClientSnap::factory::build(snap_op);
update->head.split = global_snaprealm->inode->ino();
update->bl = global_snaprealm->get_snap_trace();
mds->send_message_client_counted(update, session);
void MDCache::_send_discover(discover_info_t& d)
{
- MDiscover::ref dis(new MDiscover(d.ino, d.frag, d.snap, d.want_path, d.want_base_dir, d.want_xlocked), false);
+ auto dis = MDiscover::factory::build(d.ino, d.frag, d.snap, d.want_path, d.want_base_dir, d.want_xlocked);
dis->set_tid(d.tid);
mds->send_message_mds(dis, d.mds);
}
CInode *cur = 0;
- MDiscoverReply::ref reply(new MDiscoverReply(*dis), false);
+ auto reply = MDiscoverReply::factory::build(*dis);
snapid_t snapid = dis->get_snapid();
for (const auto &r : dir->dir_rep_by) {
s.insert(r);
}
- mds->send_message_mds(new MDirUpdate(mds->get_nodeid(),
- dir->dirfrag(),
- dir->dir_rep,
- s,
- path,
- bcast),
- *it);
+ mds->send_message_mds(MDirUpdate::factory::build(mds->get_nodeid(), dir->dirfrag(), dir->dir_rep, s, path, bcast), *it);
}
return 0;
rejoin_gather.count(p.first)))
continue;
CDentry::linkage_t *dnl = dn->get_linkage();
- MDentryLink *m = new MDentryLink(subtree->dirfrag(), dn->get_dir()->dirfrag(),
- dn->get_name(), dnl->is_primary());
+ auto m = MDentryLink::factory::build(subtree->dirfrag(), dn->get_dir()->dirfrag(), dn->get_name(), dnl->is_primary());
if (dnl->is_primary()) {
dout(10) << " primary " << *dnl->get_inode() << dendl;
replicate_inode(dnl->get_inode(), p.first, m->bl,
rejoin_gather.count(*it)))
continue;
- MDentryUnlink *unlink = new MDentryUnlink(dn->get_dir()->dirfrag(), dn->get_name());
+ auto unlink = MDentryUnlink::factory::build(dn->get_dir()->dirfrag(), dn->get_name());
if (straydn) {
replicate_stray(straydn, *it, unlink->straybl);
unlink->snapbl = snapbl;
rejoin_gather.count(p.first)))
continue;
- MMDSFragmentNotify *notify = new MMDSFragmentNotify(basedirfrag, info.bits);
+ auto notify = MMDSFragmentNotify::factory::build(basedirfrag, info.bits);
// freshly replicate new dirs to peers
for (list<CDir*>::iterator q = info.resultfrags.begin();
}
priv.reset();
- MCommandReply::ref reply(new MCommandReply(r, outs), false);
+ auto reply = MCommandReply::factory::build(r, outs);
reply->set_tid(m->get_tid());
reply->set_data(outbl);
m->get_connection()->send_message2(reply);
#include "messages/MClientRequestForward.h"
#include "messages/MMDSLoadTargets.h"
#include "messages/MMDSTableRequest.h"
-#include "messages/MCommandReply.h"
#include "MDSDaemon.h"
#include "MDSMap.h"
if (send) {
dout(15) << "updating export_targets, now " << new_map_targets.size() << " ranks are targets" << dendl;
- MMDSLoadTargets* m = new MMDSLoadTargets(mds_gid_t(monc->get_global_id()), new_map_targets);
- monc->send_mon_message(m);
+ auto m = MMDSLoadTargets::factory::build(mds_gid_t(monc->get_global_id()), new_map_targets);
+ monc->send_mon_message(m.detach());
}
}
// send mdsmap first?
if (mds != whoami && peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
- Message::ref _m = MMDSMap::ref(new MMDSMap(monc->get_fsid(), *mdsmap), false);
+ auto _m = MMDSMap::factory::build(monc->get_fsid(), *mdsmap);
messenger->send_to_mds(_m.detach(), mdsmap->get_addrs(mds));
peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
}
bool client_must_resend = true; //!creq->can_forward();
// tell the client where it should go
- MClientRequestForward::ref f(new MClientRequestForward(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend), false);
+ auto f = MClientRequestForward::factory::build(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend);
messenger->send_message(f.detach(), m->get_source_inst());
}
set<Session*> clients;
sessionmap.get_client_session_set(clients);
for (const auto &session : clients) {
- MMDSMap::ref m(new MMDSMap(monc->get_fsid(), *mdsmap), false);
+ auto m = MMDSMap::factory::build(monc->get_fsid(), *mdsmap);
session->get_connection()->send_message2(std::move(m));
}
last_client_mdsmap_bcast = mdsmap->get_epoch();
double get_dispatch_queue_max_age(utime_t now) const;
void send_message_mds(const Message::ref& m, mds_rank_t mds);
- void send_message_mds(Message* m, mds_rank_t mds) {
- Message::ref mr(m, false);
- send_message_mds(mr, mds);
- }
void forward_message_mds(const MClientRequest::const_ref& req, mds_rank_t mds);
-
void send_message_client_counted(const Message::ref& m, client_t client);
- void send_message_client_counted(Message* m, client_t client) {
- Message::ref mr(m, false);
- send_message_client_counted(mr, client);
- }
-
void send_message_client_counted(const Message::ref& m, Session* session);
- void send_message_client_counted(Message* m, Session* session) {
- Message::ref mr(m, false);
- send_message_client_counted(mr, session);
- }
-
void send_message_client_counted(const Message::ref& m, const ConnectionRef& connection);
- void send_message_client_counted(Message* m, const ConnectionRef& connection) {
- Message::ref mr(m, false);
- send_message_client_counted(mr, connection);
- }
-
void send_message_client(const Message::ref& m, Session* session);
- void send_message_client(Message* m, Session* session) {
- Message::ref mr(m, false);
- send_message_client(mr, session);
- }
-
void send_message(const Message::ref& m, const ConnectionRef& c);
- void send_message(Message* m, const ConnectionRef& c) {
- send_message(m, c);
- }
void wait_for_active_peer(mds_rank_t who, MDSInternalContextBase *c) {
waiting_for_active_peer[who].push_back(c);
dout(10) << "stray agree on " << reqid << " tid " << tid
<< ", sending ROLLBACK" << dendl;
assert(!server_ready);
- MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_ROLLBACK, 0, tid), false);
+ auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ROLLBACK, 0, tid);
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
}
break;
if (server_ready) {
// send message
- MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_PREPARE, reqid), false);
+ auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_PREPARE, reqid);
req->bl = mutation;
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
} else
if (server_ready) {
// send message
- MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_COMMIT, 0, tid), false);
+ auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_COMMIT, 0, tid);
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
} else
dout(10) << "tableserver is not ready yet, deferring request" << dendl;
p != pending_commit.end();
++p) {
dout(10) << "resending commit on " << p->first << dendl;
- MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_COMMIT, 0, p->first), false);
+ auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_COMMIT, 0, p->first);
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
}
}
p != pending_prepare.end();
++p) {
dout(10) << "resending prepare on " << p->first << dendl;
- MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_PREPARE, p->first), false);
+ auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_PREPARE, p->first);
req->bl = p->second.mutation;
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
}
_prepare(req->bl, req->reqid, from, out);
assert(version == tid);
- MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_AGREE, req->reqid, tid), false);
+ auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, req->reqid, tid);
reply->bl = std::move(out);
if (_notify_prep(tid)) {
else if (tid <= version) {
dout(0) << "got commit for tid " << tid << " <= " << version
<< ", already committed, sending ack." << dendl;
- MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_ACK, req->reqid, tid), false);
+ auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ACK, req->reqid, tid);
mds->send_message(reply, req->get_connection());
}
else {
_commit(tid, req);
_note_commit(tid);
- MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_ACK, req->reqid, req->get_tid()), false);
+ auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ACK, req->reqid, req->get_tid());
mds->send_message_mds(reply, mds_rank_t(req->get_source().num()));
}
next_reqids[who] = p.second.reqid + 1;
version_t tid = p.second.tid;
- MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_AGREE, p.second.reqid, tid), false);
+ auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, p.second.reqid, tid);
_get_reply_buffer(tid, &reply->bl);
mds->send_message_mds(reply, who);
}
for (auto p : active_clients) {
- MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_SERVER_READY, next_reqids[p]), false);
+ auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_SERVER_READY, next_reqids[p]);
mds->send_message_mds(reply, p);
}
recovered = true;
if (p->second.reqid >= next_reqid)
next_reqid = p->second.reqid + 1;
- MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_AGREE, p->second.reqid, p->second.tid), false);
+ auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, p->second.reqid, p->second.tid);
_get_reply_buffer(p->second.tid, &reply->bl);
mds->send_message_mds(reply, who);
}
- MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_SERVER_READY, next_reqid), false);
+ auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_SERVER_READY, next_reqid);
mds->send_message_mds(reply, who);
}
if (notify_peer &&
(!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
- mds->send_message_mds(new MExportDirCancel(dir->dirfrag(), it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
break;
case EXPORT_FREEZING:
if (notify_peer &&
(!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
- mds->send_message_mds(new MExportDirCancel(dir->dirfrag(), it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
break;
// NOTE: state order reversal, warning comes after prepping
if (notify_peer &&
(!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
- mds->send_message_mds(new MExportDirCancel(dir->dirfrag(), it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
break;
case EXPORT_EXPORTING:
// send ExportDirDiscover (ask target)
filepath path;
dir->inode->make_path(path);
- MExportDirDiscover *discover = new MExportDirDiscover(dir->dirfrag(), path,
- mds->get_nodeid(),
- it->second.tid);
+ auto discover = MExportDirDiscover::factory::build(dir->dirfrag(), path, mds->get_nodeid(), it->second.tid);
mds->send_message_mds(discover, dest);
assert(g_conf()->mds_kill_export_at != 2);
dir->unfreeze_tree();
cache->try_subtree_merge(dir);
- mds->send_message_mds(new MExportDirCancel(dir->dirfrag(), it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
export_state.erase(it);
dir->clear_exporting();
cache->get_subtree_bounds(dir, bounds);
// generate prep message, log entry.
- MExportDirPrep *prep = new MExportDirPrep(dir->dirfrag(), it->second.tid);
+ auto prep = MExportDirPrep::factory::build(dir->dirfrag(), it->second.tid);
// include list of bystanders
for (const auto &p : dir->get_replicas()) {
it->second.warning_ack_waiting.insert(p.first);
it->second.notify_ack_waiting.insert(p.first); // we'll eventually get a notifyack, too!
- MExportDirNotify *notify = new MExportDirNotify(dir->dirfrag(), it->second.tid, true,
- mds_authority_t(mds->get_nodeid(),CDIR_AUTH_UNKNOWN),
- mds_authority_t(mds->get_nodeid(),it->second.peer));
+ auto notify = MExportDirNotify::factory::build(dir->dirfrag(), it->second.tid, true,
+ mds_authority_t(mds->get_nodeid(),CDIR_AUTH_UNKNOWN),
+ mds_authority_t(mds->get_nodeid(),it->second.peer));
for (auto &cdir : bounds) {
notify->get_bounds().push_back(cdir->dirfrag());
}
mds->balancer->subtract_export(dir);
// fill export message with cache data
- MExportDir *req = new MExportDir(dir->dirfrag(), it->second.tid);
+ auto req = MExportDir::factory::build(dir->dirfrag(), it->second.tid);
map<client_t,entity_inst_t> exported_client_map;
map<client_t,client_metadata_t> exported_client_metadata_map;
uint64_t num_exported_inodes = encode_export_dir(req->export_data,
const Capability *cap = &p.second;
dout(7) << "finish_export_inode_caps telling client." << p.first
<< " exported caps on " << *in << dendl;
- MClientCaps *m = new MClientCaps(CEPH_CAP_OP_EXPORT, in->ino(), 0,
+ auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, in->ino(), 0,
cap->get_cap_id(), cap->get_mseq(), mds->get_osd_epoch_barrier());
map<client_t,Capability::Import>::iterator q = peer_imported.find(p.first);
for (set<mds_rank_t>::iterator p = stat.notify_ack_waiting.begin();
p != stat.notify_ack_waiting.end();
++p) {
- MExportDirNotify *notify = new MExportDirNotify(dir->dirfrag(), stat.tid, true,
- pair<int,int>(mds->get_nodeid(), stat.peer),
- pair<int,int>(mds->get_nodeid(), CDIR_AUTH_UNKNOWN));
+ auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true,
+ pair<int,int>(mds->get_nodeid(), stat.peer),
+ pair<int,int>(mds->get_nodeid(), CDIR_AUTH_UNKNOWN));
for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
notify->get_bounds().push_back((*i)->dirfrag());
mds->send_message_mds(notify, *p);
for (set<mds_rank_t>::iterator p = stat.notify_ack_waiting.begin();
p != stat.notify_ack_waiting.end();
++p) {
- MExportDirNotify *notify = new MExportDirNotify(dir->dirfrag(), stat.tid, true,
- pair<int,int>(mds->get_nodeid(), stat.peer),
- pair<int,int>(stat.peer, CDIR_AUTH_UNKNOWN));
+ auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true,
+ pair<int,int>(mds->get_nodeid(), stat.peer),
+ pair<int,int>(stat.peer, CDIR_AUTH_UNKNOWN));
for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
notify->get_bounds().push_back((*i)->dirfrag());
// notify peer to send cap import messages to clients
if (!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(stat.peer)) {
- mds->send_message_mds(new MExportDirFinish(dir->dirfrag(), false, stat.tid), stat.peer);
+ mds->send_message_mds(MExportDirFinish::factory::build(dir->dirfrag(), false, stat.tid), stat.peer);
} else {
dout(7) << "not sending MExportDirFinish, dest has failed" << dendl;
}
// send finish/commit to new auth
if (!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer)) {
- mds->send_message_mds(new MExportDirFinish(dir->dirfrag(), true, it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirFinish::factory::build(dir->dirfrag(), true, it->second.tid), it->second.peer);
} else {
dout(7) << "not sending MExportDirFinish last, dest has failed" << dendl;
}
if (!mds->is_active()) {
dout(7) << " not active, send NACK " << dendl;
- mds->send_message_mds(new MExportDirDiscoverAck(df, m->get_tid(), false), from);
+ mds->send_message_mds(MExportDirDiscoverAck::factory::build(df, m->get_tid(), false), from);
return;
}
// reply
dout(7) << " sending export_discover_ack on " << *in << dendl;
- mds->send_message_mds(new MExportDirDiscoverAck(df, m->get_tid()), p_state->peer);
+ mds->send_message_mds(MExportDirDiscoverAck::factory::build(df, m->get_tid()), p_state->peer);
assert (g_conf()->mds_kill_import_at != 2);
}
// ok!
dout(7) << " sending export_prep_ack on " << *dir << dendl;
- mds->send_message(new MExportDirPrepAck(dir->dirfrag(), success, m->get_tid()), m->get_connection());
+ mds->send_message(MExportDirPrepAck::factory::build(dir->dirfrag(), success, m->get_tid()), m->get_connection());
assert(g_conf()->mds_kill_import_at != 4);
}
for (set<mds_rank_t>::iterator p = stat.bystanders.begin();
p != stat.bystanders.end();
++p) {
- MExportDirNotify *notify =
- new MExportDirNotify(dir->dirfrag(), stat.tid, false,
- pair<int,int>(stat.peer, mds->get_nodeid()),
- pair<int,int>(mds->get_nodeid(), CDIR_AUTH_UNKNOWN));
+ auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, false,
+ pair<int,int>(stat.peer, mds->get_nodeid()),
+ pair<int,int>(mds->get_nodeid(), CDIR_AUTH_UNKNOWN));
for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
notify->get_bounds().push_back((*i)->dirfrag());
mds->send_message_mds(notify, *p);
stat.bystanders.erase(p++);
continue;
}
- MExportDirNotify *notify =
- new MExportDirNotify(dir->dirfrag(), stat.tid, true,
- mds_authority_t(stat.peer, mds->get_nodeid()),
- mds_authority_t(stat.peer, CDIR_AUTH_UNKNOWN));
+ auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true,
+ mds_authority_t(stat.peer, mds->get_nodeid()),
+ mds_authority_t(stat.peer, CDIR_AUTH_UNKNOWN));
for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
notify->get_bounds().push_back((*i)->dirfrag());
mds->send_message_mds(notify, *p);
// test surviving observer of a failed migration that did not complete
//assert(dir->replica_map.size() < 2 || mds->get_nodeid() != 0);
- MExportDirAck *ack = new MExportDirAck(dir->dirfrag(), it->second.tid);
+ auto ack = MExportDirAck::factory::build(dir->dirfrag(), it->second.tid);
encode(imported_caps, ack->imported_caps);
mds->send_message_mds(ack, from);
// send ack
if (m->wants_ack()) {
- mds->send_message_mds(new MExportDirNotifyAck(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from);
+ mds->send_message_mds(MExportDirNotifyAck::factory::build(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from);
} else {
// aborted. no ack.
dout(7) << "handle_export_notify no ack requested" << dendl;
assert(!in->is_ambiguous_auth());
assert(!in->state_test(CInode::STATE_EXPORTINGCAPS));
- MExportCaps *ex = new MExportCaps;
+ auto ex = MExportCaps::factory::build();
ex->ino = in->ino();
encode_export_inode_caps(in, false, ex->cap_bl, ex->client_map, ex->client_metadata_map);
dout(7) << __func__ << " telling client." << it.first
<< " exported caps on " << *in << dendl;
- MClientCaps *m = new MClientCaps(CEPH_CAP_OP_EXPORT, in->ino(), 0,
+ auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, in->ino(), 0,
cap->get_cap_id(), cap->get_mseq(),
mds->get_osd_epoch_barrier());
m->set_cap_peer(it.second.cap_id, it.second.issue_seq, it.second.mseq, from, 0);
mds->locker->eval(in, CEPH_CAP_LOCKS, true);
if (!imported_caps.empty()) {
- MExportCapsAck *ack = new MExportCapsAck(in->ino());
+ auto ack = MExportCapsAck::factory::build(in->ino());
map<client_t,uint64_t> peer_caps_ids;
for (auto &p : imported_caps )
peer_caps_ids[p.first] = it->second.at(p.first).cap_id;
{
auto send_reject_message = [this, session](std::string_view err_str) {
- MClientSession *m = new MClientSession(CEPH_SESSION_REJECT);
+ auto m = MClientSession::factory::build(CEPH_SESSION_REJECT);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
m->metadata["error_string"] = err_str;
mds->send_message_client(m, session);
mds->locker->resume_stale_caps(session);
mds->sessionmap.touch_session(session);
}
- m->get_connection()->send_message(new MClientSession(CEPH_SESSION_RENEWCAPS, m->get_seq()));
+ m->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_RENEWCAPS, m->get_seq()));
} else {
dout(10) << "ignoring renewcaps on non open|stale session (" << session->get_state_name() << ")" << dendl;
}
!session->get_connection()->has_feature(CEPH_FEATURE_EXPORT_PEER))
continue;
version_t seq = session->wait_for_flush(gather.new_sub());
- mds->send_message_client(new MClientSession(CEPH_SESSION_FLUSHMSG, seq), session);
+ mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FLUSHMSG, seq), session);
}
}
mds->sessionmap.set_state(session, Session::STATE_OPEN);
mds->sessionmap.touch_session(session);
assert(session->get_connection());
- MClientSession *reply = new MClientSession(CEPH_SESSION_OPEN);
+ auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
reply->supported_features = supported_features;
- session->get_connection()->send_message(reply);
+ session->get_connection()->send_message2(reply);
if (mdcache->is_readonly())
- session->get_connection()->send_message(new MClientSession(CEPH_SESSION_FORCE_RO));
+ session->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_FORCE_RO));
} else if (session->is_closing() ||
session->is_killing()) {
// kill any lingering capabilities, leases, requests
}
// reset session
- mds->send_message_client(new MClientSession(CEPH_SESSION_CLOSE), session);
+ mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_CLOSE), session);
mds->sessionmap.set_state(session, Session::STATE_CLOSED);
session->clear();
mds->sessionmap.remove_session(session);
mds->sessionmap.set_state(session, Session::STATE_OPEN);
mds->sessionmap.touch_session(session);
- MClientSession *reply = new MClientSession(CEPH_SESSION_OPEN);
+ auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
reply->supported_features = supported_features;
mds->send_message_client(reply, session);
if (mdcache->is_readonly())
- mds->send_message_client(new MClientSession(CEPH_SESSION_FORCE_RO), session);
+ mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FORCE_RO), session);
}
} else {
dout(10) << "force_open_sessions skipping already-open " << session->info.inst << dendl;
mds->sessionmap.set_state(session, Session::STATE_STALE);
mds->locker->revoke_stale_caps(session);
mds->locker->remove_stale_leases(session);
- mds->send_message_client(new MClientSession(CEPH_SESSION_STALE, session->get_push_seq()), session);
+ mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_STALE, session->get_push_seq()), session);
finish_flush_session(session, session->get_push_seq());
}
}
if (deny) {
- m->get_connection()->send_message(new MClientSession(CEPH_SESSION_CLOSE));
+ m->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_CLOSE));
if (session->is_open())
kill_session(session, nullptr);
return;
}
// notify client of success with an OPEN
- MClientSession *reply = new MClientSession(CEPH_SESSION_OPEN);
+ auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
reply->supported_features = supported_features;
- m->get_connection()->send_message(reply);
+ m->get_connection()->send_message2(reply);
session->last_cap_renew = ceph_clock_now();
mds->clog->debug() << "reconnect by " << session->info.inst << " after " << delay;
uint64_t newlim = std::max(std::min<uint64_t>((session->caps.size() * ratio), max_caps_per_client), min_caps_per_client);
if (session->caps.size() > newlim) {
- MClientSession *m = new MClientSession(CEPH_SESSION_RECALL_STATE);
+ auto m = MClientSession::factory::build(CEPH_SESSION_RECALL_STATE);
m->head.max_caps = newlim;
mds->send_message_client(m, session);
session->notify_recall_sent(newlim);
if (!session->info.inst.name.is_client() ||
!(session->is_open() || session->is_stale()))
continue;
- mds->send_message_client(new MClientSession(CEPH_SESSION_FORCE_RO), session);
+ mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FORCE_RO), session);
}
}
void Server::respond_to_request(MDRequestRef& mdr, int r)
{
if (mdr->client_request) {
- reply_client_request(mdr, MClientReply::ref(new MClientReply(*mdr->client_request, r), false));
+ reply_client_request(mdr, MClientReply::factory::build(*mdr->client_request, r));
} else if (mdr->internal_op > -1) {
dout(10) << "respond_to_request on internal request " << mdr << dendl;
if (!mdr->internal_op_finish)
}
- MClientReply::ref reply(new MClientReply(*req, 0), false);
+ auto reply = MClientReply::factory::build(*req, 0);
reply->set_unsafe();
// mark xlocks "done", indicating that we are exposing uncommitted changes.
req->get_op() != CEPH_MDS_OP_OPEN &&
req->get_op() != CEPH_MDS_OP_CREATE)) {
dout(5) << "already completed " << req->get_reqid() << dendl;
- MClientReply::ref reply(new MClientReply(*req, 0), false);
+ auto reply = MClientReply::factory::build(*req, 0);
if (created != inodeno_t()) {
bufferlist extra;
encode(created, extra);
// the purpose of rename notify is enforcing causal message ordering. making sure
// bystanders have received all messages from rename srcdn's auth MDS.
if (m->get_op() == MMDSSlaveRequest::OP_RENAMENOTIFY) {
- MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK), false);
+ auto reply = MMDSSlaveRequest::factory::build(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK);
mds->send_message(reply, m->get_connection());
return;
}
return;
// ack
- MMDSSlaveRequest::ref r(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, replycode), false);
+ auto r = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, replycode);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
if (replycode == MMDSSlaveRequest::OP_XLOCKACK)
}
// ack!
- MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK), false);
+ auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK);
// return list of my auth_pins (if any)
for (set<MDSCacheObject*>::iterator p = mdr->auth_pins.begin();
{
dout(10) << __func__ << " " << *in << dendl;
- MClientRequest::ref req(new MClientRequest(CEPH_MDS_OP_SETXATTR), false);
+ auto req = MClientRequest::factory::build(CEPH_MDS_OP_SETXATTR);
req->set_filepath(filepath(in->ino()));
req->set_string2("ceph.quota");
// empty vxattr value
op = MMDSSlaveRequest::OP_LINKPREP;
else
op = MMDSSlaveRequest::OP_UNLINKPREP;
- MMDSSlaveRequest::ref req(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, op), false);
+ auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, op);
targeti->set_object_info(req->get_object_info());
req->op_stamp = mdr->get_op_stamp();
if (auto& desti_srnode = mdr->more()->desti_srnode)
// ack
if (!mdr->aborted) {
- MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK));
+ auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK);
mds->send_message_mds(reply, mdr->slave_to_mds);
} else {
dout(10) << " abort flag set, finishing" << dendl;
assert(g_conf()->mds_kill_link_at != 8);
- MMDSSlaveRequest::ref req(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED), false);
+ auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED);
mds->send_message_mds(req, mdr->slave_to_mds);
mdcache->request_finish(mdr);
}
}
dout(10) << "_rmdir_prepare_witness mds." << who << dendl;
- MMDSSlaveRequest::ref req(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP), false);
+ auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP);
req->srcdnpath = filepath(trace.front()->get_dir()->ino());
for (auto dn : trace)
req->srcdnpath.push_dentry(dn->get_name());
mdr->straydn = 0;
if (!mdr->aborted) {
- MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK), false);
+ auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK);
if (!mdr->more()->slave_update_journaled)
reply->mark_not_journaled();
mds->send_message_mds(reply, mdr->slave_to_mds);
}
dout(10) << "_rename_prepare_witness mds." << who << dendl;
- MMDSSlaveRequest::ref req(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP), false);
+ auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP);
req->srcdnpath = filepath(srctrace.front()->get_dir()->ino());
for (auto dn : srctrace)
if (mdr->slave_request->is_interrupted()) {
dout(10) << " slave request interrupted, sending noop reply" << dendl;
- MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK), false);
+ auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
reply->mark_interrupted();
mds->send_message_mds(reply, mdr->slave_to_mds);
mdr->slave_request = 0;
(mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(*p)))
continue;
- MMDSSlaveRequest::ref notify(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY), false);
+ auto notify = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY);
mds->send_message_mds(notify, *p);
mdr->more()->waiting_on_slave.insert(*p);
}
if (reply_witness) {
assert(!srcdnrep.empty());
- MMDSSlaveRequest::ref reply(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK), false);
+ auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
reply->witnesses.swap(srcdnrep);
mds->send_message_mds(reply, mdr->slave_to_mds);
mdr->slave_request = 0;
// prepare ack
MMDSSlaveRequest::ref reply;
if (!mdr->aborted) {
- reply.reset(new MMDSSlaveRequest(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK), false);
+ reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
if (!mdr->more()->slave_update_journaled)
reply->mark_not_journaled();
}
#include <string_view>
-#include "messages/MClientCaps.h"
#include "messages/MClientReconnect.h"
#include "messages/MClientReply.h"
#include "messages/MClientRequest.h"
{
dout(10) << __func__ << " " << *m << dendl;
handle_query_result(m);
- MMDSTableRequest::ref ack(new MMDSTableRequest(table, TABLESERVER_OP_NOTIFY_ACK, 0, m->get_tid()), false);
+ auto ack = MMDSTableRequest::factory::build(table, TABLESERVER_OP_NOTIFY_ACK, 0, m->get_tid());
mds->send_message(ack, m->get_connection());
}
return;
mds_rank_t ts = mds->mdsmap->get_tableserver();
- MMDSTableRequest::ref req(new MMDSTableRequest(table, TABLESERVER_OP_QUERY, ++last_reqid, 0), false);
+ auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_QUERY, ++last_reqid, 0);
using ceph::encode;
char op = 'F';
encode(op, req->bl);
#include <string_view>
-#include "messages/MClientSnap.h"
-
/*
* SnapRealm
assert(version == tid);
for (auto &p : active_clients) {
- MMDSTableRequest::ref m(new MMDSTableRequest(table, TABLESERVER_OP_NOTIFY_PREP, 0, version), false);
+ auto m = MMDSTableRequest::factory::build(table, TABLESERVER_OP_NOTIFY_PREP, 0, version);
m->bl = bl;
mds->send_message_mds(m, p);
}
auto p = req->bl.cbegin();
decode(op, p);
- MMDSTableRequest::ref reply(new MMDSTableRequest(table, TABLESERVER_OP_QUERY_REPLY, req->reqid, version), false);
+ auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_QUERY_REPLY, req->reqid, version);
switch (op) {
case 'F': // full
if (!all_purge.empty()) {
dout(10) << "requesting removal of " << all_purge << dendl;
- MRemoveSnaps *m = new MRemoveSnaps(all_purge);
- mon_client->send_mon_message(m);
+ auto m = MRemoveSnaps::factory::build(all_purge);
+ mon_client->send_mon_message(m.detach());
}
last_checked_osdmap = version;
filepath dst;
rdn->make_path(dst);
- MClientRequest *req = new MClientRequest(CEPH_MDS_OP_RENAME);
+ auto req = MClientRequest::factory::build(CEPH_MDS_OP_RENAME);
req->set_filepath(dst);
req->set_filepath2(src);
req->set_tid(mds->issue_tid());
dst.push_dentry(src[0]);
dst.push_dentry(src[1]);
- MClientRequest *req = new MClientRequest(CEPH_MDS_OP_RENAME);
+ auto req = MClientRequest::factory::build(CEPH_MDS_OP_RENAME);
req->set_filepath(dst);
req->set_filepath2(src);
req->set_tid(mds->issue_tid());
int get_from() const { return from; }
+protected:
MCacheExpire() : Message(MSG_MDS_CACHEEXPIRE), from(-1) {}
MCacheExpire(int f) :
Message(MSG_MDS_CACHEEXPIRE),
from(f) { }
-private:
~MCacheExpire() override {}
public:
void clear_dirty() { head.dirty = 0; }
+protected:
MClientCaps()
: Message(CEPH_MSG_CLIENT_CAPS, HEAD_VERSION, COMPAT_VERSION) {}
MClientCaps(int op,
head.migrate_seq = mseq;
memset(&peer, 0, sizeof(peer));
}
+ ~MClientCaps() override {}
+
private:
file_layout_t layout;
- ~MClientCaps() override {}
-
public:
const char *get_type_name() const override { return "Cfcap";}
void print(ostream& out) const override {
snapid_t get_first() const { return snapid_t(h.first); }
snapid_t get_last() const { return snapid_t(h.last); }
+protected:
MClientLease() : Message(CEPH_MSG_CLIENT_LEASE) {}
MClientLease(const MClientLease& m) :
Message(CEPH_MSG_CLIENT_LEASE),
h.last = sl;
h.duration_ms = 0;
}
-private:
~MClientLease() override {}
public:
nest_info_t rstat;
quota_info_t quota;
+protected:
MClientQuota() :
Message(CEPH_MSG_CLIENT_QUOTA),
ino(0)
{}
-private:
~MClientQuota() override {}
public:
bool is_safe() const { return head.safe; }
+protected:
MClientReply() : Message(CEPH_MSG_CLIENT_REPLY) {}
MClientReply(const MClientRequest &req, int result = 0) :
Message(CEPH_MSG_CLIENT_REPLY) {
head.result = result;
head.safe = 1;
}
-private:
~MClientReply() override {}
public:
/* XXX HACK */
mutable bool queued_for_replay = false;
- public:
+protected:
// cons
MClientRequest()
: Message(CEPH_MSG_CLIENT_REQUEST, HEAD_VERSION, COMPAT_VERSION) {}
memset(&head, 0, sizeof(head));
head.op = op;
}
-private:
~MClientRequest() override {}
public:
int32_t num_fwd;
bool client_must_resend;
- public:
+protected:
MClientRequestForward()
: Message(CEPH_MSG_CLIENT_REQUEST_FORWARD),
dest_mds(-1), num_fwd(-1), client_must_resend(false) {}
assert(client_must_resend);
header.tid = t;
}
-private:
~MClientRequestForward() override {}
public:
int get_max_caps() const { return head.max_caps; }
int get_max_leases() const { return head.max_leases; }
+protected:
MClientSession() : Message(CEPH_MSG_CLIENT_SESSION, HEAD_VERSION, COMPAT_VERSION) { }
MClientSession(int o, version_t s=0) :
Message(CEPH_MSG_CLIENT_SESSION, HEAD_VERSION, COMPAT_VERSION) {
head.seq = 0;
st.encode_timeval(&head.stamp);
}
-private:
~MClientSession() override {}
public:
vector<inodeno_t> split_inos;
vector<inodeno_t> split_realms;
+protected:
MClientSnap(int o=0) :
Message(CEPH_MSG_CLIENT_SNAP) {
memset(&head, 0, sizeof(head));
head.op = o;
}
-private:
~MClientSnap() override {}
public:
bufferlist bl;
+protected:
MDentryLink() :
Message(MSG_MDS_DENTRYLINK) { }
MDentryLink(dirfrag_t r, dirfrag_t df, std::string_view n, bool p) :
dirfrag(df),
dn(n),
is_primary(p) {}
-private:
~MDentryLink() override {}
public:
bufferlist straybl;
bufferlist snapbl;
+protected:
MDentryUnlink() :
Message(MSG_MDS_DENTRYUNLINK) { }
MDentryUnlink(dirfrag_t df, std::string_view n) :
Message(MSG_MDS_DENTRYUNLINK),
dirfrag(df),
dn(n) {}
-private:
~MDentryUnlink() override {}
public:
using factory = MessageFactory<MDirUpdate>;
friend factory;
- MDirUpdate() : Message(MSG_MDS_DIRUPDATE) {}
- MDirUpdate(mds_rank_t f,
- dirfrag_t dirfrag,
- int dir_rep,
- const std::set<int32_t>& dir_rep_by,
- filepath& path,
- bool discover = false) :
- Message(MSG_MDS_DIRUPDATE), from_mds(f), dirfrag(dirfrag),
- dir_rep(dir_rep), dir_rep_by(dir_rep_by), path(path) {
- this->discover = discover ? 5 : 0;
- }
-
mds_rank_t get_source_mds() const { return from_mds; }
dirfrag_t get_dirfrag() const { return dirfrag; }
int get_dir_rep() const { return dir_rep; }
protected:
~MDirUpdate() {}
+ MDirUpdate() : Message(MSG_MDS_DIRUPDATE) {}
+ MDirUpdate(mds_rank_t f,
+ dirfrag_t dirfrag,
+ int dir_rep,
+ const std::set<int32_t>& dir_rep_by,
+ filepath& path,
+ bool discover = false) :
+ Message(MSG_MDS_DIRUPDATE), from_mds(f), dirfrag(dirfrag),
+ dir_rep(dir_rep), dir_rep_by(dir_rep_by), path(path) {
+ this->discover = discover ? 5 : 0;
+ }
MDirUpdate(const MDirUpdate& m)
: Message(MSG_MDS_DIRUPDATE),
from_mds(m.from_mds),
void set_base_dir_frag(frag_t f) { base_dir_frag = f; }
+protected:
MDiscover() : Message(MSG_MDS_DISCOVER) { }
MDiscover(inodeno_t base_ino_,
frag_t base_frag_,
want(want_path_),
want_base_dir(want_base_dir_),
want_xlocked(discover_xlocks_) { }
-private:
~MDiscover() override {}
public:
void set_base_dir_frag(frag_t df) { base_dir_frag = df; }
- // cons
+protected:
MDiscoverReply() : Message(MSG_MDS_DISCOVERREPLY, HEAD_VERSION) { }
MDiscoverReply(const MDiscover &dis) :
Message(MSG_MDS_DISCOVERREPLY, HEAD_VERSION),
{
header.tid = 0;
}
-private:
~MDiscoverReply() override {}
public:
map<client_t,entity_inst_t> client_map;
map<client_t,client_metadata_t> client_metadata_map;
+protected:
MExportCaps() :
Message(MSG_MDS_EXPORTCAPS, HEAD_VERSION, COMPAT_VERSION) {}
-private:
~MExportCaps() override {}
public:
inodeno_t ino;
bufferlist cap_bl;
+protected:
MExportCapsAck() :
Message(MSG_MDS_EXPORTCAPSACK) {}
MExportCapsAck(inodeno_t i) :
Message(MSG_MDS_EXPORTCAPSACK), ino(i) {}
-private:
~MExportCapsAck() override {}
public:
vector<dirfrag_t> bounds;
bufferlist client_map;
+protected:
MExportDir() : Message(MSG_MDS_EXPORTDIR) {}
MExportDir(dirfrag_t df, uint64_t tid) :
Message(MSG_MDS_EXPORTDIR), dirfrag(df) {
set_tid(tid);
}
-private:
~MExportDir() override {}
public:
dirfrag_t get_dirfrag() const { return dirfrag; }
+protected:
MExportDirAck() : Message(MSG_MDS_EXPORTDIRACK) {}
MExportDirAck(dirfrag_t df, uint64_t tid) :
Message(MSG_MDS_EXPORTDIRACK), dirfrag(df) {
set_tid(tid);
}
-private:
~MExportDirAck() override {}
public:
public:
dirfrag_t get_dirfrag() const { return dirfrag; }
+protected:
MExportDirCancel() : Message(MSG_MDS_EXPORTDIRCANCEL) {}
MExportDirCancel(dirfrag_t df, uint64_t tid) :
Message(MSG_MDS_EXPORTDIRCANCEL), dirfrag(df) {
set_tid(tid);
}
-private:
~MExportDirCancel() override {}
public:
bool started;
+protected:
MExportDirDiscover() :
Message(MSG_MDS_EXPORTDIRDISCOVER),
started(false) { }
from(f), dirfrag(df), path(p), started(false) {
set_tid(tid);
}
-private:
~MExportDirDiscover() override {}
public:
dirfrag_t get_dirfrag() const { return dirfrag; }
bool is_success() const { return success; }
+protected:
MExportDirDiscoverAck() : Message(MSG_MDS_EXPORTDIRDISCOVERACK) {}
MExportDirDiscoverAck(dirfrag_t df, uint64_t tid, bool s=true) :
Message(MSG_MDS_EXPORTDIRDISCOVERACK),
dirfrag(df), success(s) {
set_tid(tid);
}
-private:
~MExportDirDiscoverAck() override {}
public:
dirfrag_t get_dirfrag() const { return dirfrag; }
bool is_last() const { return last; }
+protected:
MExportDirFinish() : last(false) {}
MExportDirFinish(dirfrag_t df, bool l, uint64_t tid) :
Message(MSG_MDS_EXPORTDIRFINISH), dirfrag(df), last(l) {
set_tid(tid);
}
-private:
~MExportDirFinish() override {}
public:
const list<dirfrag_t>& get_bounds() const { return bounds; }
list<dirfrag_t>& get_bounds() { return bounds; }
+protected:
MExportDirNotify() {}
MExportDirNotify(dirfrag_t i, uint64_t tid, bool a, pair<__s32,__s32> oa, pair<__s32,__s32> na) :
Message(MSG_MDS_EXPORTDIRNOTIFY),
base(i), ack(a), old_auth(oa), new_auth(na) {
set_tid(tid);
}
-private:
~MExportDirNotify() override {}
public:
dirfrag_t get_dirfrag() const { return dirfrag; }
pair<__s32,__s32> get_new_auth() const { return new_auth; }
+protected:
MExportDirNotifyAck() {}
MExportDirNotifyAck(dirfrag_t df, uint64_t tid, pair<__s32,__s32> na) :
Message(MSG_MDS_EXPORTDIRNOTIFYACK), dirfrag(df), new_auth(na) {
set_tid(tid);
}
-private:
~MExportDirNotifyAck() override {}
public:
bool did_assim() const { return b_did_assim; }
void mark_assim() { b_did_assim = true; }
+protected:
MExportDirPrep() {
b_did_assim = false;
}
dirfrag(df), b_did_assim(false) {
set_tid(tid);
}
-private:
~MExportDirPrep() override {}
public:
public:
dirfrag_t get_dirfrag() const { return dirfrag; }
+protected:
MExportDirPrepAck() {}
MExportDirPrepAck(dirfrag_t df, bool s, uint64_t tid) :
Message(MSG_MDS_EXPORTDIRPREPACK), dirfrag(df), success(s) {
set_tid(tid);
}
-private:
~MExportDirPrepAck() override {}
public:
inodeno_t ino;
+protected:
MGatherCaps() :
Message(MSG_MDS_GATHERCAPS) {}
-private:
~MGatherCaps() override {}
public:
const map<mds_rank_t, float>& get_import_map() const { return import_map; }
map<mds_rank_t, float>& get_import_map() { return import_map; }
+protected:
MHeartbeat() : Message(MSG_MDS_HEARTBEAT), load(DecayRate()) {}
MHeartbeat(mds_load_t& load, int beat)
: Message(MSG_MDS_HEARTBEAT),
load(load) {
this->beat = beat;
}
-private:
~MHeartbeat() override {}
public:
inodeno_t get_ino() const { return ino; }
int get_caps() const { return caps; }
+protected:
MInodeFileCaps() : Message(MSG_MDS_INODEFILECAPS) {}
MInodeFileCaps(inodeno_t ino, int caps) :
Message(MSG_MDS_INODEFILECAPS) {
this->ino = ino;
this->caps = caps;
}
-private:
~MInodeFileCaps() override {}
public:
const MDSCacheObjectInfo &get_object_info() const { return object_info; }
MDSCacheObjectInfo &get_object_info() { return object_info; }
+protected:
MLock() : Message(MSG_MDS_LOCK) {}
MLock(int ac, mds_rank_t as) :
Message(MSG_MDS_LOCK),
lock->get_parent()->set_object_info(object_info);
lockdata.claim(bl);
}
-private:
~MLock() override {}
public:
uint64_t mds_features;
- public:
+protected:
MMDSBeacon()
: PaxosServiceMessage(MSG_MDS_BEACON, 0, HEAD_VERSION, COMPAT_VERSION),
global_id(0), state(MDSMap::STATE_NULL), standby_for_rank(MDS_RANK_NONE),
standby_replay(false), mds_features(feat) {
set_priority(CEPH_MSG_PRIO_HIGH);
}
-private:
~MMDSBeacon() override {}
public:
map<dirfrag_t, map<string_snap_t, list<slave_reqid> > > authpinned_dentries;
map<dirfrag_t, map<string_snap_t, slave_reqid> > xlocked_dentries;
+protected:
MMDSCacheRejoin() :
Message(MSG_MDS_CACHEREJOIN, HEAD_VERSION, COMPAT_VERSION),
op(0) {}
MMDSCacheRejoin(int o) :
Message(MSG_MDS_CACHEREJOIN, HEAD_VERSION, COMPAT_VERSION),
op(o) {}
-private:
~MMDSCacheRejoin() override {}
public:
ceph_tid_t tid {0};
inodeno_t ino;
+protected:
MMDSFindIno() : Message(MSG_MDS_FINDINO) {}
MMDSFindIno(ceph_tid_t t, inodeno_t i) : Message(MSG_MDS_FINDINO), tid(t), ino(i) {}
+ ~MMDSFindIno() override {}
+public:
const char *get_type_name() const override { return "findino"; }
void print(ostream &out) const override {
out << "findino(" << tid << " " << ino << ")";
ceph_tid_t tid = 0;
filepath path;
+protected:
MMDSFindInoReply() : Message(MSG_MDS_FINDINOREPLY) {}
MMDSFindInoReply(ceph_tid_t t) : Message(MSG_MDS_FINDINOREPLY), tid(t) {}
+ ~MMDSFindInoReply() override {}
+public:
const char *get_type_name() const override { return "findinoreply"; }
void print(ostream &out) const override {
out << "findinoreply(" << tid << " " << path << ")";
bufferlist basebl;
+protected:
MMDSFragmentNotify() : Message(MSG_MDS_FRAGMENTNOTIFY) {}
MMDSFragmentNotify(dirfrag_t df, int b) :
Message(MSG_MDS_FRAGMENTNOTIFY),
ino(df.ino), basefrag(df.frag), bits(b) { }
-private:
~MMDSFragmentNotify() override {}
public:
mds_gid_t global_id;
set<mds_rank_t> targets;
+protected:
MMDSLoadTargets() : PaxosServiceMessage(MSG_MDS_OFFLOAD_TARGETS, 0) {}
-
MMDSLoadTargets(mds_gid_t g, set<mds_rank_t>& mds_targets) :
PaxosServiceMessage(MSG_MDS_OFFLOAD_TARGETS, 0),
global_id(g), targets(mds_targets) {}
-private:
~MMDSLoadTargets() override {}
public:
version_t get_epoch() const { return epoch; }
const bufferlist& get_encoded() const { return encoded; }
+protected:
MMDSMap() :
Message(CEPH_MSG_MDS_MAP, HEAD_VERSION, COMPAT_VERSION) {}
MMDSMap(const uuid_d &f, const MDSMap &mm) :
epoch = mm.get_epoch();
mm.encode(encoded, -1); // we will reencode with fewer features as necessary
}
-private:
~MMDSMap() override {}
public:
inodeno_t ino;
vector<inode_backpointer_t> ancestors;
+protected:
MMDSOpenIno() : Message(MSG_MDS_OPENINO) {}
MMDSOpenIno(ceph_tid_t t, inodeno_t i, vector<inode_backpointer_t>* pa) :
Message(MSG_MDS_OPENINO), ino(i) {
if (pa)
ancestors = *pa;
}
+ ~MMDSOpenIno() override {}
+public:
const char *get_type_name() const override { return "openino"; }
void print(ostream &out) const override {
out << "openino(" << header.tid << " " << ino << " " << ancestors << ")";
mds_rank_t hint;
int32_t error;
+protected:
MMDSOpenInoReply() : Message(MSG_MDS_OPENINOREPLY), error(0) {}
MMDSOpenInoReply(ceph_tid_t t, inodeno_t i, mds_rank_t h=MDS_RANK_NONE, int e=0) :
Message(MSG_MDS_OPENINOREPLY), ino(i), hint(h), error(e) {
header.tid = t;
}
+
+public:
const char *get_type_name() const override { return "openinoreply"; }
void print(ostream &out) const override {
out << "openinoreply(" << header.tid << " "
list<table_client> table_clients;
+protected:
MMDSResolve() : Message(MSG_MDS_RESOLVE) {}
-private:
~MMDSResolve() override {}
public:
map<metareqid_t, bufferlist> commit;
vector<metareqid_t> abort;
+protected:
MMDSResolveAck() : Message(MSG_MDS_RESOLVEACK) {}
-private:
~MMDSResolveAck() override {}
public:
const bufferlist& get_lock_data() const { return inode_export; }
bufferlist& get_lock_data() { return inode_export; }
-
- // ----
+protected:
MMDSSlaveRequest() : Message(MSG_MDS_SLAVE_REQUEST) { }
MMDSSlaveRequest(metareqid_t ri, __u32 att, int o) :
Message(MSG_MDS_SLAVE_REQUEST),
reqid(ri), attempt(att), op(o), flags(0), lock_type(0),
inode_export_v(0), srcdn_auth(MDS_RANK_NONE) { }
-private:
~MMDSSlaveRequest() override {}
public:
bufferlist snap_blob;
+protected:
MMDSSnapUpdate() : Message(MSG_MDS_SNAPUPDATE) {}
MMDSSnapUpdate(inodeno_t i, version_t tid, int op) :
Message(MSG_MDS_SNAPUPDATE), ino(i), snap_op(op) {
set_tid(tid);
}
-private:
~MMDSSnapUpdate() override {}
public:
uint64_t reqid = 0;
bufferlist bl;
+protected:
MMDSTableRequest() : Message(MSG_MDS_TABLE_REQUEST) {}
MMDSTableRequest(int tab, int o, uint64_t r, version_t v=0) :
Message(MSG_MDS_TABLE_REQUEST),
table(tab), op(o), reqid(r) {
set_tid(v);
}
-private:
~MMDSTableRequest() override {}
public:
map<int, vector<snapid_t> > snaps;
+protected:
MRemoveSnaps() :
PaxosServiceMessage(MSG_REMOVE_SNAPS, 0) { }
MRemoveSnaps(map<int, vector<snapid_t> >& s) :
PaxosServiceMessage(MSG_REMOVE_SNAPS, 0) {
snaps.swap(s);
}
-private:
~MRemoveSnaps() override {}
public:
MDSMap null_map;
null_map.epoch = fsmap.epoch;
null_map.compat = fsmap.compat;
- mon->send_reply(op, new MMDSMap(mon->monmap->fsid, null_map));
+ auto m = MMDSMap::factory::build(mon->monmap->fsid, null_map);
+ mon->send_reply(op, m.detach());
return true;
} else {
return false; // not booted yet.
// note time and reply
assert(effective_epoch > 0);
_note_beacon(m);
- mon->send_reply(op,
- new MMDSBeacon(mon->monmap->fsid, m->get_global_id(), m->get_name(),
- effective_epoch, state, seq,
- CEPH_FEATURES_SUPPORTED_DEFAULT));
+ {
+ auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+ m->get_global_id(), m->get_name(), effective_epoch,
+ state, seq, CEPH_FEATURES_SUPPORTED_DEFAULT);
+ mon->send_reply(op, beacon.detach());
+ }
return true;
ignore:
last_beacon.erase(gid);
// Respond to MDS, so that it knows it can continue to shut down
- mon->send_reply(op,
- new MMDSBeacon(
+ auto beacon = MMDSBeacon::factory::build(
mon->monmap->fsid, m->get_global_id(),
m->get_name(), pending.get_epoch(), state, seq,
- CEPH_FEATURES_SUPPORTED_DEFAULT));
+ CEPH_FEATURES_SUPPORTED_DEFAULT);
+ mon->send_reply(op, beacon.detach());
} else if (state == MDSMap::STATE_DNE) {
if (!mon->osdmon()->is_writeable()) {
dout(1) << __func__ << ": DNE from rank " << info.rank
request_proposal(mon->osdmon());
// Respond to MDS, so that it knows it can continue to shut down
- mon->send_reply(op,
- new MMDSBeacon(
- mon->monmap->fsid, m->get_global_id(),
- m->get_name(), pending.get_epoch(), state, seq,
- CEPH_FEATURES_SUPPORTED_DEFAULT));
+ auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+ m->get_global_id(), m->get_name(), pending.get_epoch(), state, seq,
+ CEPH_FEATURES_SUPPORTED_DEFAULT);
+ mon->send_reply(op, beacon.detach());
} else if (info.state == MDSMap::STATE_STANDBY && state != info.state) {
// Standby daemons should never modify their own
// state. Reject any attempts to do so.
MDSMap null_map;
null_map.epoch = fsmap.epoch;
null_map.compat = fsmap.compat;
- mon->send_reply(op, new MMDSMap(mon->monmap->fsid, null_map));
+ auto m = MMDSMap::factory::build(mon->monmap->fsid, null_map);
+ mon->send_reply(op, m.detach());
} else {
- mon->send_reply(op, new MMDSBeacon(mon->monmap->fsid,
- m->get_global_id(),
- m->get_name(),
- fsmap.get_epoch(),
- m->get_state(),
- m->get_seq(),
- CEPH_FEATURES_SUPPORTED_DEFAULT));
+ auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+ m->get_global_id(), m->get_name(), fsmap.get_epoch(),
+ m->get_state(), m->get_seq(), CEPH_FEATURES_SUPPORTED_DEFAULT);
+ mon->send_reply(op, beacon.detach());
}
}
if (sub->next > mds_map->epoch) {
return;
}
- auto msg = new MMDSMap(mon->monmap->fsid, *mds_map);
+ auto msg = MMDSMap::factory::build(mon->monmap->fsid, *mds_map);
- sub->session->con->send_message(msg);
+ sub->session->con->send_message(msg.detach());
if (sub->onetime) {
mon->session_map.remove_sub(sub);
} else {