}
}
- auto m = MClientSession::factory::build(CEPH_SESSION_REQUEST_OPEN);
+ auto m = MClientSession::create(CEPH_SESSION_REQUEST_OPEN);
m->metadata = metadata;
m->supported_features = feature_bitset_t(CEPHFS_FEATURES_CLIENT_SUPPORTED);
session->con->send_message2(m);
{
ldout(cct, 2) << __func__ << " mds." << s->mds_num << " seq " << s->seq << dendl;
s->state = MetaSession::STATE_CLOSING;
- s->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_CLOSE, s->seq));
+ s->con->send_message2(MClientSession::create(CEPH_SESSION_REQUEST_CLOSE, s->seq));
}
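
The pattern repeated throughout this patch is the same: the nested MFoo::factory::build(...) call becomes a static MFoo::create(...), which presumably forwards its arguments to the message constructor and returns the message's reference-counted handle (the MClientCaps::ref type visible in later hunks). A minimal, compile-able sketch of that shape, assuming an intrusive-refcount design; every name here (RefCounted, Ref, FakeClientSession, send_message2_like) is illustrative and not the real Ceph type:

    #include <atomic>
    #include <utility>

    struct RefCounted {
      std::atomic<int> nref{0};            // intrusive reference count
    };

    template <typename T>
    class Ref {                            // stand-in for e.g. MClientSession::ref
      T* p = nullptr;
    public:
      explicit Ref(T* q = nullptr) : p(q) { if (p) ++p->nref; }
      Ref(const Ref& o) : Ref(o.p) {}
      Ref& operator=(const Ref&) = delete;
      ~Ref() { if (p && --p->nref == 0) delete p; }
      T* operator->() const { return p; }
    };

    struct FakeClientSession : RefCounted {
      using ref = Ref<FakeClientSession>;
      int op;
      unsigned long seq;
      explicit FakeClientSession(int op, unsigned long seq = 0) : op(op), seq(seq) {}

      // the new spelling: construct the message and wrap it in one step
      template <typename... Args>
      static ref create(Args&&... args) {
        return ref(new FakeClientSession(std::forward<Args>(args)...));
      }
    };

    // a sink in the style of Connection::send_message2(), holding its own reference
    void send_message2_like(FakeClientSession::ref m) { (void)m->seq; }

    int main() {
      auto m = FakeClientSession::create(1, 42UL);  // was: ...::factory::build(1, 42)
      send_message2_like(m);   // callee copies the ref, so both sides stay valid
      return 0;                // our ref drops the last reference here
    }
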
void Client::_closed_mds_session(MetaSession *s)
break;
case CEPH_SESSION_FLUSHMSG:
- session->con->send_message2(MClientSession::factory::build(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
+ session->con->send_message2(MClientSession::create(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
break;
case CEPH_SESSION_FORCE_RO:
MClientRequest* Client::build_client_request(MetaRequest *request)
{
- auto req = MClientRequest::factory::build(request->get_op());
+ auto req = MClientRequest::create(request->get_op());
req->set_tid(request->tid);
req->set_stamp(request->op_stamp);
memcpy(&req->head, &request->head, sizeof(ceph_mds_request_head));
s->seq++;
ldout(cct, 10) << " mds." << s->mds_num << " seq now " << s->seq << dendl;
if (s->state == MetaSession::STATE_CLOSING) {
- s->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_CLOSE, s->seq));
+ s->con->send_message2(MClientSession::create(CEPH_SESSION_REQUEST_CLOSE, s->seq));
}
}
revoke:
{
- auto reply = MClientLease::factory::build(CEPH_MDS_LEASE_RELEASE, seq, m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname);
+ auto reply = MClientLease::create(CEPH_MDS_LEASE_RELEASE, seq, m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname);
m->get_connection()->send_message2(reply);
}
m->put();
if (flush)
follows = in->snaprealm->get_snap_context().seq;
- auto m = MClientCaps::factory::build(op,
+ auto m = MClientCaps::create(op,
in->ino,
0,
cap->cap_id, cap->seq,
session->flushing_caps_tids.insert(capsnap.flush_tid);
}
- auto m = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq,
+ auto m = MClientCaps::create(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq,
cap_epoch_barrier);
m->caller_uid = capsnap.cap_dirtier_uid;
m->caller_gid = capsnap.cap_dirtier_gid;
// will crash if they see an unknown CEPH_SESSION_* value in this msg.
const uint64_t features = session->con->get_features();
if (HAVE_FEATURE(features, SERVER_LUMINOUS)) {
- auto m = MClientSession::factory::build(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
+ auto m = MClientSession::create(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
session->con->send_message2(m);
}
}
ldout(cct, 10) << "renew_caps mds." << session->mds_num << dendl;
session->last_cap_renew_request = ceph_clock_now();
uint64_t seq = ++session->cap_renew_seq;
- session->con->send_message2(MClientSession::factory::build(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
+ session->con->send_message2(MClientSession::create(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
}
assert(want_state != MDSMap::STATE_NULL);
- auto beacon = MMDSBeacon::factory::build(
+ auto beacon = MMDSBeacon::create(
monc->get_fsid(), mds_gid_t(monc->get_global_id()),
name,
epoch,
if (mds->is_cluster_degraded() &&
mds->mdsmap->get_state(it.first) < MDSMap::STATE_REJOIN)
continue;
- auto m = MLock::factory::build(lock, msg, mds->get_nodeid());
+ auto m = MLock::create(lock, msg, mds->get_nodeid());
mds->send_message_mds(m, it.first);
}
}
if (mds->is_cluster_degraded() &&
mds->mdsmap->get_state(it.first) < MDSMap::STATE_REJOIN)
continue;
- auto m = MLock::factory::build(lock, msg, mds->get_nodeid());
+ auto m = MLock::create(lock, msg, mds->get_nodeid());
m->set_data(data);
mds->send_message_mds(m, it.first);
}
return false;
}
- auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPIN);
+ auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPIN);
for (set<MDSCacheObject*>::iterator q = p->second.begin();
q != p->second.end();
++q) {
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(*p) >= MDSMap::STATE_REJOIN) {
dout(10) << "_drop_non_rdlocks dropping remote locks on mds." << *p << dendl;
- auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_DROPLOCKS);
+ auto slavereq = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_DROPLOCKS);
mds->send_message_mds(slavereq, *p);
}
}
mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
switch (lock->get_state()) {
case LOCK_SYNC_LOCK:
- mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_LOCKACK, mds->get_nodeid()), auth);
+ mds->send_message_mds(MLock::create(lock, LOCK_AC_LOCKACK, mds->get_nodeid()), auth);
break;
case LOCK_MIX_SYNC:
{
- auto reply = MLock::factory::build(lock, LOCK_AC_SYNCACK, mds->get_nodeid());
+ auto reply = MLock::create(lock, LOCK_AC_SYNCACK, mds->get_nodeid());
lock->encode_locked_state(reply->get_data());
mds->send_message_mds(reply, auth);
next = LOCK_MIX_SYNC2;
case LOCK_SYNC_MIX:
{
- auto reply = MLock::factory::build(lock, LOCK_AC_MIXACK, mds->get_nodeid());
+ auto reply = MLock::create(lock, LOCK_AC_MIXACK, mds->get_nodeid());
mds->send_message_mds(reply, auth);
next = LOCK_SYNC_MIX2;
}
{
bufferlist data;
lock->encode_locked_state(data);
- mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_LOCKACK, mds->get_nodeid(), data), auth);
+ mds->send_message_mds(MLock::create(lock, LOCK_AC_LOCKACK, mds->get_nodeid(), data), auth);
(static_cast<ScatterLock *>(lock))->start_flush();
// we'll get an AC_LOCKFLUSHED to complete
}
mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
dout(10) << "requesting rdlock from auth on "
<< *lock << " on " << *lock->get_parent() << dendl;
- mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_REQRDLOCK, mds->get_nodeid()), auth);
+ mds->send_message_mds(MLock::create(lock, LOCK_AC_REQRDLOCK, mds->get_nodeid()), auth);
}
return false;
}
mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
dout(10) << "requesting scatter from auth on "
<< *lock << " on " << *lock->get_parent() << dendl;
- mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_REQSCATTER, mds->get_nodeid()), auth);
+ mds->send_message_mds(MLock::create(lock, LOCK_AC_REQSCATTER, mds->get_nodeid()), auth);
}
break;
}
// send lock request
mut->start_locking(lock, target);
mut->more()->slaves.insert(target);
- auto r = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK);
+ auto r = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, target);
<< " " << *lock->get_parent() << dendl;
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(target) >= MDSMap::STATE_REJOIN) {
- auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK);
+ auto slavereq = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK);
slavereq->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(slavereq->get_object_info());
mds->send_message_mds(slavereq, target);
// send lock request
mut->more()->slaves.insert(auth);
mut->start_locking(lock, auth);
- auto r = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK);
+ auto r = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, auth);
mds_rank_t auth = lock->get_parent()->authority().first;
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
- auto slavereq = MMDSSlaveRequest::factory::build(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK);
+ auto slavereq = MMDSSlaveRequest::create(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK);
slavereq->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(slavereq->get_object_info());
mds->send_message_mds(slavereq, auth);
cap->reset_num_revoke_warnings();
}
- auto m = MClientCaps::factory::build(op, in->ino(),
+ auto m = MClientCaps::create(op, in->ino(),
in->find_snaprealm()->inode->ino(),
cap->get_cap_id(),
cap->get_last_seq(),
for (auto &p : in->client_caps) {
Capability *cap = &p.second;
- auto m = MClientCaps::factory::build(CEPH_CAP_OP_TRUNC,
+ auto m = MClientCaps::create(CEPH_CAP_OP_TRUNC,
in->ino(),
in->find_snaprealm()->inode->ino(),
cap->get_cap_id(), cap->get_last_seq(),
if (!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(auth))
- mds->send_message_mds(MInodeFileCaps::factory::build(in->ino(), in->replica_caps_wanted), auth);
+ mds->send_message_mds(MInodeFileCaps::create(in->ino(), in->replica_caps_wanted), auth);
}
}
if (cap->pending() & (CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER)) {
dout(10) << "share_inode_max_size with client." << client << dendl;
cap->inc_last_seq();
- auto m = MClientCaps::factory::build(CEPH_CAP_OP_GRANT,
+ auto m = MClientCaps::create(CEPH_CAP_OP_GRANT,
in->ino(),
in->find_snaprealm()->inode->ino(),
cap->get_cap_id(),
<< " for client." << client << dendl;
MClientCaps::ref ack;
if (op == CEPH_CAP_OP_FLUSHSNAP) {
- ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
+ ack = MClientCaps::create(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
} else {
- ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
+ ack = MClientCaps::create(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
}
ack->set_snap_follows(follows);
ack->set_client_tid(m->get_client_tid());
// case we get a dup response, so whatever.)
MClientCaps::ref ack;
if (dirty) {
- ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSHSNAP_ACK, in->ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
+ ack = MClientCaps::create(CEPH_CAP_OP_FLUSHSNAP_ACK, in->ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
ack->set_snap_follows(follows);
ack->set_client_tid(m->get_client_tid());
ack->set_oldest_flush_tid(m->get_oldest_flush_tid());
if (dirty && in->is_auth()) {
dout(7) << " flush client." << client << " dirty " << ccap_string(dirty)
<< " seq " << m->get_seq() << " on " << *in << dendl;
- ack = MClientCaps::factory::build(CEPH_CAP_OP_FLUSH_ACK, in->ino(), 0, cap->get_cap_id(), m->get_seq(),
+ ack = MClientCaps::create(CEPH_CAP_OP_FLUSH_ACK, in->ino(), 0, cap->get_cap_id(), m->get_seq(),
m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
ack->set_client_tid(m->get_client_tid());
ack->set_oldest_flush_tid(m->get_oldest_flush_tid());
dout(7) << "handle_client_lease client." << client << " renew on " << *dn
<< (!dn->lock.can_lease(client)?", revoking lease":"") << dendl;
if (dn->lock.can_lease(client)) {
- auto reply = MClientLease::factory::build(*m);
+ auto reply = MClientLease::create(*m);
int pool = 1; // fixme.. do something smart!
reply->h.duration_ms = (int)(1000 * mdcache->client_lease_durations[pool]);
reply->h.seq = ++l->seq;
// i should also revoke the dir ICONTENT lease, if they have it!
CInode *diri = dn->get_dir()->get_inode();
- auto lease = MClientLease::factory::build(CEPH_MDS_LEASE_REVOKE, l->seq, mask, diri->ino(), diri->first, CEPH_NOSNAP, dn->get_name());
+ auto lease = MClientLease::create(CEPH_MDS_LEASE_REVOKE, l->seq, mask, diri->ino(), diri->first, CEPH_NOSNAP, dn->get_name());
mds->send_message_client_counted(lease, l->client);
}
}
// request unscatter?
mds_rank_t auth = lock->get_parent()->authority().first;
if (!mds->is_cluster_degraded() || mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
- mds->send_message_mds(MLock::factory::build(lock, LOCK_AC_NUDGE, mds->get_nodeid()), auth);
+ mds->send_message_mds(MLock::create(lock, LOCK_AC_NUDGE, mds->get_nodeid()), auth);
}
// wait...
for (const auto& r : up) {
if (r == mds->get_nodeid())
continue;
- auto hb = MHeartbeat::factory::build(load, beat_epoch);
+ auto hb = MHeartbeat::create(load, beat_epoch);
hb->get_import_map() = import_map;
mds->send_message_mds(hb, r);
}
cap->last_rsize = i->rstat.rsize();
cap->last_rbytes = i->rstat.rbytes;
- auto msg = MClientQuota::factory::build();
+ auto msg = MClientQuota::create();
msg->ino = in->ino();
msg->rstat = i->rstat;
msg->quota = i->quota;
mds->send_message_client_counted(msg, session->get_connection());
}
for (const auto &it : in->get_replicas()) {
- auto msg = MGatherCaps::factory::build();
+ auto msg = MGatherCaps::create();
msg->ino = in->ino();
mds->send_message_mds(msg, it.first);
}
dout(10) << "_logged_slave_commit from mds." << from << " " << reqid << dendl;
// send a message
- auto req = MMDSSlaveRequest::factory::build(reqid, 0, MMDSSlaveRequest::OP_COMMITTED);
+ auto req = MMDSSlaveRequest::create(reqid, 0, MMDSSlaveRequest::OP_COMMITTED);
mds->send_message_mds(req, from);
}
for (map<mds_rank_t, map<metareqid_t, MDSlaveUpdate*> >::iterator p = uncommitted_slave_updates.begin();
p != uncommitted_slave_updates.end();
++p) {
- resolves[p->first] = MMDSResolve::factory::build();
+ resolves[p->first] = MMDSResolve::create();
for (map<metareqid_t, MDSlaveUpdate*>::iterator q = p->second.begin();
q != p->second.end();
++q) {
if (resolve_set.count(master) || is_ambiguous_slave_update(p->first, master)) {
dout(10) << " including uncommitted " << *mdr << dendl;
if (!resolves.count(master))
- resolves[master] = MMDSResolve::factory::build();
+ resolves[master] = MMDSResolve::create();
if (!mdr->committing &&
mdr->has_more() && mdr->more()->is_inode_exporter) {
// re-send cap exports
if (*p == mds->get_nodeid())
continue;
if (mds->is_resolve() || mds->mdsmap->is_resolve(*p))
- resolves[*p] = MMDSResolve::factory::build();
+ resolves[*p] = MMDSResolve::create();
}
map<dirfrag_t, vector<dirfrag_t> > my_subtrees;
}
}
- auto ack = MMDSResolveAck::factory::build();
+ auto ack = MMDSResolveAck::create();
for (const auto &p : m->slave_requests) {
if (uncommitted_masters.count(p.first)) { //mds->sessionmap.have_completed_request(p.first)) {
// COMMIT
if (*p == mds->get_nodeid()) continue; // nothing to myself!
if (rejoin_sent.count(*p)) continue; // already sent a rejoin to this node!
if (mds->is_rejoin())
- rejoins[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_WEAK);
+ rejoins[*p] = MMDSCacheRejoin::create(MMDSCacheRejoin::OP_WEAK);
else if (mds->mdsmap->is_rejoin(*p))
- rejoins[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_STRONG);
+ rejoins[*p] = MMDSCacheRejoin::create(MMDSCacheRejoin::OP_STRONG);
}
if (mds->is_rejoin()) {
if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) {
survivor = true;
dout(10) << "i am a surivivor, and will ack immediately" << dendl;
- ack = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_ACK);
+ ack = MMDSCacheRejoin::create(MMDSCacheRejoin::OP_ACK);
map<inodeno_t,map<client_t,Capability::Import> > imported_caps;
}
// mark client caps stale.
- auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, p->first, 0,
+ auto m = MClientCaps::create(CEPH_CAP_OP_EXPORT, p->first, 0,
r->second.capinfo.cap_id, 0,
mds->get_osd_epoch_barrier());
m->set_cap_peer(q->second.cap_id, q->second.issue_seq, q->second.mseq,
snap = it->second;
snap->head.op = CEPH_SNAP_OP_SPLIT;
} else {
- snap = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
+ snap = MClientSnap::create(CEPH_SNAP_OP_SPLIT);
splits.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple(snap));
snap->head.split = realm->inode->ino();
snap->bl = realm->get_snap_trace();
assert(!p.second->empty());
auto em = splits.emplace(std::piecewise_construct, std::forward_as_tuple(p.first), std::forward_as_tuple());
if (em.second) {
- auto update = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
+ auto update = MClientSnap::create(CEPH_SNAP_OP_SPLIT);
update->head.split = parent_realm->inode->ino();
update->split_inos = split_inos;
update->split_realms = split_realms;
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(q->first.v));
if (session) {
// mark client caps stale.
- auto stale = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, p->first, 0, 0, 0, mds->get_osd_epoch_barrier());
+ auto stale = MClientCaps::create(CEPH_CAP_OP_EXPORT, p->first, 0, 0, 0, mds->get_osd_epoch_barrier());
stale->set_cap_peer(0, 0, 0, -1, 0);
mds->send_message_client_counted(stale, q->first);
}
cap->set_last_issue();
cap->set_last_issue_stamp(ceph_clock_now());
cap->clear_new();
- auto reap = MClientCaps::factory::build(CEPH_CAP_OP_IMPORT, in->ino(), realm->inode->ino(), cap->get_cap_id(), cap->get_last_seq(), cap->pending(), cap->wanted(), 0, cap->get_mseq(), mds->get_osd_epoch_barrier());
+ auto reap = MClientCaps::create(CEPH_CAP_OP_IMPORT, in->ino(), realm->inode->ino(), cap->get_cap_id(), cap->get_last_seq(), cap->pending(), cap->wanted(), 0, cap->get_mseq(), mds->get_osd_epoch_barrier());
in->encode_cap_message(reap, cap);
reap->snapbl = realm->get_snap_trace();
reap->set_cap_peer(p_cap_id, p_seq, p_mseq, peer, p_flags);
if (seq < realm->get_newest_seq()) {
dout(10) << "finish_snaprealm_reconnect client." << client << " has old seq " << seq << " < "
<< realm->get_newest_seq() << " on " << *realm << dendl;
- auto snap = MClientSnap::factory::build(CEPH_SNAP_OP_UPDATE);
+ auto snap = MClientSnap::create(CEPH_SNAP_OP_UPDATE);
snap->bl = realm->get_snap_trace();
for (const auto& child : realm->open_children)
snap->split_realms.push_back(child->inode->ino());
++p) {
if (rejoin_ack_sent.count(*p))
continue;
- acks[*p] = MMDSCacheRejoin::factory::build(MMDSCacheRejoin::OP_ACK);
+ acks[*p] = MMDSCacheRejoin::create(MMDSCacheRejoin::OP_ACK);
}
rejoin_ack_sent = recovery_set;
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple());
if (em.second) {
- em.first->second = MCacheExpire::factory::build(mds->get_nodeid());
+ em.first->second = MCacheExpire::create(mds->get_nodeid());
}
dout(20) << __func__ << ": try expiring " << *mdsdir_in << " for stopping mds." << mds << dendl;
assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
- em.first->second = MCacheExpire::factory::build(mds->get_nodeid());
+ em.first->second = MCacheExpire::create(mds->get_nodeid());
em.first->second->add_dentry(con->dirfrag(), dir->dirfrag(), dn->get_name(), dn->last, dn->get_replica_nonce());
}
}
assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
- em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */
+ em.first->second = MCacheExpire::create(mds->get_nodeid()); /* new */
em.first->second->add_dir(condf, dir->dirfrag(), dir->replica_nonce);
}
}
assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
- em.first->second = MCacheExpire::factory::build(mds->get_nodeid()); /* new */
+ em.first->second = MCacheExpire::create(mds->get_nodeid()); /* new */
em.first->second->add_inode(df, in->vino(), in->get_replica_nonce());
}
}
auto em = delayed_expire[parent_dir].emplace(std::piecewise_construct, std::forward_as_tuple(from), std::forward_as_tuple());
if (em.second)
- em.first->second = MCacheExpire::factory::build(from); /* new */
+ em.first->second = MCacheExpire::create(from); /* new */
// merge these expires into it
em.first->second->add_realm(p.first, p.second);
// got backtrace from peer or backtrace just fetched
if (info.discover || !info.fetch_backtrace)
pa = &info.ancestors;
- mds->send_message_mds(MMDSOpenIno::factory::build(info.tid, ino, pa), peer);
+ mds->send_message_mds(MMDSOpenIno::create(info.tid, ino, pa), peer);
if (mds->logger)
mds->logger->inc(l_mds_openino_peer_discover);
}
CInode *in = get_inode(ino);
if (in) {
dout(10) << " have " << *in << dendl;
- reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, mds_rank_t(0));
+ reply = MMDSOpenInoReply::create(m->get_tid(), ino, mds_rank_t(0));
if (in->is_auth()) {
touch_inode(in);
while (1) {
reply->hint = in->authority().first;
}
} else if (err < 0) {
- reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, MDS_RANK_NONE, err);
+ reply = MMDSOpenInoReply::create(m->get_tid(), ino, MDS_RANK_NONE, err);
} else {
mds_rank_t hint = MDS_RANK_NONE;
int ret = open_ino_traverse_dir(ino, m, m->ancestors, false, false, &hint);
if (ret > 0)
return;
- reply = MMDSOpenInoReply::factory::build(m->get_tid(), ino, hint, ret);
+ reply = MMDSOpenInoReply::create(m->get_tid(), ino, hint, ret);
}
m->get_connection()->send_message2(reply); /* FIXME, why not send_client? */
}
}
} else {
fip.checking = m;
- mds->send_message_mds(MMDSFindIno::factory::build(fip.tid, fip.ino), m);
+ mds->send_message_mds(MMDSFindIno::create(fip.tid, fip.ino), m);
}
}
}
dout(10) << "handle_find_ino " << *m << dendl;
- auto r = MMDSFindInoReply::factory::build(m->tid);
+ auto r = MMDSFindInoReply::create(m->tid);
CInode *in = get_inode(m->ino);
if (in) {
in->make_path(r->path);
for (set<mds_rank_t>::iterator p = mdr->more()->slaves.begin();
p != mdr->more()->slaves.end();
++p) {
- auto r = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt,
+ auto r = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt,
MMDSSlaveRequest::OP_FINISH);
if (mdr->killed && !mdr->committing) {
auto em = updates.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple());
if (em.second) {
- auto update = MClientSnap::factory::build(CEPH_SNAP_OP_SPLIT);
+ auto update = MClientSnap::create(CEPH_SNAP_OP_SPLIT);
update->head.split = in->ino();
update->split_inos = split_inos;
update->split_realms = split_realms;
in->encode_snap(snap_blob);
for (auto p : mds_set) {
- auto m = MMDSSnapUpdate::factory::build(in->ino(), stid, snap_op);
+ auto m = MMDSSnapUpdate::create(in->ino(), stid, snap_op);
m->snap_blob = snap_blob;
mds->send_message_mds(m, p);
}
for (auto &session : sessions) {
if (!session->is_open() && !session->is_stale())
continue;
- auto update = MClientSnap::factory::build(snap_op);
+ auto update = MClientSnap::create(snap_op);
update->head.split = global_snaprealm->inode->ino();
update->bl = global_snaprealm->get_snap_trace();
mds->send_message_client_counted(update, session);
void MDCache::_send_discover(discover_info_t& d)
{
- auto dis = MDiscover::factory::build(d.ino, d.frag, d.snap, d.want_path, d.want_base_dir, d.want_xlocked);
+ auto dis = MDiscover::create(d.ino, d.frag, d.snap, d.want_path, d.want_base_dir, d.want_xlocked);
dis->set_tid(d.tid);
mds->send_message_mds(dis, d.mds);
}
CInode *cur = 0;
- auto reply = MDiscoverReply::factory::build(*dis);
+ auto reply = MDiscoverReply::create(*dis);
snapid_t snapid = dis->get_snapid();
for (const auto &r : dir->dir_rep_by) {
s.insert(r);
}
- mds->send_message_mds(MDirUpdate::factory::build(mds->get_nodeid(), dir->dirfrag(), dir->dir_rep, s, path, bcast), *it);
+ mds->send_message_mds(MDirUpdate::create(mds->get_nodeid(), dir->dirfrag(), dir->dir_rep, s, path, bcast), *it);
}
return 0;
rejoin_gather.count(p.first)))
continue;
CDentry::linkage_t *dnl = dn->get_linkage();
- auto m = MDentryLink::factory::build(subtree->dirfrag(), dn->get_dir()->dirfrag(), dn->get_name(), dnl->is_primary());
+ auto m = MDentryLink::create(subtree->dirfrag(), dn->get_dir()->dirfrag(), dn->get_name(), dnl->is_primary());
if (dnl->is_primary()) {
dout(10) << " primary " << *dnl->get_inode() << dendl;
replicate_inode(dnl->get_inode(), p.first, m->bl,
rejoin_gather.count(*it)))
continue;
- auto unlink = MDentryUnlink::factory::build(dn->get_dir()->dirfrag(), dn->get_name());
+ auto unlink = MDentryUnlink::create(dn->get_dir()->dirfrag(), dn->get_name());
if (straydn) {
replicate_stray(straydn, *it, unlink->straybl);
unlink->snapbl = snapbl;
rejoin_gather.count(p.first)))
continue;
- auto notify = MMDSFragmentNotify::factory::build(basedirfrag, info.bits);
+ auto notify = MMDSFragmentNotify::create(basedirfrag, info.bits);
// freshly replicate new dirs to peers
for (list<CDir*>::iterator q = info.resultfrags.begin();
}
priv.reset();
- auto reply = MCommandReply::factory::build(r, outs);
+ auto reply = MCommandReply::create(r, outs);
reply->set_tid(m->get_tid());
reply->set_data(outbl);
m->get_connection()->send_message2(reply);
if (send) {
dout(15) << "updating export_targets, now " << new_map_targets.size() << " ranks are targets" << dendl;
- auto m = MMDSLoadTargets::factory::build(mds_gid_t(monc->get_global_id()), new_map_targets);
+ auto m = MMDSLoadTargets::create(mds_gid_t(monc->get_global_id()), new_map_targets);
monc->send_mon_message(m.detach());
}
}
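
Hunks that feed APIs still taking a raw Message* (monc->send_mon_message, mon->send_reply, messenger->send_to_mds) pass m.detach() rather than the ref itself. Presumably detach() releases the ref's ownership and returns the raw pointer, so the legacy callee inherits that reference instead of racing with the ref's destructor. A tiny self-contained illustration of that hand-off, with every name made up for the sketch:

    // Msg mimics a message that starts life owning one reference.
    struct Msg {
      int nref = 1;
      void put() { if (--nref == 0) delete this; }
    };

    struct MsgRef {
      Msg* p = nullptr;
      explicit MsgRef(Msg* q) : p(q) {}                      // adopts the initial reference
      ~MsgRef() { if (p) p->put(); }
      Msg* detach() { Msg* q = p; p = nullptr; return q; }   // give our reference away
    };

    // a legacy sink in the style of MonClient::send_mon_message(Message*):
    // it consumes one reference and put()s it when finished
    void send_mon_message_like(Msg* m) { m->put(); }

    int main() {
      MsgRef m(new Msg);                  // stands in for MMDSLoadTargets::create(...)
      send_mon_message_like(m.detach());  // hand our reference to the legacy API
      return 0;                           // m owns nothing now, so no double put()
    }
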
// send mdsmap first?
if (mds != whoami && peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
- auto _m = MMDSMap::factory::build(monc->get_fsid(), *mdsmap);
+ auto _m = MMDSMap::create(monc->get_fsid(), *mdsmap);
messenger->send_to_mds(_m.detach(), mdsmap->get_addrs(mds));
peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
}
bool client_must_resend = true; //!creq->can_forward();
// tell the client where it should go
- auto f = MClientRequestForward::factory::build(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend);
+ auto f = MClientRequestForward::create(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend);
messenger->send_message(f.detach(), m->get_source_inst());
}
set<Session*> clients;
sessionmap.get_client_session_set(clients);
for (const auto &session : clients) {
- auto m = MMDSMap::factory::build(monc->get_fsid(), *mdsmap);
+ auto m = MMDSMap::create(monc->get_fsid(), *mdsmap);
session->get_connection()->send_message2(std::move(m));
}
last_client_mdsmap_bcast = mdsmap->get_epoch();
dout(10) << "stray agree on " << reqid << " tid " << tid
<< ", sending ROLLBACK" << dendl;
assert(!server_ready);
- auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ROLLBACK, 0, tid);
+ auto req = MMDSTableRequest::create(table, TABLESERVER_OP_ROLLBACK, 0, tid);
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
}
break;
if (server_ready) {
// send message
- auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_PREPARE, reqid);
+ auto req = MMDSTableRequest::create(table, TABLESERVER_OP_PREPARE, reqid);
req->bl = mutation;
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
} else
if (server_ready) {
// send message
- auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_COMMIT, 0, tid);
+ auto req = MMDSTableRequest::create(table, TABLESERVER_OP_COMMIT, 0, tid);
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
} else
dout(10) << "tableserver is not ready yet, deferring request" << dendl;
p != pending_commit.end();
++p) {
dout(10) << "resending commit on " << p->first << dendl;
- auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_COMMIT, 0, p->first);
+ auto req = MMDSTableRequest::create(table, TABLESERVER_OP_COMMIT, 0, p->first);
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
}
}
p != pending_prepare.end();
++p) {
dout(10) << "resending prepare on " << p->first << dendl;
- auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_PREPARE, p->first);
+ auto req = MMDSTableRequest::create(table, TABLESERVER_OP_PREPARE, p->first);
req->bl = p->second.mutation;
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
}
_prepare(req->bl, req->reqid, from, out);
assert(version == tid);
- auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, req->reqid, tid);
+ auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_AGREE, req->reqid, tid);
reply->bl = std::move(out);
if (_notify_prep(tid)) {
else if (tid <= version) {
dout(0) << "got commit for tid " << tid << " <= " << version
<< ", already committed, sending ack." << dendl;
- auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ACK, req->reqid, tid);
+ auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_ACK, req->reqid, tid);
mds->send_message(reply, req->get_connection());
}
else {
_commit(tid, req);
_note_commit(tid);
- auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_ACK, req->reqid, req->get_tid());
+ auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_ACK, req->reqid, req->get_tid());
mds->send_message_mds(reply, mds_rank_t(req->get_source().num()));
}
next_reqids[who] = p.second.reqid + 1;
version_t tid = p.second.tid;
- auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, p.second.reqid, tid);
+ auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_AGREE, p.second.reqid, tid);
_get_reply_buffer(tid, &reply->bl);
mds->send_message_mds(reply, who);
}
for (auto p : active_clients) {
- auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_SERVER_READY, next_reqids[p]);
+ auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_SERVER_READY, next_reqids[p]);
mds->send_message_mds(reply, p);
}
recovered = true;
if (p->second.reqid >= next_reqid)
next_reqid = p->second.reqid + 1;
- auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_AGREE, p->second.reqid, p->second.tid);
+ auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_AGREE, p->second.reqid, p->second.tid);
_get_reply_buffer(p->second.tid, &reply->bl);
mds->send_message_mds(reply, who);
}
- auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_SERVER_READY, next_reqid);
+ auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_SERVER_READY, next_reqid);
mds->send_message_mds(reply, who);
}
if (notify_peer &&
(!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
- mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirCancel::create(dir->dirfrag(), it->second.tid), it->second.peer);
break;
case EXPORT_FREEZING:
if (notify_peer &&
(!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
- mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirCancel::create(dir->dirfrag(), it->second.tid), it->second.peer);
break;
// NOTE: state order reversal, warning comes after prepping
if (notify_peer &&
(!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer))) // tell them.
- mds->send_message_mds(MExportDirCancel::factory::build(dir->dirfrag(), it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirCancel::create(dir->dirfrag(), it->second.tid), it->second.peer);
break;
case EXPORT_EXPORTING:
// send ExportDirDiscover (ask target)
filepath path;
dir->inode->make_path(path);
- auto discover = MExportDirDiscover::factory::build(dir->dirfrag(), path, mds->get_nodeid(), it->second.tid);
+ auto discover = MExportDirDiscover::create(dir->dirfrag(), path, mds->get_nodeid(), it->second.tid);
mds->send_message_mds(discover, dest);
assert(g_conf()->mds_kill_export_at != 2);
cache->get_subtree_bounds(dir, bounds);
// generate prep message, log entry.
- auto prep = MExportDirPrep::factory::build(dir->dirfrag(), it->second.tid);
+ auto prep = MExportDirPrep::create(dir->dirfrag(), it->second.tid);
// include list of bystanders
for (const auto &p : dir->get_replicas()) {
it->second.warning_ack_waiting.insert(p.first);
it->second.notify_ack_waiting.insert(p.first); // we'll eventually get a notifyack, too!
- auto notify = MExportDirNotify::factory::build(dir->dirfrag(), it->second.tid, true,
+ auto notify = MExportDirNotify::create(dir->dirfrag(), it->second.tid, true,
mds_authority_t(mds->get_nodeid(),CDIR_AUTH_UNKNOWN),
mds_authority_t(mds->get_nodeid(),it->second.peer));
for (auto &cdir : bounds) {
mds->balancer->subtract_export(dir);
// fill export message with cache data
- auto req = MExportDir::factory::build(dir->dirfrag(), it->second.tid);
+ auto req = MExportDir::create(dir->dirfrag(), it->second.tid);
map<client_t,entity_inst_t> exported_client_map;
map<client_t,client_metadata_t> exported_client_metadata_map;
uint64_t num_exported_inodes = encode_export_dir(req->export_data,
const Capability *cap = &p.second;
dout(7) << "finish_export_inode_caps telling client." << p.first
<< " exported caps on " << *in << dendl;
- auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, in->ino(), 0,
+ auto m = MClientCaps::create(CEPH_CAP_OP_EXPORT, in->ino(), 0,
cap->get_cap_id(), cap->get_mseq(), mds->get_osd_epoch_barrier());
map<client_t,Capability::Import>::iterator q = peer_imported.find(p.first);
for (set<mds_rank_t>::iterator p = stat.notify_ack_waiting.begin();
p != stat.notify_ack_waiting.end();
++p) {
- auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true,
+ auto notify = MExportDirNotify::create(dir->dirfrag(), stat.tid, true,
pair<int,int>(mds->get_nodeid(), stat.peer),
pair<int,int>(mds->get_nodeid(), CDIR_AUTH_UNKNOWN));
for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
for (set<mds_rank_t>::iterator p = stat.notify_ack_waiting.begin();
p != stat.notify_ack_waiting.end();
++p) {
- auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true,
+ auto notify = MExportDirNotify::create(dir->dirfrag(), stat.tid, true,
pair<int,int>(mds->get_nodeid(), stat.peer),
pair<int,int>(stat.peer, CDIR_AUTH_UNKNOWN));
// notify peer to send cap import messages to clients
if (!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(stat.peer)) {
- mds->send_message_mds(MExportDirFinish::factory::build(dir->dirfrag(), false, stat.tid), stat.peer);
+ mds->send_message_mds(MExportDirFinish::create(dir->dirfrag(), false, stat.tid), stat.peer);
} else {
dout(7) << "not sending MExportDirFinish, dest has failed" << dendl;
}
// send finish/commit to new auth
if (!mds->is_cluster_degraded() ||
mds->mdsmap->is_clientreplay_or_active_or_stopping(it->second.peer)) {
- mds->send_message_mds(MExportDirFinish::factory::build(dir->dirfrag(), true, it->second.tid), it->second.peer);
+ mds->send_message_mds(MExportDirFinish::create(dir->dirfrag(), true, it->second.tid), it->second.peer);
} else {
dout(7) << "not sending MExportDirFinish last, dest has failed" << dendl;
}
if (!mds->is_active()) {
dout(7) << " not active, send NACK " << dendl;
- mds->send_message_mds(MExportDirDiscoverAck::factory::build(df, m->get_tid(), false), from);
+ mds->send_message_mds(MExportDirDiscoverAck::create(df, m->get_tid(), false), from);
return;
}
// reply
dout(7) << " sending export_discover_ack on " << *in << dendl;
- mds->send_message_mds(MExportDirDiscoverAck::factory::build(df, m->get_tid()), p_state->peer);
+ mds->send_message_mds(MExportDirDiscoverAck::create(df, m->get_tid()), p_state->peer);
assert (g_conf()->mds_kill_import_at != 2);
}
// ok!
dout(7) << " sending export_prep_ack on " << *dir << dendl;
- mds->send_message(MExportDirPrepAck::factory::build(dir->dirfrag(), success, m->get_tid()), m->get_connection());
+ mds->send_message(MExportDirPrepAck::create(dir->dirfrag(), success, m->get_tid()), m->get_connection());
assert(g_conf()->mds_kill_import_at != 4);
}
for (set<mds_rank_t>::iterator p = stat.bystanders.begin();
p != stat.bystanders.end();
++p) {
- auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, false,
+ auto notify = MExportDirNotify::create(dir->dirfrag(), stat.tid, false,
pair<int,int>(stat.peer, mds->get_nodeid()),
pair<int,int>(mds->get_nodeid(), CDIR_AUTH_UNKNOWN));
for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
stat.bystanders.erase(p++);
continue;
}
- auto notify = MExportDirNotify::factory::build(dir->dirfrag(), stat.tid, true,
+ auto notify = MExportDirNotify::create(dir->dirfrag(), stat.tid, true,
mds_authority_t(stat.peer, mds->get_nodeid()),
mds_authority_t(stat.peer, CDIR_AUTH_UNKNOWN));
for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
// test surviving observer of a failed migration that did not complete
//assert(dir->replica_map.size() < 2 || mds->get_nodeid() != 0);
- auto ack = MExportDirAck::factory::build(dir->dirfrag(), it->second.tid);
+ auto ack = MExportDirAck::create(dir->dirfrag(), it->second.tid);
encode(imported_caps, ack->imported_caps);
mds->send_message_mds(ack, from);
// send ack
if (m->wants_ack()) {
- mds->send_message_mds(MExportDirNotifyAck::factory::build(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from);
+ mds->send_message_mds(MExportDirNotifyAck::create(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from);
} else {
// aborted. no ack.
dout(7) << "handle_export_notify no ack requested" << dendl;
assert(!in->is_ambiguous_auth());
assert(!in->state_test(CInode::STATE_EXPORTINGCAPS));
- auto ex = MExportCaps::factory::build();
+ auto ex = MExportCaps::create();
ex->ino = in->ino();
encode_export_inode_caps(in, false, ex->cap_bl, ex->client_map, ex->client_metadata_map);
dout(7) << __func__ << " telling client." << it.first
<< " exported caps on " << *in << dendl;
- auto m = MClientCaps::factory::build(CEPH_CAP_OP_EXPORT, in->ino(), 0,
+ auto m = MClientCaps::create(CEPH_CAP_OP_EXPORT, in->ino(), 0,
cap->get_cap_id(), cap->get_mseq(),
mds->get_osd_epoch_barrier());
m->set_cap_peer(it.second.cap_id, it.second.issue_seq, it.second.mseq, from, 0);
mds->locker->eval(in, CEPH_CAP_LOCKS, true);
if (!imported_caps.empty()) {
- auto ack = MExportCapsAck::factory::build(in->ino());
+ auto ack = MExportCapsAck::create(in->ino());
map<client_t,uint64_t> peer_caps_ids;
for (auto &p : imported_caps )
peer_caps_ids[p.first] = it->second.at(p.first).cap_id;
{
auto send_reject_message = [this, session](std::string_view err_str) {
- auto m = MClientSession::factory::build(CEPH_SESSION_REJECT);
+ auto m = MClientSession::create(CEPH_SESSION_REJECT);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
m->metadata["error_string"] = err_str;
mds->send_message_client(m, session);
mds->locker->resume_stale_caps(session);
mds->sessionmap.touch_session(session);
}
- m->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_RENEWCAPS, m->get_seq()));
+ m->get_connection()->send_message2(MClientSession::create(CEPH_SESSION_RENEWCAPS, m->get_seq()));
} else {
dout(10) << "ignoring renewcaps on non open|stale session (" << session->get_state_name() << ")" << dendl;
}
!session->get_connection()->has_feature(CEPH_FEATURE_EXPORT_PEER))
continue;
version_t seq = session->wait_for_flush(gather.new_sub());
- mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FLUSHMSG, seq), session);
+ mds->send_message_client(MClientSession::create(CEPH_SESSION_FLUSHMSG, seq), session);
}
}
mds->sessionmap.set_state(session, Session::STATE_OPEN);
mds->sessionmap.touch_session(session);
assert(session->get_connection());
- auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN);
+ auto reply = MClientSession::create(CEPH_SESSION_OPEN);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
reply->supported_features = supported_features;
session->get_connection()->send_message2(reply);
if (mdcache->is_readonly())
- session->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_FORCE_RO));
+ session->get_connection()->send_message2(MClientSession::create(CEPH_SESSION_FORCE_RO));
} else if (session->is_closing() ||
session->is_killing()) {
// kill any lingering capabilities, leases, requests
}
// reset session
- mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_CLOSE), session);
+ mds->send_message_client(MClientSession::create(CEPH_SESSION_CLOSE), session);
mds->sessionmap.set_state(session, Session::STATE_CLOSED);
session->clear();
mds->sessionmap.remove_session(session);
mds->sessionmap.set_state(session, Session::STATE_OPEN);
mds->sessionmap.touch_session(session);
- auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN);
+ auto reply = MClientSession::create(CEPH_SESSION_OPEN);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
reply->supported_features = supported_features;
mds->send_message_client(reply, session);
if (mdcache->is_readonly())
- mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FORCE_RO), session);
+ mds->send_message_client(MClientSession::create(CEPH_SESSION_FORCE_RO), session);
}
} else {
dout(10) << "force_open_sessions skipping already-open " << session->info.inst << dendl;
mds->sessionmap.set_state(session, Session::STATE_STALE);
mds->locker->revoke_stale_caps(session);
mds->locker->remove_stale_leases(session);
- mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_STALE, session->get_push_seq()), session);
+ mds->send_message_client(MClientSession::create(CEPH_SESSION_STALE, session->get_push_seq()), session);
finish_flush_session(session, session->get_push_seq());
}
}
if (deny) {
- m->get_connection()->send_message2(MClientSession::factory::build(CEPH_SESSION_CLOSE));
+ m->get_connection()->send_message2(MClientSession::create(CEPH_SESSION_CLOSE));
if (session->is_open())
kill_session(session, nullptr);
return;
}
// notify client of success with an OPEN
- auto reply = MClientSession::factory::build(CEPH_SESSION_OPEN);
+ auto reply = MClientSession::create(CEPH_SESSION_OPEN);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
reply->supported_features = supported_features;
m->get_connection()->send_message2(reply);
uint64_t newlim = std::max(std::min<uint64_t>((session->caps.size() * ratio), max_caps_per_client), min_caps_per_client);
if (session->caps.size() > newlim) {
- auto m = MClientSession::factory::build(CEPH_SESSION_RECALL_STATE);
+ auto m = MClientSession::create(CEPH_SESSION_RECALL_STATE);
m->head.max_caps = newlim;
mds->send_message_client(m, session);
session->notify_recall_sent(newlim);
if (!session->info.inst.name.is_client() ||
!(session->is_open() || session->is_stale()))
continue;
- mds->send_message_client(MClientSession::factory::build(CEPH_SESSION_FORCE_RO), session);
+ mds->send_message_client(MClientSession::create(CEPH_SESSION_FORCE_RO), session);
}
}
void Server::respond_to_request(MDRequestRef& mdr, int r)
{
if (mdr->client_request) {
- reply_client_request(mdr, MClientReply::factory::build(*mdr->client_request, r));
+ reply_client_request(mdr, MClientReply::create(*mdr->client_request, r));
} else if (mdr->internal_op > -1) {
dout(10) << "respond_to_request on internal request " << mdr << dendl;
if (!mdr->internal_op_finish)
}
- auto reply = MClientReply::factory::build(*req, 0);
+ auto reply = MClientReply::create(*req, 0);
reply->set_unsafe();
// mark xlocks "done", indicating that we are exposing uncommitted changes.
req->get_op() != CEPH_MDS_OP_OPEN &&
req->get_op() != CEPH_MDS_OP_CREATE)) {
dout(5) << "already completed " << req->get_reqid() << dendl;
- auto reply = MClientReply::factory::build(*req, 0);
+ auto reply = MClientReply::create(*req, 0);
if (created != inodeno_t()) {
bufferlist extra;
encode(created, extra);
// the purpose of rename notify is enforcing causal message ordering. making sure
// bystanders have received all messages from rename srcdn's auth MDS.
if (m->get_op() == MMDSSlaveRequest::OP_RENAMENOTIFY) {
- auto reply = MMDSSlaveRequest::factory::build(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK);
+ auto reply = MMDSSlaveRequest::create(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK);
mds->send_message(reply, m->get_connection());
return;
}
return;
// ack
- auto r = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, replycode);
+ auto r = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, replycode);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
if (replycode == MMDSSlaveRequest::OP_XLOCKACK)
}
// ack!
- auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK);
+ auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK);
// return list of my auth_pins (if any)
for (set<MDSCacheObject*>::iterator p = mdr->auth_pins.begin();
{
dout(10) << __func__ << " " << *in << dendl;
- auto req = MClientRequest::factory::build(CEPH_MDS_OP_SETXATTR);
+ auto req = MClientRequest::create(CEPH_MDS_OP_SETXATTR);
req->set_filepath(filepath(in->ino()));
req->set_string2("ceph.quota");
// empty vxattr value
op = MMDSSlaveRequest::OP_LINKPREP;
else
op = MMDSSlaveRequest::OP_UNLINKPREP;
- auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, op);
+ auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, op);
targeti->set_object_info(req->get_object_info());
req->op_stamp = mdr->get_op_stamp();
if (auto& desti_srnode = mdr->more()->desti_srnode)
// ack
if (!mdr->aborted) {
- auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK);
+ auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK);
mds->send_message_mds(reply, mdr->slave_to_mds);
} else {
dout(10) << " abort flag set, finishing" << dendl;
assert(g_conf()->mds_kill_link_at != 8);
- auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED);
+ auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED);
mds->send_message_mds(req, mdr->slave_to_mds);
mdcache->request_finish(mdr);
}
}
dout(10) << "_rmdir_prepare_witness mds." << who << dendl;
- auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP);
+ auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP);
req->srcdnpath = filepath(trace.front()->get_dir()->ino());
for (auto dn : trace)
req->srcdnpath.push_dentry(dn->get_name());
mdr->straydn = 0;
if (!mdr->aborted) {
- auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK);
+ auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK);
if (!mdr->more()->slave_update_journaled)
reply->mark_not_journaled();
mds->send_message_mds(reply, mdr->slave_to_mds);
}
dout(10) << "_rename_prepare_witness mds." << who << dendl;
- auto req = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP);
+ auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP);
req->srcdnpath = filepath(srctrace.front()->get_dir()->ino());
for (auto dn : srctrace)
if (mdr->slave_request->is_interrupted()) {
dout(10) << " slave request interrupted, sending noop reply" << dendl;
- auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+ auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
reply->mark_interrupted();
mds->send_message_mds(reply, mdr->slave_to_mds);
mdr->slave_request = 0;
(mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(*p)))
continue;
- auto notify = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY);
+ auto notify = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY);
mds->send_message_mds(notify, *p);
mdr->more()->waiting_on_slave.insert(*p);
}
if (reply_witness) {
assert(!srcdnrep.empty());
- auto reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+ auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
reply->witnesses.swap(srcdnrep);
mds->send_message_mds(reply, mdr->slave_to_mds);
mdr->slave_request = 0;
// prepare ack
MMDSSlaveRequest::ref reply;
if (!mdr->aborted) {
- reply = MMDSSlaveRequest::factory::build(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+ reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
if (!mdr->more()->slave_update_journaled)
reply->mark_not_journaled();
}
{
dout(10) << __func__ << " " << *m << dendl;
handle_query_result(m);
- auto ack = MMDSTableRequest::factory::build(table, TABLESERVER_OP_NOTIFY_ACK, 0, m->get_tid());
+ auto ack = MMDSTableRequest::create(table, TABLESERVER_OP_NOTIFY_ACK, 0, m->get_tid());
mds->send_message(ack, m->get_connection());
}
return;
mds_rank_t ts = mds->mdsmap->get_tableserver();
- auto req = MMDSTableRequest::factory::build(table, TABLESERVER_OP_QUERY, ++last_reqid, 0);
+ auto req = MMDSTableRequest::create(table, TABLESERVER_OP_QUERY, ++last_reqid, 0);
using ceph::encode;
char op = 'F';
encode(op, req->bl);
assert(version == tid);
for (auto &p : active_clients) {
- auto m = MMDSTableRequest::factory::build(table, TABLESERVER_OP_NOTIFY_PREP, 0, version);
+ auto m = MMDSTableRequest::create(table, TABLESERVER_OP_NOTIFY_PREP, 0, version);
m->bl = bl;
mds->send_message_mds(m, p);
}
auto p = req->bl.cbegin();
decode(op, p);
- auto reply = MMDSTableRequest::factory::build(table, TABLESERVER_OP_QUERY_REPLY, req->reqid, version);
+ auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_QUERY_REPLY, req->reqid, version);
switch (op) {
case 'F': // full
if (!all_purge.empty()) {
dout(10) << "requesting removal of " << all_purge << dendl;
- auto m = MRemoveSnaps::factory::build(all_purge);
+ auto m = MRemoveSnaps::create(all_purge);
mon_client->send_mon_message(m.detach());
}
filepath dst;
rdn->make_path(dst);
- auto req = MClientRequest::factory::build(CEPH_MDS_OP_RENAME);
+ auto req = MClientRequest::create(CEPH_MDS_OP_RENAME);
req->set_filepath(dst);
req->set_filepath2(src);
req->set_tid(mds->issue_tid());
dst.push_dentry(src[0]);
dst.push_dentry(src[1]);
- auto req = MClientRequest::factory::build(CEPH_MDS_OP_RENAME);
+ auto req = MClientRequest::create(CEPH_MDS_OP_RENAME);
req->set_filepath(dst);
req->set_filepath2(src);
req->set_tid(mds->issue_tid());
MDSMap null_map;
null_map.epoch = fsmap.epoch;
null_map.compat = fsmap.compat;
- auto m = MMDSMap::factory::build(mon->monmap->fsid, null_map);
+ auto m = MMDSMap::create(mon->monmap->fsid, null_map);
mon->send_reply(op, m.detach());
return true;
} else {
assert(effective_epoch > 0);
_note_beacon(m);
{
- auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+ auto beacon = MMDSBeacon::create(mon->monmap->fsid,
m->get_global_id(), m->get_name(), effective_epoch,
state, seq, CEPH_FEATURES_SUPPORTED_DEFAULT);
mon->send_reply(op, beacon.detach());
last_beacon.erase(gid);
// Respond to MDS, so that it knows it can continue to shut down
- auto beacon = MMDSBeacon::factory::build(
+ auto beacon = MMDSBeacon::create(
mon->monmap->fsid, m->get_global_id(),
m->get_name(), pending.get_epoch(), state, seq,
CEPH_FEATURES_SUPPORTED_DEFAULT);
request_proposal(mon->osdmon());
// Respond to MDS, so that it knows it can continue to shut down
- auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+ auto beacon = MMDSBeacon::create(mon->monmap->fsid,
m->get_global_id(), m->get_name(), pending.get_epoch(), state, seq,
CEPH_FEATURES_SUPPORTED_DEFAULT);
mon->send_reply(op, beacon.detach());
MDSMap null_map;
null_map.epoch = fsmap.epoch;
null_map.compat = fsmap.compat;
- auto m = MMDSMap::factory::build(mon->monmap->fsid, null_map);
+ auto m = MMDSMap::create(mon->monmap->fsid, null_map);
mon->send_reply(op, m.detach());
} else {
- auto beacon = MMDSBeacon::factory::build(mon->monmap->fsid,
+ auto beacon = MMDSBeacon::create(mon->monmap->fsid,
m->get_global_id(), m->get_name(), fsmap.get_epoch(),
m->get_state(), m->get_seq(), CEPH_FEATURES_SUPPORTED_DEFAULT);
mon->send_reply(op, beacon.detach());
if (sub->next > mds_map->epoch) {
return;
}
- auto msg = MMDSMap::factory::build(mon->monmap->fsid, *mds_map);
+ auto msg = MMDSMap::create(mon->monmap->fsid, *mds_map);
sub->session->con->send_message(msg.detach());
if (sub->onetime) {
// -- with payload --
case MSG_PGSTATS:
- m = MPGStats::factory::build();
+ m = MPGStats::create();
break;
case MSG_PGSTATSACK:
- m = MPGStatsAck::factory::build();
+ m = MPGStatsAck::create();
break;
case CEPH_MSG_STATFS:
- m = MStatfs::factory::build();
+ m = MStatfs::create();
break;
case CEPH_MSG_STATFS_REPLY:
- m = MStatfsReply::factory::build();
+ m = MStatfsReply::create();
break;
case MSG_GETPOOLSTATS:
- m = MGetPoolStats::factory::build();
+ m = MGetPoolStats::create();
break;
case MSG_GETPOOLSTATSREPLY:
- m = MGetPoolStatsReply::factory::build();
+ m = MGetPoolStatsReply::create();
break;
case CEPH_MSG_POOLOP:
- m = MPoolOp::factory::build();
+ m = MPoolOp::create();
break;
case CEPH_MSG_POOLOP_REPLY:
- m = MPoolOpReply::factory::build();
+ m = MPoolOpReply::create();
break;
case MSG_MON_COMMAND:
- m = MMonCommand::factory::build();
+ m = MMonCommand::create();
break;
case MSG_MON_COMMAND_ACK:
- m = MMonCommandAck::factory::build();
+ m = MMonCommandAck::create();
break;
case MSG_MON_PAXOS:
- m = MMonPaxos::factory::build();
+ m = MMonPaxos::create();
break;
case MSG_CONFIG:
- m = MConfig::factory::build();
+ m = MConfig::create();
break;
case MSG_GET_CONFIG:
- m = MGetConfig::factory::build();
+ m = MGetConfig::create();
break;
case MSG_MON_PROBE:
- m = MMonProbe::factory::build();
+ m = MMonProbe::create();
break;
case MSG_MON_JOIN:
- m = MMonJoin::factory::build();
+ m = MMonJoin::create();
break;
case MSG_MON_ELECTION:
- m = MMonElection::factory::build();
+ m = MMonElection::create();
break;
case MSG_MON_SYNC:
- m = MMonSync::factory::build();
+ m = MMonSync::create();
break;
case MSG_MON_SCRUB:
- m = MMonScrub::factory::build();
+ m = MMonScrub::create();
break;
case MSG_LOG:
- m = MLog::factory::build();
+ m = MLog::create();
break;
case MSG_LOGACK:
- m = MLogAck::factory::build();
+ m = MLogAck::create();
break;
case CEPH_MSG_PING:
- m = MPing::factory::build();
+ m = MPing::create();
break;
case MSG_COMMAND:
- m = MCommand::factory::build();
+ m = MCommand::create();
break;
case MSG_COMMAND_REPLY:
- m = MCommandReply::factory::build();
+ m = MCommandReply::create();
break;
case MSG_OSD_BACKFILL_RESERVE:
- m = MBackfillReserve::factory::build();
+ m = MBackfillReserve::create();
break;
case MSG_OSD_RECOVERY_RESERVE:
- m = MRecoveryReserve::factory::build();
+ m = MRecoveryReserve::create();
break;
case MSG_OSD_FORCE_RECOVERY:
- m = MOSDForceRecovery::factory::build();
+ m = MOSDForceRecovery::create();
break;
case MSG_ROUTE:
- m = MRoute::factory::build();
+ m = MRoute::create();
break;
case MSG_FORWARD:
- m = MForward::factory::build();
+ m = MForward::create();
break;
case CEPH_MSG_MON_MAP:
- m = MMonMap::factory::build();
+ m = MMonMap::create();
break;
case CEPH_MSG_MON_GET_MAP:
- m = MMonGetMap::factory::build();
+ m = MMonGetMap::create();
break;
case CEPH_MSG_MON_GET_OSDMAP:
- m = MMonGetOSDMap::factory::build();
+ m = MMonGetOSDMap::create();
break;
case CEPH_MSG_MON_GET_VERSION:
- m = MMonGetVersion::factory::build();
+ m = MMonGetVersion::create();
break;
case CEPH_MSG_MON_GET_VERSION_REPLY:
- m = MMonGetVersionReply::factory::build();
+ m = MMonGetVersionReply::create();
break;
case CEPH_MSG_MON_METADATA:
- m = MMonMetadata::factory::build();
+ m = MMonMetadata::create();
break;
case MSG_OSD_BOOT:
- m = MOSDBoot::factory::build();
+ m = MOSDBoot::create();
break;
case MSG_OSD_ALIVE:
- m = MOSDAlive::factory::build();
+ m = MOSDAlive::create();
break;
case MSG_OSD_BEACON:
- m = MOSDBeacon::factory::build();
+ m = MOSDBeacon::create();
break;
case MSG_OSD_PGTEMP:
- m = MOSDPGTemp::factory::build();
+ m = MOSDPGTemp::create();
break;
case MSG_OSD_FAILURE:
- m = MOSDFailure::factory::build();
+ m = MOSDFailure::create();
break;
case MSG_OSD_MARK_ME_DOWN:
- m = MOSDMarkMeDown::factory::build();
+ m = MOSDMarkMeDown::create();
break;
case MSG_OSD_FULL:
- m = MOSDFull::factory::build();
+ m = MOSDFull::create();
break;
case MSG_OSD_PING:
- m = MOSDPing::factory::build();
+ m = MOSDPing::create();
break;
case CEPH_MSG_OSD_OP:
- m = MOSDOp::factory::build();
+ m = MOSDOp::create();
break;
case CEPH_MSG_OSD_OPREPLY:
- m = MOSDOpReply::factory::build();
+ m = MOSDOpReply::create();
break;
case MSG_OSD_REPOP:
- m = MOSDRepOp::factory::build();
+ m = MOSDRepOp::create();
break;
case MSG_OSD_REPOPREPLY:
- m = MOSDRepOpReply::factory::build();
+ m = MOSDRepOpReply::create();
break;
case MSG_OSD_PG_CREATED:
- m = MOSDPGCreated::factory::build();
+ m = MOSDPGCreated::create();
break;
case MSG_OSD_PG_UPDATE_LOG_MISSING:
- m = MOSDPGUpdateLogMissing::factory::build();
+ m = MOSDPGUpdateLogMissing::create();
break;
case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
- m = MOSDPGUpdateLogMissingReply::factory::build();
+ m = MOSDPGUpdateLogMissingReply::create();
break;
case CEPH_MSG_OSD_BACKOFF:
- m = MOSDBackoff::factory::build();
+ m = MOSDBackoff::create();
break;
case CEPH_MSG_OSD_MAP:
- m = MOSDMap::factory::build();
+ m = MOSDMap::create();
break;
case CEPH_MSG_WATCH_NOTIFY:
- m = MWatchNotify::factory::build();
+ m = MWatchNotify::create();
break;
case MSG_OSD_PG_NOTIFY:
- m = MOSDPGNotify::factory::build();
+ m = MOSDPGNotify::create();
break;
case MSG_OSD_PG_QUERY:
- m = MOSDPGQuery::factory::build();
+ m = MOSDPGQuery::create();
break;
case MSG_OSD_PG_LOG:
- m = MOSDPGLog::factory::build();
+ m = MOSDPGLog::create();
break;
case MSG_OSD_PG_REMOVE:
- m = MOSDPGRemove::factory::build();
+ m = MOSDPGRemove::create();
break;
case MSG_OSD_PG_INFO:
- m = MOSDPGInfo::factory::build();
+ m = MOSDPGInfo::create();
break;
case MSG_OSD_PG_CREATE:
- m = MOSDPGCreate::factory::build();
+ m = MOSDPGCreate::create();
break;
case MSG_OSD_PG_CREATE2:
- m = MOSDPGCreate2::factory::build();
+ m = MOSDPGCreate2::create();
break;
case MSG_OSD_PG_TRIM:
- m = MOSDPGTrim::factory::build();
+ m = MOSDPGTrim::create();
break;
case MSG_OSD_SCRUB:
- m = MOSDScrub::factory::build();
+ m = MOSDScrub::create();
break;
case MSG_OSD_SCRUB2:
- m = MOSDScrub2::factory::build();
+ m = MOSDScrub2::create();
break;
case MSG_OSD_SCRUB_RESERVE:
- m = MOSDScrubReserve::factory::build();
+ m = MOSDScrubReserve::create();
break;
case MSG_REMOVE_SNAPS:
- m = MRemoveSnaps::factory::build();
+ m = MRemoveSnaps::create();
break;
case MSG_OSD_REP_SCRUB:
- m = MOSDRepScrub::factory::build();
+ m = MOSDRepScrub::create();
break;
case MSG_OSD_REP_SCRUBMAP:
- m = MOSDRepScrubMap::factory::build();
+ m = MOSDRepScrubMap::create();
break;
case MSG_OSD_PG_SCAN:
- m = MOSDPGScan::factory::build();
+ m = MOSDPGScan::create();
break;
case MSG_OSD_PG_BACKFILL:
- m = MOSDPGBackfill::factory::build();
+ m = MOSDPGBackfill::create();
break;
case MSG_OSD_PG_BACKFILL_REMOVE:
- m = MOSDPGBackfillRemove::factory::build();
+ m = MOSDPGBackfillRemove::create();
break;
case MSG_OSD_PG_PUSH:
- m = MOSDPGPush::factory::build();
+ m = MOSDPGPush::create();
break;
case MSG_OSD_PG_PULL:
- m = MOSDPGPull::factory::build();
+ m = MOSDPGPull::create();
break;
case MSG_OSD_PG_PUSH_REPLY:
- m = MOSDPGPushReply::factory::build();
+ m = MOSDPGPushReply::create();
break;
case MSG_OSD_PG_RECOVERY_DELETE:
- m = MOSDPGRecoveryDelete::factory::build();
+ m = MOSDPGRecoveryDelete::create();
break;
case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
- m = MOSDPGRecoveryDeleteReply::factory::build();
+ m = MOSDPGRecoveryDeleteReply::create();
break;
case MSG_OSD_EC_WRITE:
- m = MOSDECSubOpWrite::factory::build();
+ m = MOSDECSubOpWrite::create();
break;
case MSG_OSD_EC_WRITE_REPLY:
- m = MOSDECSubOpWriteReply::factory::build();
+ m = MOSDECSubOpWriteReply::create();
break;
case MSG_OSD_EC_READ:
- m = MOSDECSubOpRead::factory::build();
+ m = MOSDECSubOpRead::create();
break;
case MSG_OSD_EC_READ_REPLY:
- m = MOSDECSubOpReadReply::factory::build();
+ m = MOSDECSubOpReadReply::create();
break;
// auth
case CEPH_MSG_AUTH:
- m = MAuth::factory::build();
+ m = MAuth::create();
break;
case CEPH_MSG_AUTH_REPLY:
- m = MAuthReply::factory::build();
+ m = MAuthReply::create();
break;
case MSG_MON_GLOBAL_ID:
- m = MMonGlobalID::factory::build();
+ m = MMonGlobalID::create();
break;
// clients
case CEPH_MSG_MON_SUBSCRIBE:
- m = MMonSubscribe::factory::build();
+ m = MMonSubscribe::create();
break;
case CEPH_MSG_MON_SUBSCRIBE_ACK:
- m = MMonSubscribeAck::factory::build();
+ m = MMonSubscribeAck::create();
break;
case CEPH_MSG_CLIENT_SESSION:
- m = MClientSession::factory::build();
+ m = MClientSession::create();
break;
case CEPH_MSG_CLIENT_RECONNECT:
- m = MClientReconnect::factory::build();
+ m = MClientReconnect::create();
break;
case CEPH_MSG_CLIENT_REQUEST:
- m = MClientRequest::factory::build();
+ m = MClientRequest::create();
break;
case CEPH_MSG_CLIENT_REQUEST_FORWARD:
- m = MClientRequestForward::factory::build();
+ m = MClientRequestForward::create();
break;
case CEPH_MSG_CLIENT_REPLY:
- m = MClientReply::factory::build();
+ m = MClientReply::create();
break;
case CEPH_MSG_CLIENT_CAPS:
- m = MClientCaps::factory::build();
+ m = MClientCaps::create();
break;
case CEPH_MSG_CLIENT_CAPRELEASE:
- m = MClientCapRelease::factory::build();
+ m = MClientCapRelease::create();
break;
case CEPH_MSG_CLIENT_LEASE:
- m = MClientLease::factory::build();
+ m = MClientLease::create();
break;
case CEPH_MSG_CLIENT_SNAP:
- m = MClientSnap::factory::build();
+ m = MClientSnap::create();
break;
case CEPH_MSG_CLIENT_QUOTA:
- m = MClientQuota::factory::build();
+ m = MClientQuota::create();
break;
// mds
case MSG_MDS_SLAVE_REQUEST:
- m = MMDSSlaveRequest::factory::build();
+ m = MMDSSlaveRequest::create();
break;
case CEPH_MSG_MDS_MAP:
- m = MMDSMap::factory::build();
+ m = MMDSMap::create();
break;
case CEPH_MSG_FS_MAP:
- m = MFSMap::factory::build();
+ m = MFSMap::create();
break;
case CEPH_MSG_FS_MAP_USER:
- m = MFSMapUser::factory::build();
+ m = MFSMapUser::create();
break;
case MSG_MDS_BEACON:
- m = MMDSBeacon::factory::build();
+ m = MMDSBeacon::create();
break;
case MSG_MDS_OFFLOAD_TARGETS:
- m = MMDSLoadTargets::factory::build();
+ m = MMDSLoadTargets::create();
break;
case MSG_MDS_RESOLVE:
- m = MMDSResolve::factory::build();
+ m = MMDSResolve::create();
break;
case MSG_MDS_RESOLVEACK:
- m = MMDSResolveAck::factory::build();
+ m = MMDSResolveAck::create();
break;
case MSG_MDS_CACHEREJOIN:
- m = MMDSCacheRejoin::factory::build();
+ m = MMDSCacheRejoin::create();
break;
case MSG_MDS_DIRUPDATE:
- m = MDirUpdate::factory::build();
+ m = MDirUpdate::create();
break;
case MSG_MDS_DISCOVER:
- m = MDiscover::factory::build();
+ m = MDiscover::create();
break;
case MSG_MDS_DISCOVERREPLY:
- m = MDiscoverReply::factory::build();
+ m = MDiscoverReply::create();
break;
case MSG_MDS_FINDINO:
- m = MMDSFindIno::factory::build();
+ m = MMDSFindIno::create();
break;
case MSG_MDS_FINDINOREPLY:
- m = MMDSFindInoReply::factory::build();
+ m = MMDSFindInoReply::create();
break;
case MSG_MDS_OPENINO:
- m = MMDSOpenIno::factory::build();
+ m = MMDSOpenIno::create();
break;
case MSG_MDS_OPENINOREPLY:
- m = MMDSOpenInoReply::factory::build();
+ m = MMDSOpenInoReply::create();
break;
case MSG_MDS_SNAPUPDATE:
- m = MMDSSnapUpdate::factory::build();
+ m = MMDSSnapUpdate::create();
break;
case MSG_MDS_FRAGMENTNOTIFY:
- m = MMDSFragmentNotify::factory::build();
+ m = MMDSFragmentNotify::create();
break;
case MSG_MDS_EXPORTDIRDISCOVER:
- m = MExportDirDiscover::factory::build();
+ m = MExportDirDiscover::create();
break;
case MSG_MDS_EXPORTDIRDISCOVERACK:
- m = MExportDirDiscoverAck::factory::build();
+ m = MExportDirDiscoverAck::create();
break;
case MSG_MDS_EXPORTDIRCANCEL:
- m = MExportDirCancel::factory::build();
+ m = MExportDirCancel::create();
break;
case MSG_MDS_EXPORTDIR:
- m = MExportDir::factory::build();
+ m = MExportDir::create();
break;
case MSG_MDS_EXPORTDIRACK:
- m = MExportDirAck::factory::build();
+ m = MExportDirAck::create();
break;
case MSG_MDS_EXPORTDIRFINISH:
- m = MExportDirFinish::factory::build();
+ m = MExportDirFinish::create();
break;
case MSG_MDS_EXPORTDIRNOTIFY:
- m = MExportDirNotify::factory::build();
+ m = MExportDirNotify::create();
break;
case MSG_MDS_EXPORTDIRNOTIFYACK:
- m = MExportDirNotifyAck::factory::build();
+ m = MExportDirNotifyAck::create();
break;
case MSG_MDS_EXPORTDIRPREP:
- m = MExportDirPrep::factory::build();
+ m = MExportDirPrep::create();
break;
case MSG_MDS_EXPORTDIRPREPACK:
- m = MExportDirPrepAck::factory::build();
+ m = MExportDirPrepAck::create();
break;
case MSG_MDS_EXPORTCAPS:
- m = MExportCaps::factory::build();
+ m = MExportCaps::create();
break;
case MSG_MDS_EXPORTCAPSACK:
- m = MExportCapsAck::factory::build();
+ m = MExportCapsAck::create();
break;
case MSG_MDS_GATHERCAPS:
- m = MGatherCaps::factory::build();
+ m = MGatherCaps::create();
break;
case MSG_MDS_DENTRYUNLINK:
- m = MDentryUnlink::factory::build();
+ m = MDentryUnlink::create();
break;
case MSG_MDS_DENTRYLINK:
- m = MDentryLink::factory::build();
+ m = MDentryLink::create();
break;
case MSG_MDS_HEARTBEAT:
- m = MHeartbeat::factory::build();
+ m = MHeartbeat::create();
break;
case MSG_MDS_CACHEEXPIRE:
- m = MCacheExpire::factory::build();
+ m = MCacheExpire::create();
break;
case MSG_MDS_TABLE_REQUEST:
- m = MMDSTableRequest::factory::build();
+ m = MMDSTableRequest::create();
break;
/* case MSG_MDS_INODEUPDATE:
- m = MInodeUpdate::factory::build();
+ m = MInodeUpdate::create();
break;
*/
case MSG_MDS_INODEFILECAPS:
- m = MInodeFileCaps::factory::build();
+ m = MInodeFileCaps::create();
break;
case MSG_MDS_LOCK:
- m = MLock::factory::build();
+ m = MLock::create();
break;
case MSG_MGR_BEACON:
- m = MMgrBeacon::factory::build();
+ m = MMgrBeacon::create();
break;
case MSG_MON_MGR_REPORT:
- m = MMonMgrReport::factory::build();
+ m = MMonMgrReport::create();
break;
case MSG_SERVICE_MAP:
- m = MServiceMap::factory::build();
+ m = MServiceMap::create();
break;
case MSG_MGR_MAP:
- m = MMgrMap::factory::build();
+ m = MMgrMap::create();
break;
case MSG_MGR_DIGEST:
- m = MMgrDigest::factory::build();
+ m = MMgrDigest::create();
break;
case MSG_MGR_OPEN:
- m = MMgrOpen::factory::build();
+ m = MMgrOpen::create();
break;
case MSG_MGR_CLOSE:
- m = MMgrClose::factory::build();
+ m = MMgrClose::create();
break;
case MSG_MGR_REPORT:
- m = MMgrReport::factory::build();
+ m = MMgrReport::create();
break;
case MSG_MGR_CONFIGURE:
- m = MMgrConfigure::factory::build();
+ m = MMgrConfigure::create();
break;
case MSG_TIMECHECK:
- m = MTimeCheck::factory::build();
+ m = MTimeCheck::create();
break;
case MSG_TIMECHECK2:
- m = MTimeCheck2::factory::build();
+ m = MTimeCheck2::create();
break;
case MSG_MON_HEALTH:
- m = MMonHealth::factory::build();
+ m = MMonHealth::create();
break;
case MSG_MON_HEALTH_CHECKS:
- m = MMonHealthChecks::factory::build();
+ m = MMonHealthChecks::create();
break;
#if defined(HAVE_XIO)
case MSG_DATA_PING:
- m = MDataPing::factory::build();
+ m = MDataPing::create();
break;
#endif
// -- simple messages without payload --
case CEPH_MSG_SHUTDOWN:
- m = MGenericMessage::factory::build(type);
+ m = MGenericMessage::create(type);
break;
default:
public:
using factory = MessageFactory<T>;
+ template<typename... Args>
+ static auto create(Args&&... args) {
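+ // perfect-forward any constructor arguments to this type's MessageFactory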
+ return MessageFactory<T>::build(std::forward<Args>(args)...);
+ }
static auto msgref_cast(typename Message::ref const& m) {
return boost::static_pointer_cast<typename T::ref::element_type, typename std::remove_reference<decltype(m)>::type::element_type>(m);
}
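
For readers following the patch: every switch-case edit above and the dencoder change below lean on the create() helper introduced in this hunk. A minimal standalone sketch of that forwarding pattern, using simplified stand-in types (plain std::shared_ptr instead of the real Ceph MessageFactory/ref machinery), not the actual implementation:

#include <memory>
#include <utility>

// Illustrative stand-ins only; they reproduce the forwarding shape, nothing more.
template <typename T>
struct MessageFactory {
  template <typename... Args>
  static std::shared_ptr<T> build(Args&&... args) {
    return std::make_shared<T>(std::forward<Args>(args)...);
  }
};

struct MPing {
  int seq;
  explicit MPing(int s = 0) : seq(s) {}

  // Same shape as the helper added above: perfect-forward to the per-type factory.
  template <typename... Args>
  static auto create(Args&&... args) {
    return MessageFactory<MPing>::build(std::forward<Args>(args)...);
  }
};

int main() {
  auto m = MPing::create(42);   // call sites shorten from MPing::factory::build(42)
  return m->seq == 42 ? 0 : 1;
}
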
list<typename T::ref> m_list;
public:
- MessageDencoderImpl() : m_object(T::factory::build()) {}
+ MessageDencoderImpl() : m_object(T::create()) {}
~MessageDencoderImpl() override {}
string decode(bufferlist bl, uint64_t seek) override {