-Subproject commit 45eaee4bb8756c0bcc8120b4b6efb43766b0116e
+Subproject commit 70228ed56466b4be8a9abff9024f69820f68f6d0
#include "MDSRank.h"
#include "MDSMap.h"
#include "messages/MInodeFileCaps.h"
-#include "messages/MMDSSlaveRequest.h"
+#include "messages/MMDSPeerRequest.h"
#include "Migrator.h"
#include "msg/Messenger.h"
#include "osdc/Objecter.h"
}
}
} else {
- // if the lock is the latest locked one, it's possible that slave mds got the lock
+ // if the lock is the latest locked one, it's possible that peer mds got the lock
// while there are recovering mds.
if (!mdr->is_xlocked(lock) || mdr->is_last_locked(lock))
wait = true;
// leader. wrlock versionlock so we can pipeline dentry updates to journal.
lov.add_wrlock(&dn->versionlock, i + 1);
} else {
- // slave. exclusively lock the dentry version (i.e. block other journal updates).
+ // peer. exclusively lock the dentry version (i.e. block other journal updates).
// this makes rollback safe.
lov.add_xlock(&dn->versionlock, i + 1);
}
// leader. wrlock versionlock so we can pipeline inode updates to journal.
lov.add_wrlock(&in->versionlock, i + 1);
} else {
- // slave. exclusively lock the inode version (i.e. block other journal updates).
+ // peer. exclusively lock the inode version (i.e. block other journal updates).
// this makes rollback safe.
lov.add_xlock(&in->versionlock, i + 1);
}
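// (why xlock rather than wrlock on the peer: blocking other journal
// updates means the journaled peer prepare is the only in-flight change
// to this version, so a rollback later replays against one known state.)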
mustpin.insert(object);
} else if (!object->is_auth() &&
!lock->can_wrlock(_client) && // we might have to request a scatter
- !mdr->is_slave()) { // if we are slave (remote_wrlock), the leader already authpinned
+ !mdr->is_peer()) { // if we are peer (remote_wrlock), the leader already authpinned
dout(15) << " will also auth_pin " << *object
<< " in case we need to request a scatter" << dendl;
mustpin.insert(object);
if (mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(p.first)) {
dout(10) << " mds." << p.first << " is not active" << dendl;
- if (mdr->more()->waiting_on_slave.empty())
+ if (mdr->more()->waiting_on_peer.empty())
mds->wait_for_active_peer(p.first, new C_MDS_RetryRequest(mdcache, mdr));
return false;
}
- auto req = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt,
- MMDSSlaveRequest::OP_AUTHPIN);
+ auto req = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt,
+ MMDSPeerRequest::OP_AUTHPIN);
for (auto& o : p.second) {
dout(10) << " req remote auth_pin of " << *o << dendl;
MDSCacheObjectInfo info;
mds->send_message_mds(req, p.first);
// put in waiting list
- auto ret = mdr->more()->waiting_on_slave.insert(p.first);
+ auto ret = mdr->more()->waiting_on_peer.insert(p.first);
ceph_assert(ret.second);
}
return false;
void Locker::_drop_locks(MutationImpl *mut, set<CInode*> *pneed_issue,
bool drop_rdlocks)
{
- set<mds_rank_t> slaves;
+ set<mds_rank_t> peers;
for (auto it = mut->locks.begin(); it != mut->locks.end(); ) {
SimpleLock *lock = it->lock;
pneed_issue->insert(static_cast<CInode*>(obj));
} else {
ceph_assert(lock->get_sm()->can_remote_xlock);
- slaves.insert(obj->authority().first);
+ peers.insert(obj->authority().first);
lock->put_xlock();
mut->locks.erase(it++);
}
} else if (it->is_wrlock() || it->is_remote_wrlock()) {
if (it->is_remote_wrlock()) {
- slaves.insert(it->wrlock_target);
+ peers.insert(it->wrlock_target);
it->clear_remote_wrlock();
}
if (it->is_wrlock()) {
}
}
- for (set<mds_rank_t>::iterator p = slaves.begin(); p != slaves.end(); ++p) {
+ for (set<mds_rank_t>::iterator p = peers.begin(); p != peers.end(); ++p) {
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(*p) >= MDSMap::STATE_REJOIN) {
dout(10) << "_drop_non_rdlocks dropping remote locks on mds." << *p << dendl;
- auto slavereq = make_message<MMDSSlaveRequest>(mut->reqid, mut->attempt,
- MMDSSlaveRequest::OP_DROPLOCKS);
- mds->send_message_mds(slavereq, *p);
+ auto peerreq = make_message<MMDSPeerRequest>(mut->reqid, mut->attempt,
+ MMDSPeerRequest::OP_DROPLOCKS);
+ mds->send_message_mds(peerreq, *p);
}
}
}
return;
}
- if (mdr->has_more() && !mdr->more()->slaves.empty()) {
- dout(10) << " there are slaves requests for " << *mdr << ", noop" << dendl;
+ if (mdr->has_more() && !mdr->more()->peers.empty()) {
+ dout(10) << " there are peers requests for " << *mdr << ", noop" << dendl;
return;
}
if (mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(target)) {
dout(7) << " mds." << target << " is not active" << dendl;
- if (mut->more()->waiting_on_slave.empty())
+ if (mut->more()->waiting_on_peer.empty())
mds->wait_for_active_peer(target, new C_MDS_RetryRequest(mdcache, mut));
return;
}
// send lock request
mut->start_locking(lock, target);
- mut->more()->slaves.insert(target);
- auto r = make_message<MMDSSlaveRequest>(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK);
+ mut->more()->peers.insert(target);
+ auto r = make_message<MMDSPeerRequest>(mut->reqid, mut->attempt, MMDSPeerRequest::OP_WRLOCK);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, target);
- ceph_assert(mut->more()->waiting_on_slave.count(target) == 0);
- mut->more()->waiting_on_slave.insert(target);
+ ceph_assert(mut->more()->waiting_on_peer.count(target) == 0);
+ mut->more()->waiting_on_peer.insert(target);
}
void Locker::remote_wrlock_finish(const MutationImpl::lock_iterator& it, MutationImpl *mut)
<< " " << *lock->get_parent() << dendl;
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(target) >= MDSMap::STATE_REJOIN) {
- auto slavereq = make_message<MMDSSlaveRequest>(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK);
- slavereq->set_lock_type(lock->get_type());
- lock->get_parent()->set_object_info(slavereq->get_object_info());
- mds->send_message_mds(slavereq, target);
+ auto peerreq = make_message<MMDSPeerRequest>(mut->reqid, mut->attempt, MMDSPeerRequest::OP_UNWRLOCK);
+ peerreq->set_lock_type(lock->get_type());
+ lock->get_parent()->set_object_info(peerreq->get_object_info());
+ mds->send_message_mds(peerreq, target);
}
}
} else {
// replica
ceph_assert(lock->get_sm()->can_remote_xlock);
- ceph_assert(!mut->slave_request);
+ ceph_assert(!mut->peer_request);
// wait for single auth
if (lock->get_parent()->is_ambiguous_auth()) {
if (mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) {
dout(7) << " mds." << auth << " is not active" << dendl;
- if (mut->more()->waiting_on_slave.empty())
+ if (mut->more()->waiting_on_peer.empty())
mds->wait_for_active_peer(auth, new C_MDS_RetryRequest(mdcache, mut));
return false;
}
// send lock request
- mut->more()->slaves.insert(auth);
+ mut->more()->peers.insert(auth);
mut->start_locking(lock, auth);
- auto r = make_message<MMDSSlaveRequest>(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK);
+ auto r = make_message<MMDSPeerRequest>(mut->reqid, mut->attempt, MMDSPeerRequest::OP_XLOCK);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, auth);
- ceph_assert(mut->more()->waiting_on_slave.count(auth) == 0);
- mut->more()->waiting_on_slave.insert(auth);
+ ceph_assert(mut->more()->waiting_on_peer.count(auth) == 0);
+ mut->more()->waiting_on_peer.insert(auth);
return false;
}
mds_rank_t auth = lock->get_parent()->authority().first;
if (!mds->is_cluster_degraded() ||
mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) {
- auto slavereq = make_message<MMDSSlaveRequest>(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK);
- slavereq->set_lock_type(lock->get_type());
- lock->get_parent()->set_object_info(slavereq->get_object_info());
- mds->send_message_mds(slavereq, auth);
+ auto peerreq = make_message<MMDSPeerRequest>(mut->reqid, mut->attempt, MMDSPeerRequest::OP_UNXLOCK);
+ peerreq->set_lock_type(lock->get_type());
+ lock->get_parent()->set_object_info(peerreq->get_object_info());
+ mds->send_message_mds(peerreq, auth);
}
// others waiting?
lock->finish_waiters(SimpleLock::WAIT_STABLE |
#include "events/ESessions.h"
#include "events/EUpdate.h"
-#include "events/ESlaveUpdate.h"
+#include "events/EPeerUpdate.h"
#include "events/EOpen.h"
#include "events/ECommitted.h"
#include "events/EPurged.h"
case EVENT_SESSIONS_OLD: return "SESSIONS_OLD";
case EVENT_SESSIONS: return "SESSIONS";
case EVENT_UPDATE: return "UPDATE";
- case EVENT_SLAVEUPDATE: return "SLAVEUPDATE";
+ case EVENT_PEERUPDATE: return "PEERUPDATE";
case EVENT_OPEN: return "OPEN";
case EVENT_COMMITTED: return "COMMITTED";
case EVENT_PURGED: return "PURGED";
{"SESSIONS_OLD", EVENT_SESSIONS_OLD},
{"SESSIONS", EVENT_SESSIONS},
{"UPDATE", EVENT_UPDATE},
- {"SLAVEUPDATE", EVENT_SLAVEUPDATE},
+ {"PEERUPDATE", EVENT_PEERUPDATE},
{"OPEN", EVENT_OPEN},
{"COMMITTED", EVENT_COMMITTED},
{"PURGED", EVENT_PURGED},
case EVENT_UPDATE:
le = std::make_unique<EUpdate>();
break;
- case EVENT_SLAVEUPDATE:
- le = std::make_unique<ESlaveUpdate>();
+ case EVENT_PEERUPDATE:
+ le = std::make_unique<EPeerUpdate>();
break;
case EVENT_OPEN:
le = std::make_unique<EOpen>();
#define EVENT_SESSIONS 12
#define EVENT_UPDATE 20
-#define EVENT_SLAVEUPDATE 21
+#define EVENT_PEERUPDATE 21
#define EVENT_OPEN 22
#define EVENT_COMMITTED 23
#define EVENT_PURGED 24
class CInode;
class CDentry;
class MDSRank;
-struct MDSlaveUpdate;
+struct MDPeerUpdate;
class LogSegment {
public:
map<int, ceph::unordered_set<version_t> > pending_commit_tids; // mdstable
set<metareqid_t> uncommitted_leaders;
- set<metareqid_t> uncommitted_slaves;
+ set<metareqid_t> uncommitted_peers;
set<dirfrag_t> uncommitted_fragments;
// client request ids
#include "events/ESubtreeMap.h"
#include "events/EUpdate.h"
-#include "events/ESlaveUpdate.h"
+#include "events/EPeerUpdate.h"
#include "events/EImportFinish.h"
#include "events/EFragment.h"
#include "events/ECommitted.h"
if (cur->last >= floor) {
bool update = true;
if (cur->state_test(CInode::STATE_AMBIGUOUSAUTH) && cur->is_auth()) {
- // rename src inode is not projected in the slave rename prep case. so we should
+ // rename src inode is not projected in the peer rename prep case. so we should
// avoid updating the inode.
ceph_assert(linkunlink < 0);
ceph_assert(cur->is_frozen_inode());
// because we are about to write into the dirfrag fnode and that needs
// to commit before the lock can cycle.
if (linkunlink) {
- ceph_assert(pin->nestlock.get_num_wrlocks() || mut->is_slave());
+ ceph_assert(pin->nestlock.get_num_wrlocks() || mut->is_peer());
}
if (!mut->is_wrlocked(&pin->nestlock)) {
if (!mut->is_wrlocked(&pin->versionlock))
mds->locker->local_wrlock_grab(&pin->versionlock, mut);
- ceph_assert(mut->is_wrlocked(&pin->nestlock) || mut->is_slave());
+ ceph_assert(mut->is_wrlocked(&pin->nestlock) || mut->is_peer());
pin->last_dirstat_prop = mut->get_mds_stamp();
// ===================================
-// slave requests
+// peer requests
/*
- * some handlers for leader requests with slaves. we need to make
- * sure slaves journal commits before we forget we leadered them and
+ * some handlers for leader requests with peers. we need to make
+ * sure peers journal commits before we forget we leadered them and
* remove them from the uncommitted_leaders map (used during recovery
- * to commit|abort slaves).
+ * to commit|abort peers).
*/
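// a minimal sketch of that bookkeeping, with hypothetical local names
// (the real state is uncommitted_leaders keyed by metareqid_t, and the
// final commit is journaled via log_leader_commit()):
//
//   struct pending_commit { std::set<mds_rank_t> peers; bool recovering = false; };
//   std::map<metareqid_t, pending_commit> pending;
//
//   void on_peer_committed(metareqid_t r, mds_rank_t from) {
//     auto& u = pending.at(r);
//     u.peers.erase(from);                  // one OP_COMMITTED ack per peer
//     if (!u.recovering && u.peers.empty())
//       pending.erase(r);                   // all acked: safe to journal the commit
//   }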
struct C_MDC_CommittedLeader : public MDCacheLogContext {
metareqid_t reqid;
// while active...
-void MDCache::committed_leader_slave(metareqid_t r, mds_rank_t from)
+void MDCache::committed_leader_peer(metareqid_t r, mds_rank_t from)
{
- dout(10) << "committed_leader_slave mds." << from << " on " << r << dendl;
+ dout(10) << "committed_leader_peer mds." << from << " on " << r << dendl;
ceph_assert(uncommitted_leaders.count(r));
- uncommitted_leaders[r].slaves.erase(from);
- if (!uncommitted_leaders[r].recovering && uncommitted_leaders[r].slaves.empty())
+ uncommitted_leaders[r].peers.erase(from);
+ if (!uncommitted_leaders[r].recovering && uncommitted_leaders[r].peers.empty())
log_leader_commit(r);
}
}
/*
- * Leader may crash after receiving all slaves' commit acks, but before journalling
- * the final commit. Slaves may crash after journalling the slave commit, but before
- * sending commit ack to the leader. Commit leaders with no uncommitted slave when
+ * Leader may crash after receiving all peers' commit acks, but before journalling
+ * the final commit. Peers may crash after journalling the peer commit, but before
+ * sending commit ack to the leader. Commit leaders with no uncommitted peer when
* resolve finishes.
*/
void MDCache::finish_committed_leaders()
p != uncommitted_leaders.end();
++p) {
p->second.recovering = false;
- if (!p->second.committing && p->second.slaves.empty()) {
+ if (!p->second.committing && p->second.peers.empty()) {
dout(10) << "finish_committed_leaders " << p->first << dendl;
log_leader_commit(p->first);
}
}
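// (concrete case for the first window above: recovering peers re-send the
// reqid in their resolve messages, handle_resolve re-adds each sender to
// uncommitted_leaders[reqid].peers, and log_leader_commit() runs only once
// every OP_COMMITTED ack has drained that set again.)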
/*
- * at end of resolve... we must journal a commit|abort for all slave
+ * at end of resolve... we must journal a commit|abort for all peer
* updates, before moving on.
*
* this is so that the leader can safely journal ECommitted on ops it
* leaders when it reaches up:active (all other recovering nodes must
* complete resolve before that happens).
*/
-struct C_MDC_SlaveCommit : public MDCacheLogContext {
+struct C_MDC_PeerCommit : public MDCacheLogContext {
mds_rank_t from;
metareqid_t reqid;
- C_MDC_SlaveCommit(MDCache *c, int f, metareqid_t r) : MDCacheLogContext(c), from(f), reqid(r) {}
+ C_MDC_PeerCommit(MDCache *c, int f, metareqid_t r) : MDCacheLogContext(c), from(f), reqid(r) {}
void finish(int r) override {
- mdcache->_logged_slave_commit(from, reqid);
+ mdcache->_logged_peer_commit(from, reqid);
}
};
-void MDCache::_logged_slave_commit(mds_rank_t from, metareqid_t reqid)
+void MDCache::_logged_peer_commit(mds_rank_t from, metareqid_t reqid)
{
- dout(10) << "_logged_slave_commit from mds." << from << " " << reqid << dendl;
+ dout(10) << "_logged_peer_commit from mds." << from << " " << reqid << dendl;
// send a message
- auto req = make_message<MMDSSlaveRequest>(reqid, 0, MMDSSlaveRequest::OP_COMMITTED);
+ auto req = make_message<MMDSPeerRequest>(reqid, 0, MMDSPeerRequest::OP_COMMITTED);
mds->send_message_mds(req, from);
}
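// (ordering note: _logged_peer_commit only runs from C_MDC_PeerCommit,
// i.e. after the EPeerUpdate OP_COMMIT entry is journaled, so the
// OP_COMMITTED ack above can never outrun the peer's own commit record.)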
void MDCache::send_resolves()
{
- send_slave_resolves();
+ send_peer_resolves();
if (!resolve_done) {
// I'm survivor: refresh snap cache
mds->snapclient->sync(
new MDSInternalContextWrapper(mds,
new LambdaContext([this](int r) {
- maybe_finish_slave_resolve();
+ maybe_finish_peer_resolve();
})
)
);
send_subtree_resolves();
}
-void MDCache::send_slave_resolves()
+void MDCache::send_peer_resolves()
{
- dout(10) << "send_slave_resolves" << dendl;
+ dout(10) << "send_peer_resolves" << dendl;
map<mds_rank_t, ref_t<MMDSResolve>> resolves;
if (mds->is_resolve()) {
- for (map<metareqid_t, uslave>::iterator p = uncommitted_slaves.begin();
- p != uncommitted_slaves.end();
+ for (map<metareqid_t, upeer>::iterator p = uncommitted_peers.begin();
+ p != uncommitted_peers.end();
++p) {
mds_rank_t leader = p->second.leader;
auto &m = resolves[leader];
if (!m) m = make_message<MMDSResolve>();
- m->add_slave_request(p->first, false);
+ m->add_peer_request(p->first, false);
}
} else {
set<mds_rank_t> resolve_set;
p != active_requests.end();
++p) {
MDRequestRef& mdr = p->second;
- if (!mdr->is_slave())
+ if (!mdr->is_peer())
continue;
- if (!mdr->slave_did_prepare() && !mdr->committing) {
+ if (!mdr->peer_did_prepare() && !mdr->committing) {
continue;
}
- mds_rank_t leader = mdr->slave_to_mds;
- if (resolve_set.count(leader) || is_ambiguous_slave_update(p->first, leader)) {
+ mds_rank_t leader = mdr->peer_to_mds;
+ if (resolve_set.count(leader) || is_ambiguous_peer_update(p->first, leader)) {
dout(10) << " including uncommitted " << *mdr << dendl;
if (!resolves.count(leader))
resolves[leader] = make_message<MMDSResolve>();
map<client_t, Capability::Export> cap_map;
in->export_client_caps(cap_map);
bufferlist bl;
- MMDSResolve::slave_inode_cap inode_caps(in->ino(), cap_map);
+ MMDSResolve::peer_inode_cap inode_caps(in->ino(), cap_map);
encode(inode_caps, bl);
- resolves[leader]->add_slave_request(p->first, bl);
+ resolves[leader]->add_peer_request(p->first, bl);
} else {
- resolves[leader]->add_slave_request(p->first, mdr->committing);
+ resolves[leader]->add_peer_request(p->first, mdr->committing);
}
}
}
}
for (auto &p : resolves) {
- dout(10) << "sending slave resolve to mds." << p.first << dendl;
+ dout(10) << "sending peer resolve to mds." << p.first << dendl;
mds->send_message_mds(p.second, p.first);
resolve_ack_gather.insert(p.first);
}
resolves_pending = false;
}
-void MDCache::maybe_finish_slave_resolve() {
+void MDCache::maybe_finish_peer_resolve() {
if (resolve_ack_gather.empty() && resolve_need_rollback.empty()) {
// snap cache get synced or I'm in resolve state
if (mds->snapclient->is_synced() || resolve_done)
resolve_gather.insert(who);
discard_delayed_resolve(who);
- ambiguous_slave_updates.erase(who);
+ ambiguous_peer_updates.erase(who);
rejoin_gather.insert(who);
rejoin_sent.erase(who); // i need to send another
// tell the balancer too.
mds->balancer->handle_mds_failure(who);
- // clean up any requests slave to/from this node
+ // clean up any requests peer to/from this node
list<MDRequestRef> finish;
for (ceph::unordered_map<metareqid_t, MDRequestRef>::iterator p = active_requests.begin();
p != active_requests.end();
++p) {
MDRequestRef& mdr = p->second;
- // slave to the failed node?
- if (mdr->slave_to_mds == who) {
- if (mdr->slave_did_prepare()) {
- dout(10) << " slave request " << *mdr << " uncommitted, will resolve shortly" << dendl;
- if (is_ambiguous_slave_update(p->first, mdr->slave_to_mds))
- remove_ambiguous_slave_update(p->first, mdr->slave_to_mds);
-
- if (!mdr->more()->waiting_on_slave.empty()) {
+ // peer to the failed node?
+ if (mdr->peer_to_mds == who) {
+ if (mdr->peer_did_prepare()) {
+ dout(10) << " peer request " << *mdr << " uncommitted, will resolve shortly" << dendl;
+ if (is_ambiguous_peer_update(p->first, mdr->peer_to_mds))
+ remove_ambiguous_peer_update(p->first, mdr->peer_to_mds);
+
+ if (!mdr->more()->waiting_on_peer.empty()) {
ceph_assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid());
// will rollback, no need to wait
- mdr->reset_slave_request();
- mdr->more()->waiting_on_slave.clear();
+ mdr->reset_peer_request();
+ mdr->more()->waiting_on_peer.clear();
}
} else if (!mdr->committing) {
- dout(10) << " slave request " << *mdr << " has no prepare, finishing up" << dendl;
- if (mdr->slave_request || mdr->slave_rolling_back())
+ dout(10) << " peer request " << *mdr << " has no prepare, finishing up" << dendl;
+ if (mdr->peer_request || mdr->peer_rolling_back())
mdr->aborted = true;
else
finish.push_back(mdr);
}
}
- if (mdr->is_slave() && mdr->slave_did_prepare()) {
- if (mdr->more()->waiting_on_slave.count(who)) {
+ if (mdr->is_peer() && mdr->peer_did_prepare()) {
+ if (mdr->more()->waiting_on_peer.count(who)) {
ceph_assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid());
- dout(10) << " slave request " << *mdr << " no longer need rename notity ack from mds."
+ dout(10) << " peer request " << *mdr << " no longer need rename notity ack from mds."
<< who << dendl;
- mdr->more()->waiting_on_slave.erase(who);
- if (mdr->more()->waiting_on_slave.empty() && mdr->slave_request)
+ mdr->more()->waiting_on_peer.erase(who);
+ if (mdr->more()->waiting_on_peer.empty() && mdr->peer_request)
mds->queue_waiter(new C_MDS_RetryRequest(this, mdr));
}
if (mdr->more()->srcdn_auth_mds == who &&
- mds->mdsmap->is_clientreplay_or_active_or_stopping(mdr->slave_to_mds)) {
+ mds->mdsmap->is_clientreplay_or_active_or_stopping(mdr->peer_to_mds)) {
// rename srcdn's auth mds failed, resolve even I'm a survivor.
- dout(10) << " slave request " << *mdr << " uncommitted, will resolve shortly" << dendl;
- add_ambiguous_slave_update(p->first, mdr->slave_to_mds);
+ dout(10) << " peer request " << *mdr << " uncommitted, will resolve shortly" << dendl;
+ add_ambiguous_peer_update(p->first, mdr->peer_to_mds);
}
- } else if (mdr->slave_request) {
- const cref_t<MMDSSlaveRequest> &slave_req = mdr->slave_request;
- // FIXME: Slave rename request can arrive after we notice mds failure.
+ } else if (mdr->peer_request) {
+ const cref_t<MMDSPeerRequest> &peer_req = mdr->peer_request;
+ // FIXME: Peer rename request can arrive after we notice mds failure.
// This can cause mds to crash (does not affect integrity of FS).
- if (slave_req->get_op() == MMDSSlaveRequest::OP_RENAMEPREP &&
- slave_req->srcdn_auth == who)
- slave_req->mark_interrupted();
+ if (peer_req->get_op() == MMDSPeerRequest::OP_RENAMEPREP &&
+ peer_req->srcdn_auth == who)
+ peer_req->mark_interrupted();
}
- // failed node is slave?
+ // failed node is peer?
if (mdr->is_leader() && !mdr->committing) {
if (mdr->more()->srcdn_auth_mds == who) {
dout(10) << " leader request " << *mdr << " waiting for rename srcdn's auth mds."
if (mdr->more()->witnessed.count(who)) {
mds_rank_t srcdn_auth = mdr->more()->srcdn_auth_mds;
- if (srcdn_auth >= 0 && mdr->more()->waiting_on_slave.count(srcdn_auth)) {
+ if (srcdn_auth >= 0 && mdr->more()->waiting_on_peer.count(srcdn_auth)) {
dout(10) << " leader request " << *mdr << " waiting for rename srcdn's auth mds."
<< mdr->more()->srcdn_auth_mds << " to reply" << dendl;
- // waiting for the slave (rename srcdn's auth mds), delay sending resolve ack
- // until either the request is committing or the slave also fails.
- ceph_assert(mdr->more()->waiting_on_slave.size() == 1);
+ // waiting for the peer (rename srcdn's auth mds), delay sending resolve ack
+ // until either the request is committing or the peer also fails.
+ ceph_assert(mdr->more()->waiting_on_peer.size() == 1);
pending_leaders.insert(p->first);
} else {
- dout(10) << " leader request " << *mdr << " no longer witnessed by slave mds."
+ dout(10) << " leader request " << *mdr << " no longer witnessed by peer mds."
<< who << " to recover" << dendl;
if (srcdn_auth >= 0)
ceph_assert(mdr->more()->witnessed.count(srcdn_auth) == 0);
}
}
- if (mdr->more()->waiting_on_slave.count(who)) {
- dout(10) << " leader request " << *mdr << " waiting for slave mds." << who
+ if (mdr->more()->waiting_on_peer.count(who)) {
+ dout(10) << " leader request " << *mdr << " waiting for peer mds." << who
<< " to recover" << dendl;
// retry request when peer recovers
- mdr->more()->waiting_on_slave.erase(who);
- if (mdr->more()->waiting_on_slave.empty())
+ mdr->more()->waiting_on_peer.erase(who);
+ if (mdr->more()->waiting_on_peer.empty())
mds->wait_for_active_peer(who, new C_MDS_RetryRequest(this, mdr));
}
for (map<metareqid_t, uleader>::iterator p = uncommitted_leaders.begin();
p != uncommitted_leaders.end();
++p) {
- // The failed MDS may have already committed the slave update
- if (p->second.slaves.count(who)) {
+ // The failed MDS may have already committed the peer update
+ if (p->second.peers.count(who)) {
p->second.recovering = true;
- p->second.slaves.erase(who);
+ p->second.peers.erase(who);
}
}
while (!finish.empty()) {
- dout(10) << "cleaning up slave request " << *finish.front() << dendl;
+ dout(10) << "cleaning up peer request " << *finish.front() << dendl;
request_finish(finish.front());
finish.pop_front();
}
discard_delayed_resolve(from);
- // ambiguous slave requests?
- if (!m->slave_requests.empty()) {
+ // ambiguous peer requests?
+ if (!m->peer_requests.empty()) {
if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) {
- for (auto p = m->slave_requests.begin(); p != m->slave_requests.end(); ++p) {
+ for (auto p = m->peer_requests.begin(); p != m->peer_requests.end(); ++p) {
if (uncommitted_leaders.count(p->first) && !uncommitted_leaders[p->first].safe) {
ceph_assert(!p->second.committing);
pending_leaders.insert(p->first);
}
if (!pending_leaders.empty()) {
- dout(10) << " still have pending updates, delay processing slave resolve" << dendl;
+ dout(10) << " still have pending updates, delay processing peer resolve" << dendl;
delayed_resolve[from] = m;
return;
}
}
auto ack = make_message<MMDSResolveAck>();
- for (const auto &p : m->slave_requests) {
+ for (const auto &p : m->peer_requests) {
if (uncommitted_leaders.count(p.first)) { //mds->sessionmap.have_completed_request(p.first)) {
// COMMIT
if (p.second.committing) {
- // already committing, waiting for the OP_COMMITTED slave reply
- dout(10) << " already committing slave request " << p << " noop "<< dendl;
+ // already committing, waiting for the OP_COMMITTED peer reply
+ dout(10) << " already committing peer request " << p << " noop "<< dendl;
} else {
- dout(10) << " ambiguous slave request " << p << " will COMMIT" << dendl;
+ dout(10) << " ambiguous peer request " << p << " will COMMIT" << dendl;
ack->add_commit(p.first);
}
- uncommitted_leaders[p.first].slaves.insert(from); // wait for slave OP_COMMITTED before we log ECommitted
+ uncommitted_leaders[p.first].peers.insert(from); // wait for peer OP_COMMITTED before we log ECommitted
if (p.second.inode_caps.length() > 0) {
- // slave wants to export caps (rename)
+ // peer wants to export caps (rename)
ceph_assert(mds->is_resolve());
- MMDSResolve::slave_inode_cap inode_caps;
+ MMDSResolve::peer_inode_cap inode_caps;
auto q = p.second.inode_caps.cbegin();
decode(inode_caps, q);
inodeno_t ino = inode_caps.ino;
}
// will process these caps in rejoin stage
- rejoin_slave_exports[ino].first = from;
- rejoin_slave_exports[ino].second.swap(cap_exports);
+ rejoin_peer_exports[ino].first = from;
+ rejoin_peer_exports[ino].second.swap(cap_exports);
- // send information of imported caps back to slave
+ // send information of imported caps back to peer
encode(rejoin_imported_caps[from][ino], ack->commit[p.first]);
}
} else {
// ABORT
- dout(10) << " ambiguous slave request " << p << " will ABORT" << dendl;
+ dout(10) << " ambiguous peer request " << p << " will ABORT" << dendl;
ceph_assert(!p.second.committing);
ack->add_abort(p.first);
}
return;
}
- if (ambiguous_slave_updates.count(from)) {
+ if (ambiguous_peer_updates.count(from)) {
ceph_assert(mds->mdsmap->is_clientreplay_or_active_or_stopping(from));
ceph_assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
}
for (const auto &p : ack->commit) {
- dout(10) << " commit on slave " << p.first << dendl;
+ dout(10) << " commit on peer " << p.first << dendl;
- if (ambiguous_slave_updates.count(from)) {
- remove_ambiguous_slave_update(p.first, from);
+ if (ambiguous_peer_updates.count(from)) {
+ remove_ambiguous_peer_update(p.first, from);
continue;
}
if (mds->is_resolve()) {
// replay
- MDSlaveUpdate *su = get_uncommitted_slave(p.first, from);
+ MDPeerUpdate *su = get_uncommitted_peer(p.first, from);
ceph_assert(su);
// log commit
- mds->mdlog->start_submit_entry(new ESlaveUpdate(mds->mdlog, "unknown", p.first, from,
- ESlaveUpdate::OP_COMMIT, su->origop),
- new C_MDC_SlaveCommit(this, from, p.first));
+ mds->mdlog->start_submit_entry(new EPeerUpdate(mds->mdlog, "unknown", p.first, from,
+ EPeerUpdate::OP_COMMIT, su->origop),
+ new C_MDC_PeerCommit(this, from, p.first));
mds->mdlog->flush();
- finish_uncommitted_slave(p.first);
+ finish_uncommitted_peer(p.first);
} else {
MDRequestRef mdr = request_get(p.first);
// information about leader imported caps
if (p.second.length() > 0)
mdr->more()->inode_import.share(p.second);
- ceph_assert(mdr->slave_request == 0); // shouldn't be doing anything!
+ ceph_assert(mdr->peer_request == 0); // shouldn't be doing anything!
request_finish(mdr);
}
}
for (const auto &metareq : ack->abort) {
- dout(10) << " abort on slave " << metareq << dendl;
+ dout(10) << " abort on peer " << metareq << dendl;
if (mds->is_resolve()) {
- MDSlaveUpdate *su = get_uncommitted_slave(metareq, from);
+ MDPeerUpdate *su = get_uncommitted_peer(metareq, from);
ceph_assert(su);
// perform rollback (and journal a rollback entry)
// note: this will hold up the resolve a bit, until the rollback entries journal.
MDRequestRef null_ref;
switch (su->origop) {
- case ESlaveUpdate::LINK:
+ case EPeerUpdate::LINK:
mds->server->do_link_rollback(su->rollback, from, null_ref);
break;
- case ESlaveUpdate::RENAME:
+ case EPeerUpdate::RENAME:
mds->server->do_rename_rollback(su->rollback, from, null_ref);
break;
- case ESlaveUpdate::RMDIR:
+ case EPeerUpdate::RMDIR:
mds->server->do_rmdir_rollback(su->rollback, from, null_ref);
break;
default:
} else {
MDRequestRef mdr = request_get(metareq);
mdr->aborted = true;
- if (mdr->slave_request) {
- if (mdr->slave_did_prepare()) // journaling slave prepare ?
+ if (mdr->peer_request) {
+ if (mdr->peer_did_prepare()) // journaling peer prepare ?
add_rollback(metareq, from);
} else {
request_finish(mdr);
}
}
- if (!ambiguous_slave_updates.count(from)) {
+ if (!ambiguous_peer_updates.count(from)) {
resolve_ack_gather.erase(from);
- maybe_finish_slave_resolve();
+ maybe_finish_peer_resolve();
}
}
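// (shape of the ack handling above: each acked reqid takes one of two
// paths. commit: while resolving, journal EPeerUpdate::OP_COMMIT and send
// OP_COMMITTED from the flush callback, otherwise just finish the live
// mdr. abort: while resolving, replay the saved rollback through the
// matching do_*_rollback(), otherwise mark the mdr aborted and register a
// rollback only if the prepare was already journaled. resolve_ack_gather
// shrinks only once no ambiguous peer updates remain for the sender.)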
-void MDCache::add_uncommitted_slave(metareqid_t reqid, LogSegment *ls, mds_rank_t leader, MDSlaveUpdate *su)
+void MDCache::add_uncommitted_peer(metareqid_t reqid, LogSegment *ls, mds_rank_t leader, MDPeerUpdate *su)
{
- auto const &ret = uncommitted_slaves.emplace(std::piecewise_construct,
+ auto const &ret = uncommitted_peers.emplace(std::piecewise_construct,
std::forward_as_tuple(reqid),
std::forward_as_tuple());
ceph_assert(ret.second);
- ls->uncommitted_slaves.insert(reqid);
- uslave &u = ret.first->second;
+ ls->uncommitted_peers.insert(reqid);
+ upeer &u = ret.first->second;
u.leader = leader;
u.ls = ls;
u.su = su;
return;
}
for(set<CInode*>::iterator p = su->olddirs.begin(); p != su->olddirs.end(); ++p)
- uncommitted_slave_rename_olddir[*p]++;
+ uncommitted_peer_rename_olddir[*p]++;
for(set<CInode*>::iterator p = su->unlinked.begin(); p != su->unlinked.end(); ++p)
- uncommitted_slave_unlink[*p]++;
+ uncommitted_peer_unlink[*p]++;
}
-void MDCache::finish_uncommitted_slave(metareqid_t reqid, bool assert_exist)
+void MDCache::finish_uncommitted_peer(metareqid_t reqid, bool assert_exist)
{
- auto it = uncommitted_slaves.find(reqid);
- if (it == uncommitted_slaves.end()) {
+ auto it = uncommitted_peers.find(reqid);
+ if (it == uncommitted_peers.end()) {
ceph_assert(!assert_exist);
return;
}
- uslave &u = it->second;
- MDSlaveUpdate* su = u.su;
+ upeer &u = it->second;
+ MDPeerUpdate* su = u.su;
if (!u.waiters.empty()) {
mds->queue_waiters(u.waiters);
}
- u.ls->uncommitted_slaves.erase(reqid);
- uncommitted_slaves.erase(it);
+ u.ls->uncommitted_peers.erase(reqid);
+ uncommitted_peers.erase(it);
if (su == nullptr) {
return;
// discard the non-auth subtree we renamed out of
for(set<CInode*>::iterator p = su->olddirs.begin(); p != su->olddirs.end(); ++p) {
CInode *diri = *p;
- map<CInode*, int>::iterator it = uncommitted_slave_rename_olddir.find(diri);
- ceph_assert(it != uncommitted_slave_rename_olddir.end());
+ map<CInode*, int>::iterator it = uncommitted_peer_rename_olddir.find(diri);
+ ceph_assert(it != uncommitted_peer_rename_olddir.end());
it->second--;
if (it->second == 0) {
- uncommitted_slave_rename_olddir.erase(it);
+ uncommitted_peer_rename_olddir.erase(it);
auto&& ls = diri->get_dirfrags();
for (const auto& dir : ls) {
CDir *root = get_subtree_root(dir);
} else
ceph_assert(it->second > 0);
}
- // removed the inodes that were unlinked by slave update
+ // remove the inodes that were unlinked by peer update
for(set<CInode*>::iterator p = su->unlinked.begin(); p != su->unlinked.end(); ++p) {
CInode *in = *p;
- map<CInode*, int>::iterator it = uncommitted_slave_unlink.find(in);
- ceph_assert(it != uncommitted_slave_unlink.end());
+ map<CInode*, int>::iterator it = uncommitted_peer_unlink.find(in);
+ ceph_assert(it != uncommitted_peer_unlink.end());
it->second--;
if (it->second == 0) {
- uncommitted_slave_unlink.erase(it);
+ uncommitted_peer_unlink.erase(it);
if (!in->get_projected_parent_dn())
mds->mdcache->remove_inode_recursive(in);
} else
delete su;
}
-MDSlaveUpdate* MDCache::get_uncommitted_slave(metareqid_t reqid, mds_rank_t leader)
+MDPeerUpdate* MDCache::get_uncommitted_peer(metareqid_t reqid, mds_rank_t leader)
{
- MDSlaveUpdate* su = nullptr;
- auto it = uncommitted_slaves.find(reqid);
- if (it != uncommitted_slaves.end() &&
+ MDPeerUpdate* su = nullptr;
+ auto it = uncommitted_peers.find(reqid);
+ if (it != uncommitted_peers.end() &&
it->second.leader == leader) {
su = it->second.su;
}
auto p = resolve_need_rollback.find(mdr->reqid);
ceph_assert(p != resolve_need_rollback.end());
if (mds->is_resolve()) {
- finish_uncommitted_slave(reqid, false);
+ finish_uncommitted_peer(reqid, false);
} else if (mdr) {
- finish_uncommitted_slave(mdr->reqid, mdr->more()->slave_update_journaled);
+ finish_uncommitted_peer(mdr->reqid, mdr->more()->peer_update_journaled);
}
resolve_need_rollback.erase(p);
- maybe_finish_slave_resolve();
+ maybe_finish_peer_resolve();
}
void MDCache::disambiguate_other_imports()
p != active_requests.end();
++p) {
MDRequestRef& mdr = p->second;
- if (mdr->is_slave())
+ if (mdr->is_peer())
continue;
// auth pins
for (const auto& q : mdr->object_states) {
// dn auth_pin?
const auto pinned_it = strong->authpinned_dentries.find(dirfrag);
if (pinned_it != strong->authpinned_dentries.end()) {
- const auto slave_reqid_it = pinned_it->second.find(ss);
- if (slave_reqid_it != pinned_it->second.end()) {
- for (const auto &r : slave_reqid_it->second) {
+ const auto peer_reqid_it = pinned_it->second.find(ss);
+ if (peer_reqid_it != pinned_it->second.end()) {
+ for (const auto &r : peer_reqid_it->second) {
dout(10) << " dn authpin by " << r << " on " << *dn << dendl;
- // get/create slave mdrequest
+ // get/create peer mdrequest
MDRequestRef mdr;
if (have_request(r.reqid))
mdr = request_get(r.reqid);
else
- mdr = request_start_slave(r.reqid, r.attempt, strong);
+ mdr = request_start_peer(r.reqid, r.attempt, strong);
mdr->auth_pin(dn);
}
}
if (xlocked_it != strong->xlocked_dentries.end()) {
const auto ss_req_it = xlocked_it->second.find(ss);
if (ss_req_it != xlocked_it->second.end()) {
- const MMDSCacheRejoin::slave_reqid& r = ss_req_it->second;
+ const MMDSCacheRejoin::peer_reqid& r = ss_req_it->second;
dout(10) << " dn xlock by " << r << " on " << *dn << dendl;
MDRequestRef mdr = request_get(r.reqid); // should have this from auth_pin above.
ceph_assert(mdr->is_auth_pinned(dn));
for (const auto& r : authpinned_inodes_it->second) {
dout(10) << " inode authpin by " << r << " on " << *in << dendl;
- // get/create slave mdrequest
+ // get/create peer mdrequest
MDRequestRef mdr;
if (have_request(r.reqid))
mdr = request_get(r.reqid);
else
- mdr = request_start_slave(r.reqid, r.attempt, strong);
+ mdr = request_start_peer(r.reqid, r.attempt, strong);
if (strong->frozen_authpin_inodes.count(in->vino())) {
ceph_assert(!in->get_num_auth_pins());
mdr->freeze_auth_pin(in);
return true;
}
- // process caps that were exported by slave rename
- for (map<inodeno_t,pair<mds_rank_t,map<client_t,Capability::Export> > >::iterator p = rejoin_slave_exports.begin();
- p != rejoin_slave_exports.end();
+ // process caps that were exported by peer rename
+ for (map<inodeno_t,pair<mds_rank_t,map<client_t,Capability::Export> > >::iterator p = rejoin_peer_exports.begin();
+ p != rejoin_peer_exports.end();
++p) {
CInode *in = get_inode(p->first);
ceph_assert(in);
p->second.first, CEPH_CAP_FLAG_AUTH);
}
}
- rejoin_slave_exports.clear();
+ rejoin_peer_exports.clear();
rejoin_imported_caps.clear();
// process cap imports
dn->state_clear(CDentry::STATE_AUTH);
in->state_clear(CInode::STATE_AUTH);
}
- } else if (keep_dir && dnl->is_null()) { // keep null dentry for slave rollback
+ } else if (keep_dir && dnl->is_null()) { // keep null dentry for peer rollback
dout(20) << "trim_non_auth_subtree(" << dir << ") keeping dentry " << dn <<dendl;
} else { // just remove it
dout(20) << "trim_non_auth_subtree(" << dir << ") removing dentry " << dn << dendl;
p != active_requests.end();
++p) {
MDRequestRef& mdr = p->second;
- if (mdr->reqid.name.is_client() && !mdr->is_slave())
+ if (mdr->reqid.name.is_client() && !mdr->is_peer())
count++;
}
return count;
MDRequestRef MDCache::request_start(const cref_t<MClientRequest>& req)
{
- // did we win a forward race against a slave?
+ // did we win a forward race against a peer?
if (active_requests.count(req->get_reqid())) {
MDRequestRef& mdr = active_requests[req->get_reqid()];
ceph_assert(mdr);
- if (mdr->is_slave()) {
+ if (mdr->is_peer()) {
dout(10) << "request_start already had " << *mdr << ", waiting for finish" << dendl;
mdr->more()->waiting_for_finish.push_back(new C_MDS_RetryMessage(mds, req));
} else {
return mdr;
}
-MDRequestRef MDCache::request_start_slave(metareqid_t ri, __u32 attempt, const cref_t<Message> &m)
+MDRequestRef MDCache::request_start_peer(metareqid_t ri, __u32 attempt, const cref_t<Message> &m)
{
int by = m->get_source().num();
MDRequestImpl::Params params;
params.reqid = ri;
params.attempt = attempt;
- params.triggering_slave_req = m;
- params.slave_to = by;
+ params.triggering_peer_req = m;
+ params.peer_to = by;
params.initiated = m->get_recv_stamp();
params.throttled = m->get_throttle_stamp();
params.all_read = m->get_recv_complete_stamp();
mds->op_tracker.create_request<MDRequestImpl,MDRequestImpl::Params*>(¶ms);
ceph_assert(active_requests.count(mdr->reqid) == 0);
active_requests[mdr->reqid] = mdr;
- dout(7) << "request_start_slave " << *mdr << " by mds." << by << dendl;
+ dout(7) << "request_start_peer " << *mdr << " by mds." << by << dendl;
return mdr;
}
dout(7) << "request_finish " << *mdr << dendl;
mdr->mark_event("finishing request");
- // slave finisher?
- if (mdr->has_more() && mdr->more()->slave_commit) {
- Context *fin = mdr->more()->slave_commit;
- mdr->more()->slave_commit = 0;
+ // peer finisher?
+ if (mdr->has_more() && mdr->more()->peer_commit) {
+ Context *fin = mdr->more()->peer_commit;
+ mdr->more()->peer_commit = 0;
int ret;
if (mdr->aborted) {
mdr->aborted = false;
ret = -1;
- mdr->more()->slave_rolling_back = true;
+ mdr->more()->peer_rolling_back = true;
} else {
ret = 0;
mdr->committing = true;
{
if (mdr->client_request) {
mds->server->dispatch_client_request(mdr);
- } else if (mdr->slave_request) {
- mds->server->dispatch_slave_request(mdr);
+ } else if (mdr->peer_request) {
+ mds->server->dispatch_peer_request(mdr);
} else {
switch (mdr->internal_op) {
case CEPH_MDS_OP_FRAGMENTDIR:
if (!mdr->has_more())
return;
- // clean up slaves
+ // clean up peers
// (will implicitly drop remote dn pins)
- for (set<mds_rank_t>::iterator p = mdr->more()->slaves.begin();
- p != mdr->more()->slaves.end();
+ for (set<mds_rank_t>::iterator p = mdr->more()->peers.begin();
+ p != mdr->more()->peers.end();
++p) {
- auto r = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt,
- MMDSSlaveRequest::OP_FINISH);
+ auto r = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt,
+ MMDSPeerRequest::OP_FINISH);
if (mdr->killed && !mdr->committing) {
r->mark_abort();
}
}
- mdr->more()->slaves.clear(); /* we no longer have requests out to them, and
+ mdr->more()->peers.clear(); /* we no longer have requests out to them, and
* leaving them in can cause double-notifies as
* this function can get called more than once */
}
void MDCache::request_kill(MDRequestRef& mdr)
{
- // rollback slave requests is tricky. just let the request proceed.
+ // rolling back peer requests is tricky. just let the request proceed.
if (mdr->has_more() &&
- (!mdr->more()->witnessed.empty() || !mdr->more()->waiting_on_slave.empty())) {
+ (!mdr->more()->witnessed.empty() || !mdr->more()->waiting_on_peer.empty())) {
if (!(mdr->locking_state & MutationImpl::ALL_LOCKED)) {
ceph_assert(mdr->more()->witnessed.empty());
mdr->aborted = true;
- dout(10) << "request_kill " << *mdr << " -- waiting for slave reply, delaying" << dendl;
+ dout(10) << "request_kill " << *mdr << " -- waiting for peer reply, delaying" << dendl;
} else {
- dout(10) << "request_kill " << *mdr << " -- already started slave prep, no-op" << dendl;
+ dout(10) << "request_kill " << *mdr << " -- already started peer prep, no-op" << dendl;
}
ceph_assert(mdr->used_prealloc_ino == 0);
dout(10) << "dispatch_fragment_dir " << basedirfrag << " bits " << info.bits
<< " on " << *diri << dendl;
- if (mdr->more()->slave_error)
+ if (mdr->more()->peer_error)
mdr->aborted = true;
if (!mdr->aborted) {
#include "messages/MMDSOpenInoReply.h"
#include "messages/MMDSResolve.h"
#include "messages/MMDSResolveAck.h"
-#include "messages/MMDSSlaveRequest.h"
+#include "messages/MMDSPeerRequest.h"
#include "messages/MMDSSnapUpdate.h"
#include "osdc/Filer.h"
int get_num_client_requests();
MDRequestRef request_start(const cref_t<MClientRequest>& req);
- MDRequestRef request_start_slave(metareqid_t rid, __u32 attempt, const cref_t<Message> &m);
+ MDRequestRef request_start_peer(metareqid_t rid, __u32 attempt, const cref_t<Message> &m);
MDRequestRef request_start_internal(int op);
bool have_request(metareqid_t rid) {
return active_requests.count(rid);
int flags, int linkunlink=0,
snapid_t follows=CEPH_NOSNAP);
- // slaves
- void add_uncommitted_leader(metareqid_t reqid, LogSegment *ls, set<mds_rank_t> &slaves, bool safe=false) {
+ // peers
+ void add_uncommitted_leader(metareqid_t reqid, LogSegment *ls, set<mds_rank_t> &peers, bool safe=false) {
uncommitted_leaders[reqid].ls = ls;
- uncommitted_leaders[reqid].slaves = slaves;
+ uncommitted_leaders[reqid].peers = peers;
uncommitted_leaders[reqid].safe = safe;
}
void wait_for_uncommitted_leader(metareqid_t reqid, MDSContext *c) {
}
bool have_uncommitted_leader(metareqid_t reqid, mds_rank_t from) {
auto p = uncommitted_leaders.find(reqid);
- return p != uncommitted_leaders.end() && p->second.slaves.count(from) > 0;
+ return p != uncommitted_leaders.end() && p->second.peers.count(from) > 0;
}
void log_leader_commit(metareqid_t reqid);
void logged_leader_update(metareqid_t reqid);
void _logged_leader_commit(metareqid_t reqid);
- void committed_leader_slave(metareqid_t r, mds_rank_t from);
+ void committed_leader_peer(metareqid_t r, mds_rank_t from);
void finish_committed_leaders();
- void add_uncommitted_slave(metareqid_t reqid, LogSegment*, mds_rank_t, MDSlaveUpdate *su=nullptr);
- void wait_for_uncommitted_slave(metareqid_t reqid, MDSContext *c) {
- uncommitted_slaves.at(reqid).waiters.push_back(c);
+ void add_uncommitted_peer(metareqid_t reqid, LogSegment*, mds_rank_t, MDPeerUpdate *su=nullptr);
+ void wait_for_uncommitted_peer(metareqid_t reqid, MDSContext *c) {
+ uncommitted_peers.at(reqid).waiters.push_back(c);
}
- void finish_uncommitted_slave(metareqid_t reqid, bool assert_exist=true);
- MDSlaveUpdate* get_uncommitted_slave(metareqid_t reqid, mds_rank_t leader);
- void _logged_slave_commit(mds_rank_t from, metareqid_t reqid);
+ void finish_uncommitted_peer(metareqid_t reqid, bool assert_exist=true);
+ MDPeerUpdate* get_uncommitted_peer(metareqid_t reqid, mds_rank_t leader);
+ void _logged_peer_commit(mds_rank_t from, metareqid_t reqid);
void set_recovery_set(set<mds_rank_t>& s);
void handle_mds_failure(mds_rank_t who);
void recalc_auth_bits(bool replay);
void remove_inode_recursive(CInode *in);
- bool is_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) {
- auto p = ambiguous_slave_updates.find(leader);
- return p != ambiguous_slave_updates.end() && p->second.count(reqid);
+ bool is_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) {
+ auto p = ambiguous_peer_updates.find(leader);
+ return p != ambiguous_peer_updates.end() && p->second.count(reqid);
}
- void add_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) {
- ambiguous_slave_updates[leader].insert(reqid);
+ void add_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) {
+ ambiguous_peer_updates[leader].insert(reqid);
}
- void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) {
- auto p = ambiguous_slave_updates.find(leader);
+ void remove_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) {
+ auto p = ambiguous_peer_updates.find(leader);
auto q = p->second.find(reqid);
ceph_assert(q != p->second.end());
p->second.erase(q);
if (p->second.empty())
- ambiguous_slave_updates.erase(p);
+ ambiguous_peer_updates.erase(p);
}
void add_rollback(metareqid_t reqid, mds_rank_t leader) {
void try_trim_non_auth_subtree(CDir *dir);
bool can_trim_non_auth_dirfrag(CDir *dir) {
return my_ambiguous_imports.count((dir)->dirfrag()) == 0 &&
- uncommitted_slave_rename_olddir.count(dir->inode) == 0;
+ uncommitted_peer_rename_olddir.count(dir->inode) == 0;
}
/**
double export_ephemeral_random_max = 0.0;
protected:
- // track leader requests whose slaves haven't acknowledged commit
+ // track leader requests whose peers haven't acknowledged commit
struct uleader {
uleader() {}
- set<mds_rank_t> slaves;
+ set<mds_rank_t> peers;
LogSegment *ls = nullptr;
MDSContext::vec waiters;
bool safe = false;
bool recovering = false;
};
- struct uslave {
- uslave() {}
+ struct upeer {
+ upeer() {}
mds_rank_t leader;
LogSegment *ls = nullptr;
- MDSlaveUpdate *su = nullptr;
+ MDPeerUpdate *su = nullptr;
MDSContext::vec waiters;
};
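  // (lifecycle of an upeer entry: created by add_uncommitted_peer() when a
  // peer journals its prepare, looked up by get_uncommitted_peer() during
  // resolve, and removed by finish_uncommitted_peer() once the commit or
  // rollback is journaled; su keeps the MDPeerUpdate rollback state alive
  // in between.)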
friend class C_MDC_Join;
friend class C_MDC_RespondInternalRequest;
- friend class ESlaveUpdate;
+ friend class EPeerUpdate;
friend class ECommitted;
void set_readonly() { readonly = true; }
void disambiguate_other_imports();
void trim_unlinked_inodes();
- void send_slave_resolves();
+ void send_peer_resolves();
void send_subtree_resolves();
- void maybe_finish_slave_resolve();
+ void maybe_finish_peer_resolve();
void rejoin_walk(CDir *dir, const ref_t<MMDSCacheRejoin> &rejoin);
void handle_cache_rejoin(const cref_t<MMDSCacheRejoin> &m);
// from MMDSResolves
map<mds_rank_t, map<dirfrag_t, vector<dirfrag_t> > > other_ambiguous_imports;
- map<CInode*, int> uncommitted_slave_rename_olddir; // slave: preserve the non-auth dir until seeing commit.
- map<CInode*, int> uncommitted_slave_unlink; // slave: preserve the unlinked inode until seeing commit.
+ map<CInode*, int> uncommitted_peer_rename_olddir; // peer: preserve the non-auth dir until seeing commit.
+ map<CInode*, int> uncommitted_peer_unlink; // peer: preserve the unlinked inode until seeing commit.
- map<metareqid_t, uleader> uncommitted_leaders; // leader: req -> slave set
- map<metareqid_t, uslave> uncommitted_slaves; // slave: preserve the slave req until seeing commit.
+ map<metareqid_t, uleader> uncommitted_leaders; // leader: req -> peer set
+ map<metareqid_t, upeer> uncommitted_peers; // peer: preserve the peer req until seeing commit.
set<metareqid_t> pending_leaders;
- map<int, set<metareqid_t> > ambiguous_slave_updates;
+ map<int, set<metareqid_t> > ambiguous_peer_updates;
bool resolves_pending = false;
set<mds_rank_t> resolve_gather; // nodes i need resolves from
set<mds_rank_t> rejoin_ack_sent; // nodes i sent a rejoin to
set<mds_rank_t> rejoin_ack_gather; // nodes from whom i need a rejoin ack
map<mds_rank_t,map<inodeno_t,map<client_t,Capability::Import> > > rejoin_imported_caps;
- map<inodeno_t,pair<mds_rank_t,map<client_t,Capability::Export> > > rejoin_slave_exports;
+ map<inodeno_t,pair<mds_rank_t,map<client_t,Capability::Export> > > rejoin_peer_exports;
map<client_t,entity_inst_t> rejoin_client_map;
map<client_t,client_metadata_t> rejoin_client_metadata_map;
type == CEPH_MSG_CLIENT_RECONNECT ||
type == CEPH_MSG_CLIENT_RECLAIM ||
type == CEPH_MSG_CLIENT_REQUEST ||
- type == MSG_MDS_SLAVE_REQUEST ||
+ type == MSG_MDS_PEER_REQUEST ||
type == MSG_MDS_HEARTBEAT ||
type == MSG_MDS_TABLE_REQUEST ||
type == MSG_MDS_LOCK ||
case CEPH_MSG_CLIENT_REQUEST:
server->dispatch(m);
break;
- case MSG_MDS_SLAVE_REQUEST:
+ case MSG_MDS_PEER_REQUEST:
ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
server->dispatch(m);
break;
}
ceph_assert(it->second.state == EXPORT_LOCKING);
- if (mdr->more()->slave_error || dir->is_frozen() || dir->is_freezing()) {
+ if (mdr->more()->peer_error || dir->is_frozen() || dir->is_freezing()) {
dout(7) << "wouldblock|freezing|frozen, canceling export" << dendl;
export_try_cancel(dir);
return;
return (_more != nullptr) && (!_more->witnessed.empty());
}
-bool MDRequestImpl::slave_did_prepare()
+bool MDRequestImpl::peer_did_prepare()
{
- return has_more() && more()->slave_commit;
+ return has_more() && more()->peer_commit;
}
-bool MDRequestImpl::slave_rolling_back()
+bool MDRequestImpl::peer_rolling_back()
{
- return has_more() && more()->slave_rolling_back;
+ return has_more() && more()->peer_rolling_back;
}
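// (note: peer_did_prepare() just tests more()->peer_commit, so whoever
// installs that commit/abort finisher is, in effect, marking the prepare
// as journaled; peer_rolling_back() mirrors it for the abort path.)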
bool MDRequestImpl::freeze_auth_pin(CInode *inode)
return req;
}
-void MDRequestImpl::reset_slave_request(const cref_t<MMDSSlaveRequest>& req)
+void MDRequestImpl::reset_peer_request(const cref_t<MMDSPeerRequest>& req)
{
msg_lock.lock();
- cref_t<MMDSSlaveRequest> old;
- old.swap(slave_request);
- slave_request = req;
+ cref_t<MMDSPeerRequest> old;
+ old.swap(peer_request);
+ peer_request = req;
msg_lock.unlock();
old.reset();
}
{
out << "request(" << reqid << " nref=" << nref;
//if (request) out << " " << *request;
- if (is_slave()) out << " slave_to mds." << slave_to_mds;
+ if (is_peer()) out << " peer_to mds." << peer_to_mds;
if (client_request) out << " cr=" << client_request;
- if (slave_request) out << " sr=" << slave_request;
+ if (peer_request) out << " sr=" << peer_request;
out << ")";
}
{
msg_lock.lock();
auto _client_request = client_request;
- auto _slave_request =slave_request;
+ auto _peer_request = peer_request;
msg_lock.unlock();
if (_client_request) {
f->dump_stream("client") << _client_request->get_orig_source();
f->dump_int("tid", _client_request->get_tid());
f->close_section(); // client_info
- } else if (is_slave() && _slave_request) { // replies go to an existing mdr
- f->dump_string("op_type", "slave_request");
+ } else if (is_peer() && _peer_request) { // replies go to an existing mdr
+ f->dump_string("op_type", "peer_request");
f->open_object_section("leader_info");
- f->dump_stream("leader") << _slave_request->get_orig_source();
+ f->dump_stream("leader") << _peer_request->get_orig_source();
f->close_section(); // leader_info
f->open_object_section("request_info");
- f->dump_int("attempt", _slave_request->get_attempt());
+ f->dump_int("attempt", _peer_request->get_attempt());
f->dump_string("op_type",
- MMDSSlaveRequest::get_opname(_slave_request->get_op()));
- f->dump_int("lock_type", _slave_request->get_lock_type());
- f->dump_stream("object_info") << _slave_request->get_object_info();
- f->dump_stream("srcdnpath") << _slave_request->srcdnpath;
- f->dump_stream("destdnpath") << _slave_request->destdnpath;
- f->dump_stream("witnesses") << _slave_request->witnesses;
+ MMDSPeerRequest::get_opname(_peer_request->get_op()));
+ f->dump_int("lock_type", _peer_request->get_lock_type());
+ f->dump_stream("object_info") << _peer_request->get_object_info();
+ f->dump_stream("srcdnpath") << _peer_request->srcdnpath;
+ f->dump_stream("destdnpath") << _peer_request->destdnpath;
+ f->dump_stream("witnesses") << _peer_request->witnesses;
f->dump_bool("has_inode_export",
- _slave_request->inode_export_v != 0);
- f->dump_int("inode_export_v", _slave_request->inode_export_v);
- f->dump_stream("op_stamp") << _slave_request->op_stamp;
+ _peer_request->inode_export_v != 0);
+ f->dump_int("inode_export_v", _peer_request->inode_export_v);
+ f->dump_stream("op_stamp") << _peer_request->op_stamp;
f->close_section(); // request_info
}
else if (internal_op != -1) { // internal request
{
msg_lock.lock();
auto _client_request = client_request;
- auto _slave_request = slave_request;
+ auto _peer_request = peer_request;
msg_lock.unlock();
if (_client_request) {
_client_request->print(stream);
- } else if (_slave_request) {
- _slave_request->print(stream);
+ } else if (_peer_request) {
+ _peer_request->print(stream);
} else if (internal_op >= 0) {
stream << "internal op " << ceph_mds_op_name(internal_op) << ":" << reqid;
} else {
- // drat, it's triggered by a slave request, but we don't have a message
+ // drat, it's triggered by a peer request, but we don't have a message
// FIXME
stream << "rejoin:" << reqid;
}
#include "common/TrackedOp.h"
#include "messages/MClientRequest.h"
-#include "messages/MMDSSlaveRequest.h"
+#include "messages/MMDSPeerRequest.h"
#include "messages/MClientReply.h"
class LogSegment;
// keep our default values synced with MDRequestParam's
MutationImpl() : TrackedOp(nullptr, utime_t()) {}
MutationImpl(OpTracker *tracker, utime_t initiated,
- const metareqid_t &ri, __u32 att=0, mds_rank_t slave_to=MDS_RANK_NONE)
+ const metareqid_t &ri, __u32 att=0, mds_rank_t peer_to=MDS_RANK_NONE)
: TrackedOp(tracker, initiated),
reqid(ri), attempt(att),
- slave_to_mds(slave_to) {}
+ peer_to_mds(peer_to) {}
~MutationImpl() override {
ceph_assert(!locking);
ceph_assert(!lock_cache);
return lock == last_locked;
}
- bool is_leader() const { return slave_to_mds == MDS_RANK_NONE; }
- bool is_slave() const { return slave_to_mds != MDS_RANK_NONE; }
+ bool is_leader() const { return peer_to_mds == MDS_RANK_NONE; }
+ bool is_peer() const { return peer_to_mds != MDS_RANK_NONE; }
client_t get_client() const {
if (reqid.name.is_client())
__u32 attempt = 0; // which attempt for this request
LogSegment *ls = nullptr; // the log segment i'm committing to
- // flag mutation as slave
- mds_rank_t slave_to_mds = MDS_RANK_NONE; // this is a slave request if >= 0.
+ // flag mutation as peer
+ mds_rank_t peer_to_mds = MDS_RANK_NONE; // this is a peer request if >= 0.
ceph::unordered_map<MDSCacheObject*, ObjectState> object_states;
int num_pins = 0;
struct More {
More() {}
- int slave_error = 0;
- std::set<mds_rank_t> slaves; // mds nodes that have slave requests to me (implies client_request)
- std::set<mds_rank_t> waiting_on_slave; // peers i'm waiting for slavereq replies from.
+ int peer_error = 0;
+ std::set<mds_rank_t> peers; // mds nodes that have peer requests to me (implies client_request)
+ std::set<mds_rank_t> waiting_on_peer; // peers i'm waiting for peerreq replies from.
// for rename/link/unlink
std::set<mds_rank_t> witnessed; // nodes who have journaled a RenamePrepare
std::map<MDSCacheObject*,version_t> pvmap;
- bool has_journaled_slaves = false;
- bool slave_update_journaled = false;
- bool slave_rolling_back = false;
+ bool has_journaled_peers = false;
+ bool peer_update_journaled = false;
+ bool peer_rolling_back = false;
// for rename
std::set<mds_rank_t> extra_witnesses; // replica list from srcdn auth (rename)
sr_t *srci_srnode = nullptr;
sr_t *desti_srnode = nullptr;
- // called when slave commits or aborts
- Context *slave_commit = nullptr;
+ // called when peer commits or aborts
+ Context *peer_commit = nullptr;
ceph::buffer::list rollback_bl;
MDSContext::vec waiting_for_finish;
metareqid_t reqid;
__u32 attempt = 0;
ceph::cref_t<MClientRequest> client_req;
- ceph::cref_t<Message> triggering_slave_req;
- mds_rank_t slave_to = MDS_RANK_NONE;
+ ceph::cref_t<Message> triggering_peer_req;
+ mds_rank_t peer_to = MDS_RANK_NONE;
utime_t initiated;
utime_t throttled, all_read, dispatched;
int internal_op = -1;
};
MDRequestImpl(const Params* params, OpTracker *tracker) :
MutationImpl(tracker, params->initiated,
- params->reqid, params->attempt, params->slave_to),
+ params->reqid, params->attempt, params->peer_to),
item_session_request(this), client_request(params->client_req),
internal_op(params->internal_op) {}
~MDRequestImpl() override;
More* more();
bool has_more() const;
bool has_witnesses();
- bool slave_did_prepare();
- bool slave_rolling_back();
+ bool peer_did_prepare();
+ bool peer_rolling_back();
bool freeze_auth_pin(CInode *inode);
void unfreeze_auth_pin(bool clear_inode=false);
void set_remote_frozen_auth_pin(CInode *inode);
void dump(ceph::Formatter *f) const override;
ceph::cref_t<MClientRequest> release_client_request();
- void reset_slave_request(const ceph::cref_t<MMDSSlaveRequest>& req=nullptr);
+ void reset_peer_request(const ceph::cref_t<MMDSPeerRequest>& req=nullptr);
Session *session = nullptr;
elist<MDRequestImpl*>::item item_session_request; // if not on list, op is aborted.
// inos we did a embedded cap release on, and may need to eval if we haven't since reissued
std::map<vinodeno_t, ceph_seq_t> cap_releases;
- // -- i am a slave request
- ceph::cref_t<MMDSSlaveRequest> slave_request; // slave request (if one is pending; implies slave == true)
+ // -- i am a peer request
+ ceph::cref_t<MMDSPeerRequest> peer_request; // peer request (if one is pending; implies peer == true)
// -- i am an internal op
int internal_op;
mutable ceph::spinlock msg_lock;
};
-struct MDSlaveUpdate {
- MDSlaveUpdate(int oo, ceph::buffer::list &rbl) :
+struct MDPeerUpdate {
+ MDPeerUpdate(int oo, ceph::buffer::list &rbl) :
origop(oo) {
rollback = std::move(rbl);
}
- ~MDSlaveUpdate() {
+ ~MDPeerUpdate() {
if (waiter)
waiter->complete(0);
}
#include "osdc/Objecter.h"
#include "events/EUpdate.h"
-#include "events/ESlaveUpdate.h"
+#include "events/EPeerUpdate.h"
#include "events/ESession.h"
#include "events/EOpen.h"
#include "events/ECommitted.h"
plb.add_u64_counter(l_mdss_handle_client_request, "handle_client_request",
"Client requests", "hcr", PerfCountersBuilder::PRIO_INTERESTING);
- plb.add_u64_counter(l_mdss_handle_slave_request, "handle_slave_request",
- "Slave requests", "hsr", PerfCountersBuilder::PRIO_INTERESTING);
+ plb.add_u64_counter(l_mdss_handle_peer_request, "handle_peer_request",
+ "Peer requests", "hsr", PerfCountersBuilder::PRIO_INTERESTING);
plb.add_u64_counter(l_mdss_handle_client_session,
"handle_client_session", "Client session messages", "hcs",
PerfCountersBuilder::PRIO_INTERESTING);
plb.set_prio_default(PerfCountersBuilder::PRIO_DEBUGONLY);
plb.add_u64_counter(l_mdss_dispatch_client_request, "dispatch_client_request",
"Client requests dispatched");
- plb.add_u64_counter(l_mdss_dispatch_slave_request, "dispatch_server_request",
+ plb.add_u64_counter(l_mdss_dispatch_peer_request, "dispatch_server_request",
"Server requests dispatched");
logger = plb.create_perf_counters();
*/
bool sessionclosed_isok = replay_unsafe_with_closed_session;
// active?
- // handle_slave_request()/handle_client_session() will wait if necessary
+ // handle_peer_request()/handle_client_session() will wait if necessary
if (m->get_type() == CEPH_MSG_CLIENT_REQUEST && !mds->is_active()) {
const auto &req = ref_cast<MClientRequest>(m);
if (mds->is_reconnect() || mds->get_want_state() == CEPH_MDS_STATE_RECONNECT) {
case CEPH_MSG_CLIENT_RECLAIM:
handle_client_reclaim(ref_cast<MClientReclaim>(m));
return;
- case MSG_MDS_SLAVE_REQUEST:
- handle_slave_request(ref_cast<MMDSSlaveRequest>(m));
+ case MSG_MDS_PEER_REQUEST:
+ handle_peer_request(ref_cast<MMDSPeerRequest>(m));
return;
default:
derr << "server unknown message " << m->get_type() << dendl;
return;
}
- if (mdr->has_more() && mdr->more()->has_journaled_slaves) {
- dout(10) << "early_reply - there are journaled slaves, not allowed." << dendl;
+ if (mdr->has_more() && mdr->more()->has_journaled_peers) {
+ dout(10) << "early_reply - there are journaled peers, not allowed." << dendl;
return;
}
void Server::dispatch_client_request(MDRequestRef& mdr)
{
// we shouldn't be waiting on anyone.
- ceph_assert(!mdr->has_more() || mdr->more()->waiting_on_slave.empty());
+ ceph_assert(!mdr->has_more() || mdr->more()->waiting_on_peer.empty());
if (mdr->killed) {
dout(10) << "request " << *mdr << " was killed" << dendl;
respond_to_request(mdr, -EROFS);
return;
}
- if (mdr->has_more() && mdr->more()->slave_error) {
- dout(10) << " got error from slaves" << dendl;
- respond_to_request(mdr, mdr->more()->slave_error);
+ if (mdr->has_more() && mdr->more()->peer_error) {
+ dout(10) << " got error from peers" << dendl;
+ respond_to_request(mdr, mdr->more()->peer_error);
return;
}
req->get_op() == CEPH_MDS_OP_MKSNAP ||
((req->get_op() == CEPH_MDS_OP_LINK ||
req->get_op() == CEPH_MDS_OP_RENAME) &&
- (!mdr->has_more() || mdr->more()->witnessed.empty())) // haven't started slave request
+ (!mdr->has_more() || mdr->more()->witnessed.empty())) // haven't started peer request
) {
dout(20) << __func__ << ": full, responding ENOSPC to op " << ceph_mds_op_name(req->get_op()) << dendl;
// ---------------------------------------
-// SLAVE REQUESTS
+// PEER REQUESTS
-void Server::handle_slave_request(const cref_t<MMDSSlaveRequest> &m)
+void Server::handle_peer_request(const cref_t<MMDSPeerRequest> &m)
{
- dout(4) << "handle_slave_request " << m->get_reqid() << " from " << m->get_source() << dendl;
+ dout(4) << "handle_peer_request " << m->get_reqid() << " from " << m->get_source() << dendl;
mds_rank_t from = mds_rank_t(m->get_source().num());
- if (logger) logger->inc(l_mdss_handle_slave_request);
+ if (logger) logger->inc(l_mdss_handle_peer_request);
// reply?
if (m->is_reply())
- return handle_slave_request_reply(m);
+ return handle_peer_request_reply(m);
  // the purpose of rename notify is to enforce causal message ordering: it makes
  // sure bystanders have received all messages from the rename srcdn's auth MDS.
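  // (a sketch of why a bare ack suffices: messages on a given connection are
  // delivered in order, so merely receiving OP_RENAMENOTIFY implies every
  // earlier message from the srcdn auth has arrived; the OP_RENAMENOTIFYACK
  // then tells that auth it is safe to proceed.)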
- if (m->get_op() == MMDSSlaveRequest::OP_RENAMENOTIFY) {
- auto reply = make_message<MMDSSlaveRequest>(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK);
+ if (m->get_op() == MMDSPeerRequest::OP_RENAMENOTIFY) {
+ auto reply = make_message<MMDSPeerRequest>(m->get_reqid(), m->get_attempt(), MMDSPeerRequest::OP_RENAMENOTIFYACK);
mds->send_message(reply, m->get_connection());
return;
}
return;
}
- // am i a new slave?
+ // am i a new peer?
MDRequestRef mdr;
if (mdcache->have_request(m->get_reqid())) {
// existing?
<< ", closing out" << dendl;
mdcache->request_finish(mdr);
mdr.reset();
- } else if (mdr->slave_to_mds != from) {
- dout(10) << "local request " << *mdr << " not slave to mds." << from << dendl;
+ } else if (mdr->peer_to_mds != from) {
+ dout(10) << "local request " << *mdr << " not peer to mds." << from << dendl;
return;
}
- // may get these while mdr->slave_request is non-null
- if (m->get_op() == MMDSSlaveRequest::OP_DROPLOCKS) {
+ // may get these while mdr->peer_request is non-null
+ if (m->get_op() == MMDSPeerRequest::OP_DROPLOCKS) {
mds->locker->drop_locks(mdr.get());
return;
}
- if (m->get_op() == MMDSSlaveRequest::OP_FINISH) {
+ if (m->get_op() == MMDSPeerRequest::OP_FINISH) {
if (m->is_abort()) {
mdr->aborted = true;
- if (mdr->slave_request) {
+ if (mdr->peer_request) {
// only abort on-going xlock, wrlock and auth pin
- ceph_assert(!mdr->slave_did_prepare());
+ ceph_assert(!mdr->peer_did_prepare());
} else {
mdcache->request_finish(mdr);
}
}
if (!mdr.get()) {
// new?
- if (m->get_op() == MMDSSlaveRequest::OP_FINISH) {
- dout(10) << "missing slave request for " << m->get_reqid()
+ if (m->get_op() == MMDSPeerRequest::OP_FINISH) {
+ dout(10) << "missing peer request for " << m->get_reqid()
<< " OP_FINISH, must have lost race with a forward" << dendl;
return;
}
- mdr = mdcache->request_start_slave(m->get_reqid(), m->get_attempt(), m);
+ mdr = mdcache->request_start_peer(m->get_reqid(), m->get_attempt(), m);
mdr->set_op_stamp(m->op_stamp);
}
- ceph_assert(mdr->slave_request == 0); // only one at a time, please!
+ ceph_assert(mdr->peer_request == 0); // only one at a time, please!
if (straydn) {
mdr->pin(straydn);
return;
}
- mdr->reset_slave_request(m);
+ mdr->reset_peer_request(m);
- dispatch_slave_request(mdr);
+ dispatch_peer_request(mdr);
}
-void Server::handle_slave_request_reply(const cref_t<MMDSSlaveRequest> &m)
+void Server::handle_peer_request_reply(const cref_t<MMDSPeerRequest> &m)
{
mds_rank_t from = mds_rank_t(m->get_source().num());
if (!mds->is_clientreplay() && !mds->is_active() && !mds->is_stopping()) {
metareqid_t r = m->get_reqid();
if (!mdcache->have_uncommitted_leader(r, from)) {
- dout(10) << "handle_slave_request_reply ignoring slave reply from mds."
+ dout(10) << "handle_peer_request_reply ignoring peer reply from mds."
<< from << " reqid " << r << dendl;
return;
}
return;
}
- if (m->get_op() == MMDSSlaveRequest::OP_COMMITTED) {
+ if (m->get_op() == MMDSPeerRequest::OP_COMMITTED) {
metareqid_t r = m->get_reqid();
- mdcache->committed_leader_slave(r, from);
+ mdcache->committed_leader_peer(r, from);
return;
}
MDRequestRef mdr = mdcache->request_get(m->get_reqid());
if (m->get_attempt() != mdr->attempt) {
- dout(10) << "handle_slave_request_reply " << *mdr << " ignoring reply from other attempt "
+ dout(10) << "handle_peer_request_reply " << *mdr << " ignoring reply from other attempt "
<< m->get_attempt() << dendl;
return;
}
switch (m->get_op()) {
- case MMDSSlaveRequest::OP_XLOCKACK:
+ case MMDSPeerRequest::OP_XLOCKACK:
{
// identify lock, leader request
SimpleLock *lock = mds->locker->get_lock(m->get_lock_type(),
m->get_object_info());
- mdr->more()->slaves.insert(from);
+ mdr->more()->peers.insert(from);
lock->decode_locked_state(m->get_lock_data());
dout(10) << "got remote xlock on " << *lock << " on " << *lock->get_parent() << dendl;
mdr->emplace_lock(lock, MutationImpl::LockOp::XLOCK);
mdr->finish_locking(lock);
lock->get_xlock(mdr, mdr->get_client());
- ceph_assert(mdr->more()->waiting_on_slave.count(from));
- mdr->more()->waiting_on_slave.erase(from);
- ceph_assert(mdr->more()->waiting_on_slave.empty());
+ ceph_assert(mdr->more()->waiting_on_peer.count(from));
+ mdr->more()->waiting_on_peer.erase(from);
+ ceph_assert(mdr->more()->waiting_on_peer.empty());
mdcache->dispatch_request(mdr);
}
break;
- case MMDSSlaveRequest::OP_WRLOCKACK:
+ case MMDSPeerRequest::OP_WRLOCKACK:
{
// identify lock, leader request
SimpleLock *lock = mds->locker->get_lock(m->get_lock_type(),
m->get_object_info());
- mdr->more()->slaves.insert(from);
+ mdr->more()->peers.insert(from);
dout(10) << "got remote wrlock on " << *lock << " on " << *lock->get_parent() << dendl;
auto it = mdr->emplace_lock(lock, MutationImpl::LockOp::REMOTE_WRLOCK, from);
ceph_assert(it->is_remote_wrlock());
mdr->finish_locking(lock);
- ceph_assert(mdr->more()->waiting_on_slave.count(from));
- mdr->more()->waiting_on_slave.erase(from);
- ceph_assert(mdr->more()->waiting_on_slave.empty());
+ ceph_assert(mdr->more()->waiting_on_peer.count(from));
+ mdr->more()->waiting_on_peer.erase(from);
+ ceph_assert(mdr->more()->waiting_on_peer.empty());
mdcache->dispatch_request(mdr);
}
break;
- case MMDSSlaveRequest::OP_AUTHPINACK:
- handle_slave_auth_pin_ack(mdr, m);
+ case MMDSPeerRequest::OP_AUTHPINACK:
+ handle_peer_auth_pin_ack(mdr, m);
break;
- case MMDSSlaveRequest::OP_LINKPREPACK:
- handle_slave_link_prep_ack(mdr, m);
+ case MMDSPeerRequest::OP_LINKPREPACK:
+ handle_peer_link_prep_ack(mdr, m);
break;
- case MMDSSlaveRequest::OP_RMDIRPREPACK:
- handle_slave_rmdir_prep_ack(mdr, m);
+ case MMDSPeerRequest::OP_RMDIRPREPACK:
+ handle_peer_rmdir_prep_ack(mdr, m);
break;
- case MMDSSlaveRequest::OP_RENAMEPREPACK:
- handle_slave_rename_prep_ack(mdr, m);
+ case MMDSPeerRequest::OP_RENAMEPREPACK:
+ handle_peer_rename_prep_ack(mdr, m);
break;
- case MMDSSlaveRequest::OP_RENAMENOTIFYACK:
- handle_slave_rename_notify_ack(mdr, m);
+ case MMDSPeerRequest::OP_RENAMENOTIFYACK:
+ handle_peer_rename_notify_ack(mdr, m);
break;
default:
}
}
-void Server::dispatch_slave_request(MDRequestRef& mdr)
+void Server::dispatch_peer_request(MDRequestRef& mdr)
{
- dout(7) << "dispatch_slave_request " << *mdr << " " << *mdr->slave_request << dendl;
+ dout(7) << "dispatch_peer_request " << *mdr << " " << *mdr->peer_request << dendl;
if (mdr->aborted) {
dout(7) << " abort flag set, finishing" << dendl;
return;
}
- if (logger) logger->inc(l_mdss_dispatch_slave_request);
+ if (logger) logger->inc(l_mdss_dispatch_peer_request);
- int op = mdr->slave_request->get_op();
+ int op = mdr->peer_request->get_op();
switch (op) {
- case MMDSSlaveRequest::OP_XLOCK:
- case MMDSSlaveRequest::OP_WRLOCK:
+ case MMDSPeerRequest::OP_XLOCK:
+ case MMDSPeerRequest::OP_WRLOCK:
{
// identify object
- SimpleLock *lock = mds->locker->get_lock(mdr->slave_request->get_lock_type(),
- mdr->slave_request->get_object_info());
+ SimpleLock *lock = mds->locker->get_lock(mdr->peer_request->get_lock_type(),
+ mdr->peer_request->get_object_info());
if (!lock) {
dout(10) << "don't have object, dropping" << dendl;
      ceph_abort(); // can this happen if we auth pinned properly?
}
- if (op == MMDSSlaveRequest::OP_XLOCK && !lock->get_parent()->is_auth()) {
+ if (op == MMDSPeerRequest::OP_XLOCK && !lock->get_parent()->is_auth()) {
dout(10) << "not auth for remote xlock attempt, dropping on "
<< *lock << " on " << *lock->get_parent() << dendl;
} else {
int replycode = 0;
switch (op) {
- case MMDSSlaveRequest::OP_XLOCK:
+ case MMDSPeerRequest::OP_XLOCK:
lov.add_xlock(lock);
- replycode = MMDSSlaveRequest::OP_XLOCKACK;
+ replycode = MMDSPeerRequest::OP_XLOCKACK;
break;
- case MMDSSlaveRequest::OP_WRLOCK:
+ case MMDSPeerRequest::OP_WRLOCK:
lov.add_wrlock(lock);
- replycode = MMDSSlaveRequest::OP_WRLOCKACK;
+ replycode = MMDSPeerRequest::OP_WRLOCKACK;
break;
}
return;
// ack
- auto r = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, replycode);
+ auto r = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, replycode);
r->set_lock_type(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
- if (replycode == MMDSSlaveRequest::OP_XLOCKACK)
+ if (replycode == MMDSPeerRequest::OP_XLOCKACK)
lock->encode_locked_state(r->get_lock_data());
- mds->send_message(r, mdr->slave_request->get_connection());
+ mds->send_message(r, mdr->peer_request->get_connection());
}
// done.
- mdr->reset_slave_request();
+ mdr->reset_peer_request();
}
break;
- case MMDSSlaveRequest::OP_UNXLOCK:
- case MMDSSlaveRequest::OP_UNWRLOCK:
+ case MMDSPeerRequest::OP_UNXLOCK:
+ case MMDSPeerRequest::OP_UNWRLOCK:
{
- SimpleLock *lock = mds->locker->get_lock(mdr->slave_request->get_lock_type(),
- mdr->slave_request->get_object_info());
+ SimpleLock *lock = mds->locker->get_lock(mdr->peer_request->get_lock_type(),
+ mdr->peer_request->get_object_info());
ceph_assert(lock);
auto it = mdr->locks.find(lock);
ceph_assert(it != mdr->locks.end());
bool need_issue = false;
switch (op) {
- case MMDSSlaveRequest::OP_UNXLOCK:
+ case MMDSPeerRequest::OP_UNXLOCK:
mds->locker->xlock_finish(it, mdr.get(), &need_issue);
break;
- case MMDSSlaveRequest::OP_UNWRLOCK:
+ case MMDSPeerRequest::OP_UNWRLOCK:
mds->locker->wrlock_finish(it, mdr.get(), &need_issue);
break;
}
mds->locker->issue_caps(static_cast<CInode*>(lock->get_parent()));
// done. no ack necessary.
- mdr->reset_slave_request();
+ mdr->reset_peer_request();
}
break;
- case MMDSSlaveRequest::OP_AUTHPIN:
- handle_slave_auth_pin(mdr);
+ case MMDSPeerRequest::OP_AUTHPIN:
+ handle_peer_auth_pin(mdr);
break;
- case MMDSSlaveRequest::OP_LINKPREP:
- case MMDSSlaveRequest::OP_UNLINKPREP:
- handle_slave_link_prep(mdr);
+ case MMDSPeerRequest::OP_LINKPREP:
+ case MMDSPeerRequest::OP_UNLINKPREP:
+ handle_peer_link_prep(mdr);
break;
- case MMDSSlaveRequest::OP_RMDIRPREP:
- handle_slave_rmdir_prep(mdr);
+ case MMDSPeerRequest::OP_RMDIRPREP:
+ handle_peer_rmdir_prep(mdr);
break;
- case MMDSSlaveRequest::OP_RENAMEPREP:
- handle_slave_rename_prep(mdr);
+ case MMDSPeerRequest::OP_RENAMEPREP:
+ handle_peer_rename_prep(mdr);
break;
default:
}
}
-void Server::handle_slave_auth_pin(MDRequestRef& mdr)
+void Server::handle_peer_auth_pin(MDRequestRef& mdr)
{
- dout(10) << "handle_slave_auth_pin " << *mdr << dendl;
+ dout(10) << "handle_peer_auth_pin " << *mdr << dendl;
// build list of objects
list<MDSCacheObject*> objects;
CInode *auth_pin_freeze = NULL;
- bool nonblocking = mdr->slave_request->is_nonblocking();
+ bool nonblocking = mdr->peer_request->is_nonblocking();
bool fail = false, wouldblock = false, readonly = false;
- ref_t<MMDSSlaveRequest> reply;
+ ref_t<MMDSPeerRequest> reply;
if (mdcache->is_readonly()) {
dout(10) << " read-only FS" << dendl;
}
if (!fail) {
- for (const auto &oi : mdr->slave_request->get_authpins()) {
+ for (const auto &oi : mdr->peer_request->get_authpins()) {
MDSCacheObject *object = mdcache->get_object(oi);
if (!object) {
dout(10) << " don't have " << oi << dendl;
}
objects.push_back(object);
- if (oi == mdr->slave_request->get_authpin_freeze())
+ if (oi == mdr->peer_request->get_authpin_freeze())
auth_pin_freeze = static_cast<CInode*>(object);
}
}
mdr->more()->rename_inode != auth_pin_freeze)
mdr->unfreeze_auth_pin(true);
- /* handle_slave_rename_prep() call freeze_inode() to wait for all other operations
+ /* handle_peer_rename_prep() calls freeze_inode() to wait for all other operations
   * on the source inode to complete. This happens after all locks for the rename
   * operation are acquired. But to acquire locks, we need to auth pin the locks' parent
   * objects first. So there is an ABBA deadlock if someone auth pins the source inode
- * after locks are acquired and before Server::handle_slave_rename_prep() is called.
+ * after locks are acquired and before Server::handle_peer_rename_prep() is called.
   * The solution is to freeze the inode and prevent other MDRequests from getting new
   * auth pins.
*/
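  /* Illustrative interleaving of the hazard described above (a sketch, not a
   * trace from the code):
   *   req A (rename): auth pins the lock parents, takes its locks, then asks
   *                   to freeze srci; the freeze waits for other auth pins.
   *   req B:          auth pins srci after A's locks are taken, then blocks
   *                   waiting on one of A's locks.
   *   A waits on B's pin, B waits on A's lock: ABBA. Freezing srci up front
   *   denies B the new auth pin, breaking the cycle.
   */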
}
}
- reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK);
+ reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_AUTHPINACK);
if (fail) {
mdr->drop_local_auth_pins(); // just in case
}
}
- mds->send_message_mds(reply, mdr->slave_to_mds);
+ mds->send_message_mds(reply, mdr->peer_to_mds);
// clean up this request
- mdr->reset_slave_request();
+ mdr->reset_peer_request();
return;
blocked:
- if (mdr->slave_request->should_notify_blocking()) {
- reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK);
+ if (mdr->peer_request->should_notify_blocking()) {
+ reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_AUTHPINACK);
reply->mark_req_blocked();
- mds->send_message_mds(reply, mdr->slave_to_mds);
- mdr->slave_request->clear_notify_blocking();
+ mds->send_message_mds(reply, mdr->peer_to_mds);
+ mdr->peer_request->clear_notify_blocking();
}
return;
}
-void Server::handle_slave_auth_pin_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack)
+void Server::handle_peer_auth_pin_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack)
{
- dout(10) << "handle_slave_auth_pin_ack on " << *mdr << " " << *ack << dendl;
+ dout(10) << "handle_peer_auth_pin_ack on " << *mdr << " " << *ack << dendl;
mds_rank_t from = mds_rank_t(ack->get_source().num());
if (ack->is_req_blocked()) {
mdr->disable_lock_cache();
- // slave auth pin is blocked, drop locks to avoid deadlock
+ // peer auth pin is blocked, drop locks to avoid deadlock
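    // (a reading: the peer answered our nonblocking auth-pin attempt with
    // "blocked", so rather than hold our locks while the peer waits on
    // whatever blocks it, we release everything and retry from scratch;
    // neither request can then be stuck waiting on the other.)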
mds->locker->drop_locks(mdr.get(), nullptr);
return;
}
}
}
- // note slave
- mdr->more()->slaves.insert(from);
+ // note peer
+ mdr->more()->peers.insert(from);
// clear from waiting list
- auto ret = mdr->more()->waiting_on_slave.erase(from);
+ auto ret = mdr->more()->waiting_on_peer.erase(from);
ceph_assert(ret);
if (ack->is_error_rofs()) {
- mdr->more()->slave_error = -EROFS;
+ mdr->more()->peer_error = -EROFS;
} else if (ack->is_error_wouldblock()) {
- mdr->more()->slave_error = -EWOULDBLOCK;
+ mdr->more()->peer_error = -EWOULDBLOCK;
}
// go again?
- if (mdr->more()->waiting_on_slave.empty())
+ if (mdr->more()->waiting_on_peer.empty())
mdcache->dispatch_request(mdr);
else
- dout(10) << "still waiting on slaves " << mdr->more()->waiting_on_slave << dendl;
+ dout(10) << "still waiting on peers " << mdr->more()->waiting_on_peer << dendl;
}
if (mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(linkauth)) {
dout(10) << " targeti auth mds." << linkauth << " is not active" << dendl;
- if (mdr->more()->waiting_on_slave.empty())
+ if (mdr->more()->waiting_on_peer.empty())
mds->wait_for_active_peer(linkauth, new C_MDS_RetryRequest(mdcache, mdr));
return;
}
dout(10) << " targeti auth must prepare nlink++/--" << dendl;
int op;
if (inc)
- op = MMDSSlaveRequest::OP_LINKPREP;
+ op = MMDSPeerRequest::OP_LINKPREP;
else
- op = MMDSSlaveRequest::OP_UNLINKPREP;
- auto req = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, op);
+ op = MMDSPeerRequest::OP_UNLINKPREP;
+ auto req = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, op);
targeti->set_object_info(req->get_object_info());
req->op_stamp = mdr->get_op_stamp();
if (auto& desti_srnode = mdr->more()->desti_srnode)
encode(*desti_srnode, req->desti_snapbl);
mds->send_message_mds(req, linkauth);
- ceph_assert(mdr->more()->waiting_on_slave.count(linkauth) == 0);
- mdr->more()->waiting_on_slave.insert(linkauth);
+ ceph_assert(mdr->more()->waiting_on_peer.count(linkauth) == 0);
+ mdr->more()->waiting_on_peer.insert(linkauth);
return;
}
dout(10) << " targeti auth has prepared nlink++/--" << dendl;
mdlog->start_entry(le);
le->metablob.add_client_req(mdr->reqid, mdr->client_request->get_oldest_client_tid());
if (!mdr->more()->witnessed.empty()) {
- dout(20) << " noting uncommitted_slaves " << mdr->more()->witnessed << dendl;
+ dout(20) << " noting uncommitted_peers " << mdr->more()->witnessed << dendl;
le->reqid = mdr->reqid;
- le->had_slaves = true;
+ le->had_peers = true;
mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed);
}
// remote linking/unlinking
-class C_MDS_SlaveLinkPrep : public ServerLogContext {
+class C_MDS_PeerLinkPrep : public ServerLogContext {
CInode *targeti;
bool adjust_realm;
public:
- C_MDS_SlaveLinkPrep(Server *s, MDRequestRef& r, CInode *t, bool ar) :
+ C_MDS_PeerLinkPrep(Server *s, MDRequestRef& r, CInode *t, bool ar) :
ServerLogContext(s, r), targeti(t), adjust_realm(ar) { }
void finish(int r) override {
ceph_assert(r == 0);
- server->_logged_slave_link(mdr, targeti, adjust_realm);
+ server->_logged_peer_link(mdr, targeti, adjust_realm);
}
};
-class C_MDS_SlaveLinkCommit : public ServerContext {
+class C_MDS_PeerLinkCommit : public ServerContext {
MDRequestRef mdr;
CInode *targeti;
public:
- C_MDS_SlaveLinkCommit(Server *s, MDRequestRef& r, CInode *t) :
+ C_MDS_PeerLinkCommit(Server *s, MDRequestRef& r, CInode *t) :
ServerContext(s), mdr(r), targeti(t) { }
void finish(int r) override {
- server->_commit_slave_link(mdr, r, targeti);
+ server->_commit_peer_link(mdr, r, targeti);
}
};
-void Server::handle_slave_link_prep(MDRequestRef& mdr)
+void Server::handle_peer_link_prep(MDRequestRef& mdr)
{
- dout(10) << "handle_slave_link_prep " << *mdr
- << " on " << mdr->slave_request->get_object_info()
+ dout(10) << "handle_peer_link_prep " << *mdr
+ << " on " << mdr->peer_request->get_object_info()
<< dendl;
ceph_assert(g_conf()->mds_kill_link_at != 4);
- CInode *targeti = mdcache->get_inode(mdr->slave_request->get_object_info().ino);
+ CInode *targeti = mdcache->get_inode(mdr->peer_request->get_object_info().ino);
ceph_assert(targeti);
dout(10) << "targeti " << *targeti << dendl;
CDentry *dn = targeti->get_parent_dn();
CDentry::linkage_t *dnl = dn->get_linkage();
ceph_assert(dnl->is_primary());
- mdr->set_op_stamp(mdr->slave_request->op_stamp);
+ mdr->set_op_stamp(mdr->peer_request->op_stamp);
mdr->auth_pin(targeti);
- //ceph_abort(); // test hack: make sure leader can handle a slave that fails to prepare...
+ //ceph_abort(); // test hack: make sure leader can handle a peer that fails to prepare...
ceph_assert(g_conf()->mds_kill_link_at != 5);
// journal it
mdr->ls = mdlog->get_current_segment();
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_link_prep", mdr->reqid, mdr->slave_to_mds,
- ESlaveUpdate::OP_PREPARE, ESlaveUpdate::LINK);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_link_prep", mdr->reqid, mdr->peer_to_mds,
+ EPeerUpdate::OP_PREPARE, EPeerUpdate::LINK);
mdlog->start_entry(le);
auto &pi = dnl->get_inode()->project_inode();
bool inc;
bool adjust_realm = false;
bool realm_projected = false;
- if (mdr->slave_request->get_op() == MMDSSlaveRequest::OP_LINKPREP) {
+ if (mdr->peer_request->get_op() == MMDSPeerRequest::OP_LINKPREP) {
inc = true;
pi.inode.nlink++;
if (!targeti->is_projected_snaprealm_global()) {
inc = false;
pi.inode.nlink--;
if (targeti->is_projected_snaprealm_global()) {
- ceph_assert(mdr->slave_request->desti_snapbl.length());
- auto p = mdr->slave_request->desti_snapbl.cbegin();
+ ceph_assert(mdr->peer_request->desti_snapbl.length());
+ auto p = mdr->peer_request->desti_snapbl.cbegin();
sr_t *newsnap = targeti->project_snaprealm();
decode(*newsnap, p);
realm_projected = true;
} else {
- ceph_assert(mdr->slave_request->desti_snapbl.length() == 0);
+ ceph_assert(mdr->peer_request->desti_snapbl.length() == 0);
}
}
// commit case
mdcache->predirty_journal_parents(mdr, &le->commit, dnl->get_inode(), 0, PREDIRTY_SHALLOW|PREDIRTY_PRIMARY);
mdcache->journal_dirty_inode(mdr.get(), &le->commit, targeti);
- mdcache->add_uncommitted_slave(mdr->reqid, mdr->ls, mdr->slave_to_mds);
+ mdcache->add_uncommitted_peer(mdr->reqid, mdr->ls, mdr->peer_to_mds);
// set up commit waiter
- mdr->more()->slave_commit = new C_MDS_SlaveLinkCommit(this, mdr, targeti);
+ mdr->more()->peer_commit = new C_MDS_PeerLinkCommit(this, mdr, targeti);
- mdr->more()->slave_update_journaled = true;
- submit_mdlog_entry(le, new C_MDS_SlaveLinkPrep(this, mdr, targeti, adjust_realm),
+ mdr->more()->peer_update_journaled = true;
+ submit_mdlog_entry(le, new C_MDS_PeerLinkPrep(this, mdr, targeti, adjust_realm),
mdr, __func__);
mdlog->flush();
}
-void Server::_logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm)
+void Server::_logged_peer_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm)
{
- dout(10) << "_logged_slave_link " << *mdr
+ dout(10) << "_logged_peer_link " << *mdr
<< " " << *targeti << dendl;
ceph_assert(g_conf()->mds_kill_link_at != 6);
mds->balancer->hit_inode(targeti, META_POP_IWR);
// done.
- mdr->reset_slave_request();
+ mdr->reset_peer_request();
if (adjust_realm) {
int op = CEPH_SNAP_OP_SPLIT;
// ack
if (!mdr->aborted) {
- auto reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK);
- mds->send_message_mds(reply, mdr->slave_to_mds);
+ auto reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_LINKPREPACK);
+ mds->send_message_mds(reply, mdr->peer_to_mds);
} else {
dout(10) << " abort flag set, finishing" << dendl;
mdcache->request_finish(mdr);
}
-struct C_MDS_CommittedSlave : public ServerLogContext {
- C_MDS_CommittedSlave(Server *s, MDRequestRef& m) : ServerLogContext(s, m) {}
+struct C_MDS_CommittedPeer : public ServerLogContext {
+ C_MDS_CommittedPeer(Server *s, MDRequestRef& m) : ServerLogContext(s, m) {}
void finish(int r) override {
- server->_committed_slave(mdr);
+ server->_committed_peer(mdr);
}
};
-void Server::_commit_slave_link(MDRequestRef& mdr, int r, CInode *targeti)
+void Server::_commit_peer_link(MDRequestRef& mdr, int r, CInode *targeti)
{
- dout(10) << "_commit_slave_link " << *mdr
+ dout(10) << "_commit_peer_link " << *mdr
<< " r=" << r
<< " " << *targeti << dendl;
mdr->cleanup();
// write a commit to the journal
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_link_commit", mdr->reqid, mdr->slave_to_mds,
- ESlaveUpdate::OP_COMMIT, ESlaveUpdate::LINK);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_link_commit", mdr->reqid, mdr->peer_to_mds,
+ EPeerUpdate::OP_COMMIT, EPeerUpdate::LINK);
mdlog->start_entry(le);
- submit_mdlog_entry(le, new C_MDS_CommittedSlave(this, mdr), mdr, __func__);
+ submit_mdlog_entry(le, new C_MDS_CommittedPeer(this, mdr), mdr, __func__);
mdlog->flush();
} else {
- do_link_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr);
+ do_link_rollback(mdr->more()->rollback_bl, mdr->peer_to_mds, mdr);
}
}
-void Server::_committed_slave(MDRequestRef& mdr)
+void Server::_committed_peer(MDRequestRef& mdr)
{
- dout(10) << "_committed_slave " << *mdr << dendl;
+ dout(10) << "_committed_peer " << *mdr << dendl;
ceph_assert(g_conf()->mds_kill_link_at != 8);
- bool assert_exist = mdr->more()->slave_update_journaled;
- mdcache->finish_uncommitted_slave(mdr->reqid, assert_exist);
- auto req = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED);
- mds->send_message_mds(req, mdr->slave_to_mds);
+ bool assert_exist = mdr->more()->peer_update_journaled;
+ mdcache->finish_uncommitted_peer(mdr->reqid, assert_exist);
+ auto req = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_COMMITTED);
+ mds->send_message_mds(req, mdr->peer_to_mds);
mdcache->request_finish(mdr);
}
CInode *in = mdcache->get_inode(rollback.ino);
ceph_assert(in);
dout(10) << " target is " << *in << dendl;
- ceph_assert(!in->is_projected()); // live slave request hold versionlock xlock.
+ ceph_assert(!in->is_projected()); // a live peer request holds the versionlock xlock.
auto &pi = in->project_inode();
pi.inode.version = in->pre_dirty();
}
// journal it
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_link_rollback", rollback.reqid, leader,
- ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::LINK);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_link_rollback", rollback.reqid, leader,
+ EPeerUpdate::OP_ROLLBACK, EPeerUpdate::LINK);
mdlog->start_entry(le);
le->commit.add_dir_context(parent);
le->commit.add_dir(parent, true);
}
-void Server::handle_slave_link_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &m)
+void Server::handle_peer_link_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m)
{
- dout(10) << "handle_slave_link_prep_ack " << *mdr
+ dout(10) << "handle_peer_link_prep_ack " << *mdr
<< " " << *m << dendl;
mds_rank_t from = mds_rank_t(m->get_source().num());
ceph_assert(g_conf()->mds_kill_link_at != 11);
- // note slave
- mdr->more()->slaves.insert(from);
+ // note peer
+ mdr->more()->peers.insert(from);
// witnessed!
ceph_assert(mdr->more()->witnessed.count(from) == 0);
mdr->more()->witnessed.insert(from);
ceph_assert(!m->is_not_journaled());
- mdr->more()->has_journaled_slaves = true;
+ mdr->more()->has_journaled_peers = true;
// remove from waiting list
- ceph_assert(mdr->more()->waiting_on_slave.count(from));
- mdr->more()->waiting_on_slave.erase(from);
+ ceph_assert(mdr->more()->waiting_on_peer.count(from));
+ mdr->more()->waiting_on_peer.erase(from);
- ceph_assert(mdr->more()->waiting_on_slave.empty());
+ ceph_assert(mdr->more()->waiting_on_peer.empty());
dispatch_client_request(mdr); // go again!
}
in->clear_snaprealm_global(new_srnode);
mdr->more()->desti_srnode = new_srnode;
} else if (dnl->is_primary()) {
- // prepare snaprealm blob for slave request
+ // prepare snaprealm blob for peer request
SnapRealm *realm = in->find_snaprealm();
snapid_t follows = realm->get_newest_seq();
if (in->snaprealm || follows + 1 > in->get_oldest_snap()) {
++p) {
if (mdr->more()->witnessed.count(*p)) {
dout(10) << " already witnessed by mds." << *p << dendl;
- } else if (mdr->more()->waiting_on_slave.count(*p)) {
+ } else if (mdr->more()->waiting_on_peer.count(*p)) {
dout(10) << " already waiting on witness mds." << *p << dendl;
} else {
if (!_rmdir_prepare_witness(mdr, *p, mdr->dn[0], straydn))
return;
}
}
- if (!mdr->more()->waiting_on_slave.empty())
+ if (!mdr->more()->waiting_on_peer.empty())
return; // we're waiting for a witness.
}
mdlog->start_entry(le);
le->metablob.add_client_req(mdr->reqid, mdr->client_request->get_oldest_client_tid());
if (!mdr->more()->witnessed.empty()) {
- dout(20) << " noting uncommitted_slaves " << mdr->more()->witnessed << dendl;
+ dout(20) << " noting uncommitted_peers " << mdr->more()->witnessed << dendl;
le->reqid = mdr->reqid;
- le->had_slaves = true;
+ le->had_peers = true;
mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed);
}
if (mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(who)) {
dout(10) << "_rmdir_prepare_witness mds." << who << " is not active" << dendl;
- if (mdr->more()->waiting_on_slave.empty())
+ if (mdr->more()->waiting_on_peer.empty())
mds->wait_for_active_peer(who, new C_MDS_RetryRequest(mdcache, mdr));
return false;
}
dout(10) << "_rmdir_prepare_witness mds." << who << dendl;
- auto req = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP);
+ auto req = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RMDIRPREP);
req->srcdnpath = filepath(trace.front()->get_dir()->ino());
for (auto dn : trace)
req->srcdnpath.push_dentry(dn->get_name());
req->op_stamp = mdr->get_op_stamp();
mds->send_message_mds(req, who);
- ceph_assert(mdr->more()->waiting_on_slave.count(who) == 0);
- mdr->more()->waiting_on_slave.insert(who);
+ ceph_assert(mdr->more()->waiting_on_peer.count(who) == 0);
+ mdr->more()->waiting_on_peer.insert(who);
return true;
}
-struct C_MDS_SlaveRmdirPrep : public ServerLogContext {
+struct C_MDS_PeerRmdirPrep : public ServerLogContext {
CDentry *dn, *straydn;
- C_MDS_SlaveRmdirPrep(Server *s, MDRequestRef& r, CDentry *d, CDentry *st)
+ C_MDS_PeerRmdirPrep(Server *s, MDRequestRef& r, CDentry *d, CDentry *st)
: ServerLogContext(s, r), dn(d), straydn(st) {}
void finish(int r) override {
- server->_logged_slave_rmdir(mdr, dn, straydn);
+ server->_logged_peer_rmdir(mdr, dn, straydn);
}
};
-struct C_MDS_SlaveRmdirCommit : public ServerContext {
+struct C_MDS_PeerRmdirCommit : public ServerContext {
MDRequestRef mdr;
CDentry *straydn;
- C_MDS_SlaveRmdirCommit(Server *s, MDRequestRef& r, CDentry *sd)
+ C_MDS_PeerRmdirCommit(Server *s, MDRequestRef& r, CDentry *sd)
: ServerContext(s), mdr(r), straydn(sd) { }
void finish(int r) override {
- server->_commit_slave_rmdir(mdr, r, straydn);
+ server->_commit_peer_rmdir(mdr, r, straydn);
}
};
-void Server::handle_slave_rmdir_prep(MDRequestRef& mdr)
+void Server::handle_peer_rmdir_prep(MDRequestRef& mdr)
{
- dout(10) << "handle_slave_rmdir_prep " << *mdr
- << " " << mdr->slave_request->srcdnpath
- << " to " << mdr->slave_request->destdnpath
+ dout(10) << "handle_peer_rmdir_prep " << *mdr
+ << " " << mdr->peer_request->srcdnpath
+ << " to " << mdr->peer_request->destdnpath
<< dendl;
vector<CDentry*> trace;
- filepath srcpath(mdr->slave_request->srcdnpath);
+ filepath srcpath(mdr->peer_request->srcdnpath);
dout(10) << " src " << srcpath << dendl;
CInode *in;
CF_MDS_MDRContextFactory cf(mdcache, mdr, false);
if (r > 0) return;
if (r == -ESTALE) {
mdcache->find_ino_peers(srcpath.get_ino(), new C_MDS_RetryRequest(mdcache, mdr),
- mdr->slave_to_mds, true);
+ mdr->peer_to_mds, true);
return;
}
ceph_assert(r == 0);
CDentry *straydn = mdr->straydn;
dout(10) << " straydn " << *straydn << dendl;
- mdr->set_op_stamp(mdr->slave_request->op_stamp);
+ mdr->set_op_stamp(mdr->peer_request->op_stamp);
rmdir_rollback rollback;
rollback.reqid = mdr->reqid;
rollback.src_dname = dn->get_name();
rollback.dest_dir = straydn->get_dir()->dirfrag();
rollback.dest_dname = straydn->get_name();
- if (mdr->slave_request->desti_snapbl.length()) {
+ if (mdr->peer_request->desti_snapbl.length()) {
if (in->snaprealm) {
encode(true, rollback.snapbl);
in->encode_snap_blob(rollback.snapbl);
dout(20) << " rollback is " << mdr->more()->rollback_bl.length() << " bytes" << dendl;
// set up commit waiter
- mdr->more()->slave_commit = new C_MDS_SlaveRmdirCommit(this, mdr, straydn);
+ mdr->more()->peer_commit = new C_MDS_PeerRmdirCommit(this, mdr, straydn);
straydn->push_projected_linkage(in);
dn->push_projected_linkage();
if (!in->has_subtree_root_dirfrag(mds->get_nodeid())) {
dout(10) << " no auth subtree in " << *in << ", skipping journal" << dendl;
- _logged_slave_rmdir(mdr, dn, straydn);
+ _logged_peer_rmdir(mdr, dn, straydn);
return;
}
mdr->ls = mdlog->get_current_segment();
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rmdir", mdr->reqid, mdr->slave_to_mds,
- ESlaveUpdate::OP_PREPARE, ESlaveUpdate::RMDIR);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rmdir", mdr->reqid, mdr->peer_to_mds,
+ EPeerUpdate::OP_PREPARE, EPeerUpdate::RMDIR);
mdlog->start_entry(le);
le->rollback = mdr->more()->rollback_bl;
le->commit.add_dir_context(straydn->get_dir());
le->commit.add_primary_dentry(straydn, in, true);
- // slave: no need to journal original dentry
+ // peer: no need to journal original dentry
dout(10) << " noting renamed (unlinked) dir ino " << in->ino() << " in metablob" << dendl;
le->commit.renamed_dirino = in->ino();
mdcache->project_subtree_rename(in, dn->get_dir(), straydn->get_dir());
- mdcache->add_uncommitted_slave(mdr->reqid, mdr->ls, mdr->slave_to_mds);
+ mdcache->add_uncommitted_peer(mdr->reqid, mdr->ls, mdr->peer_to_mds);
- mdr->more()->slave_update_journaled = true;
- submit_mdlog_entry(le, new C_MDS_SlaveRmdirPrep(this, mdr, dn, straydn),
+ mdr->more()->peer_update_journaled = true;
+ submit_mdlog_entry(le, new C_MDS_PeerRmdirPrep(this, mdr, dn, straydn),
mdr, __func__);
mdlog->flush();
}
-void Server::_logged_slave_rmdir(MDRequestRef& mdr, CDentry *dn, CDentry *straydn)
+void Server::_logged_peer_rmdir(MDRequestRef& mdr, CDentry *dn, CDentry *straydn)
{
- dout(10) << "_logged_slave_rmdir " << *mdr << " on " << *dn << dendl;
+ dout(10) << "_logged_peer_rmdir " << *mdr << " on " << *dn << dendl;
CInode *in = dn->get_linkage()->get_inode();
bool new_realm;
- if (mdr->slave_request->desti_snapbl.length()) {
+ if (mdr->peer_request->desti_snapbl.length()) {
new_realm = !in->snaprealm;
- in->decode_snap_blob(mdr->slave_request->desti_snapbl);
+ in->decode_snap_blob(mdr->peer_request->desti_snapbl);
ceph_assert(in->snaprealm);
ceph_assert(in->snaprealm->have_past_parents_open());
} else {
straydn->pop_projected_linkage();
dn->pop_projected_linkage();
- mdcache->adjust_subtree_after_rename(in, dn->get_dir(), mdr->more()->slave_update_journaled);
+ mdcache->adjust_subtree_after_rename(in, dn->get_dir(), mdr->more()->peer_update_journaled);
if (new_realm)
mdcache->do_realm_invalidate_and_update_notify(in, CEPH_SNAP_OP_SPLIT, false);
// done.
- mdr->reset_slave_request();
+ mdr->reset_peer_request();
mdr->straydn = 0;
if (!mdr->aborted) {
- auto reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK);
- if (!mdr->more()->slave_update_journaled)
+ auto reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RMDIRPREPACK);
+ if (!mdr->more()->peer_update_journaled)
reply->mark_not_journaled();
- mds->send_message_mds(reply, mdr->slave_to_mds);
+ mds->send_message_mds(reply, mdr->peer_to_mds);
} else {
dout(10) << " abort flag set, finishing" << dendl;
mdcache->request_finish(mdr);
}
}
-void Server::handle_slave_rmdir_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack)
+void Server::handle_peer_rmdir_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack)
{
- dout(10) << "handle_slave_rmdir_prep_ack " << *mdr
+ dout(10) << "handle_peer_rmdir_prep_ack " << *mdr
<< " " << *ack << dendl;
mds_rank_t from = mds_rank_t(ack->get_source().num());
- mdr->more()->slaves.insert(from);
+ mdr->more()->peers.insert(from);
mdr->more()->witnessed.insert(from);
if (!ack->is_not_journaled())
- mdr->more()->has_journaled_slaves = true;
+ mdr->more()->has_journaled_peers = true;
// remove from waiting list
- ceph_assert(mdr->more()->waiting_on_slave.count(from));
- mdr->more()->waiting_on_slave.erase(from);
+ ceph_assert(mdr->more()->waiting_on_peer.count(from));
+ mdr->more()->waiting_on_peer.erase(from);
- if (mdr->more()->waiting_on_slave.empty())
+ if (mdr->more()->waiting_on_peer.empty())
dispatch_client_request(mdr); // go again!
else
- dout(10) << "still waiting on slaves " << mdr->more()->waiting_on_slave << dendl;
+ dout(10) << "still waiting on peers " << mdr->more()->waiting_on_peer << dendl;
}
-void Server::_commit_slave_rmdir(MDRequestRef& mdr, int r, CDentry *straydn)
+void Server::_commit_peer_rmdir(MDRequestRef& mdr, int r, CDentry *straydn)
{
- dout(10) << "_commit_slave_rmdir " << *mdr << " r=" << r << dendl;
+ dout(10) << "_commit_peer_rmdir " << *mdr << " r=" << r << dendl;
if (r == 0) {
- if (mdr->more()->slave_update_journaled) {
+ if (mdr->more()->peer_update_journaled) {
CInode *strayin = straydn->get_projected_linkage()->get_inode();
if (strayin && !strayin->snaprealm)
mdcache->clear_dirty_bits_for_stray(strayin);
mdr->cleanup();
- if (mdr->more()->slave_update_journaled) {
+ if (mdr->more()->peer_update_journaled) {
// write a commit to the journal
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rmdir_commit", mdr->reqid,
- mdr->slave_to_mds, ESlaveUpdate::OP_COMMIT,
- ESlaveUpdate::RMDIR);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rmdir_commit", mdr->reqid,
+ mdr->peer_to_mds, EPeerUpdate::OP_COMMIT,
+ EPeerUpdate::RMDIR);
mdlog->start_entry(le);
- submit_mdlog_entry(le, new C_MDS_CommittedSlave(this, mdr), mdr, __func__);
+ submit_mdlog_entry(le, new C_MDS_CommittedPeer(this, mdr), mdr, __func__);
mdlog->flush();
} else {
- _committed_slave(mdr);
+ _committed_peer(mdr);
}
} else {
// abort
- do_rmdir_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr);
+ do_rmdir_rollback(mdr->more()->rollback_bl, mdr->peer_to_mds, mdr);
}
}
}
}
- if (mdr && !mdr->more()->slave_update_journaled) {
+ if (mdr && !mdr->more()->peer_update_journaled) {
ceph_assert(!in->has_subtree_root_dirfrag(mds->get_nodeid()));
_rmdir_rollback_finish(mdr, rollback.reqid, dn, straydn);
}
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rmdir_rollback", rollback.reqid, leader,
- ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::RMDIR);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rmdir_rollback", rollback.reqid, leader,
+ EPeerUpdate::OP_ROLLBACK, EPeerUpdate::RMDIR);
mdlog->start_entry(le);
le->commit.add_dir_context(dn->get_dir());
le->commit.add_primary_dentry(dn, in, true);
- // slave: no need to journal straydn
+ // peer: no need to journal straydn
dout(10) << " noting renamed (unlinked) dir ino " << in->ino() << " in metablob" << dendl;
le->commit.renamed_dirino = in->ino();
CInode *in = dn->get_linkage()->get_inode();
mdcache->adjust_subtree_after_rename(in, straydn->get_dir(),
- !mdr || mdr->more()->slave_update_journaled);
+ !mdr || mdr->more()->peer_update_journaled);
if (mds->is_resolve()) {
CDir *root = mdcache->get_subtree_root(straydn->get_dir());
if (*p == last) continue; // do it last!
if (mdr->more()->witnessed.count(*p)) {
dout(10) << " already witnessed by mds." << *p << dendl;
- } else if (mdr->more()->waiting_on_slave.count(*p)) {
+ } else if (mdr->more()->waiting_on_peer.count(*p)) {
dout(10) << " already waiting on witness mds." << *p << dendl;
} else {
if (!_rename_prepare_witness(mdr, *p, witnesses, srctrace, desttrace, straydn))
return;
}
}
- if (!mdr->more()->waiting_on_slave.empty())
+ if (!mdr->more()->waiting_on_peer.empty())
return; // we're waiting for a witness.
if (last != MDS_RANK_NONE && mdr->more()->witnessed.count(last) == 0) {
dout(10) << " preparing last witness (srcdn auth)" << dendl;
- ceph_assert(mdr->more()->waiting_on_slave.count(last) == 0);
+ ceph_assert(mdr->more()->waiting_on_peer.count(last) == 0);
_rename_prepare_witness(mdr, last, witnesses, srctrace, desttrace, straydn);
return;
}
- // test hack: bail after slave does prepare, so we can verify it's _live_ rollback.
- if (!mdr->more()->slaves.empty() && !srci->is_dir())
+ // test hack: bail after the peer does prepare, so we can verify it's a _live_ rollback.
+ if (!mdr->more()->peers.empty() && !srci->is_dir())
ceph_assert(g_conf()->mds_kill_rename_at != 3);
- if (!mdr->more()->slaves.empty() && srci->is_dir())
+ if (!mdr->more()->peers.empty() && srci->is_dir())
ceph_assert(g_conf()->mds_kill_rename_at != 4);
// -- declare now --
mdlog->start_entry(le);
le->metablob.add_client_req(mdr->reqid, mdr->client_request->get_oldest_client_tid());
if (!mdr->more()->witnessed.empty()) {
- dout(20) << " noting uncommitted_slaves " << mdr->more()->witnessed << dendl;
+ dout(20) << " noting uncommitted_peers " << mdr->more()->witnessed << dendl;
le->reqid = mdr->reqid;
- le->had_slaves = true;
+ le->had_peers = true;
mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed);
    // no need to send frozen auth pin to the recovering auth MDS of srci
CInode *in = destdnl->get_inode();
bool need_eval = mdr->more()->cap_imports.count(in);
- // test hack: test slave commit
- if (!mdr->more()->slaves.empty() && !in->is_dir())
+ // test hack: test peer commit
+ if (!mdr->more()->peers.empty() && !in->is_dir())
ceph_assert(g_conf()->mds_kill_rename_at != 5);
- if (!mdr->more()->slaves.empty() && in->is_dir())
+ if (!mdr->more()->peers.empty() && in->is_dir())
ceph_assert(g_conf()->mds_kill_rename_at != 6);
// bump popularity
if (mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(who)) {
dout(10) << "_rename_prepare_witness mds." << who << " is not active" << dendl;
- if (mdr->more()->waiting_on_slave.empty())
+ if (mdr->more()->waiting_on_peer.empty())
mds->wait_for_active_peer(who, new C_MDS_RetryRequest(mdcache, mdr));
return false;
}
dout(10) << "_rename_prepare_witness mds." << who << dendl;
- auto req = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP);
+ auto req = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMEPREP);
req->srcdnpath = filepath(srctrace.front()->get_dir()->ino());
for (auto dn : srctrace)
req->op_stamp = mdr->get_op_stamp();
mds->send_message_mds(req, who);
- ceph_assert(mdr->more()->waiting_on_slave.count(who) == 0);
- mdr->more()->waiting_on_slave.insert(who);
+ ceph_assert(mdr->more()->waiting_on_peer.count(who) == 0);
+ mdr->more()->waiting_on_peer.insert(who);
return true;
}
} else if (destdnl->is_remote()) {
if (oldin->is_auth()) {
sr_t *new_srnode = NULL;
- if (mdr->slave_request) {
- if (mdr->slave_request->desti_snapbl.length() > 0) {
+ if (mdr->peer_request) {
+ if (mdr->peer_request->desti_snapbl.length() > 0) {
new_srnode = new sr_t();
- auto p = mdr->slave_request->desti_snapbl.cbegin();
+ auto p = mdr->peer_request->desti_snapbl.cbegin();
decode(*new_srnode, p);
}
} else if (auto& desti_srnode = mdr->more()->desti_srnode) {
metablob->add_remote_dentry(destdn, true, srcdnl->get_remote_ino(), srcdnl->get_remote_d_type());
if (srci->is_auth() ) { // it's remote
- if (mdr->slave_request) {
- if (mdr->slave_request->srci_snapbl.length() > 0) {
+ if (mdr->peer_request) {
+ if (mdr->peer_request->srci_snapbl.length() > 0) {
sr_t *new_srnode = new sr_t();
- auto p = mdr->slave_request->srci_snapbl.cbegin();
+ auto p = mdr->peer_request->srci_snapbl.cbegin();
decode(*new_srnode, p);
srci->project_snaprealm(new_srnode);
}
if (srcdn->is_auth()) {
dout(10) << " journaling srcdn " << *srcdn << dendl;
mdcache->journal_cow_dentry(mdr.get(), metablob, srcdn, CEPH_NOSNAP, 0, srcdnl);
- // also journal the inode in case we need do slave rename rollback. It is Ok to add
+ // also journal the inode in case we need to do peer rename rollback. It is OK to add
    // both primary and NULL dentries, because during journal replay the null dentry is
    // processed after the primary dentry.
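    // (a worked reading of the note above: if replay sees both entries,
    //  applying the primary dentry first and the null dentry second leaves
    //  the dentry unlinked, which matches the committed rename.)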
if (srcdnl->is_primary() && !srci->is_dir() && !destdn->is_auth())
oldin->early_pop_projected_snaprealm();
new_oldin_snaprealm = (oldin->snaprealm && !hadrealm);
} else {
- ceph_assert(mdr->slave_request);
- if (mdr->slave_request->desti_snapbl.length()) {
+ ceph_assert(mdr->peer_request);
+ if (mdr->peer_request->desti_snapbl.length()) {
new_oldin_snaprealm = !oldin->snaprealm;
- oldin->decode_snap_blob(mdr->slave_request->desti_snapbl);
+ oldin->decode_snap_blob(mdr->peer_request->desti_snapbl);
ceph_assert(oldin->snaprealm);
ceph_assert(oldin->snaprealm->have_past_parents_open());
}
destdn->get_dir()->unlink_inode(destdn, false);
straydn->pop_projected_linkage();
- if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
+ if (mdr->is_peer() && !mdr->more()->peer_update_journaled)
ceph_assert(!straydn->is_projected()); // no other projected
// nlink-- targeti
destdn->get_dir()->unlink_inode(destdn, false);
if (oldin->is_auth()) {
oldin->pop_and_dirty_projected_inode(mdr->ls);
- } else if (mdr->slave_request) {
- if (mdr->slave_request->desti_snapbl.length() > 0) {
+ } else if (mdr->peer_request) {
+ if (mdr->peer_request->desti_snapbl.length() > 0) {
ceph_assert(oldin->snaprealm);
- oldin->decode_snap_blob(mdr->slave_request->desti_snapbl);
+ oldin->decode_snap_blob(mdr->peer_request->desti_snapbl);
}
} else if (auto& desti_srnode = mdr->more()->desti_srnode) {
delete desti_srnode;
in->early_pop_projected_snaprealm();
new_in_snaprealm = (in->snaprealm && !hadrealm);
} else {
- ceph_assert(mdr->slave_request);
- if (mdr->slave_request->srci_snapbl.length()) {
+ ceph_assert(mdr->peer_request);
+ if (mdr->peer_request->srci_snapbl.length()) {
new_in_snaprealm = !in->snaprealm;
- in->decode_snap_blob(mdr->slave_request->srci_snapbl);
+ in->decode_snap_blob(mdr->peer_request->srci_snapbl);
ceph_assert(in->snaprealm);
ceph_assert(in->snaprealm->have_past_parents_open());
}
if (!linkmerge) {
// destdn
destdnl = destdn->pop_projected_linkage();
- if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
+ if (mdr->is_peer() && !mdr->more()->peer_update_journaled)
ceph_assert(!destdn->is_projected()); // no other projected
destdn->link_remote(destdnl, in);
// in
if (in->is_auth()) {
in->pop_and_dirty_projected_inode(mdr->ls);
- } else if (mdr->slave_request) {
- if (mdr->slave_request->srci_snapbl.length() > 0) {
+ } else if (mdr->peer_request) {
+ if (mdr->peer_request->srci_snapbl.length() > 0) {
ceph_assert(in->snaprealm);
- in->decode_snap_blob(mdr->slave_request->srci_snapbl);
+ in->decode_snap_blob(mdr->peer_request->srci_snapbl);
}
} else if (auto& srci_srnode = mdr->more()->srci_srnode) {
delete srci_srnode;
destdn->get_dir()->unlink_inode(destdn, false);
}
destdnl = destdn->pop_projected_linkage();
- if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
+ if (mdr->is_peer() && !mdr->more()->peer_update_journaled)
ceph_assert(!destdn->is_projected()); // no other projected
// srcdn inode import?
if (srcdn->is_auth())
srcdn->mark_dirty(mdr->more()->pvmap[srcdn], mdr->ls);
srcdn->pop_projected_linkage();
- if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
+ if (mdr->is_peer() && !mdr->more()->peer_update_journaled)
ceph_assert(!srcdn->is_projected()); // no other projected
// apply remaining projected inodes (nested)
// ------------
-// SLAVE
+// PEER
-class C_MDS_SlaveRenamePrep : public ServerLogContext {
+class C_MDS_PeerRenamePrep : public ServerLogContext {
CDentry *srcdn, *destdn, *straydn;
public:
- C_MDS_SlaveRenamePrep(Server *s, MDRequestRef& m, CDentry *sr, CDentry *de, CDentry *st) :
+ C_MDS_PeerRenamePrep(Server *s, MDRequestRef& m, CDentry *sr, CDentry *de, CDentry *st) :
ServerLogContext(s, m), srcdn(sr), destdn(de), straydn(st) {}
void finish(int r) override {
- server->_logged_slave_rename(mdr, srcdn, destdn, straydn);
+ server->_logged_peer_rename(mdr, srcdn, destdn, straydn);
}
};
-class C_MDS_SlaveRenameCommit : public ServerContext {
+class C_MDS_PeerRenameCommit : public ServerContext {
MDRequestRef mdr;
CDentry *srcdn, *destdn, *straydn;
public:
- C_MDS_SlaveRenameCommit(Server *s, MDRequestRef& m, CDentry *sr, CDentry *de, CDentry *st) :
+ C_MDS_PeerRenameCommit(Server *s, MDRequestRef& m, CDentry *sr, CDentry *de, CDentry *st) :
ServerContext(s), mdr(m), srcdn(sr), destdn(de), straydn(st) {}
void finish(int r) override {
- server->_commit_slave_rename(mdr, r, srcdn, destdn, straydn);
+ server->_commit_peer_rename(mdr, r, srcdn, destdn, straydn);
}
};
-class C_MDS_SlaveRenameSessionsFlushed : public ServerContext {
+class C_MDS_PeerRenameSessionsFlushed : public ServerContext {
MDRequestRef mdr;
public:
- C_MDS_SlaveRenameSessionsFlushed(Server *s, MDRequestRef& r) :
+ C_MDS_PeerRenameSessionsFlushed(Server *s, MDRequestRef& r) :
ServerContext(s), mdr(r) {}
void finish(int r) override {
- server->_slave_rename_sessions_flushed(mdr);
+ server->_peer_rename_sessions_flushed(mdr);
}
};
-void Server::handle_slave_rename_prep(MDRequestRef& mdr)
+void Server::handle_peer_rename_prep(MDRequestRef& mdr)
{
- dout(10) << "handle_slave_rename_prep " << *mdr
- << " " << mdr->slave_request->srcdnpath
- << " to " << mdr->slave_request->destdnpath
+ dout(10) << "handle_peer_rename_prep " << *mdr
+ << " " << mdr->peer_request->srcdnpath
+ << " to " << mdr->peer_request->destdnpath
<< dendl;
- if (mdr->slave_request->is_interrupted()) {
- dout(10) << " slave request interrupted, sending noop reply" << dendl;
- auto reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+ if (mdr->peer_request->is_interrupted()) {
+ dout(10) << " peer request interrupted, sending noop reply" << dendl;
+ auto reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMEPREPACK);
reply->mark_interrupted();
- mds->send_message_mds(reply, mdr->slave_to_mds);
- mdr->reset_slave_request();
+ mds->send_message_mds(reply, mdr->peer_to_mds);
+ mdr->reset_peer_request();
return;
}
// discover destdn
- filepath destpath(mdr->slave_request->destdnpath);
+ filepath destpath(mdr->peer_request->destdnpath);
dout(10) << " dest " << destpath << dendl;
vector<CDentry*> trace;
CF_MDS_MDRContextFactory cf(mdcache, mdr, false);
if (r > 0) return;
if (r == -ESTALE) {
mdcache->find_ino_peers(destpath.get_ino(), new C_MDS_RetryRequest(mdcache, mdr),
- mdr->slave_to_mds, true);
+ mdr->peer_to_mds, true);
return;
}
ceph_assert(r == 0); // we shouldn't get an error here!
mdr->pin(destdn);
// discover srcdn
- filepath srcpath(mdr->slave_request->srcdnpath);
+ filepath srcpath(mdr->peer_request->srcdnpath);
dout(10) << " src " << srcpath << dendl;
CInode *srci = nullptr;
r = mdcache->path_traverse(mdr, cf, srcpath,
if (destdnl->is_primary() && !linkmerge)
ceph_assert(straydn);
- mdr->set_op_stamp(mdr->slave_request->op_stamp);
+ mdr->set_op_stamp(mdr->peer_request->op_stamp);
mdr->more()->srcdn_auth_mds = srcdn->authority().first;
// set up commit waiter (early, to clean up any freezing etc we do)
- if (!mdr->more()->slave_commit)
- mdr->more()->slave_commit = new C_MDS_SlaveRenameCommit(this, mdr, srcdn, destdn, straydn);
+ if (!mdr->more()->peer_commit)
+ mdr->more()->peer_commit = new C_MDS_PeerRenameCommit(this, mdr, srcdn, destdn, straydn);
// am i srcdn auth?
if (srcdn->is_auth()) {
/*
* set ambiguous auth for srci
* NOTE: we don't worry about ambiguous cache expire as we do
- * with subtree migrations because all slaves will pin
+ * with subtree migrations because all peers will pin
     * srcdn->get_inode() for the duration of this rename.
*/
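    // (a reading of the NOTE: since every peer keeps srcdn's inode pinned for
    // the whole rename, no replica can be trimmed mid-operation, so the
    // ambiguous-auth window can never surface through a cache expire.)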
mdr->set_ambiguous_auth(srcdnl->get_inode());
// just mark the source inode as ambiguous auth if more than two MDS are involved.
- // the leader will send another OP_RENAMEPREP slave request later.
- if (mdr->slave_request->witnesses.size() > 1) {
+ // the leader will send another OP_RENAMEPREP peer request later.
+ if (mdr->peer_request->witnesses.size() > 1) {
dout(10) << " set srci ambiguous auth; providing srcdn replica list" << dendl;
reply_witness = true;
}
// make sure bystanders have received all lock related messages
for (set<mds_rank_t>::iterator p = srcdnrep.begin(); p != srcdnrep.end(); ++p) {
- if (*p == mdr->slave_to_mds ||
+ if (*p == mdr->peer_to_mds ||
(mds->is_cluster_degraded() &&
!mds->mdsmap->is_clientreplay_or_active_or_stopping(*p)))
continue;
- auto notify = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY);
+ auto notify = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMENOTIFY);
mds->send_message_mds(notify, *p);
- mdr->more()->waiting_on_slave.insert(*p);
+ mdr->more()->waiting_on_peer.insert(*p);
}
// make sure clients have received all cap related messages
MDSGatherBuilder gather(g_ceph_context);
flush_client_sessions(export_client_set, gather);
if (gather.has_subs()) {
- mdr->more()->waiting_on_slave.insert(MDS_RANK_NONE);
- gather.set_finisher(new C_MDS_SlaveRenameSessionsFlushed(this, mdr));
+ mdr->more()->waiting_on_peer.insert(MDS_RANK_NONE);
+ gather.set_finisher(new C_MDS_PeerRenameSessionsFlushed(this, mdr));
gather.activate();
}
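  // (a reading of the gather above: MDS_RANK_NONE is a placeholder entry that
  // keeps waiting_on_peer non-empty until the client session flushes finish;
  // the flush finisher then clears it so the request can be re-dispatched.)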
}
// is witness list sufficient?
for (set<mds_rank_t>::iterator p = srcdnrep.begin(); p != srcdnrep.end(); ++p) {
- if (*p == mdr->slave_to_mds ||
- mdr->slave_request->witnesses.count(*p)) continue;
+ if (*p == mdr->peer_to_mds ||
+ mdr->peer_request->witnesses.count(*p)) continue;
dout(10) << " witness list insufficient; providing srcdn replica list" << dendl;
reply_witness = true;
break;
if (reply_witness) {
ceph_assert(!srcdnrep.empty());
- auto reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+ auto reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMEPREPACK);
reply->witnesses.swap(srcdnrep);
- mds->send_message_mds(reply, mdr->slave_to_mds);
- mdr->reset_slave_request();
+ mds->send_message_mds(reply, mdr->peer_to_mds);
+ mdr->reset_peer_request();
return;
}
dout(10) << " witness list sufficient: includes all srcdn replicas" << dendl;
- if (!mdr->more()->waiting_on_slave.empty()) {
+ if (!mdr->more()->waiting_on_peer.empty()) {
dout(10) << " still waiting for rename notify acks from "
- << mdr->more()->waiting_on_slave << dendl;
+ << mdr->more()->waiting_on_peer << dendl;
return;
}
} else if (srcdnl->is_primary() && srcdn->authority() != destdn->authority()) {
rollback.stray.dirfrag_old_rctime = straydn->get_dir()->get_projected_fnode()->rstat.rctime;
rollback.stray.dname = straydn->get_name();
}
- if (mdr->slave_request->desti_snapbl.length()) {
+ if (mdr->peer_request->desti_snapbl.length()) {
CInode *oldin = destdnl->get_inode();
if (oldin->snaprealm) {
encode(true, rollback.desti_snapbl);
encode(false, rollback.desti_snapbl);
}
}
- if (mdr->slave_request->srci_snapbl.length()) {
+ if (mdr->peer_request->srci_snapbl.length()) {
if (srci->snaprealm) {
encode(true, rollback.srci_snapbl);
srci->encode_snap_blob(rollback.srci_snapbl);
// journal.
mdr->ls = mdlog->get_current_segment();
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rename_prep", mdr->reqid, mdr->slave_to_mds,
- ESlaveUpdate::OP_PREPARE, ESlaveUpdate::RENAME);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rename_prep", mdr->reqid, mdr->peer_to_mds,
+ EPeerUpdate::OP_PREPARE, EPeerUpdate::RENAME);
mdlog->start_entry(le);
le->rollback = mdr->more()->rollback_bl;
- bufferlist blah; // inode import data... obviously not used if we're the slave
+ bufferlist blah; // inode import data... obviously not used if we're the peer
_rename_prepare(mdr, &le->commit, &blah, srcdn, destdn, straydn);
if (le->commit.empty()) {
dout(10) << " empty metablob, skipping journal" << dendl;
mdlog->cancel_entry(le);
mdr->ls = NULL;
- _logged_slave_rename(mdr, srcdn, destdn, straydn);
+ _logged_peer_rename(mdr, srcdn, destdn, straydn);
} else {
- mdcache->add_uncommitted_slave(mdr->reqid, mdr->ls, mdr->slave_to_mds);
- mdr->more()->slave_update_journaled = true;
- submit_mdlog_entry(le, new C_MDS_SlaveRenamePrep(this, mdr, srcdn, destdn, straydn),
+ mdcache->add_uncommitted_peer(mdr->reqid, mdr->ls, mdr->peer_to_mds);
+ mdr->more()->peer_update_journaled = true;
+ submit_mdlog_entry(le, new C_MDS_PeerRenamePrep(this, mdr, srcdn, destdn, straydn),
mdr, __func__);
mdlog->flush();
}
}
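
The hunks above implement the peer half of a two-phase update: the peer journals an EPeerUpdate OP_PREPARE and tracks it as uncommitted, then journals a commit or rollback record once the leader resolves the request. A minimal illustrative sketch of that lifecycle (stand-in names only, not Ceph code):

    #include <cstdint>
    #include <iostream>

    // Illustrative sketch only -- stand-in names, not Ceph code. The peer
    // journals OP_PREPARE and tracks the update as uncommitted
    // (add_uncommitted_peer above); when the leader resolves the request,
    // the peer journals either a commit or a rollback record.
    struct PeerUpdate {
      uint64_t reqid;
      bool journaled = false;  // analogous to mdr->more()->peer_update_journaled

      void prepare() {
        journaled = true;      // EPeerUpdate::OP_PREPARE hits the journal
        std::cout << "journal PREPARE for " << reqid << "\n";
      }
      void finish(bool leader_committed) {
        if (!journaled)
          return;              // nothing journaled, nothing to commit or undo
        std::cout << "journal " << (leader_committed ? "COMMIT" : "ROLLBACK")
                  << " for " << reqid << "\n";
      }
    };

    int main() {
      PeerUpdate u{42};
      u.prepare();
      u.finish(true);          // leader committed: write the COMMIT record
    }
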
-void Server::_logged_slave_rename(MDRequestRef& mdr,
+void Server::_logged_peer_rename(MDRequestRef& mdr,
CDentry *srcdn, CDentry *destdn, CDentry *straydn)
{
- dout(10) << "_logged_slave_rename " << *mdr << dendl;
+ dout(10) << "_logged_peer_rename " << *mdr << dendl;
// prepare ack
- ref_t<MMDSSlaveRequest> reply;
+ ref_t<MMDSPeerRequest> reply;
if (!mdr->aborted) {
- reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
- if (!mdr->more()->slave_update_journaled)
+ reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMEPREPACK);
+ if (!mdr->more()->peer_update_journaled)
reply->mark_not_journaled();
}
mds->balancer->hit_inode(destdnl->get_inode(), META_POP_IWR);
// done.
- mdr->reset_slave_request();
+ mdr->reset_peer_request();
mdr->straydn = 0;
if (reply) {
- mds->send_message_mds(reply, mdr->slave_to_mds);
+ mds->send_message_mds(reply, mdr->peer_to_mds);
} else {
ceph_assert(mdr->aborted);
dout(10) << " abort flag set, finishing" << dendl;
}
}
-void Server::_commit_slave_rename(MDRequestRef& mdr, int r,
+void Server::_commit_peer_rename(MDRequestRef& mdr, int r,
CDentry *srcdn, CDentry *destdn, CDentry *straydn)
{
- dout(10) << "_commit_slave_rename " << *mdr << " r=" << r << dendl;
+ dout(10) << "_commit_peer_rename " << *mdr << " r=" << r << dendl;
CInode *in = destdn->get_linkage()->get_inode();
decode(peer_imported, bp);
dout(10) << " finishing inode export on " << *in << dendl;
- mdcache->migrator->finish_export_inode(in, mdr->slave_to_mds, peer_imported, finished);
+ mdcache->migrator->finish_export_inode(in, mdr->peer_to_mds, peer_imported, finished);
mds->queue_waiters(finished); // this includes SINGLEAUTH waiters.
// unfreeze
mdr->more()->is_ambiguous_auth = false;
}
- if (straydn && mdr->more()->slave_update_journaled) {
+ if (straydn && mdr->more()->peer_update_journaled) {
CInode *strayin = straydn->get_projected_linkage()->get_inode();
if (strayin && !strayin->snaprealm)
mdcache->clear_dirty_bits_for_stray(strayin);
mds->queue_waiters(finished);
mdr->cleanup();
- if (mdr->more()->slave_update_journaled) {
+ if (mdr->more()->peer_update_journaled) {
// write a commit to the journal
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rename_commit", mdr->reqid,
- mdr->slave_to_mds, ESlaveUpdate::OP_COMMIT,
- ESlaveUpdate::RENAME);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rename_commit", mdr->reqid,
+ mdr->peer_to_mds, EPeerUpdate::OP_COMMIT,
+ EPeerUpdate::RENAME);
mdlog->start_entry(le);
- submit_mdlog_entry(le, new C_MDS_CommittedSlave(this, mdr), mdr, __func__);
+ submit_mdlog_entry(le, new C_MDS_CommittedPeer(this, mdr), mdr, __func__);
mdlog->flush();
} else {
- _committed_slave(mdr);
+ _committed_peer(mdr);
}
} else {
dout(10) << " reversing inode export of " << *in << dendl;
in->abort_export();
}
- if (mdcache->is_ambiguous_slave_update(mdr->reqid, mdr->slave_to_mds)) {
- mdcache->remove_ambiguous_slave_update(mdr->reqid, mdr->slave_to_mds);
- // rollback but preserve the slave request
- do_rename_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr, false);
+ if (mdcache->is_ambiguous_peer_update(mdr->reqid, mdr->peer_to_mds)) {
+ mdcache->remove_ambiguous_peer_update(mdr->reqid, mdr->peer_to_mds);
+ // rollback but preserve the peer request
+ do_rename_rollback(mdr->more()->rollback_bl, mdr->peer_to_mds, mdr, false);
mdr->more()->rollback_bl.clear();
} else
- do_rename_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr, true);
+ do_rename_rollback(mdr->more()->rollback_bl, mdr->peer_to_mds, mdr, true);
} else {
dout(10) << " rollback_bl empty, not rollback back rename (leader failed after getting extra witnesses?)" << dendl;
// singleauth
// can't use is_auth() in the resolve stage
mds_rank_t whoami = mds->get_nodeid();
- // slave
+ // peer
ceph_assert(!destdn || destdn->authority().first != whoami);
ceph_assert(!straydn || straydn->authority().first != whoami);
dout(0) << " desti back to " << *target << dendl;
// journal it
- ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rename_rollback", rollback.reqid, leader,
- ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::RENAME);
+ EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rename_rollback", rollback.reqid, leader,
+ EPeerUpdate::OP_ROLLBACK, EPeerUpdate::RENAME);
mdlog->start_entry(le);
if (srcdn && (srcdn->authority().first == whoami || force_journal_src)) {
le->commit.add_primary_dentry(destdn, 0, true);
}
- // slave: no need to journal straydn
+ // peer: no need to journal straydn
if (target && target != in && target->authority().first == whoami) {
ceph_assert(rollback.orig_dest.remote_ino);
mdcache->project_subtree_rename(in, destdir, srcdir);
}
- if (mdr && !mdr->more()->slave_update_journaled) {
+ if (mdr && !mdr->more()->peer_update_journaled) {
ceph_assert(le->commit.empty());
mdlog->cancel_entry(le);
mut->ls = NULL;
} else {
ceph_assert(!le->commit.empty());
if (mdr)
- mdr->more()->slave_update_journaled = false;
+ mdr->more()->peer_update_journaled = false;
MDSLogContextBase *fin = new C_MDS_LoggedRenameRollback(this, mut, mdr,
srcdn, srcdnpv, destdn, straydn,
splits, finish_mdr);
if (finish_mdr || mdr->aborted)
mdcache->request_finish(mdr);
else
- mdr->more()->slave_rolling_back = false;
+ mdr->more()->peer_rolling_back = false;
}
mdcache->finish_rollback(mut->reqid, mdr);
mut->cleanup();
}
-void Server::handle_slave_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack)
+void Server::handle_peer_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack)
{
- dout(10) << "handle_slave_rename_prep_ack " << *mdr
+ dout(10) << "handle_peer_rename_prep_ack " << *mdr
<< " witnessed by " << ack->get_source()
<< " " << *ack << dendl;
mds_rank_t from = mds_rank_t(ack->get_source().num());
- // note slave
- mdr->more()->slaves.insert(from);
+ // note peer
+ mdr->more()->peers.insert(from);
if (mdr->more()->srcdn_auth_mds == from &&
mdr->more()->is_remote_frozen_authpin &&
!mdr->more()->is_ambiguous_auth) {
// witnessed? or add extra witnesses?
ceph_assert(mdr->more()->witnessed.count(from) == 0);
if (ack->is_interrupted()) {
- dout(10) << " slave request interrupted, noop" << dendl;
+ dout(10) << " peer request interrupted, noop" << dendl;
} else if (ack->witnesses.empty()) {
mdr->more()->witnessed.insert(from);
if (!ack->is_not_journaled())
- mdr->more()->has_journaled_slaves = true;
+ mdr->more()->has_journaled_peers = true;
} else {
dout(10) << " extra witnesses (srcdn replicas) are " << ack->witnesses << dendl;
mdr->more()->extra_witnesses = ack->witnesses;
}
// remove from waiting list
- ceph_assert(mdr->more()->waiting_on_slave.count(from));
- mdr->more()->waiting_on_slave.erase(from);
+ ceph_assert(mdr->more()->waiting_on_peer.count(from));
+ mdr->more()->waiting_on_peer.erase(from);
- if (mdr->more()->waiting_on_slave.empty())
+ if (mdr->more()->waiting_on_peer.empty())
dispatch_client_request(mdr); // go again!
else
- dout(10) << "still waiting on slaves " << mdr->more()->waiting_on_slave << dendl;
+ dout(10) << "still waiting on peers " << mdr->more()->waiting_on_peer << dendl;
}
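
A condensed sketch (stand-in types, not Ceph code) of the ack handling just above: an empty witness list in the ack means the sender journaled the prep and counts as a witness; a non-empty list names extra srcdn replicas the leader must also ask before dispatching again.

    #include <iostream>
    #include <set>

    struct Ack { std::set<int> witnesses; bool interrupted = false; };

    void handle_ack(int from, const Ack &ack,
                    std::set<int> &witnessed, std::set<int> &extra) {
      if (ack.interrupted)
        return;                            // noop; the request is retried later
      if (ack.witnesses.empty())
        witnessed.insert(from);            // sender is now a committed witness
      else
        extra = ack.witnesses;             // widen the witness set and go again
    }

    int main() {
      std::set<int> witnessed, extra;
      handle_ack(2, Ack{{3, 4}}, witnessed, extra);
      std::cout << extra.size() << " extra witnesses\n";  // prints 2
    }
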
-void Server::handle_slave_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack)
+void Server::handle_peer_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack)
{
- dout(10) << "handle_slave_rename_notify_ack " << *mdr << " from mds."
+ dout(10) << "handle_peer_rename_notify_ack " << *mdr << " from mds."
<< ack->get_source() << dendl;
- ceph_assert(mdr->is_slave());
+ ceph_assert(mdr->is_peer());
mds_rank_t from = mds_rank_t(ack->get_source().num());
- if (mdr->more()->waiting_on_slave.count(from)) {
- mdr->more()->waiting_on_slave.erase(from);
+ if (mdr->more()->waiting_on_peer.count(from)) {
+ mdr->more()->waiting_on_peer.erase(from);
- if (mdr->more()->waiting_on_slave.empty()) {
- if (mdr->slave_request)
- dispatch_slave_request(mdr);
+ if (mdr->more()->waiting_on_peer.empty()) {
+ if (mdr->peer_request)
+ dispatch_peer_request(mdr);
} else
dout(10) << " still waiting for rename notify acks from "
- << mdr->more()->waiting_on_slave << dendl;
+ << mdr->more()->waiting_on_peer << dendl;
}
}
-void Server::_slave_rename_sessions_flushed(MDRequestRef& mdr)
+void Server::_peer_rename_sessions_flushed(MDRequestRef& mdr)
{
- dout(10) << "_slave_rename_sessions_flushed " << *mdr << dendl;
+ dout(10) << "_peer_rename_sessions_flushed " << *mdr << dendl;
- if (mdr->more()->waiting_on_slave.count(MDS_RANK_NONE)) {
- mdr->more()->waiting_on_slave.erase(MDS_RANK_NONE);
+ if (mdr->more()->waiting_on_peer.count(MDS_RANK_NONE)) {
+ mdr->more()->waiting_on_peer.erase(MDS_RANK_NONE);
- if (mdr->more()->waiting_on_slave.empty()) {
- if (mdr->slave_request)
- dispatch_slave_request(mdr);
+ if (mdr->more()->waiting_on_peer.empty()) {
+ if (mdr->peer_request)
+ dispatch_peer_request(mdr);
} else
dout(10) << " still waiting for rename notify acks from "
- << mdr->more()->waiting_on_slave << dendl;
+ << mdr->more()->waiting_on_peer << dendl;
}
}
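
Both handlers above rely on the same trick: waiting_on_peer holds real MDS ranks for outstanding rename-notify acks plus the MDS_RANK_NONE sentinel for the client-session flush, so a single emptiness check gates the redispatch. An illustrative sketch (not Ceph code):

    #include <iostream>
    #include <set>

    using mds_rank_t = int;
    constexpr mds_rank_t MDS_RANK_NONE = -1;  // sentinel, as in the diff above

    int main() {
      std::set<mds_rank_t> waiting_on_peer = {1, 3, MDS_RANK_NONE};
      waiting_on_peer.erase(1);             // notify ack from mds.1
      waiting_on_peer.erase(3);             // notify ack from mds.3
      waiting_on_peer.erase(MDS_RANK_NONE); // session flush finished
      if (waiting_on_peer.empty())
        std::cout << "all acks in; dispatch the peer request again\n";
    }
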
enum {
l_mdss_first = 1000,
l_mdss_dispatch_client_request,
- l_mdss_dispatch_slave_request,
+ l_mdss_dispatch_peer_request,
l_mdss_handle_client_request,
l_mdss_handle_client_session,
- l_mdss_handle_slave_request,
+ l_mdss_handle_peer_request,
l_mdss_req_create_latency,
l_mdss_req_getattr_latency,
l_mdss_req_getfilelock_latency,
void set_trace_dist(const ref_t<MClientReply> &reply, CInode *in, CDentry *dn,
MDRequestRef& mdr);
- void handle_slave_request(const cref_t<MMDSSlaveRequest> &m);
- void handle_slave_request_reply(const cref_t<MMDSSlaveRequest> &m);
- void dispatch_slave_request(MDRequestRef& mdr);
- void handle_slave_auth_pin(MDRequestRef& mdr);
- void handle_slave_auth_pin_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack);
+ void handle_peer_request(const cref_t<MMDSPeerRequest> &m);
+ void handle_peer_request_reply(const cref_t<MMDSPeerRequest> &m);
+ void dispatch_peer_request(MDRequestRef& mdr);
+ void handle_peer_auth_pin(MDRequestRef& mdr);
+ void handle_peer_auth_pin_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack);
// some helpers
bool check_fragment_space(MDRequestRef& mdr, CDir *in);
void _link_remote_finish(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targeti,
version_t);
- void handle_slave_link_prep(MDRequestRef& mdr);
- void _logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm);
- void _commit_slave_link(MDRequestRef& mdr, int r, CInode *targeti);
- void _committed_slave(MDRequestRef& mdr); // use for rename, too
- void handle_slave_link_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &m);
+ void handle_peer_link_prep(MDRequestRef& mdr);
+ void _logged_peer_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm);
+ void _commit_peer_link(MDRequestRef& mdr, int r, CInode *targeti);
+ void _committed_peer(MDRequestRef& mdr); // use for rename, too
+ void handle_peer_link_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
void do_link_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr);
void _link_rollback_finish(MutationRef& mut, MDRequestRef& mdr,
map<client_t,ref_t<MClientSnap>>& split);
CDentry *dn, CDentry *straydn,
version_t);
bool _rmdir_prepare_witness(MDRequestRef& mdr, mds_rank_t who, vector<CDentry*>& trace, CDentry *straydn);
- void handle_slave_rmdir_prep(MDRequestRef& mdr);
- void _logged_slave_rmdir(MDRequestRef& mdr, CDentry *srcdn, CDentry *straydn);
- void _commit_slave_rmdir(MDRequestRef& mdr, int r, CDentry *straydn);
- void handle_slave_rmdir_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack);
+ void handle_peer_rmdir_prep(MDRequestRef& mdr);
+ void _logged_peer_rmdir(MDRequestRef& mdr, CDentry *srcdn, CDentry *straydn);
+ void _commit_peer_rmdir(MDRequestRef& mdr, int r, CDentry *straydn);
+ void handle_peer_rmdir_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack);
void do_rmdir_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr);
void _rmdir_rollback_finish(MDRequestRef& mdr, metareqid_t reqid, CDentry *dn, CDentry *straydn);
void _rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
-  // slaving
+  // peer requests
- void handle_slave_rename_prep(MDRequestRef& mdr);
- void handle_slave_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &m);
- void handle_slave_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &m);
- void _slave_rename_sessions_flushed(MDRequestRef& mdr);
- void _logged_slave_rename(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
- void _commit_slave_rename(MDRequestRef& mdr, int r, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
+ void handle_peer_rename_prep(MDRequestRef& mdr);
+ void handle_peer_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
+ void handle_peer_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
+ void _peer_rename_sessions_flushed(MDRequestRef& mdr);
+ void _logged_peer_rename(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
+ void _commit_peer_rename(MDRequestRef& mdr, int r, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
void do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr, bool finish_mdr=false);
void _rename_rollback_finish(MutationRef& mut, MDRequestRef& mdr, CDentry *srcdn, version_t srcdnpv,
			       CDentry *destdn, CDentry *straydn, map<client_t,ref_t<MClientSnap>> splits[2],
void get_xlock(MutationRef who, client_t client) {
ceph_assert(get_xlock_by() == MutationRef());
ceph_assert(state == LOCK_XLOCK || is_locallock() ||
- state == LOCK_LOCK /* if we are a slave */);
+ state == LOCK_LOCK /* if we are a peer */);
parent->get(MDSCacheObject::PIN_LOCK);
more()->num_xlock++;
more()->xlock_by = who;
void set_xlock_done() {
ceph_assert(more()->xlock_by);
ceph_assert(state == LOCK_XLOCK || is_locallock() ||
- state == LOCK_LOCK /* if we are a slave */);
+ state == LOCK_LOCK /* if we are a peer */);
if (!is_locallock())
state = LOCK_XLOCKDONE;
more()->xlock_by.reset();
void put_xlock() {
ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE ||
state == LOCK_XLOCKSNAP || state == LOCK_LOCK_XLOCK ||
- state == LOCK_LOCK || /* if we are a leader of a slave */
+ state == LOCK_LOCK || /* if we are a leader of a peer */
is_locallock());
--more()->num_xlock;
parent->put(MDSCacheObject::PIN_LOCK);
class MDSRank;
class MDLog;
class LogSegment;
-struct MDSlaveUpdate;
+struct MDPeerUpdate;
/*
* a bunch of metadata in the journal
}
void update_segment(LogSegment *ls);
- void replay(MDSRank *mds, LogSegment *ls, MDSlaveUpdate *su=NULL);
+ void replay(MDSRank *mds, LogSegment *ls, MDPeerUpdate *su=NULL);
};
WRITE_CLASS_ENCODER_FEATURES(EMetaBlob)
WRITE_CLASS_ENCODER_FEATURES(EMetaBlob::fullbit)
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#ifndef CEPH_MDS_EPEERUPDATE_H
+#define CEPH_MDS_EPEERUPDATE_H
+
+#include <string_view>
+
+#include "../LogEvent.h"
+#include "EMetaBlob.h"
+
+/*
+ * rollback records, for remote/peer updates, which may need to be manually
+ * rolled back during journal replay. (or while active if leader fails, but in
+ * that case these records aren't needed.)
+ */
+struct link_rollback {
+ metareqid_t reqid;
+ inodeno_t ino;
+ bool was_inc;
+ utime_t old_ctime;
+ utime_t old_dir_mtime;
+ utime_t old_dir_rctime;
+ bufferlist snapbl;
+
+ link_rollback() : ino(0), was_inc(false) {}
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& bl);
+ void dump(Formatter *f) const;
+ static void generate_test_instances(std::list<link_rollback*>& ls);
+};
+WRITE_CLASS_ENCODER(link_rollback)
+
+/*
+ * this is only used on an empty dir with a dirfrag on a remote node.
+ * we are auth for nothing. all we need to do is relink the directory
+ * in the hierarchy properly during replay to avoid breaking the
+ * subtree map.
+ */
+struct rmdir_rollback {
+ metareqid_t reqid;
+ dirfrag_t src_dir;
+ string src_dname;
+ dirfrag_t dest_dir;
+ string dest_dname;
+ bufferlist snapbl;
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& bl);
+ void dump(Formatter *f) const;
+ static void generate_test_instances(std::list<rmdir_rollback*>& ls);
+};
+WRITE_CLASS_ENCODER(rmdir_rollback)
+
+struct rename_rollback {
+ struct drec {
+ dirfrag_t dirfrag;
+ utime_t dirfrag_old_mtime;
+ utime_t dirfrag_old_rctime;
+ inodeno_t ino, remote_ino;
+ string dname;
+ char remote_d_type;
+ utime_t old_ctime;
+
+ drec() : remote_d_type((char)S_IFREG) {}
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& bl);
+ void dump(Formatter *f) const;
+ static void generate_test_instances(std::list<drec*>& ls);
+ };
+ WRITE_CLASS_MEMBER_ENCODER(drec)
+
+ metareqid_t reqid;
+ drec orig_src, orig_dest;
+ drec stray; // we know this is null, but we want dname, old mtime/rctime
+ utime_t ctime;
+ bufferlist srci_snapbl;
+ bufferlist desti_snapbl;
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& bl);
+ void dump(Formatter *f) const;
+ static void generate_test_instances(std::list<rename_rollback*>& ls);
+};
+WRITE_CLASS_ENCODER(rename_rollback::drec)
+WRITE_CLASS_ENCODER(rename_rollback)
+
+
+class EPeerUpdate : public LogEvent {
+public:
+ const static int OP_PREPARE = 1;
+ const static int OP_COMMIT = 2;
+ const static int OP_ROLLBACK = 3;
+
+ const static int LINK = 1;
+ const static int RENAME = 2;
+ const static int RMDIR = 3;
+
+ /*
+ * we journal a rollback metablob that contains the unmodified metadata
+ * too, because we may be updating previously dirty metadata, which
+   * will allow old log segments to be trimmed. if we end up rolling back,
+   * those updates could be lost... so we re-journal the unmodified metadata,
+ * and replay will apply _either_ commit or rollback.
+ */
+ EMetaBlob commit;
+ bufferlist rollback;
+ string type;
+ metareqid_t reqid;
+ mds_rank_t leader;
+  __u8 op;  // prepare, commit, rollback
+  __u8 origop;  // link | rename | rmdir
+
+ EPeerUpdate() : LogEvent(EVENT_PEERUPDATE), leader(0), op(0), origop(0) { }
+ EPeerUpdate(MDLog *mdlog, std::string_view s, metareqid_t ri, int leadermds, int o, int oo) :
+ LogEvent(EVENT_PEERUPDATE),
+ type(s),
+ reqid(ri),
+ leader(leadermds),
+ op(o), origop(oo) { }
+
+ void print(ostream& out) const override {
+ if (type.length())
+ out << type << " ";
+ out << " " << (int)op;
+    if (origop == LINK) out << " link";
+    if (origop == RENAME) out << " rename";
+    if (origop == RMDIR) out << " rmdir";
+ out << " " << reqid;
+ out << " for mds." << leader;
+ out << commit;
+ }
+
+ EMetaBlob *get_metablob() override { return &commit; }
+
+ void encode(bufferlist& bl, uint64_t features) const override;
+ void decode(bufferlist::const_iterator& bl) override;
+ void dump(Formatter *f) const override;
+ static void generate_test_instances(std::list<EPeerUpdate*>& ls);
+
+ void replay(MDSRank *mds) override;
+};
+WRITE_CLASS_ENCODER_FEATURES(EPeerUpdate)
+
+#endif
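
The comment inside EPeerUpdate explains why the event carries both a commit metablob and a rollback blob. A toy illustration of that idea (not Ceph code): journal both the new and the unmodified old state, so replay can apply either outcome without consulting the leader.

    #include <iostream>
    #include <map>
    #include <string>

    struct ToyPeerUpdate {
      std::string commit;    // redo blob
      std::string rollback;  // undo blob
    };

    int main() {
      std::map<std::string, std::string> metadata{{"dentry", "old"}};
      ToyPeerUpdate u{"new", metadata["dentry"]};  // journal both states
      bool leader_committed = false;               // leader aborted this run
      metadata["dentry"] = leader_committed ? u.commit : u.rollback;
      std::cout << metadata["dentry"] << "\n";     // prints "old"
    }
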
+++ /dev/null
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
-// vim: ts=8 sw=2 smarttab
-/*
- * Ceph - scalable distributed file system
- *
- * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
- *
- * This is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License version 2.1, as published by the Free Software
- * Foundation. See file COPYING.
- *
- */
-
-#ifndef CEPH_MDS_ESLAVEUPDATE_H
-#define CEPH_MDS_ESLAVEUPDATE_H
-
-#include <string_view>
-
-#include "../LogEvent.h"
-#include "EMetaBlob.h"
-
-/*
- * rollback records, for remote/slave updates, which may need to be manually
- * rolled back during journal replay. (or while active if leader fails, but in
- * that case these records aren't needed.)
- */
-struct link_rollback {
- metareqid_t reqid;
- inodeno_t ino;
- bool was_inc;
- utime_t old_ctime;
- utime_t old_dir_mtime;
- utime_t old_dir_rctime;
- bufferlist snapbl;
-
- link_rollback() : ino(0), was_inc(false) {}
-
- void encode(bufferlist& bl) const;
- void decode(bufferlist::const_iterator& bl);
- void dump(Formatter *f) const;
- static void generate_test_instances(std::list<link_rollback*>& ls);
-};
-WRITE_CLASS_ENCODER(link_rollback)
-
-/*
- * this is only used on an empty dir with a dirfrag on a remote node.
- * we are auth for nothing. all we need to do is relink the directory
- * in the hierarchy properly during replay to avoid breaking the
- * subtree map.
- */
-struct rmdir_rollback {
- metareqid_t reqid;
- dirfrag_t src_dir;
- string src_dname;
- dirfrag_t dest_dir;
- string dest_dname;
- bufferlist snapbl;
-
- void encode(bufferlist& bl) const;
- void decode(bufferlist::const_iterator& bl);
- void dump(Formatter *f) const;
- static void generate_test_instances(std::list<rmdir_rollback*>& ls);
-};
-WRITE_CLASS_ENCODER(rmdir_rollback)
-
-struct rename_rollback {
- struct drec {
- dirfrag_t dirfrag;
- utime_t dirfrag_old_mtime;
- utime_t dirfrag_old_rctime;
- inodeno_t ino, remote_ino;
- string dname;
- char remote_d_type;
- utime_t old_ctime;
-
- drec() : remote_d_type((char)S_IFREG) {}
-
- void encode(bufferlist& bl) const;
- void decode(bufferlist::const_iterator& bl);
- void dump(Formatter *f) const;
- static void generate_test_instances(std::list<drec*>& ls);
- };
- WRITE_CLASS_MEMBER_ENCODER(drec)
-
- metareqid_t reqid;
- drec orig_src, orig_dest;
- drec stray; // we know this is null, but we want dname, old mtime/rctime
- utime_t ctime;
- bufferlist srci_snapbl;
- bufferlist desti_snapbl;
-
- void encode(bufferlist& bl) const;
- void decode(bufferlist::const_iterator& bl);
- void dump(Formatter *f) const;
- static void generate_test_instances(std::list<rename_rollback*>& ls);
-};
-WRITE_CLASS_ENCODER(rename_rollback::drec)
-WRITE_CLASS_ENCODER(rename_rollback)
-
-
-class ESlaveUpdate : public LogEvent {
-public:
- const static int OP_PREPARE = 1;
- const static int OP_COMMIT = 2;
- const static int OP_ROLLBACK = 3;
-
- const static int LINK = 1;
- const static int RENAME = 2;
- const static int RMDIR = 3;
-
- /*
- * we journal a rollback metablob that contains the unmodified metadata
- * too, because we may be updating previously dirty metadata, which
- * will allow old log segments to be trimmed. if we end of rolling back,
- * those updates could be lost.. so we re-journal the unmodified metadata,
- * and replay will apply _either_ commit or rollback.
- */
- EMetaBlob commit;
- bufferlist rollback;
- string type;
- metareqid_t reqid;
- mds_rank_t leader;
- __u8 op; // prepare, commit, abort
- __u8 origop; // link | rename
-
- ESlaveUpdate() : LogEvent(EVENT_SLAVEUPDATE), leader(0), op(0), origop(0) { }
- ESlaveUpdate(MDLog *mdlog, std::string_view s, metareqid_t ri, int leadermds, int o, int oo) :
- LogEvent(EVENT_SLAVEUPDATE),
- type(s),
- reqid(ri),
- leader(leadermds),
- op(o), origop(oo) { }
-
- void print(ostream& out) const override {
- if (type.length())
- out << type << " ";
- out << " " << (int)op;
- if (origop == LINK) out << " link";
- if (origop == RENAME) out << " rename";
- out << " " << reqid;
- out << " for mds." << leader;
- out << commit;
- }
-
- EMetaBlob *get_metablob() override { return &commit; }
-
- void encode(bufferlist& bl, uint64_t features) const override;
- void decode(bufferlist::const_iterator& bl) override;
- void dump(Formatter *f) const override;
- static void generate_test_instances(std::list<ESlaveUpdate*>& ls);
-
- void replay(MDSRank *mds) override;
-};
-WRITE_CLASS_ENCODER_FEATURES(ESlaveUpdate)
-
-#endif
bufferlist client_map;
version_t cmapv;
metareqid_t reqid;
- bool had_slaves;
+ bool had_peers;
- EUpdate() : LogEvent(EVENT_UPDATE), cmapv(0), had_slaves(false) { }
+ EUpdate() : LogEvent(EVENT_UPDATE), cmapv(0), had_peers(false) { }
EUpdate(MDLog *mdlog, std::string_view s) :
LogEvent(EVENT_UPDATE),
- type(s), cmapv(0), had_slaves(false) { }
+ type(s), cmapv(0), had_peers(false) { }
void print(ostream& out) const override {
if (type.length())
#include "events/ENoOp.h"
#include "events/EUpdate.h"
-#include "events/ESlaveUpdate.h"
+#include "events/EPeerUpdate.h"
#include "events/EOpen.h"
#include "events/ECommitted.h"
#include "events/EPurged.h"
}
}
- // leader ops with possibly uncommitted slaves
+ // leader ops with possibly uncommitted peers
for (set<metareqid_t>::iterator p = uncommitted_leaders.begin();
p != uncommitted_leaders.end();
++p) {
- dout(10) << "try_to_expire waiting for slaves to ack commit on " << *p << dendl;
+ dout(10) << "try_to_expire waiting for peers to ack commit on " << *p << dendl;
mds->mdcache->wait_for_uncommitted_leader(*p, gather_bld.new_sub());
}
- // slave ops that haven't been committed
- for (set<metareqid_t>::iterator p = uncommitted_slaves.begin();
- p != uncommitted_slaves.end();
+ // peer ops that haven't been committed
+ for (set<metareqid_t>::iterator p = uncommitted_peers.begin();
+ p != uncommitted_peers.end();
++p) {
dout(10) << "try_to_expire waiting for leader to ack OP_FINISH on " << *p << dendl;
- mds->mdcache->wait_for_uncommitted_slave(*p, gather_bld.new_sub());
+ mds->mdcache->wait_for_uncommitted_peer(*p, gather_bld.new_sub());
}
// uncommitted fragments
ls.push_back(new EMetaBlob());
}
-void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
+void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDPeerUpdate *peerup)
{
dout(10) << "EMetaBlob.replay " << lump_map.size() << " dirlumps by " << client_name << dendl;
if (olddir) {
if (olddir->authority() != CDIR_AUTH_UNDEF &&
renamed_diri->authority() == CDIR_AUTH_UNDEF) {
- ceph_assert(slaveup); // auth to non-auth, must be slave prepare
+ ceph_assert(peerup); // auth to non-auth, must be peer prepare
frag_vec_t leaves;
renamed_diri->dirfragtree.get_leaves(leaves);
for (const auto& leaf : leaves) {
CDir *dir = renamed_diri->get_dirfrag(leaf);
ceph_assert(dir);
if (dir->get_dir_auth() == CDIR_AUTH_UNDEF)
- // preserve subtree bound until slave commit
- slaveup->olddirs.insert(dir->inode);
+ // preserve subtree bound until peer commit
+ peerup->olddirs.insert(dir->inode);
else
dir->state_set(CDir::STATE_AUTH);
// see if we can discard the subtree we renamed out of
CDir *root = mds->mdcache->get_subtree_root(olddir);
if (root->get_dir_auth() == CDIR_AUTH_UNDEF) {
- if (slaveup) // preserve the old dir until slave commit
- slaveup->olddirs.insert(olddir->inode);
+ if (peerup) // preserve the old dir until peer commit
+ peerup->olddirs.insert(olddir->inode);
else
mds->mdcache->try_trim_non_auth_subtree(root);
}
dout(10) << " unlinked set contains " << unlinked << dendl;
for (map<CInode*, CDir*>::iterator p = unlinked.begin(); p != unlinked.end(); ++p) {
CInode *in = p->first;
- if (slaveup) { // preserve unlinked inodes until slave commit
- slaveup->unlinked.insert(in);
+ if (peerup) { // preserve unlinked inodes until peer commit
+ peerup->unlinked.insert(in);
if (in->snaprealm)
in->snaprealm->adjust_parent();
} else
encode(client_map, bl);
encode(cmapv, bl);
encode(reqid, bl);
- encode(had_slaves, bl);
+ encode(had_peers, bl);
ENCODE_FINISH(bl);
}
if (struct_v >= 3)
decode(cmapv, bl);
decode(reqid, bl);
- decode(had_slaves, bl);
+ decode(had_peers, bl);
DECODE_FINISH(bl);
}
f->dump_int("client map length", client_map.length());
f->dump_int("client map version", cmapv);
f->dump_stream("reqid") << reqid;
- f->dump_string("had slaves", had_slaves ? "true" : "false");
+ f->dump_string("had peers", had_peers ? "true" : "false");
}
void EUpdate::generate_test_instances(std::list<EUpdate*>& ls)
if (client_map.length())
segment->sessionmapv = cmapv;
- if (had_slaves)
+ if (had_peers)
segment->uncommitted_leaders.insert(reqid);
}
auto&& segment = get_segment();
metablob.replay(mds, segment);
- if (had_slaves) {
- dout(10) << "EUpdate.replay " << reqid << " had slaves, expecting a matching ECommitted" << dendl;
+ if (had_peers) {
+ dout(10) << "EUpdate.replay " << reqid << " had peers, expecting a matching ECommitted" << dendl;
segment->uncommitted_leaders.insert(reqid);
- set<mds_rank_t> slaves;
- mds->mdcache->add_uncommitted_leader(reqid, segment, slaves, true);
+ set<mds_rank_t> peers;
+ mds->mdcache->add_uncommitted_leader(reqid, segment, peers, true);
}
if (client_map.length()) {
}
// -----------------------
-// ESlaveUpdate
+// EPeerUpdate
void link_rollback::encode(bufferlist &bl) const
{
ls.back()->stray.remote_d_type = IFTODT(S_IFREG);
}
-void ESlaveUpdate::encode(bufferlist &bl, uint64_t features) const
+void EPeerUpdate::encode(bufferlist &bl, uint64_t features) const
{
ENCODE_START(3, 3, bl);
encode(stamp, bl);
ENCODE_FINISH(bl);
}
-void ESlaveUpdate::decode(bufferlist::const_iterator &bl)
+void EPeerUpdate::decode(bufferlist::const_iterator &bl)
{
DECODE_START_LEGACY_COMPAT_LEN(3, 3, 3, bl);
if (struct_v >= 2)
DECODE_FINISH(bl);
}
-void ESlaveUpdate::dump(Formatter *f) const
+void EPeerUpdate::dump(Formatter *f) const
{
f->open_object_section("metablob");
commit.dump(f);
f->dump_int("original op", origop);
}
-void ESlaveUpdate::generate_test_instances(std::list<ESlaveUpdate*>& ls)
+void EPeerUpdate::generate_test_instances(std::list<EPeerUpdate*>& ls)
{
- ls.push_back(new ESlaveUpdate());
+ ls.push_back(new EPeerUpdate());
}
-void ESlaveUpdate::replay(MDSRank *mds)
+void EPeerUpdate::replay(MDSRank *mds)
{
- MDSlaveUpdate *su;
+ MDPeerUpdate *su;
auto&& segment = get_segment();
switch (op) {
- case ESlaveUpdate::OP_PREPARE:
- dout(10) << "ESlaveUpdate.replay prepare " << reqid << " for mds." << leader
+ case EPeerUpdate::OP_PREPARE:
+ dout(10) << "EPeerUpdate.replay prepare " << reqid << " for mds." << leader
<< ": applying commit, saving rollback info" << dendl;
- su = new MDSlaveUpdate(origop, rollback);
+ su = new MDPeerUpdate(origop, rollback);
commit.replay(mds, segment, su);
- mds->mdcache->add_uncommitted_slave(reqid, segment, leader, su);
+ mds->mdcache->add_uncommitted_peer(reqid, segment, leader, su);
break;
- case ESlaveUpdate::OP_COMMIT:
- dout(10) << "ESlaveUpdate.replay commit " << reqid << " for mds." << leader << dendl;
- mds->mdcache->finish_uncommitted_slave(reqid, false);
+ case EPeerUpdate::OP_COMMIT:
+ dout(10) << "EPeerUpdate.replay commit " << reqid << " for mds." << leader << dendl;
+ mds->mdcache->finish_uncommitted_peer(reqid, false);
break;
- case ESlaveUpdate::OP_ROLLBACK:
- dout(10) << "ESlaveUpdate.replay abort " << reqid << " for mds." << leader
+ case EPeerUpdate::OP_ROLLBACK:
+ dout(10) << "EPeerUpdate.replay abort " << reqid << " for mds." << leader
<< ": applying rollback commit blob" << dendl;
commit.replay(mds, segment);
- mds->mdcache->finish_uncommitted_slave(reqid, false);
+ mds->mdcache->finish_uncommitted_peer(reqid, false);
break;
default:
- mds->clog->error() << "invalid op in ESlaveUpdate";
+ mds->clog->error() << "invalid op in EPeerUpdate";
mds->damaged();
ceph_abort(); // Should be unreachable because damaged() calls respawn()
}
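
A hedged sketch of the replay dispatch above, with stand-in hooks: PREPARE applies the commit blob and registers the update as uncommitted; COMMIT retires it; ROLLBACK applies the rollback event's metablob (which already carries the restored metadata) and retires it.

    #include <iostream>

    enum Op { OP_PREPARE = 1, OP_COMMIT = 2, OP_ROLLBACK = 3 };

    // Stand-ins for the cache hooks the real replay calls.
    void apply_metablob(const char *what) { std::cout << "apply " << what << "\n"; }
    void track_uncommitted(bool add) { std::cout << (add ? "track" : "retire") << "\n"; }

    void replay(int op) {
      switch (op) {
      case OP_PREPARE:
        apply_metablob("commit blob");
        track_uncommitted(true);
        break;
      case OP_COMMIT:
        track_uncommitted(false);
        break;
      case OP_ROLLBACK:
        apply_metablob("restored metadata");
        track_uncommitted(false);
        break;
      }
    }

    int main() {
      replay(OP_PREPARE);  // prepared but unresolved at crash time
      replay(OP_COMMIT);   // matching commit record retires it
    }
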
WRITE_CLASS_ENCODER(lock_bls)
// authpins, xlocks
- struct slave_reqid {
+ struct peer_reqid {
metareqid_t reqid;
__u32 attempt;
- slave_reqid() : attempt(0) {}
- slave_reqid(const metareqid_t& r, __u32 a)
+ peer_reqid() : attempt(0) {}
+ peer_reqid(const metareqid_t& r, __u32 a)
: reqid(r), attempt(a) {}
void encode(ceph::buffer::list& bl) const {
using ceph::encode;
encode(bl, inode_base);
}
void add_inode_authpin(vinodeno_t ino, const metareqid_t& ri, __u32 attempt) {
- authpinned_inodes[ino].push_back(slave_reqid(ri, attempt));
+ authpinned_inodes[ino].push_back(peer_reqid(ri, attempt));
}
void add_inode_frozen_authpin(vinodeno_t ino, const metareqid_t& ri, __u32 attempt) {
- frozen_authpin_inodes[ino] = slave_reqid(ri, attempt);
+ frozen_authpin_inodes[ino] = peer_reqid(ri, attempt);
}
void add_inode_xlock(vinodeno_t ino, int lt, const metareqid_t& ri, __u32 attempt) {
- xlocked_inodes[ino][lt] = slave_reqid(ri, attempt);
+ xlocked_inodes[ino][lt] = peer_reqid(ri, attempt);
}
void add_inode_wrlock(vinodeno_t ino, int lt, const metareqid_t& ri, __u32 attempt) {
- wrlocked_inodes[ino][lt].push_back(slave_reqid(ri, attempt));
+ wrlocked_inodes[ino][lt].push_back(peer_reqid(ri, attempt));
}
void add_scatterlock_state(CInode *in) {
}
void add_dentry_authpin(dirfrag_t df, std::string_view dname, snapid_t last,
const metareqid_t& ri, __u32 attempt) {
- authpinned_dentries[df][string_snap_t(dname, last)].push_back(slave_reqid(ri, attempt));
+ authpinned_dentries[df][string_snap_t(dname, last)].push_back(peer_reqid(ri, attempt));
}
void add_dentry_xlock(dirfrag_t df, std::string_view dname, snapid_t last,
const metareqid_t& ri, __u32 attempt) {
- xlocked_dentries[df][string_snap_t(dname, last)] = slave_reqid(ri, attempt);
+ xlocked_dentries[df][string_snap_t(dname, last)] = peer_reqid(ri, attempt);
}
// -- encoding --
ceph::buffer::list inode_locks;
std::map<dirfrag_t, ceph::buffer::list> dirfrag_bases;
- std::map<vinodeno_t, std::list<slave_reqid> > authpinned_inodes;
- std::map<vinodeno_t, slave_reqid> frozen_authpin_inodes;
- std::map<vinodeno_t, std::map<__s32, slave_reqid> > xlocked_inodes;
- std::map<vinodeno_t, std::map<__s32, std::list<slave_reqid> > > wrlocked_inodes;
- std::map<dirfrag_t, std::map<string_snap_t, std::list<slave_reqid> > > authpinned_dentries;
- std::map<dirfrag_t, std::map<string_snap_t, slave_reqid> > xlocked_dentries;
+ std::map<vinodeno_t, std::list<peer_reqid> > authpinned_inodes;
+ std::map<vinodeno_t, peer_reqid> frozen_authpin_inodes;
+ std::map<vinodeno_t, std::map<__s32, peer_reqid> > xlocked_inodes;
+ std::map<vinodeno_t, std::map<__s32, std::list<peer_reqid> > > wrlocked_inodes;
+ std::map<dirfrag_t, std::map<string_snap_t, std::list<peer_reqid> > > authpinned_dentries;
+ std::map<dirfrag_t, std::map<string_snap_t, peer_reqid> > xlocked_dentries;
private:
template<class T, typename... Args>
WRITE_CLASS_ENCODER(MMDSCacheRejoin::dn_strong)
WRITE_CLASS_ENCODER(MMDSCacheRejoin::dn_weak)
WRITE_CLASS_ENCODER(MMDSCacheRejoin::lock_bls)
-WRITE_CLASS_ENCODER(MMDSCacheRejoin::slave_reqid)
+WRITE_CLASS_ENCODER(MMDSCacheRejoin::peer_reqid)
-inline std::ostream& operator<<(std::ostream& out, const MMDSCacheRejoin::slave_reqid& r) {
+inline std::ostream& operator<<(std::ostream& out, const MMDSCacheRejoin::peer_reqid& r) {
return out << r.reqid << '.' << r.attempt;
}
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+
+#ifndef CEPH_MMDSPEERREQUEST_H
+#define CEPH_MMDSPEERREQUEST_H
+
+#include "mds/mdstypes.h"
+#include "messages/MMDSOp.h"
+
+class MMDSPeerRequest : public MMDSOp {
+ static constexpr int HEAD_VERSION = 1;
+ static constexpr int COMPAT_VERSION = 1;
+public:
+ static constexpr int OP_XLOCK = 1;
+ static constexpr int OP_XLOCKACK = -1;
+ static constexpr int OP_UNXLOCK = 2;
+ static constexpr int OP_AUTHPIN = 3;
+ static constexpr int OP_AUTHPINACK = -3;
+
+ static constexpr int OP_LINKPREP = 4;
+ static constexpr int OP_UNLINKPREP = 5;
+ static constexpr int OP_LINKPREPACK = -4;
+
+ static constexpr int OP_RENAMEPREP = 7;
+ static constexpr int OP_RENAMEPREPACK = -7;
+
+ static constexpr int OP_WRLOCK = 8;
+ static constexpr int OP_WRLOCKACK = -8;
+ static constexpr int OP_UNWRLOCK = 9;
+
+ static constexpr int OP_RMDIRPREP = 10;
+ static constexpr int OP_RMDIRPREPACK = -10;
+
+ static constexpr int OP_DROPLOCKS = 11;
+
+ static constexpr int OP_RENAMENOTIFY = 12;
+ static constexpr int OP_RENAMENOTIFYACK = -12;
+
+ static constexpr int OP_FINISH = 17;
+ static constexpr int OP_COMMITTED = -18;
+
+ static constexpr int OP_ABORT = 20; // used for recovery only
+ //static constexpr int OP_COMMIT = 21; // used for recovery only
+
+
+ static const char *get_opname(int o) {
+ switch (o) {
+ case OP_XLOCK: return "xlock";
+ case OP_XLOCKACK: return "xlock_ack";
+ case OP_UNXLOCK: return "unxlock";
+ case OP_AUTHPIN: return "authpin";
+ case OP_AUTHPINACK: return "authpin_ack";
+
+ case OP_LINKPREP: return "link_prep";
+ case OP_LINKPREPACK: return "link_prep_ack";
+ case OP_UNLINKPREP: return "unlink_prep";
+
+ case OP_RENAMEPREP: return "rename_prep";
+ case OP_RENAMEPREPACK: return "rename_prep_ack";
+
+ case OP_FINISH: return "finish"; // commit
+ case OP_COMMITTED: return "committed";
+
+ case OP_WRLOCK: return "wrlock";
+ case OP_WRLOCKACK: return "wrlock_ack";
+ case OP_UNWRLOCK: return "unwrlock";
+
+ case OP_RMDIRPREP: return "rmdir_prep";
+ case OP_RMDIRPREPACK: return "rmdir_prep_ack";
+
+ case OP_DROPLOCKS: return "drop_locks";
+
+ case OP_RENAMENOTIFY: return "rename_notify";
+ case OP_RENAMENOTIFYACK: return "rename_notify_ack";
+
+ case OP_ABORT: return "abort";
+ //case OP_COMMIT: return "commit";
+
+ default: ceph_abort(); return 0;
+ }
+ }
+
+ private:
+ metareqid_t reqid;
+ __u32 attempt;
+ __s16 op;
+ mutable __u16 flags; /* XXX HACK for mark_interrupted */
+
+ static constexpr unsigned FLAG_NONBLOCKING = 1<<0;
+ static constexpr unsigned FLAG_WOULDBLOCK = 1<<1;
+ static constexpr unsigned FLAG_NOTJOURNALED = 1<<2;
+ static constexpr unsigned FLAG_EROFS = 1<<3;
+ static constexpr unsigned FLAG_ABORT = 1<<4;
+ static constexpr unsigned FLAG_INTERRUPTED = 1<<5;
+ static constexpr unsigned FLAG_NOTIFYBLOCKING = 1<<6;
+ static constexpr unsigned FLAG_REQBLOCKED = 1<<7;
+
+ // for locking
+ __u16 lock_type; // lock object type
+ MDSCacheObjectInfo object_info;
+
+ // for authpins
+ std::vector<MDSCacheObjectInfo> authpins;
+
+ public:
+ // for rename prep
+ filepath srcdnpath;
+ filepath destdnpath;
+ std::set<mds_rank_t> witnesses;
+ ceph::buffer::list inode_export;
+ version_t inode_export_v;
+ mds_rank_t srcdn_auth;
+ utime_t op_stamp;
+
+ mutable ceph::buffer::list straybl; // stray dir + dentry
+ ceph::buffer::list srci_snapbl;
+ ceph::buffer::list desti_snapbl;
+
+public:
+ metareqid_t get_reqid() const { return reqid; }
+ __u32 get_attempt() const { return attempt; }
+ int get_op() const { return op; }
+ bool is_reply() const { return op < 0; }
+
+ int get_lock_type() const { return lock_type; }
+ const MDSCacheObjectInfo &get_object_info() const { return object_info; }
+ MDSCacheObjectInfo &get_object_info() { return object_info; }
+ const MDSCacheObjectInfo &get_authpin_freeze() const { return object_info; }
+ MDSCacheObjectInfo &get_authpin_freeze() { return object_info; }
+
+ const std::vector<MDSCacheObjectInfo>& get_authpins() const { return authpins; }
+ std::vector<MDSCacheObjectInfo>& get_authpins() { return authpins; }
+ void mark_nonblocking() { flags |= FLAG_NONBLOCKING; }
+ bool is_nonblocking() const { return (flags & FLAG_NONBLOCKING); }
+ void mark_error_wouldblock() { flags |= FLAG_WOULDBLOCK; }
+ bool is_error_wouldblock() const { return (flags & FLAG_WOULDBLOCK); }
+ void mark_not_journaled() { flags |= FLAG_NOTJOURNALED; }
+ bool is_not_journaled() const { return (flags & FLAG_NOTJOURNALED); }
+ void mark_error_rofs() { flags |= FLAG_EROFS; }
+ bool is_error_rofs() const { return (flags & FLAG_EROFS); }
+ bool is_abort() const { return (flags & FLAG_ABORT); }
+ void mark_abort() { flags |= FLAG_ABORT; }
+ bool is_interrupted() const { return (flags & FLAG_INTERRUPTED); }
+ void mark_interrupted() const { flags |= FLAG_INTERRUPTED; }
+ bool should_notify_blocking() const { return (flags & FLAG_NOTIFYBLOCKING); }
+ void mark_notify_blocking() { flags |= FLAG_NOTIFYBLOCKING; }
+ void clear_notify_blocking() const { flags &= ~FLAG_NOTIFYBLOCKING; }
+ bool is_req_blocked() const { return (flags & FLAG_REQBLOCKED); }
+ void mark_req_blocked() { flags |= FLAG_REQBLOCKED; }
+
+ void set_lock_type(int t) { lock_type = t; }
+ const ceph::buffer::list& get_lock_data() const { return inode_export; }
+ ceph::buffer::list& get_lock_data() { return inode_export; }
+
+protected:
+ MMDSPeerRequest() : MMDSOp{MSG_MDS_PEER_REQUEST, HEAD_VERSION, COMPAT_VERSION} { }
+ MMDSPeerRequest(metareqid_t ri, __u32 att, int o) :
+ MMDSOp{MSG_MDS_PEER_REQUEST, HEAD_VERSION, COMPAT_VERSION},
+ reqid(ri), attempt(att), op(o), flags(0), lock_type(0),
+ inode_export_v(0), srcdn_auth(MDS_RANK_NONE) { }
+ ~MMDSPeerRequest() override {}
+
+public:
+ void encode_payload(uint64_t features) override {
+ using ceph::encode;
+ encode(reqid, payload);
+ encode(attempt, payload);
+ encode(op, payload);
+ encode(flags, payload);
+ encode(lock_type, payload);
+ encode(object_info, payload);
+ encode(authpins, payload);
+ encode(srcdnpath, payload);
+ encode(destdnpath, payload);
+ encode(witnesses, payload);
+ encode(op_stamp, payload);
+ encode(inode_export, payload);
+ encode(inode_export_v, payload);
+ encode(srcdn_auth, payload);
+ encode(straybl, payload);
+ encode(srci_snapbl, payload);
+ encode(desti_snapbl, payload);
+ }
+ void decode_payload() override {
+ using ceph::decode;
+ auto p = payload.cbegin();
+ decode(reqid, p);
+ decode(attempt, p);
+ decode(op, p);
+ decode(flags, p);
+ decode(lock_type, p);
+ decode(object_info, p);
+ decode(authpins, p);
+ decode(srcdnpath, p);
+ decode(destdnpath, p);
+ decode(witnesses, p);
+ decode(op_stamp, p);
+ decode(inode_export, p);
+ decode(inode_export_v, p);
+ decode(srcdn_auth, p);
+ decode(straybl, p);
+ decode(srci_snapbl, p);
+ decode(desti_snapbl, p);
+ }
+
+ std::string_view get_type_name() const override { return "peer_request"; }
+ void print(std::ostream& out) const override {
+ out << "peer_request(" << reqid
+ << "." << attempt
+ << " " << get_opname(op)
+ << ")";
+ }
+private:
+ template<class T, typename... Args>
+ friend boost::intrusive_ptr<T> ceph::make_message(Args&&... args);
+};
+
+#endif
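
A minimal sketch of the flag scheme in MMDSPeerRequest above; the field is mutable in the real class (the "XXX HACK" comment) so that const message handlers can still mark interruption.

    #include <cstdint>
    #include <iostream>

    struct FlagDemo {
      mutable uint16_t flags = 0;
      static constexpr unsigned FLAG_NOTJOURNALED = 1 << 2;
      static constexpr unsigned FLAG_INTERRUPTED  = 1 << 5;

      void mark_not_journaled()     { flags |= FLAG_NOTJOURNALED; }
      bool is_not_journaled() const { return flags & FLAG_NOTJOURNALED; }
      void mark_interrupted() const { flags |= FLAG_INTERRUPTED; }  // ok: mutable
      bool is_interrupted() const   { return flags & FLAG_INTERRUPTED; }
    };

    int main() {
      const FlagDemo m;          // handlers often only hold a const ref
      m.mark_interrupted();      // still legal because flags is mutable
      std::cout << std::boolalpha << m.is_interrupted() << "\n";  // true
    }
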
std::map<dirfrag_t, std::vector<dirfrag_t>> subtrees;
std::map<dirfrag_t, std::vector<dirfrag_t>> ambiguous_imports;
- class slave_inode_cap {
+ class peer_inode_cap {
public:
inodeno_t ino;
std::map<client_t,Capability::Export> cap_exports;
- slave_inode_cap() {}
- slave_inode_cap(inodeno_t a, map<client_t, Capability::Export> b) : ino(a), cap_exports(b) {}
+ peer_inode_cap() {}
+ peer_inode_cap(inodeno_t a, map<client_t, Capability::Export> b) : ino(a), cap_exports(b) {}
void encode(ceph::buffer::list &bl) const
{
ENCODE_START(1, 1, bl);
DECODE_FINISH(blp);
}
};
- WRITE_CLASS_ENCODER(slave_inode_cap)
+ WRITE_CLASS_ENCODER(peer_inode_cap)
- struct slave_request {
+ struct peer_request {
ceph::buffer::list inode_caps;
bool committing;
- slave_request() : committing(false) {}
+ peer_request() : committing(false) {}
void encode(ceph::buffer::list &bl) const {
ENCODE_START(1, 1, bl);
encode(inode_caps, bl);
}
};
- std::map<metareqid_t, slave_request> slave_requests;
+ std::map<metareqid_t, peer_request> peer_requests;
// table client information
struct table_client {
void print(std::ostream& out) const override {
out << "mds_resolve(" << subtrees.size()
<< "+" << ambiguous_imports.size()
- << " subtrees +" << slave_requests.size() << " slave requests)";
+ << " subtrees +" << peer_requests.size() << " peer requests)";
}
void add_subtree(dirfrag_t im) {
ambiguous_imports[im] = m;
}
- void add_slave_request(metareqid_t reqid, bool committing) {
- slave_requests[reqid].committing = committing;
+ void add_peer_request(metareqid_t reqid, bool committing) {
+ peer_requests[reqid].committing = committing;
}
- void add_slave_request(metareqid_t reqid, ceph::buffer::list& bl) {
- slave_requests[reqid].inode_caps = std::move(bl);
+ void add_peer_request(metareqid_t reqid, ceph::buffer::list& bl) {
+ peer_requests[reqid].inode_caps = std::move(bl);
}
void add_table_commits(int table, const std::set<version_t>& pending_commits) {
using ceph::encode;
encode(subtrees, payload);
encode(ambiguous_imports, payload);
- encode(slave_requests, payload);
+ encode(peer_requests, payload);
encode(table_clients, payload);
}
void decode_payload() override {
auto p = payload.cbegin();
decode(subtrees, p);
decode(ambiguous_imports, p);
- decode(slave_requests, p);
+ decode(peer_requests, p);
decode(table_clients, p);
}
private:
friend boost::intrusive_ptr<T> ceph::make_message(Args&&... args);
};
-inline std::ostream& operator<<(std::ostream& out, const MMDSResolve::slave_request&) {
+inline std::ostream& operator<<(std::ostream& out, const MMDSResolve::peer_request&) {
return out;
}
-WRITE_CLASS_ENCODER(MMDSResolve::slave_request)
+WRITE_CLASS_ENCODER(MMDSResolve::peer_request)
WRITE_CLASS_ENCODER(MMDSResolve::table_client)
-WRITE_CLASS_ENCODER(MMDSResolve::slave_inode_cap)
+WRITE_CLASS_ENCODER(MMDSResolve::peer_inode_cap)
#endif
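
A toy model (not Ceph code) of the MMDSResolve bookkeeping above: during resolve, each survivor reports its uncommitted peer requests, keyed by request id, along with whether it had already started committing.

    #include <cstdint>
    #include <iostream>
    #include <map>

    struct ToyResolve {
      struct peer_request { bool committing = false; };
      std::map<uint64_t, peer_request> peer_requests;

      void add_peer_request(uint64_t reqid, bool committing) {
        peer_requests[reqid].committing = committing;
      }
    };

    int main() {
      ToyResolve m;
      m.add_peer_request(7, /*committing=*/true);
      std::cout << m.peer_requests.size() << " peer requests\n";  // prints 1
    }
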
/*void print(ostream& out) const {
out << "resolve_ack.size()
<< "+" << ambiguous_imap.size()
- << " imports +" << slave_requests.size() << " slave requests)";
+ << " imports +" << peer_requests.size() << " peer requests)";
}
*/
+++ /dev/null
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
-// vim: ts=8 sw=2 smarttab
-/*
- * Ceph - scalable distributed file system
- *
- * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
- *
- * This is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License version 2.1, as published by the Free Software
- * Foundation. See file COPYING.
- *
- */
-
-
-#ifndef CEPH_MMDSSLAVEREQUEST_H
-#define CEPH_MMDSSLAVEREQUEST_H
-
-#include "mds/mdstypes.h"
-#include "messages/MMDSOp.h"
-
-class MMDSSlaveRequest : public MMDSOp {
- static constexpr int HEAD_VERSION = 1;
- static constexpr int COMPAT_VERSION = 1;
-public:
- static constexpr int OP_XLOCK = 1;
- static constexpr int OP_XLOCKACK = -1;
- static constexpr int OP_UNXLOCK = 2;
- static constexpr int OP_AUTHPIN = 3;
- static constexpr int OP_AUTHPINACK = -3;
-
- static constexpr int OP_LINKPREP = 4;
- static constexpr int OP_UNLINKPREP = 5;
- static constexpr int OP_LINKPREPACK = -4;
-
- static constexpr int OP_RENAMEPREP = 7;
- static constexpr int OP_RENAMEPREPACK = -7;
-
- static constexpr int OP_WRLOCK = 8;
- static constexpr int OP_WRLOCKACK = -8;
- static constexpr int OP_UNWRLOCK = 9;
-
- static constexpr int OP_RMDIRPREP = 10;
- static constexpr int OP_RMDIRPREPACK = -10;
-
- static constexpr int OP_DROPLOCKS = 11;
-
- static constexpr int OP_RENAMENOTIFY = 12;
- static constexpr int OP_RENAMENOTIFYACK = -12;
-
- static constexpr int OP_FINISH = 17;
- static constexpr int OP_COMMITTED = -18;
-
- static constexpr int OP_ABORT = 20; // used for recovery only
- //static constexpr int OP_COMMIT = 21; // used for recovery only
-
-
- static const char *get_opname(int o) {
- switch (o) {
- case OP_XLOCK: return "xlock";
- case OP_XLOCKACK: return "xlock_ack";
- case OP_UNXLOCK: return "unxlock";
- case OP_AUTHPIN: return "authpin";
- case OP_AUTHPINACK: return "authpin_ack";
-
- case OP_LINKPREP: return "link_prep";
- case OP_LINKPREPACK: return "link_prep_ack";
- case OP_UNLINKPREP: return "unlink_prep";
-
- case OP_RENAMEPREP: return "rename_prep";
- case OP_RENAMEPREPACK: return "rename_prep_ack";
-
- case OP_FINISH: return "finish"; // commit
- case OP_COMMITTED: return "committed";
-
- case OP_WRLOCK: return "wrlock";
- case OP_WRLOCKACK: return "wrlock_ack";
- case OP_UNWRLOCK: return "unwrlock";
-
- case OP_RMDIRPREP: return "rmdir_prep";
- case OP_RMDIRPREPACK: return "rmdir_prep_ack";
-
- case OP_DROPLOCKS: return "drop_locks";
-
- case OP_RENAMENOTIFY: return "rename_notify";
- case OP_RENAMENOTIFYACK: return "rename_notify_ack";
-
- case OP_ABORT: return "abort";
- //case OP_COMMIT: return "commit";
-
- default: ceph_abort(); return 0;
- }
- }
-
- private:
- metareqid_t reqid;
- __u32 attempt;
- __s16 op;
- mutable __u16 flags; /* XXX HACK for mark_interrupted */
-
- static constexpr unsigned FLAG_NONBLOCKING = 1<<0;
- static constexpr unsigned FLAG_WOULDBLOCK = 1<<1;
- static constexpr unsigned FLAG_NOTJOURNALED = 1<<2;
- static constexpr unsigned FLAG_EROFS = 1<<3;
- static constexpr unsigned FLAG_ABORT = 1<<4;
- static constexpr unsigned FLAG_INTERRUPTED = 1<<5;
- static constexpr unsigned FLAG_NOTIFYBLOCKING = 1<<6;
- static constexpr unsigned FLAG_REQBLOCKED = 1<<7;
-
- // for locking
- __u16 lock_type; // lock object type
- MDSCacheObjectInfo object_info;
-
- // for authpins
- std::vector<MDSCacheObjectInfo> authpins;
-
- public:
- // for rename prep
- filepath srcdnpath;
- filepath destdnpath;
- std::set<mds_rank_t> witnesses;
- ceph::buffer::list inode_export;
- version_t inode_export_v;
- mds_rank_t srcdn_auth;
- utime_t op_stamp;
-
- mutable ceph::buffer::list straybl; // stray dir + dentry
- ceph::buffer::list srci_snapbl;
- ceph::buffer::list desti_snapbl;
-
-public:
- metareqid_t get_reqid() const { return reqid; }
- __u32 get_attempt() const { return attempt; }
- int get_op() const { return op; }
- bool is_reply() const { return op < 0; }
-
- int get_lock_type() const { return lock_type; }
- const MDSCacheObjectInfo &get_object_info() const { return object_info; }
- MDSCacheObjectInfo &get_object_info() { return object_info; }
- const MDSCacheObjectInfo &get_authpin_freeze() const { return object_info; }
- MDSCacheObjectInfo &get_authpin_freeze() { return object_info; }
-
- const std::vector<MDSCacheObjectInfo>& get_authpins() const { return authpins; }
- std::vector<MDSCacheObjectInfo>& get_authpins() { return authpins; }
- void mark_nonblocking() { flags |= FLAG_NONBLOCKING; }
- bool is_nonblocking() const { return (flags & FLAG_NONBLOCKING); }
- void mark_error_wouldblock() { flags |= FLAG_WOULDBLOCK; }
- bool is_error_wouldblock() const { return (flags & FLAG_WOULDBLOCK); }
- void mark_not_journaled() { flags |= FLAG_NOTJOURNALED; }
- bool is_not_journaled() const { return (flags & FLAG_NOTJOURNALED); }
- void mark_error_rofs() { flags |= FLAG_EROFS; }
- bool is_error_rofs() const { return (flags & FLAG_EROFS); }
- bool is_abort() const { return (flags & FLAG_ABORT); }
- void mark_abort() { flags |= FLAG_ABORT; }
- bool is_interrupted() const { return (flags & FLAG_INTERRUPTED); }
- void mark_interrupted() const { flags |= FLAG_INTERRUPTED; }
- bool should_notify_blocking() const { return (flags & FLAG_NOTIFYBLOCKING); }
- void mark_notify_blocking() { flags |= FLAG_NOTIFYBLOCKING; }
- void clear_notify_blocking() const { flags &= ~FLAG_NOTIFYBLOCKING; }
- bool is_req_blocked() const { return (flags & FLAG_REQBLOCKED); }
- void mark_req_blocked() { flags |= FLAG_REQBLOCKED; }
-
- void set_lock_type(int t) { lock_type = t; }
- const ceph::buffer::list& get_lock_data() const { return inode_export; }
- ceph::buffer::list& get_lock_data() { return inode_export; }
-
-protected:
- MMDSSlaveRequest() : MMDSOp{MSG_MDS_SLAVE_REQUEST, HEAD_VERSION, COMPAT_VERSION} { }
- MMDSSlaveRequest(metareqid_t ri, __u32 att, int o) :
- MMDSOp{MSG_MDS_SLAVE_REQUEST, HEAD_VERSION, COMPAT_VERSION},
- reqid(ri), attempt(att), op(o), flags(0), lock_type(0),
- inode_export_v(0), srcdn_auth(MDS_RANK_NONE) { }
- ~MMDSSlaveRequest() override {}
-
-public:
- void encode_payload(uint64_t features) override {
- using ceph::encode;
- encode(reqid, payload);
- encode(attempt, payload);
- encode(op, payload);
- encode(flags, payload);
- encode(lock_type, payload);
- encode(object_info, payload);
- encode(authpins, payload);
- encode(srcdnpath, payload);
- encode(destdnpath, payload);
- encode(witnesses, payload);
- encode(op_stamp, payload);
- encode(inode_export, payload);
- encode(inode_export_v, payload);
- encode(srcdn_auth, payload);
- encode(straybl, payload);
- encode(srci_snapbl, payload);
- encode(desti_snapbl, payload);
- }
- void decode_payload() override {
- using ceph::decode;
- auto p = payload.cbegin();
- decode(reqid, p);
- decode(attempt, p);
- decode(op, p);
- decode(flags, p);
- decode(lock_type, p);
- decode(object_info, p);
- decode(authpins, p);
- decode(srcdnpath, p);
- decode(destdnpath, p);
- decode(witnesses, p);
- decode(op_stamp, p);
- decode(inode_export, p);
- decode(inode_export_v, p);
- decode(srcdn_auth, p);
- decode(straybl, p);
- decode(srci_snapbl, p);
- decode(desti_snapbl, p);
- }
-
- std::string_view get_type_name() const override { return "slave_request"; }
- void print(std::ostream& out) const override {
- out << "slave_request(" << reqid
- << "." << attempt
- << " " << get_opname(op)
- << ")";
- }
-private:
- template<class T, typename... Args>
- friend boost::intrusive_ptr<T> ceph::make_message(Args&&... args);
-};
-
-#endif
#include "messages/MClientQuota.h"
#include "messages/MClientMetrics.h"
-#include "messages/MMDSSlaveRequest.h"
+#include "messages/MMDSPeerRequest.h"
#include "messages/MMDSMap.h"
#include "messages/MFSMap.h"
break;
// mds
- case MSG_MDS_SLAVE_REQUEST:
- m = make_message<MMDSSlaveRequest>();
+ case MSG_MDS_PEER_REQUEST:
+ m = make_message<MMDSPeerRequest>();
break;
case CEPH_MSG_MDS_MAP:
// *** MDS ***
#define MSG_MDS_BEACON 100 // to monitor
-#define MSG_MDS_SLAVE_REQUEST 101
+#define MSG_MDS_PEER_REQUEST 101
#define MSG_MDS_TABLE_REQUEST 102
// 150 already in use (MSG_OSD_RECOVERY_RESERVE)
class MMDSOpenInoReply;
class MMDSResolveAck;
class MMDSResolve;
-class MMDSSlaveRequest;
+class MMDSPeerRequest;
class MMDSSnapUpdate;
class MMDSTableRequest;
class MMgrBeacon;
#include "messages/MMDSResolveAck.h"
MESSAGE(MMDSResolveAck)
-#include "messages/MMDSSlaveRequest.h"
-MESSAGE(MMDSSlaveRequest)
+#include "messages/MMDSPeerRequest.h"
+MESSAGE(MMDSPeerRequest)
#include "messages/MMDSSnapUpdate.h"
MESSAGE(MMDSSnapUpdate)
#include "mds/events/ESessions.h"
TYPE_FEATUREFUL_NOCOPY(ESessions)
-#include "mds/events/ESlaveUpdate.h"
+#include "mds/events/EPeerUpdate.h"
TYPE(link_rollback)
TYPE(rmdir_rollback)
TYPE(rename_rollback::drec)
TYPE(rename_rollback)
-TYPE_FEATUREFUL_NOCOPY(ESlaveUpdate)
+TYPE_FEATUREFUL_NOCOPY(EPeerUpdate)
#include "mds/events/ESubtreeMap.h"
TYPE_FEATUREFUL_NOCOPY(ESubtreeMap)