From e697cba66c6a63071bfae583ea9c7bcf57204a0d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 14 Jul 2020 08:44:38 -0500 Subject: [PATCH] mds: slave -> peer Signed-off-by: Sage Weil --- ceph-object-corpus | 2 +- src/mds/Locker.cc | 74 +- src/mds/LogEvent.cc | 10 +- src/mds/LogEvent.h | 2 +- src/mds/LogSegment.h | 4 +- src/mds/MDCache.cc | 362 ++++---- src/mds/MDCache.h | 72 +- src/mds/MDSRank.cc | 4 +- src/mds/Migrator.cc | 2 +- src/mds/Mutation.cc | 56 +- src/mds/Mutation.h | 52 +- src/mds/Server.cc | 770 +++++++++--------- src/mds/Server.h | 44 +- src/mds/SimpleLock.h | 6 +- src/mds/events/EMetaBlob.h | 4 +- .../events/{ESlaveUpdate.h => EPeerUpdate.h} | 34 +- src/mds/events/EUpdate.h | 6 +- src/mds/journal.cc | 84 +- src/messages/MMDSCacheRejoin.h | 34 +- .../{MMDSSlaveRequest.h => MMDSPeerRequest.h} | 34 +- src/messages/MMDSResolve.h | 34 +- src/messages/MMDSResolveAck.h | 2 +- src/msg/Message.cc | 6 +- src/msg/Message.h | 2 +- src/msg/MessageRef.h | 2 +- src/tools/ceph-dencoder/common_types.h | 4 +- src/tools/ceph-dencoder/mds_types.h | 4 +- 27 files changed, 855 insertions(+), 855 deletions(-) rename src/mds/events/{ESlaveUpdate.h => EPeerUpdate.h} (86%) rename src/messages/{MMDSSlaveRequest.h => MMDSPeerRequest.h} (92%) diff --git a/ceph-object-corpus b/ceph-object-corpus index 45eaee4bb8756..70228ed56466b 160000 --- a/ceph-object-corpus +++ b/ceph-object-corpus @@ -1 +1 @@ -Subproject commit 45eaee4bb8756c0bcc8120b4b6efb43766b0116e +Subproject commit 70228ed56466b4be8a9abff9024f69820f68f6d0 diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc index 909a570819245..4dbc1bd37a08a 100644 --- a/src/mds/Locker.cc +++ b/src/mds/Locker.cc @@ -26,7 +26,7 @@ #include "MDSRank.h" #include "MDSMap.h" #include "messages/MInodeFileCaps.h" -#include "messages/MMDSSlaveRequest.h" +#include "messages/MMDSPeerRequest.h" #include "Migrator.h" #include "msg/Messenger.h" #include "osdc/Objecter.h" @@ -259,7 +259,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr, } } } else { - // if the lock is the latest locked one, it's possible that slave mds got the lock + // if the lock is the latest locked one, it's possible that peer mds got the lock // while there are recovering mds. if (!mdr->is_xlocked(lock) || mdr->is_last_locked(lock)) wait = true; @@ -287,7 +287,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr, // leader. wrlock versionlock so we can pipeline dentry updates to journal. lov.add_wrlock(&dn->versionlock, i + 1); } else { - // slave. exclusively lock the dentry version (i.e. block other journal updates). + // peer. exclusively lock the dentry version (i.e. block other journal updates). // this makes rollback safe. lov.add_xlock(&dn->versionlock, i + 1); } @@ -301,7 +301,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr, // leader. wrlock versionlock so we can pipeline inode updates to journal. lov.add_wrlock(&in->versionlock, i + 1); } else { - // slave. exclusively lock the inode version (i.e. block other journal updates). + // peer. exclusively lock the inode version (i.e. block other journal updates). // this makes rollback safe. 
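The versionlock split in the hunk above is the heart of the leader/peer distinction: the leader takes a wrlock on the dentry or inode versionlock so several journal updates can be pipelined, while a peer takes an xlock so no other journal update can touch the version until its prepare either commits or rolls back. A minimal sketch of that branch, reusing the LockOpVec/SimpleLock/MDRequestRef names from the surrounding hunk (the helper itself is hypothetical; in the patch the logic sits inline in Locker::acquire_locks):

    // Leaders pipeline journal updates; peers serialize them so that a
    // rollback always sees a single, well-defined version.
    static void take_versionlock(LockOpVec& lov, SimpleLock* versionlock,
                                 const MDRequestRef& mdr, size_t i)
    {
      if (mdr->is_leader())
        lov.add_wrlock(versionlock, i + 1);  // pipeline updates to journal
      else
        lov.add_xlock(versionlock, i + 1);   // block other journal updates
    }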
lov.add_xlock(&in->versionlock, i + 1); } @@ -313,7 +313,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr, mustpin.insert(object); } else if (!object->is_auth() && !lock->can_wrlock(_client) && // we might have to request a scatter - !mdr->is_slave()) { // if we are slave (remote_wrlock), the leader already authpinned + !mdr->is_peer()) { // if we are peer (remote_wrlock), the leader already authpinned dout(15) << " will also auth_pin " << *object << " in case we need to request a scatter" << dendl; mustpin.insert(object); @@ -461,13 +461,13 @@ bool Locker::acquire_locks(MDRequestRef& mdr, if (mds->is_cluster_degraded() && !mds->mdsmap->is_clientreplay_or_active_or_stopping(p.first)) { dout(10) << " mds." << p.first << " is not active" << dendl; - if (mdr->more()->waiting_on_slave.empty()) + if (mdr->more()->waiting_on_peer.empty()) mds->wait_for_active_peer(p.first, new C_MDS_RetryRequest(mdcache, mdr)); return false; } - auto req = make_message(mdr->reqid, mdr->attempt, - MMDSSlaveRequest::OP_AUTHPIN); + auto req = make_message(mdr->reqid, mdr->attempt, + MMDSPeerRequest::OP_AUTHPIN); for (auto& o : p.second) { dout(10) << " req remote auth_pin of " << *o << dendl; MDSCacheObjectInfo info; @@ -485,7 +485,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr, mds->send_message_mds(req, p.first); // put in waiting list - auto ret = mdr->more()->waiting_on_slave.insert(p.first); + auto ret = mdr->more()->waiting_on_peer.insert(p.first); ceph_assert(ret.second); } return false; @@ -632,7 +632,7 @@ void Locker::set_xlocks_done(MutationImpl *mut, bool skip_dentry) void Locker::_drop_locks(MutationImpl *mut, set *pneed_issue, bool drop_rdlocks) { - set slaves; + set peers; for (auto it = mut->locks.begin(); it != mut->locks.end(); ) { SimpleLock *lock = it->lock; @@ -646,13 +646,13 @@ void Locker::_drop_locks(MutationImpl *mut, set *pneed_issue, pneed_issue->insert(static_cast(obj)); } else { ceph_assert(lock->get_sm()->can_remote_xlock); - slaves.insert(obj->authority().first); + peers.insert(obj->authority().first); lock->put_xlock(); mut->locks.erase(it++); } } else if (it->is_wrlock() || it->is_remote_wrlock()) { if (it->is_remote_wrlock()) { - slaves.insert(it->wrlock_target); + peers.insert(it->wrlock_target); it->clear_remote_wrlock(); } if (it->is_wrlock()) { @@ -680,13 +680,13 @@ void Locker::_drop_locks(MutationImpl *mut, set *pneed_issue, } } - for (set::iterator p = slaves.begin(); p != slaves.end(); ++p) { + for (set::iterator p = peers.begin(); p != peers.end(); ++p) { if (!mds->is_cluster_degraded() || mds->mdsmap->get_state(*p) >= MDSMap::STATE_REJOIN) { dout(10) << "_drop_non_rdlocks dropping remote locks on mds." 
<< *p << dendl; - auto slavereq = make_message(mut->reqid, mut->attempt, - MMDSSlaveRequest::OP_DROPLOCKS); - mds->send_message_mds(slavereq, *p); + auto peerreq = make_message(mut->reqid, mut->attempt, + MMDSPeerRequest::OP_DROPLOCKS); + mds->send_message_mds(peerreq, *p); } } } @@ -903,8 +903,8 @@ void Locker::create_lock_cache(MDRequestRef& mdr, CInode *diri, file_layout_t *d return; } - if (mdr->has_more() && !mdr->more()->slaves.empty()) { - dout(10) << " there are slaves requests for " << *mdr << ", noop" << dendl; + if (mdr->has_more() && !mdr->more()->peers.empty()) { + dout(10) << " there are peer requests for " << *mdr << ", noop" << dendl; return; } @@ -1846,21 +1846,21 @@ void Locker::remote_wrlock_start(SimpleLock *lock, mds_rank_t target, MDRequestR if (mds->is_cluster_degraded() && !mds->mdsmap->is_clientreplay_or_active_or_stopping(target)) { dout(7) << " mds." << target << " is not active" << dendl; - if (mut->more()->waiting_on_slave.empty()) + if (mut->more()->waiting_on_peer.empty()) mds->wait_for_active_peer(target, new C_MDS_RetryRequest(mdcache, mut)); return; } // send lock request mut->start_locking(lock, target); - mut->more()->slaves.insert(target); - auto r = make_message(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_WRLOCK); + mut->more()->peers.insert(target); + auto r = make_message(mut->reqid, mut->attempt, MMDSPeerRequest::OP_WRLOCK); r->set_lock_type(lock->get_type()); lock->get_parent()->set_object_info(r->get_object_info()); mds->send_message_mds(r, target); - ceph_assert(mut->more()->waiting_on_slave.count(target) == 0); - mut->more()->waiting_on_slave.insert(target); + ceph_assert(mut->more()->waiting_on_peer.count(target) == 0); + mut->more()->waiting_on_peer.insert(target); } void Locker::remote_wrlock_finish(const MutationImpl::lock_iterator& it, MutationImpl *mut) @@ -1878,10 +1878,10 @@ void Locker::remote_wrlock_finish(const MutationImpl::lock_iterator& it, Mutatio << " " << *lock->get_parent() << dendl; if (!mds->is_cluster_degraded() || mds->mdsmap->get_state(target) >= MDSMap::STATE_REJOIN) { - auto slavereq = make_message(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNWRLOCK); - slavereq->set_lock_type(lock->get_type()); - lock->get_parent()->set_object_info(slavereq->get_object_info()); - mds->send_message_mds(slavereq, target); + auto peerreq = make_message(mut->reqid, mut->attempt, MMDSPeerRequest::OP_UNWRLOCK); + peerreq->set_lock_type(lock->get_type()); + lock->get_parent()->set_object_info(peerreq->get_object_info()); + mds->send_message_mds(peerreq, target); } } @@ -1941,7 +1941,7 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequestRef& mut) } else { // replica ceph_assert(lock->get_sm()->can_remote_xlock); - ceph_assert(!mut->slave_request); + ceph_assert(!mut->peer_request); // wait for single auth if (lock->get_parent()->is_ambiguous_auth()) { @@ -1955,21 +1955,21 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequestRef& mut) if (mds->is_cluster_degraded() && !mds->mdsmap->is_clientreplay_or_active_or_stopping(auth)) { dout(7) << " mds."
<< auth << " is not active" << dendl; - if (mut->more()->waiting_on_slave.empty()) + if (mut->more()->waiting_on_peer.empty()) mds->wait_for_active_peer(auth, new C_MDS_RetryRequest(mdcache, mut)); return false; } // send lock request - mut->more()->slaves.insert(auth); + mut->more()->peers.insert(auth); mut->start_locking(lock, auth); - auto r = make_message(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_XLOCK); + auto r = make_message(mut->reqid, mut->attempt, MMDSPeerRequest::OP_XLOCK); r->set_lock_type(lock->get_type()); lock->get_parent()->set_object_info(r->get_object_info()); mds->send_message_mds(r, auth); - ceph_assert(mut->more()->waiting_on_slave.count(auth) == 0); - mut->more()->waiting_on_slave.insert(auth); + ceph_assert(mut->more()->waiting_on_peer.count(auth) == 0); + mut->more()->waiting_on_peer.insert(auth); return false; } @@ -2032,10 +2032,10 @@ void Locker::xlock_finish(const MutationImpl::lock_iterator& it, MutationImpl *m mds_rank_t auth = lock->get_parent()->authority().first; if (!mds->is_cluster_degraded() || mds->mdsmap->get_state(auth) >= MDSMap::STATE_REJOIN) { - auto slavereq = make_message(mut->reqid, mut->attempt, MMDSSlaveRequest::OP_UNXLOCK); - slavereq->set_lock_type(lock->get_type()); - lock->get_parent()->set_object_info(slavereq->get_object_info()); - mds->send_message_mds(slavereq, auth); + auto peerreq = make_message(mut->reqid, mut->attempt, MMDSPeerRequest::OP_UNXLOCK); + peerreq->set_lock_type(lock->get_type()); + lock->get_parent()->set_object_info(peerreq->get_object_info()); + mds->send_message_mds(peerreq, auth); } // others waiting? lock->finish_waiters(SimpleLock::WAIT_STABLE | diff --git a/src/mds/LogEvent.cc b/src/mds/LogEvent.cc index bdffc6a6afffb..3df8f327c6c9c 100644 --- a/src/mds/LogEvent.cc +++ b/src/mds/LogEvent.cc @@ -29,7 +29,7 @@ #include "events/ESessions.h" #include "events/EUpdate.h" -#include "events/ESlaveUpdate.h" +#include "events/EPeerUpdate.h" #include "events/EOpen.h" #include "events/ECommitted.h" #include "events/EPurged.h" @@ -82,7 +82,7 @@ std::string_view LogEvent::get_type_str() const case EVENT_SESSIONS_OLD: return "SESSIONS_OLD"; case EVENT_SESSIONS: return "SESSIONS"; case EVENT_UPDATE: return "UPDATE"; - case EVENT_SLAVEUPDATE: return "SLAVEUPDATE"; + case EVENT_PEERUPDATE: return "PEERUPDATE"; case EVENT_OPEN: return "OPEN"; case EVENT_COMMITTED: return "COMMITTED"; case EVENT_PURGED: return "PURGED"; @@ -108,7 +108,7 @@ const std::map LogEvent::types = { {"SESSIONS_OLD", EVENT_SESSIONS_OLD}, {"SESSIONS", EVENT_SESSIONS}, {"UPDATE", EVENT_UPDATE}, - {"SLAVEUPDATE", EVENT_SLAVEUPDATE}, + {"PEERUPDATE", EVENT_PEERUPDATE}, {"OPEN", EVENT_OPEN}, {"COMMITTED", EVENT_COMMITTED}, {"PURGED", EVENT_PURGED}, @@ -174,8 +174,8 @@ std::unique_ptr LogEvent::decode_event(bufferlist::const_iterator& p, case EVENT_UPDATE: le = std::make_unique(); break; - case EVENT_SLAVEUPDATE: - le = std::make_unique(); + case EVENT_PEERUPDATE: + le = std::make_unique(); break; case EVENT_OPEN: le = std::make_unique(); diff --git a/src/mds/LogEvent.h b/src/mds/LogEvent.h index b2f3352d43cde..4e368c97b5e9d 100644 --- a/src/mds/LogEvent.h +++ b/src/mds/LogEvent.h @@ -31,7 +31,7 @@ #define EVENT_SESSIONS 12 #define EVENT_UPDATE 20 -#define EVENT_SLAVEUPDATE 21 +#define EVENT_PEERUPDATE 21 #define EVENT_OPEN 22 #define EVENT_COMMITTED 23 #define EVENT_PURGED 24 diff --git a/src/mds/LogSegment.h b/src/mds/LogSegment.h index d07f9bd6eb14a..93d88d61860e8 100644 --- a/src/mds/LogSegment.h +++ b/src/mds/LogSegment.h @@ -32,7 +32,7 @@ class CDir; 
class CInode; class CDentry; class MDSRank; -struct MDSlaveUpdate; +struct MDPeerUpdate; class LogSegment { public: @@ -89,7 +89,7 @@ class LogSegment { map > pending_commit_tids; // mdstable set uncommitted_leaders; - set uncommitted_slaves; + set uncommitted_peers; set uncommitted_fragments; // client request ids diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc index 845839f367ae3..28a7e2b7ae870 100644 --- a/src/mds/MDCache.cc +++ b/src/mds/MDCache.cc @@ -57,7 +57,7 @@ #include "events/ESubtreeMap.h" #include "events/EUpdate.h" -#include "events/ESlaveUpdate.h" +#include "events/EPeerUpdate.h" #include "events/EImportFinish.h" #include "events/EFragment.h" #include "events/ECommitted.h" @@ -1849,7 +1849,7 @@ void MDCache::project_rstat_inode_to_frag(CInode *cur, CDir *parent, snapid_t fi if (cur->last >= floor) { bool update = true; if (cur->state_test(CInode::STATE_AMBIGUOUSAUTH) && cur->is_auth()) { - // rename src inode is not projected in the slave rename prep case. so we should + // rename src inode is not projected in the peer rename prep case. so we should // avoid updateing the inode. ceph_assert(linkunlink < 0); ceph_assert(cur->is_frozen_inode()); @@ -2258,7 +2258,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, // because we are about to write into the dirfrag fnode and that needs // to commit before the lock can cycle. if (linkunlink) { - ceph_assert(pin->nestlock.get_num_wrlocks() || mut->is_slave()); + ceph_assert(pin->nestlock.get_num_wrlocks() || mut->is_peer()); } if (!mut->is_wrlocked(&pin->nestlock)) { @@ -2328,7 +2328,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, if (!mut->is_wrlocked(&pin->versionlock)) mds->locker->local_wrlock_grab(&pin->versionlock, mut); - ceph_assert(mut->is_wrlocked(&pin->nestlock) || mut->is_slave()); + ceph_assert(mut->is_wrlocked(&pin->nestlock) || mut->is_peer()); pin->last_dirstat_prop = mut->get_mds_stamp(); @@ -2444,14 +2444,14 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, // =================================== -// slave requests +// peer requests /* - * some handlers for leader requests with slaves. we need to make - * sure slaves journal commits before we forget we leadered them and + * some handlers for leader requests with peers. we need to make + * sure peers journal commits before we forget we leadered them and * remove them from the uncommitted_leaders map (used during recovery - * to commit|abort slaves). + * to commit|abort peers). */ struct C_MDC_CommittedLeader : public MDCacheLogContext { metareqid_t reqid; @@ -2480,12 +2480,12 @@ void MDCache::_logged_leader_commit(metareqid_t reqid) // while active... -void MDCache::committed_leader_slave(metareqid_t r, mds_rank_t from) +void MDCache::committed_leader_peer(metareqid_t r, mds_rank_t from) { - dout(10) << "committed_leader_slave mds." << from << " on " << r << dendl; + dout(10) << "committed_leader_peer mds." << from << " on " << r << dendl; ceph_assert(uncommitted_leaders.count(r)); - uncommitted_leaders[r].slaves.erase(from); - if (!uncommitted_leaders[r].recovering && uncommitted_leaders[r].slaves.empty()) + uncommitted_leaders[r].peers.erase(from); + if (!uncommitted_leaders[r].recovering && uncommitted_leaders[r].peers.empty()) log_leader_commit(r); } @@ -2503,9 +2503,9 @@ void MDCache::logged_leader_update(metareqid_t reqid) } /* - * Leader may crash after receiving all slaves' commit acks, but before journalling - * the final commit.
Slaves may crash after journalling the slave commit, but before - * sending commit ack to the leader. Commit leaders with no uncommitted slave when + * Leader may crash after receiving all peers' commit acks, but before journalling + * the final commit. Peers may crash after journalling the peer commit, but before + * sending commit ack to the leader. Commit leaders with no uncommitted peer when * resolve finishes. */ void MDCache::finish_committed_leaders() @@ -2514,7 +2514,7 @@ void MDCache::finish_committed_leaders() p != uncommitted_leaders.end(); ++p) { p->second.recovering = false; - if (!p->second.committing && p->second.slaves.empty()) { + if (!p->second.committing && p->second.peers.empty()) { dout(10) << "finish_committed_leaders " << p->first << dendl; log_leader_commit(p->first); } @@ -2522,28 +2522,28 @@ void MDCache::finish_committed_leaders() } /* - * at end of resolve... we must journal a commit|abort for all slave + * at end of resolve... we must journal a commit|abort for all peer * updates, before moving on. * * this is so that the leader can safely journal ECommitted on ops it * leaders when it reaches up:active (all other recovering nodes must * complete resolve before that happens). */ -struct C_MDC_SlaveCommit : public MDCacheLogContext { +struct C_MDC_PeerCommit : public MDCacheLogContext { mds_rank_t from; metareqid_t reqid; - C_MDC_SlaveCommit(MDCache *c, int f, metareqid_t r) : MDCacheLogContext(c), from(f), reqid(r) {} + C_MDC_PeerCommit(MDCache *c, int f, metareqid_t r) : MDCacheLogContext(c), from(f), reqid(r) {} void finish(int r) override { - mdcache->_logged_slave_commit(from, reqid); + mdcache->_logged_peer_commit(from, reqid); } }; -void MDCache::_logged_slave_commit(mds_rank_t from, metareqid_t reqid) +void MDCache::_logged_peer_commit(mds_rank_t from, metareqid_t reqid) { - dout(10) << "_logged_slave_commit from mds." << from << " " << reqid << dendl; + dout(10) << "_logged_peer_commit from mds." 
<< from << " " << reqid << dendl; // send a message - auto req = make_message(reqid, 0, MMDSSlaveRequest::OP_COMMITTED); + auto req = make_message(reqid, 0, MMDSPeerRequest::OP_COMMITTED); mds->send_message_mds(req, from); } @@ -2755,14 +2755,14 @@ void MDCache::resolve_start(MDSContext *resolve_done_) void MDCache::send_resolves() { - send_slave_resolves(); + send_peer_resolves(); if (!resolve_done) { // I'm survivor: refresh snap cache mds->snapclient->sync( new MDSInternalContextWrapper(mds, new LambdaContext([this](int r) { - maybe_finish_slave_resolve(); + maybe_finish_peer_resolve(); }) ) ); @@ -2783,20 +2783,20 @@ void MDCache::send_resolves() send_subtree_resolves(); } -void MDCache::send_slave_resolves() +void MDCache::send_peer_resolves() { - dout(10) << "send_slave_resolves" << dendl; + dout(10) << "send_peer_resolves" << dendl; map> resolves; if (mds->is_resolve()) { - for (map::iterator p = uncommitted_slaves.begin(); - p != uncommitted_slaves.end(); + for (map::iterator p = uncommitted_peers.begin(); + p != uncommitted_peers.end(); ++p) { mds_rank_t leader = p->second.leader; auto &m = resolves[leader]; if (!m) m = make_message(); - m->add_slave_request(p->first, false); + m->add_peer_request(p->first, false); } } else { set resolve_set; @@ -2805,13 +2805,13 @@ void MDCache::send_slave_resolves() p != active_requests.end(); ++p) { MDRequestRef& mdr = p->second; - if (!mdr->is_slave()) + if (!mdr->is_peer()) continue; - if (!mdr->slave_did_prepare() && !mdr->committing) { + if (!mdr->peer_did_prepare() && !mdr->committing) { continue; } - mds_rank_t leader = mdr->slave_to_mds; - if (resolve_set.count(leader) || is_ambiguous_slave_update(p->first, leader)) { + mds_rank_t leader = mdr->peer_to_mds; + if (resolve_set.count(leader) || is_ambiguous_peer_update(p->first, leader)) { dout(10) << " including uncommitted " << *mdr << dendl; if (!resolves.count(leader)) resolves[leader] = make_message(); @@ -2822,18 +2822,18 @@ void MDCache::send_slave_resolves() map cap_map; in->export_client_caps(cap_map); bufferlist bl; - MMDSResolve::slave_inode_cap inode_caps(in->ino(), cap_map); + MMDSResolve::peer_inode_cap inode_caps(in->ino(), cap_map); encode(inode_caps, bl); - resolves[leader]->add_slave_request(p->first, bl); + resolves[leader]->add_peer_request(p->first, bl); } else { - resolves[leader]->add_slave_request(p->first, mdr->committing); + resolves[leader]->add_peer_request(p->first, mdr->committing); } } } } for (auto &p : resolves) { - dout(10) << "sending slave resolve to mds." << p.first << dendl; + dout(10) << "sending peer resolve to mds." << p.first << dendl; mds->send_message_mds(p.second, p.first); resolve_ack_gather.insert(p.first); } @@ -2948,7 +2948,7 @@ void MDCache::send_subtree_resolves() resolves_pending = false; } -void MDCache::maybe_finish_slave_resolve() { +void MDCache::maybe_finish_peer_resolve() { if (resolve_ack_gather.empty() && resolve_need_rollback.empty()) { // snap cache get synced or I'm in resolve state if (mds->snapclient->is_synced() || resolve_done) @@ -2965,7 +2965,7 @@ void MDCache::handle_mds_failure(mds_rank_t who) resolve_gather.insert(who); discard_delayed_resolve(who); - ambiguous_slave_updates.erase(who); + ambiguous_peer_updates.erase(who); rejoin_gather.insert(who); rejoin_sent.erase(who); // i need to send another @@ -2985,60 +2985,60 @@ void MDCache::handle_mds_failure(mds_rank_t who) // tell the balancer too. 
mds->balancer->handle_mds_failure(who); - // clean up any requests slave to/from this node + // clean up any requests peer to/from this node list finish; for (ceph::unordered_map::iterator p = active_requests.begin(); p != active_requests.end(); ++p) { MDRequestRef& mdr = p->second; - // slave to the failed node? - if (mdr->slave_to_mds == who) { - if (mdr->slave_did_prepare()) { - dout(10) << " slave request " << *mdr << " uncommitted, will resolve shortly" << dendl; - if (is_ambiguous_slave_update(p->first, mdr->slave_to_mds)) - remove_ambiguous_slave_update(p->first, mdr->slave_to_mds); - - if (!mdr->more()->waiting_on_slave.empty()) { + // peer to the failed node? + if (mdr->peer_to_mds == who) { + if (mdr->peer_did_prepare()) { + dout(10) << " peer request " << *mdr << " uncommitted, will resolve shortly" << dendl; + if (is_ambiguous_peer_update(p->first, mdr->peer_to_mds)) + remove_ambiguous_peer_update(p->first, mdr->peer_to_mds); + + if (!mdr->more()->waiting_on_peer.empty()) { ceph_assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid()); // will rollback, no need to wait - mdr->reset_slave_request(); - mdr->more()->waiting_on_slave.clear(); + mdr->reset_peer_request(); + mdr->more()->waiting_on_peer.clear(); } } else if (!mdr->committing) { - dout(10) << " slave request " << *mdr << " has no prepare, finishing up" << dendl; - if (mdr->slave_request || mdr->slave_rolling_back()) + dout(10) << " peer request " << *mdr << " has no prepare, finishing up" << dendl; + if (mdr->peer_request || mdr->peer_rolling_back()) mdr->aborted = true; else finish.push_back(mdr); } } - if (mdr->is_slave() && mdr->slave_did_prepare()) { - if (mdr->more()->waiting_on_slave.count(who)) { + if (mdr->is_peer() && mdr->peer_did_prepare()) { + if (mdr->more()->waiting_on_peer.count(who)) { ceph_assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid()); - dout(10) << " slave request " << *mdr << " no longer need rename notity ack from mds." + dout(10) << " peer request " << *mdr << " no longer need rename notify ack from mds." << who << dendl; - mdr->more()->waiting_on_slave.erase(who); - if (mdr->more()->waiting_on_slave.empty() && mdr->slave_request) + mdr->more()->waiting_on_peer.erase(who); + if (mdr->more()->waiting_on_peer.empty() && mdr->peer_request) mds->queue_waiter(new C_MDS_RetryRequest(this, mdr)); } if (mdr->more()->srcdn_auth_mds == who && - mds->mdsmap->is_clientreplay_or_active_or_stopping(mdr->slave_to_mds)) { + mds->mdsmap->is_clientreplay_or_active_or_stopping(mdr->peer_to_mds)) { // rename srcdn's auth mds failed, resolve even I'm a survivor. - dout(10) << " slave request " << *mdr << " uncommitted, will resolve shortly" << dendl; - add_ambiguous_slave_update(p->first, mdr->slave_to_mds); + dout(10) << " peer request " << *mdr << " uncommitted, will resolve shortly" << dendl; + add_ambiguous_peer_update(p->first, mdr->peer_to_mds); } - } else if (mdr->slave_request) { - const cref_t &slave_req = mdr->slave_request; - // FIXME: Slave rename request can arrive after we notice mds failure. + } else if (mdr->peer_request) { + const cref_t &peer_req = mdr->peer_request; + // FIXME: Peer rename request can arrive after we notice mds failure. // This can cause mds to crash (does not affect integrity of FS). - if (slave_req->get_op() == MMDSSlaveRequest::OP_RENAMEPREP && - slave_req->srcdn_auth == who) - slave_req->mark_interrupted(); + if (peer_req->get_op() == MMDSPeerRequest::OP_RENAMEPREP && + peer_req->srcdn_auth == who) + peer_req->mark_interrupted(); } - // failed node is slave?
+ // failed node is peer? if (mdr->is_leader() && !mdr->committing) { if (mdr->more()->srcdn_auth_mds == who) { dout(10) << " leader request " << *mdr << " waiting for rename srcdn's auth mds." @@ -3053,15 +3053,15 @@ void MDCache::handle_mds_failure(mds_rank_t who) if (mdr->more()->witnessed.count(who)) { mds_rank_t srcdn_auth = mdr->more()->srcdn_auth_mds; - if (srcdn_auth >= 0 && mdr->more()->waiting_on_slave.count(srcdn_auth)) { + if (srcdn_auth >= 0 && mdr->more()->waiting_on_peer.count(srcdn_auth)) { dout(10) << " leader request " << *mdr << " waiting for rename srcdn's auth mds." << mdr->more()->srcdn_auth_mds << " to reply" << dendl; - // waiting for the slave (rename srcdn's auth mds), delay sending resolve ack - // until either the request is committing or the slave also fails. - ceph_assert(mdr->more()->waiting_on_slave.size() == 1); + // waiting for the peer (rename srcdn's auth mds), delay sending resolve ack + // until either the request is committing or the peer also fails. + ceph_assert(mdr->more()->waiting_on_peer.size() == 1); pending_leaders.insert(p->first); } else { - dout(10) << " leader request " << *mdr << " no longer witnessed by slave mds." + dout(10) << " leader request " << *mdr << " no longer witnessed by peer mds." << who << " to recover" << dendl; if (srcdn_auth >= 0) ceph_assert(mdr->more()->witnessed.count(srcdn_auth) == 0); @@ -3071,12 +3071,12 @@ void MDCache::handle_mds_failure(mds_rank_t who) } } - if (mdr->more()->waiting_on_slave.count(who)) { - dout(10) << " leader request " << *mdr << " waiting for slave mds." << who + if (mdr->more()->waiting_on_peer.count(who)) { + dout(10) << " leader request " << *mdr << " waiting for peer mds." << who << " to recover" << dendl; // retry request when peer recovers - mdr->more()->waiting_on_slave.erase(who); - if (mdr->more()->waiting_on_slave.empty()) + mdr->more()->waiting_on_peer.erase(who); + if (mdr->more()->waiting_on_peer.empty()) mds->wait_for_active_peer(who, new C_MDS_RetryRequest(this, mdr)); } @@ -3088,15 +3088,15 @@ void MDCache::handle_mds_failure(mds_rank_t who) for (map::iterator p = uncommitted_leaders.begin(); p != uncommitted_leaders.end(); ++p) { - // The failed MDS may have already committed the slave update - if (p->second.slaves.count(who)) { + // The failed MDS may have already committed the peer update + if (p->second.peers.count(who)) { p->second.recovering = true; - p->second.slaves.erase(who); + p->second.peers.erase(who); } } while (!finish.empty()) { - dout(10) << "cleaning up slave request " << *finish.front() << dendl; + dout(10) << "cleaning up peer request " << *finish.front() << dendl; request_finish(finish.front()); finish.pop_front(); } @@ -3225,10 +3225,10 @@ void MDCache::handle_resolve(const cref_t &m) discard_delayed_resolve(from); - // ambiguous slave requests? - if (!m->slave_requests.empty()) { + // ambiguous peer requests? 
+ if (!m->peer_requests.empty()) { if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) { - for (auto p = m->slave_requests.begin(); p != m->slave_requests.end(); ++p) { + for (auto p = m->peer_requests.begin(); p != m->peer_requests.end(); ++p) { if (uncommitted_leaders.count(p->first) && !uncommitted_leaders[p->first].safe) { ceph_assert(!p->second.committing); pending_leaders.insert(p->first); @@ -3236,29 +3236,29 @@ void MDCache::handle_resolve(const cref_t &m) } if (!pending_leaders.empty()) { - dout(10) << " still have pending updates, delay processing slave resolve" << dendl; + dout(10) << " still have pending updates, delay processing peer resolve" << dendl; delayed_resolve[from] = m; return; } } auto ack = make_message(); - for (const auto &p : m->slave_requests) { + for (const auto &p : m->peer_requests) { if (uncommitted_leaders.count(p.first)) { //mds->sessionmap.have_completed_request(p.first)) { // COMMIT if (p.second.committing) { - // already committing, waiting for the OP_COMMITTED slave reply - dout(10) << " already committing slave request " << p << " noop "<< dendl; + // already committing, waiting for the OP_COMMITTED peer reply + dout(10) << " already committing peer request " << p << " noop "<< dendl; } else { - dout(10) << " ambiguous slave request " << p << " will COMMIT" << dendl; + dout(10) << " ambiguous peer request " << p << " will COMMIT" << dendl; ack->add_commit(p.first); } - uncommitted_leaders[p.first].slaves.insert(from); // wait for slave OP_COMMITTED before we log ECommitted + uncommitted_leaders[p.first].peers.insert(from); // wait for peer OP_COMMITTED before we log ECommitted if (p.second.inode_caps.length() > 0) { - // slave wants to export caps (rename) + // peer wants to export caps (rename) ceph_assert(mds->is_resolve()); - MMDSResolve::slave_inode_cap inode_caps; + MMDSResolve::peer_inode_cap inode_caps; auto q = p.second.inode_caps.cbegin(); decode(inode_caps, q); inodeno_t ino = inode_caps.ino; @@ -3279,15 +3279,15 @@ void MDCache::handle_resolve(const cref_t &m) } // will process these caps in rejoin stage - rejoin_slave_exports[ino].first = from; - rejoin_slave_exports[ino].second.swap(cap_exports); + rejoin_peer_exports[ino].first = from; + rejoin_peer_exports[ino].second.swap(cap_exports); - // send information of imported caps back to slave + // send information of imported caps back to peer encode(rejoin_imported_caps[from][ino], ack->commit[p.first]); } } else { // ABORT - dout(10) << " ambiguous slave request " << p << " will ABORT" << dendl; + dout(10) << " ambiguous peer request " << p << " will ABORT" << dendl; ceph_assert(!p.second.committing); ack->add_abort(p.first); } @@ -3440,60 +3440,60 @@ void MDCache::handle_resolve_ack(const cref_t &ack) return; } - if (ambiguous_slave_updates.count(from)) { + if (ambiguous_peer_updates.count(from)) { ceph_assert(mds->mdsmap->is_clientreplay_or_active_or_stopping(from)); ceph_assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping()); } for (const auto &p : ack->commit) { - dout(10) << " commit on slave " << p.first << dendl; + dout(10) << " commit on peer " << p.first << dendl; - if (ambiguous_slave_updates.count(from)) { - remove_ambiguous_slave_update(p.first, from); + if (ambiguous_peer_updates.count(from)) { + remove_ambiguous_peer_update(p.first, from); continue; } if (mds->is_resolve()) { // replay - MDSlaveUpdate *su = get_uncommitted_slave(p.first, from); + MDPeerUpdate *su = get_uncommitted_peer(p.first, from); ceph_assert(su); // log commit - 
mds->mdlog->start_submit_entry(new ESlaveUpdate(mds->mdlog, "unknown", p.first, from, - ESlaveUpdate::OP_COMMIT, su->origop), - new C_MDC_SlaveCommit(this, from, p.first)); + mds->mdlog->start_submit_entry(new EPeerUpdate(mds->mdlog, "unknown", p.first, from, + EPeerUpdate::OP_COMMIT, su->origop), + new C_MDC_PeerCommit(this, from, p.first)); mds->mdlog->flush(); - finish_uncommitted_slave(p.first); + finish_uncommitted_peer(p.first); } else { MDRequestRef mdr = request_get(p.first); // information about leader imported caps if (p.second.length() > 0) mdr->more()->inode_import.share(p.second); - ceph_assert(mdr->slave_request == 0); // shouldn't be doing anything! + ceph_assert(mdr->peer_request == 0); // shouldn't be doing anything! request_finish(mdr); } } for (const auto &metareq : ack->abort) { - dout(10) << " abort on slave " << metareq << dendl; + dout(10) << " abort on peer " << metareq << dendl; if (mds->is_resolve()) { - MDSlaveUpdate *su = get_uncommitted_slave(metareq, from); + MDPeerUpdate *su = get_uncommitted_peer(metareq, from); ceph_assert(su); // perform rollback (and journal a rollback entry) // note: this will hold up the resolve a bit, until the rollback entries journal. MDRequestRef null_ref; switch (su->origop) { - case ESlaveUpdate::LINK: + case EPeerUpdate::LINK: mds->server->do_link_rollback(su->rollback, from, null_ref); break; - case ESlaveUpdate::RENAME: + case EPeerUpdate::RENAME: mds->server->do_rename_rollback(su->rollback, from, null_ref); break; - case ESlaveUpdate::RMDIR: + case EPeerUpdate::RMDIR: mds->server->do_rmdir_rollback(su->rollback, from, null_ref); break; default: @@ -3502,8 +3502,8 @@ void MDCache::handle_resolve_ack(const cref_t &ack) } else { MDRequestRef mdr = request_get(metareq); mdr->aborted = true; - if (mdr->slave_request) { - if (mdr->slave_did_prepare()) // journaling slave prepare ? + if (mdr->peer_request) { + if (mdr->peer_did_prepare()) // journaling peer prepare ? 
add_rollback(metareq, from); } else { request_finish(mdr); } } } - if (!ambiguous_slave_updates.count(from)) { + if (!ambiguous_peer_updates.count(from)) { resolve_ack_gather.erase(from); - maybe_finish_slave_resolve(); + maybe_finish_peer_resolve(); } } -void MDCache::add_uncommitted_slave(metareqid_t reqid, LogSegment *ls, mds_rank_t leader, MDSlaveUpdate *su) +void MDCache::add_uncommitted_peer(metareqid_t reqid, LogSegment *ls, mds_rank_t leader, MDPeerUpdate *su) { - auto const &ret = uncommitted_slaves.emplace(std::piecewise_construct, + auto const &ret = uncommitted_peers.emplace(std::piecewise_construct, std::forward_as_tuple(reqid), std::forward_as_tuple()); ceph_assert(ret.second); - ls->uncommitted_slaves.insert(reqid); - uslave &u = ret.first->second; + ls->uncommitted_peers.insert(reqid); + upeer &u = ret.first->second; u.leader = leader; u.ls = ls; u.su = su; @@ -3532,26 +3532,26 @@ void MDCache::add_uncommitted_slave(metareqid_t reqid, LogSegment *ls, mds_rank_ return; } for(set::iterator p = su->olddirs.begin(); p != su->olddirs.end(); ++p) - uncommitted_slave_rename_olddir[*p]++; + uncommitted_peer_rename_olddir[*p]++; for(set::iterator p = su->unlinked.begin(); p != su->unlinked.end(); ++p) - uncommitted_slave_unlink[*p]++; + uncommitted_peer_unlink[*p]++; } -void MDCache::finish_uncommitted_slave(metareqid_t reqid, bool assert_exist) +void MDCache::finish_uncommitted_peer(metareqid_t reqid, bool assert_exist) { - auto it = uncommitted_slaves.find(reqid); - if (it == uncommitted_slaves.end()) { + auto it = uncommitted_peers.find(reqid); + if (it == uncommitted_peers.end()) { ceph_assert(!assert_exist); return; } - uslave &u = it->second; - MDSlaveUpdate* su = u.su; + upeer &u = it->second; + MDPeerUpdate* su = u.su; if (!u.waiters.empty()) { mds->queue_waiters(u.waiters); } - u.ls->uncommitted_slaves.erase(reqid); - uncommitted_slaves.erase(it); + u.ls->uncommitted_peers.erase(reqid); + uncommitted_peers.erase(it); if (su == nullptr) { return; @@ -3559,11 +3559,11 @@ void MDCache::finish_uncommitted_slave(metareqid_t reqid, bool assert_exist) // discard the non-auth subtree we renamed out of for(set::iterator p = su->olddirs.begin(); p != su->olddirs.end(); ++p) { CInode *diri = *p; - map::iterator it = uncommitted_slave_rename_olddir.find(diri); - ceph_assert(it != uncommitted_slave_rename_olddir.end()); + map::iterator it = uncommitted_peer_rename_olddir.find(diri); + ceph_assert(it != uncommitted_peer_rename_olddir.end()); it->second--; if (it->second == 0) { - uncommitted_slave_rename_olddir.erase(it); + uncommitted_peer_rename_olddir.erase(it); auto&& ls = diri->get_dirfrags(); for (const auto& dir : ls) { CDir *root = get_subtree_root(dir); @@ -3576,14 +3576,14 @@ void MDCache::finish_uncommitted_slave(metareqid_t reqid, bool assert_exist) } else ceph_assert(it->second > 0); } - // removed the inodes that were unlinked by slave update + // remove the inodes that were unlinked by peer update for(set::iterator p = su->unlinked.begin(); p != su->unlinked.end(); ++p) { CInode *in = *p; - map::iterator it = uncommitted_slave_unlink.find(in); - ceph_assert(it != uncommitted_slave_unlink.end()); + map::iterator it = uncommitted_peer_unlink.find(in); + ceph_assert(it != uncommitted_peer_unlink.end()); it->second--; if (it->second == 0) { - uncommitted_slave_unlink.erase(it); + uncommitted_peer_unlink.erase(it); if (!in->get_projected_parent_dn()) mds->mdcache->remove_inode_recursive(in); } else @@
-3592,12 +3592,12 @@ void MDCache::finish_uncommitted_slave(metareqid_t reqid, bool assert_exist) delete su; } -MDSlaveUpdate* MDCache::get_uncommitted_slave(metareqid_t reqid, mds_rank_t leader) +MDPeerUpdate* MDCache::get_uncommitted_peer(metareqid_t reqid, mds_rank_t leader) { - MDSlaveUpdate* su = nullptr; - auto it = uncommitted_slaves.find(reqid); - if (it != uncommitted_slaves.end() && + MDPeerUpdate* su = nullptr; + auto it = uncommitted_peers.find(reqid); + if (it != uncommitted_peers.end() && it->second.leader == leader) { su = it->second.su; } @@ -3608,12 +3608,12 @@ void MDCache::finish_rollback(metareqid_t reqid, MDRequestRef& mdr) { auto p = resolve_need_rollback.find(mdr->reqid); ceph_assert(p != resolve_need_rollback.end()); if (mds->is_resolve()) { - finish_uncommitted_slave(reqid, false); + finish_uncommitted_peer(reqid, false); } else if (mdr) { - finish_uncommitted_slave(mdr->reqid, mdr->more()->slave_update_journaled); + finish_uncommitted_peer(mdr->reqid, mdr->more()->peer_update_journaled); } resolve_need_rollback.erase(p); - maybe_finish_slave_resolve(); + maybe_finish_peer_resolve(); } void MDCache::disambiguate_other_imports() @@ -4193,7 +4193,7 @@ void MDCache::rejoin_send_rejoins() p != active_requests.end(); ++p) { MDRequestRef& mdr = p->second; - if (mdr->is_slave()) + if (mdr->is_peer()) continue; // auth pins for (const auto& q : mdr->object_states) { @@ -4841,17 +4841,17 @@ void MDCache::handle_cache_rejoin_strong(const cref_t &strong) // dn auth_pin? const auto pinned_it = strong->authpinned_dentries.find(dirfrag); if (pinned_it != strong->authpinned_dentries.end()) { - const auto slave_reqid_it = pinned_it->second.find(ss); - if (slave_reqid_it != pinned_it->second.end()) { - for (const auto &r : slave_reqid_it->second) { + const auto peer_reqid_it = pinned_it->second.find(ss); + if (peer_reqid_it != pinned_it->second.end()) { + for (const auto &r : peer_reqid_it->second) { dout(10) << " dn authpin by " << r << " on " << *dn << dendl; - // get/create slave mdrequest + // get/create peer mdrequest MDRequestRef mdr; if (have_request(r.reqid)) mdr = request_get(r.reqid); else - mdr = request_start_slave(r.reqid, r.attempt, strong); + mdr = request_start_peer(r.reqid, r.attempt, strong); mdr->auth_pin(dn); } } @@ -4862,7 +4862,7 @@ void MDCache::handle_cache_rejoin_strong(const cref_t &strong) if (xlocked_it != strong->xlocked_dentries.end()) { const auto ss_req_it = xlocked_it->second.find(ss); if (ss_req_it != xlocked_it->second.end()) { - const MMDSCacheRejoin::slave_reqid& r = ss_req_it->second; + const MMDSCacheRejoin::peer_reqid& r = ss_req_it->second; dout(10) << " dn xlock by " << r << " on " << *dn << dendl; MDRequestRef mdr = request_get(r.reqid); // should have this from auth_pin above. 
ceph_assert(mdr->is_auth_pinned(dn)); @@ -4941,12 +4941,12 @@ void MDCache::handle_cache_rejoin_strong(const cref_t &strong) for (const auto& r : authpinned_inodes_it->second) { dout(10) << " inode authpin by " << r << " on " << *in << dendl; - // get/create slave mdrequest + // get/create peer mdrequest MDRequestRef mdr; if (have_request(r.reqid)) mdr = request_get(r.reqid); else - mdr = request_start_slave(r.reqid, r.attempt, strong); + mdr = request_start_peer(r.reqid, r.attempt, strong); if (strong->frozen_authpin_inodes.count(in->vino())) { ceph_assert(!in->get_num_auth_pins()); mdr->freeze_auth_pin(in); @@ -5485,9 +5485,9 @@ bool MDCache::process_imported_caps() return true; } - // process caps that were exported by slave rename - for (map > >::iterator p = rejoin_slave_exports.begin(); - p != rejoin_slave_exports.end(); + // process caps that were exported by peer rename + for (map > >::iterator p = rejoin_peer_exports.begin(); + p != rejoin_peer_exports.end(); ++p) { CInode *in = get_inode(p->first); ceph_assert(in); @@ -5516,7 +5516,7 @@ bool MDCache::process_imported_caps() p->second.first, CEPH_CAP_FLAG_AUTH); } } - rejoin_slave_exports.clear(); + rejoin_peer_exports.clear(); rejoin_imported_caps.clear(); // process cap imports @@ -7325,7 +7325,7 @@ bool MDCache::trim_non_auth_subtree(CDir *dir) dn->state_clear(CDentry::STATE_AUTH); in->state_clear(CInode::STATE_AUTH); } - } else if (keep_dir && dnl->is_null()) { // keep null dentry for slave rollback + } else if (keep_dir && dnl->is_null()) { // keep null dentry for peer rollback dout(20) << "trim_non_auth_subtree(" << dir << ") keeping dentry " << dn <second; - if (mdr->reqid.name.is_client() && !mdr->is_slave()) + if (mdr->reqid.name.is_client() && !mdr->is_peer()) count++; } return count; @@ -9521,11 +9521,11 @@ int MDCache::get_num_client_requests() MDRequestRef MDCache::request_start(const cref_t& req) { - // did we win a forward race against a slave? + // did we win a forward race against a peer? if (active_requests.count(req->get_reqid())) { MDRequestRef& mdr = active_requests[req->get_reqid()]; ceph_assert(mdr); - if (mdr->is_slave()) { + if (mdr->is_peer()) { dout(10) << "request_start already had " << *mdr << ", waiting for finish" << dendl; mdr->more()->waiting_for_finish.push_back(new C_MDS_RetryMessage(mds, req)); } else { @@ -9552,14 +9552,14 @@ MDRequestRef MDCache::request_start(const cref_t& req) return mdr; } -MDRequestRef MDCache::request_start_slave(metareqid_t ri, __u32 attempt, const cref_t &m) +MDRequestRef MDCache::request_start_peer(metareqid_t ri, __u32 attempt, const cref_t &m) { int by = m->get_source().num(); MDRequestImpl::Params params; params.reqid = ri; params.attempt = attempt; - params.triggering_slave_req = m; - params.slave_to = by; + params.triggering_peer_req = m; + params.peer_to = by; params.initiated = m->get_recv_stamp(); params.throttled = m->get_throttle_stamp(); params.all_read = m->get_recv_complete_stamp(); @@ -9568,7 +9568,7 @@ MDRequestRef MDCache::request_start_slave(metareqid_t ri, __u32 attempt, const c mds->op_tracker.create_request(¶ms); ceph_assert(active_requests.count(mdr->reqid) == 0); active_requests[mdr->reqid] = mdr; - dout(7) << "request_start_slave " << *mdr << " by mds." << by << dendl; + dout(7) << "request_start_peer " << *mdr << " by mds." << by << dendl; return mdr; } @@ -9605,15 +9605,15 @@ void MDCache::request_finish(MDRequestRef& mdr) dout(7) << "request_finish " << *mdr << dendl; mdr->mark_event("finishing request"); - // slave finisher? 
- if (mdr->has_more() && mdr->more()->slave_commit) { - Context *fin = mdr->more()->slave_commit; - mdr->more()->slave_commit = 0; + // peer finisher? + if (mdr->has_more() && mdr->more()->peer_commit) { + Context *fin = mdr->more()->peer_commit; + mdr->more()->peer_commit = 0; int ret; if (mdr->aborted) { mdr->aborted = false; ret = -1; - mdr->more()->slave_rolling_back = true; + mdr->more()->peer_rolling_back = true; } else { ret = 0; mdr->committing = true; @@ -9674,8 +9674,8 @@ void MDCache::dispatch_request(MDRequestRef& mdr) { if (mdr->client_request) { mds->server->dispatch_client_request(mdr); - } else if (mdr->slave_request) { - mds->server->dispatch_slave_request(mdr); + } else if (mdr->peer_request) { + mds->server->dispatch_peer_request(mdr); } else { switch (mdr->internal_op) { case CEPH_MDS_OP_FRAGMENTDIR: @@ -9711,13 +9711,13 @@ void MDCache::request_drop_foreign_locks(MDRequestRef& mdr) if (!mdr->has_more()) return; - // clean up slaves + // clean up peers // (will implicitly drop remote dn pins) - for (set::iterator p = mdr->more()->slaves.begin(); - p != mdr->more()->slaves.end(); + for (set::iterator p = mdr->more()->peers.begin(); + p != mdr->more()->peers.end(); ++p) { - auto r = make_message(mdr->reqid, mdr->attempt, - MMDSSlaveRequest::OP_FINISH); + auto r = make_message(mdr->reqid, mdr->attempt, + MMDSPeerRequest::OP_FINISH); if (mdr->killed && !mdr->committing) { r->mark_abort(); @@ -9756,7 +9756,7 @@ void MDCache::request_drop_foreign_locks(MDRequestRef& mdr) } } - mdr->more()->slaves.clear(); /* we no longer have requests out to them, and + mdr->more()->peers.clear(); /* we no longer have requests out to them, and * leaving them in can cause double-notifies as * this function can get called more than once */ } @@ -9811,15 +9811,15 @@ void MDCache::request_cleanup(MDRequestRef& mdr) void MDCache::request_kill(MDRequestRef& mdr) { - // rollback slave requests is tricky. just let the request proceed. + // rollback peer requests is tricky. just let the request proceed. 
if (mdr->has_more() && - (!mdr->more()->witnessed.empty() || !mdr->more()->waiting_on_slave.empty())) { + (!mdr->more()->witnessed.empty() || !mdr->more()->waiting_on_peer.empty())) { if (!(mdr->locking_state & MutationImpl::ALL_LOCKED)) { ceph_assert(mdr->more()->witnessed.empty()); mdr->aborted = true; - dout(10) << "request_kill " << *mdr << " -- waiting for slave reply, delaying" << dendl; + dout(10) << "request_kill " << *mdr << " -- waiting for peer reply, delaying" << dendl; } else { - dout(10) << "request_kill " << *mdr << " -- already started slave prep, no-op" << dendl; + dout(10) << "request_kill " << *mdr << " -- already started peer prep, no-op" << dendl; } ceph_assert(mdr->used_prealloc_ino == 0); @@ -11893,7 +11893,7 @@ void MDCache::dispatch_fragment_dir(MDRequestRef& mdr) dout(10) << "dispatch_fragment_dir " << basedirfrag << " bits " << info.bits << " on " << *diri << dendl; - if (mdr->more()->slave_error) + if (mdr->more()->peer_error) mdr->aborted = true; if (!mdr->aborted) { diff --git a/src/mds/MDCache.h b/src/mds/MDCache.h index bfa22d688283f..8760766f9d592 100644 --- a/src/mds/MDCache.h +++ b/src/mds/MDCache.h @@ -46,7 +46,7 @@ #include "messages/MMDSOpenInoReply.h" #include "messages/MMDSResolve.h" #include "messages/MMDSResolveAck.h" -#include "messages/MMDSSlaveRequest.h" +#include "messages/MMDSPeerRequest.h" #include "messages/MMDSSnapUpdate.h" #include "osdc/Filer.h" @@ -384,7 +384,7 @@ class MDCache { int get_num_client_requests(); MDRequestRef request_start(const cref_t& req); - MDRequestRef request_start_slave(metareqid_t rid, __u32 attempt, const cref_t &m); + MDRequestRef request_start_peer(metareqid_t rid, __u32 attempt, const cref_t &m); MDRequestRef request_start_internal(int op); bool have_request(metareqid_t rid) { return active_requests.count(rid); @@ -424,10 +424,10 @@ class MDCache { int flags, int linkunlink=0, snapid_t follows=CEPH_NOSNAP); - // slaves - void add_uncommitted_leader(metareqid_t reqid, LogSegment *ls, set &slaves, bool safe=false) { + // peers + void add_uncommitted_leader(metareqid_t reqid, LogSegment *ls, set &peers, bool safe=false) { uncommitted_leaders[reqid].ls = ls; - uncommitted_leaders[reqid].slaves = slaves; + uncommitted_leaders[reqid].peers = peers; uncommitted_leaders[reqid].safe = safe; } void wait_for_uncommitted_leader(metareqid_t reqid, MDSContext *c) { @@ -435,21 +435,21 @@ class MDCache { } bool have_uncommitted_leader(metareqid_t reqid, mds_rank_t from) { auto p = uncommitted_leaders.find(reqid); - return p != uncommitted_leaders.end() && p->second.slaves.count(from) > 0; + return p != uncommitted_leaders.end() && p->second.peers.count(from) > 0; } void log_leader_commit(metareqid_t reqid); void logged_leader_update(metareqid_t reqid); void _logged_leader_commit(metareqid_t reqid); - void committed_leader_slave(metareqid_t r, mds_rank_t from); + void committed_leader_peer(metareqid_t r, mds_rank_t from); void finish_committed_leaders(); - void add_uncommitted_slave(metareqid_t reqid, LogSegment*, mds_rank_t, MDSlaveUpdate *su=nullptr); - void wait_for_uncommitted_slave(metareqid_t reqid, MDSContext *c) { - uncommitted_slaves.at(reqid).waiters.push_back(c); + void add_uncommitted_peer(metareqid_t reqid, LogSegment*, mds_rank_t, MDPeerUpdate *su=nullptr); + void wait_for_uncommitted_peer(metareqid_t reqid, MDSContext *c) { + uncommitted_peers.at(reqid).waiters.push_back(c); } - void finish_uncommitted_slave(metareqid_t reqid, bool assert_exist=true); - MDSlaveUpdate* get_uncommitted_slave(metareqid_t reqid, 
mds_rank_t leader); - void _logged_slave_commit(mds_rank_t from, metareqid_t reqid); + void finish_uncommitted_peer(metareqid_t reqid, bool assert_exist=true); + MDPeerUpdate* get_uncommitted_peer(metareqid_t reqid, mds_rank_t leader); + void _logged_peer_commit(mds_rank_t from, metareqid_t reqid); void set_recovery_set(set& s); void handle_mds_failure(mds_rank_t who); @@ -458,20 +458,20 @@ class MDCache { void recalc_auth_bits(bool replay); void remove_inode_recursive(CInode *in); - bool is_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) { - auto p = ambiguous_slave_updates.find(leader); - return p != ambiguous_slave_updates.end() && p->second.count(reqid); + bool is_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) { + auto p = ambiguous_peer_updates.find(leader); + return p != ambiguous_peer_updates.end() && p->second.count(reqid); } - void add_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) { - ambiguous_slave_updates[leader].insert(reqid); + void add_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) { + ambiguous_peer_updates[leader].insert(reqid); } - void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) { - auto p = ambiguous_slave_updates.find(leader); + void remove_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) { + auto p = ambiguous_peer_updates.find(leader); auto q = p->second.find(reqid); ceph_assert(q != p->second.end()); p->second.erase(q); if (p->second.empty()) - ambiguous_slave_updates.erase(p); + ambiguous_peer_updates.erase(p); } void add_rollback(metareqid_t reqid, mds_rank_t leader) { @@ -619,7 +619,7 @@ class MDCache { void try_trim_non_auth_subtree(CDir *dir); bool can_trim_non_auth_dirfrag(CDir *dir) { return my_ambiguous_imports.count((dir)->dirfrag()) == 0 && - uncommitted_slave_rename_olddir.count(dir->inode) == 0; + uncommitted_peer_rename_olddir.count(dir->inode) == 0; } /** @@ -1004,10 +1004,10 @@ class MDCache { double export_ephemeral_random_max = 0.0; protected: - // track leader requests whose slaves haven't acknowledged commit + // track leader requests whose peers haven't acknowledged commit struct uleader { uleader() {} - set slaves; + set peers; LogSegment *ls = nullptr; MDSContext::vec waiters; bool safe = false; @@ -1015,11 +1015,11 @@ class MDCache { bool recovering = false; }; - struct uslave { - uslave() {} + struct upeer { + upeer() {} mds_rank_t leader; LogSegment *ls = nullptr; - MDSlaveUpdate *su = nullptr; + MDPeerUpdate *su = nullptr; MDSContext::vec waiters; }; @@ -1048,7 +1048,7 @@ class MDCache { friend class C_MDC_Join; friend class C_MDC_RespondInternalRequest; - friend class ESlaveUpdate; + friend class EPeerUpdate; friend class ECommitted; void set_readonly() { readonly = true; } @@ -1062,9 +1062,9 @@ class MDCache { void disambiguate_other_imports(); void trim_unlinked_inodes(); - void send_slave_resolves(); + void send_peer_resolves(); void send_subtree_resolves(); - void maybe_finish_slave_resolve(); + void maybe_finish_peer_resolve(); void rejoin_walk(CDir *dir, const ref_t &rejoin); void handle_cache_rejoin(const cref_t &m); @@ -1165,14 +1165,14 @@ class MDCache { // from MMDSResolves map > > other_ambiguous_imports; - map uncommitted_slave_rename_olddir; // slave: preserve the non-auth dir until seeing commit. - map uncommitted_slave_unlink; // slave: preserve the unlinked inode until seeing commit. + map uncommitted_peer_rename_olddir; // peer: preserve the non-auth dir until seeing commit. 
+ map uncommitted_peer_unlink; // peer: preserve the unlinked inode until seeing commit. - map uncommitted_leaders; // leader: req -> slave set - map uncommitted_slaves; // slave: preserve the slave req until seeing commit. + map uncommitted_leaders; // leader: req -> peer set + map uncommitted_peers; // peer: preserve the peer req until seeing commit. set pending_leaders; - map > ambiguous_slave_updates; + map > ambiguous_peer_updates; bool resolves_pending = false; set resolve_gather; // nodes i need resolves from @@ -1188,7 +1188,7 @@ class MDCache { set rejoin_ack_sent; // nodes i sent a rejoin to set rejoin_ack_gather; // nodes from whom i need a rejoin ack map > > rejoin_imported_caps; - map > > rejoin_slave_exports; + map > > rejoin_peer_exports; map rejoin_client_map; map rejoin_client_metadata_map; diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc index 5847dc10a3312..57a5f74dfefc1 100644 --- a/src/mds/MDSRank.cc +++ b/src/mds/MDSRank.cc @@ -1166,7 +1166,7 @@ bool MDSRank::is_valid_message(const cref_t &m) { type == CEPH_MSG_CLIENT_RECONNECT || type == CEPH_MSG_CLIENT_RECLAIM || type == CEPH_MSG_CLIENT_REQUEST || - type == MSG_MDS_SLAVE_REQUEST || + type == MSG_MDS_PEER_REQUEST || type == MSG_MDS_HEARTBEAT || type == MSG_MDS_TABLE_REQUEST || type == MSG_MDS_LOCK || @@ -1219,7 +1219,7 @@ void MDSRank::handle_message(const cref_t &m) case CEPH_MSG_CLIENT_REQUEST: server->dispatch(m); break; - case MSG_MDS_SLAVE_REQUEST: + case MSG_MDS_PEER_REQUEST: ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS); server->dispatch(m); break; diff --git a/src/mds/Migrator.cc b/src/mds/Migrator.cc index a1bbe57f78ae2..1589dc8482cb6 100644 --- a/src/mds/Migrator.cc +++ b/src/mds/Migrator.cc @@ -1035,7 +1035,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) } ceph_assert(it->second.state == EXPORT_LOCKING); - if (mdr->more()->slave_error || dir->is_frozen() || dir->is_freezing()) { + if (mdr->more()->peer_error || dir->is_frozen() || dir->is_freezing()) { dout(7) << "wouldblock|freezing|frozen, canceling export" << dendl; export_try_cancel(dir); return; diff --git a/src/mds/Mutation.cc b/src/mds/Mutation.cc index db4fb206422aa..cf791ead06e1d 100644 --- a/src/mds/Mutation.cc +++ b/src/mds/Mutation.cc @@ -311,14 +311,14 @@ bool MDRequestImpl::has_witnesses() return (_more != nullptr) && (!_more->witnessed.empty()); } -bool MDRequestImpl::slave_did_prepare() +bool MDRequestImpl::peer_did_prepare() { - return has_more() && more()->slave_commit; + return has_more() && more()->peer_commit; } -bool MDRequestImpl::slave_rolling_back() +bool MDRequestImpl::peer_rolling_back() { - return has_more() && more()->slave_rolling_back; + return has_more() && more()->peer_rolling_back; } bool MDRequestImpl::freeze_auth_pin(CInode *inode) @@ -467,12 +467,12 @@ cref_t MDRequestImpl::release_client_request() return req; } -void MDRequestImpl::reset_slave_request(const cref_t& req) +void MDRequestImpl::reset_peer_request(const cref_t& req) { msg_lock.lock(); - cref_t old; - old.swap(slave_request); - slave_request = req; + cref_t old; + old.swap(peer_request); + peer_request = req; msg_lock.unlock(); old.reset(); } @@ -481,9 +481,9 @@ void MDRequestImpl::print(ostream &out) const { out << "request(" << reqid << " nref=" << nref; //if (request) out << " " << *request; - if (is_slave()) out << " slave_to mds." << slave_to_mds; + if (is_peer()) out << " peer_to mds." 
<< peer_to_mds; if (client_request) out << " cr=" << client_request; - if (slave_request) out << " sr=" << slave_request; + if (peer_request) out << " sr=" << peer_request; out << ")"; } @@ -499,7 +499,7 @@ void MDRequestImpl::_dump(Formatter *f) const { msg_lock.lock(); auto _client_request = client_request; - auto _slave_request =slave_request; + auto _peer_request =peer_request; msg_lock.unlock(); if (_client_request) { @@ -508,25 +508,25 @@ void MDRequestImpl::_dump(Formatter *f) const f->dump_stream("client") << _client_request->get_orig_source(); f->dump_int("tid", _client_request->get_tid()); f->close_section(); // client_info - } else if (is_slave() && _slave_request) { // replies go to an existing mdr - f->dump_string("op_type", "slave_request"); + } else if (is_peer() && _peer_request) { // replies go to an existing mdr + f->dump_string("op_type", "peer_request"); f->open_object_section("leader_info"); - f->dump_stream("leader") << _slave_request->get_orig_source(); + f->dump_stream("leader") << _peer_request->get_orig_source(); f->close_section(); // leader_info f->open_object_section("request_info"); - f->dump_int("attempt", _slave_request->get_attempt()); + f->dump_int("attempt", _peer_request->get_attempt()); f->dump_string("op_type", - MMDSSlaveRequest::get_opname(_slave_request->get_op())); - f->dump_int("lock_type", _slave_request->get_lock_type()); - f->dump_stream("object_info") << _slave_request->get_object_info(); - f->dump_stream("srcdnpath") << _slave_request->srcdnpath; - f->dump_stream("destdnpath") << _slave_request->destdnpath; - f->dump_stream("witnesses") << _slave_request->witnesses; + MMDSPeerRequest::get_opname(_peer_request->get_op())); + f->dump_int("lock_type", _peer_request->get_lock_type()); + f->dump_stream("object_info") << _peer_request->get_object_info(); + f->dump_stream("srcdnpath") << _peer_request->srcdnpath; + f->dump_stream("destdnpath") << _peer_request->destdnpath; + f->dump_stream("witnesses") << _peer_request->witnesses; f->dump_bool("has_inode_export", - _slave_request->inode_export_v != 0); - f->dump_int("inode_export_v", _slave_request->inode_export_v); - f->dump_stream("op_stamp") << _slave_request->op_stamp; + _peer_request->inode_export_v != 0); + f->dump_int("inode_export_v", _peer_request->inode_export_v); + f->dump_stream("op_stamp") << _peer_request->op_stamp; f->close_section(); // request_info } else if (internal_op != -1) { // internal request @@ -552,17 +552,17 @@ void MDRequestImpl::_dump_op_descriptor_unlocked(ostream& stream) const { msg_lock.lock(); auto _client_request = client_request; - auto _slave_request = slave_request; + auto _peer_request = peer_request; msg_lock.unlock(); if (_client_request) { _client_request->print(stream); - } else if (_slave_request) { - _slave_request->print(stream); + } else if (_peer_request) { + _peer_request->print(stream); } else if (internal_op >= 0) { stream << "internal op " << ceph_mds_op_name(internal_op) << ":" << reqid; } else { - // drat, it's triggered by a slave request, but we don't have a message + // drat, it's triggered by a peer request, but we don't have a message // FIXME stream << "rejoin:" << reqid; } diff --git a/src/mds/Mutation.h b/src/mds/Mutation.h index 84138010f2811..9bbb4b87c2b18 100644 --- a/src/mds/Mutation.h +++ b/src/mds/Mutation.h @@ -28,7 +28,7 @@ #include "common/TrackedOp.h" #include "messages/MClientRequest.h" -#include "messages/MMDSSlaveRequest.h" +#include "messages/MMDSPeerRequest.h" #include "messages/MClientReply.h" class LogSegment; @@ -118,10 
+118,10 @@ public: // keep our default values synced with MDRequestParam's MutationImpl() : TrackedOp(nullptr, utime_t()) {} MutationImpl(OpTracker *tracker, utime_t initiated, - const metareqid_t &ri, __u32 att=0, mds_rank_t slave_to=MDS_RANK_NONE) + const metareqid_t &ri, __u32 att=0, mds_rank_t peer_to=MDS_RANK_NONE) : TrackedOp(tracker, initiated), reqid(ri), attempt(att), - slave_to_mds(slave_to) {} + peer_to_mds(peer_to) {} ~MutationImpl() override { ceph_assert(!locking); ceph_assert(!lock_cache); @@ -159,8 +159,8 @@ public: return lock == last_locked; } - bool is_leader() const { return slave_to_mds == MDS_RANK_NONE; } - bool is_slave() const { return slave_to_mds != MDS_RANK_NONE; } + bool is_leader() const { return peer_to_mds == MDS_RANK_NONE; } + bool is_peer() const { return peer_to_mds != MDS_RANK_NONE; } client_t get_client() const { if (reqid.name.is_client()) @@ -222,8 +222,8 @@ public: __u32 attempt = 0; // which attempt for this request LogSegment *ls = nullptr; // the log segment i'm committing to - // flag mutation as slave - mds_rank_t slave_to_mds = MDS_RANK_NONE; // this is a slave request if >= 0. + // flag mutation as peer + mds_rank_t peer_to_mds = MDS_RANK_NONE; // this is a peer request if >= 0. ceph::unordered_map object_states; int num_pins = 0; @@ -282,17 +282,17 @@ struct MDRequestImpl : public MutationImpl { struct More { More() {} - int slave_error = 0; - std::set slaves; // mds nodes that have slave requests to me (implies client_request) - std::set waiting_on_slave; // peers i'm waiting for slavereq replies from. + int peer_error = 0; + std::set peers; // mds nodes that have peer requests to me (implies client_request) + std::set waiting_on_peer; // peers i'm waiting for peerreq replies from. // for rename/link/unlink std::set witnessed; // nodes who have journaled a RenamePrepare std::map pvmap; - bool has_journaled_slaves = false; - bool slave_update_journaled = false; - bool slave_rolling_back = false; + bool has_journaled_peers = false; + bool peer_update_journaled = false; + bool peer_rolling_back = false; // for rename std::set extra_witnesses; // replica list from srcdn auth (rename) @@ -318,8 +318,8 @@ struct MDRequestImpl : public MutationImpl { sr_t *srci_srnode = nullptr; sr_t *desti_srnode = nullptr; - // called when slave commits or aborts - Context *slave_commit = nullptr; + // called when peer commits or aborts + Context *peer_commit = nullptr; ceph::buffer::list rollback_bl; MDSContext::vec waiting_for_finish; @@ -352,15 +352,15 @@ struct MDRequestImpl : public MutationImpl { metareqid_t reqid; __u32 attempt = 0; ceph::cref_t client_req; - ceph::cref_t triggering_slave_req; - mds_rank_t slave_to = MDS_RANK_NONE; + ceph::cref_t triggering_peer_req; + mds_rank_t peer_to = MDS_RANK_NONE; utime_t initiated; utime_t throttled, all_read, dispatched; int internal_op = -1; }; MDRequestImpl(const Params* params, OpTracker *tracker) : MutationImpl(tracker, params->initiated, - params->reqid, params->attempt, params->slave_to), + params->reqid, params->attempt, params->peer_to), item_session_request(this), client_request(params->client_req), internal_op(params->internal_op) {} ~MDRequestImpl() override; @@ -368,8 +368,8 @@ struct MDRequestImpl : public MutationImpl { More* more(); bool has_more() const; bool has_witnesses(); - bool slave_did_prepare(); - bool slave_rolling_back(); + bool peer_did_prepare(); + bool peer_rolling_back(); bool freeze_auth_pin(CInode *inode); void unfreeze_auth_pin(bool clear_inode=false); void 
set_remote_frozen_auth_pin(CInode *inode); @@ -394,7 +394,7 @@ struct MDRequestImpl : public MutationImpl { void dump(ceph::Formatter *f) const override; ceph::cref_t release_client_request(); - void reset_slave_request(const ceph::cref_t& req=nullptr); + void reset_peer_request(const ceph::cref_t& req=nullptr); Session *session = nullptr; elist::item item_session_request; // if not on list, op is aborted. @@ -430,8 +430,8 @@ struct MDRequestImpl : public MutationImpl { // inos we did a embedded cap release on, and may need to eval if we haven't since reissued std::map cap_releases; - // -- i am a slave request - ceph::cref_t slave_request; // slave request (if one is pending; implies slave == true) + // -- i am a peer request + ceph::cref_t peer_request; // peer request (if one is pending; implies peer == true) // -- i am an internal op int internal_op; @@ -453,12 +453,12 @@ private: mutable ceph::spinlock msg_lock; }; -struct MDSlaveUpdate { - MDSlaveUpdate(int oo, ceph::buffer::list &rbl) : +struct MDPeerUpdate { + MDPeerUpdate(int oo, ceph::buffer::list &rbl) : origop(oo) { rollback = std::move(rbl); } - ~MDSlaveUpdate() { + ~MDPeerUpdate() { if (waiter) waiter->complete(0); } diff --git a/src/mds/Server.cc b/src/mds/Server.cc index 2ea24f7c46f0b..300de98f76643 100644 --- a/src/mds/Server.cc +++ b/src/mds/Server.cc @@ -37,7 +37,7 @@ #include "osdc/Objecter.h" #include "events/EUpdate.h" -#include "events/ESlaveUpdate.h" +#include "events/EPeerUpdate.h" #include "events/ESession.h" #include "events/EOpen.h" #include "events/ECommitted.h" @@ -165,8 +165,8 @@ void Server::create_logger() plb.add_u64_counter(l_mdss_handle_client_request, "handle_client_request", "Client requests", "hcr", PerfCountersBuilder::PRIO_INTERESTING); - plb.add_u64_counter(l_mdss_handle_slave_request, "handle_slave_request", - "Slave requests", "hsr", PerfCountersBuilder::PRIO_INTERESTING); + plb.add_u64_counter(l_mdss_handle_peer_request, "handle_peer_request", + "Peer requests", "hsr", PerfCountersBuilder::PRIO_INTERESTING); plb.add_u64_counter(l_mdss_handle_client_session, "handle_client_session", "Client session messages", "hcs", PerfCountersBuilder::PRIO_INTERESTING); @@ -235,7 +235,7 @@ void Server::create_logger() plb.set_prio_default(PerfCountersBuilder::PRIO_DEBUGONLY); plb.add_u64_counter(l_mdss_dispatch_client_request, "dispatch_client_request", "Client requests dispatched"); - plb.add_u64_counter(l_mdss_dispatch_slave_request, "dispatch_server_request", + plb.add_u64_counter(l_mdss_dispatch_peer_request, "dispatch_server_request", "Server requests dispatched"); logger = plb.create_perf_counters(); @@ -274,7 +274,7 @@ void Server::dispatch(const cref_t &m) */ bool sessionclosed_isok = replay_unsafe_with_closed_session; // active? 
- // handle_slave_request()/handle_client_session() will wait if necessary + // handle_peer_request()/handle_client_session() will wait if necessary if (m->get_type() == CEPH_MSG_CLIENT_REQUEST && !mds->is_active()) { const auto &req = ref_cast(m); if (mds->is_reconnect() || mds->get_want_state() == CEPH_MDS_STATE_RECONNECT) { @@ -335,8 +335,8 @@ void Server::dispatch(const cref_t &m) case CEPH_MSG_CLIENT_RECLAIM: handle_client_reclaim(ref_cast(m)); return; - case MSG_MDS_SLAVE_REQUEST: - handle_slave_request(ref_cast(m)); + case MSG_MDS_PEER_REQUEST: + handle_peer_request(ref_cast(m)); return; default: derr << "server unknown message " << m->get_type() << dendl; @@ -2012,8 +2012,8 @@ void Server::early_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn) return; } - if (mdr->has_more() && mdr->more()->has_journaled_slaves) { - dout(10) << "early_reply - there are journaled slaves, not allowed." << dendl; + if (mdr->has_more() && mdr->more()->has_journaled_peers) { + dout(10) << "early_reply - there are journaled peers, not allowed." << dendl; return; } @@ -2419,7 +2419,7 @@ void Server::handle_osd_map() void Server::dispatch_client_request(MDRequestRef& mdr) { // we shouldn't be waiting on anyone. - ceph_assert(!mdr->has_more() || mdr->more()->waiting_on_slave.empty()); + ceph_assert(!mdr->has_more() || mdr->more()->waiting_on_peer.empty()); if (mdr->killed) { dout(10) << "request " << *mdr << " was killed" << dendl; @@ -2454,9 +2454,9 @@ void Server::dispatch_client_request(MDRequestRef& mdr) respond_to_request(mdr, -EROFS); return; } - if (mdr->has_more() && mdr->more()->slave_error) { - dout(10) << " got error from slaves" << dendl; - respond_to_request(mdr, mdr->more()->slave_error); + if (mdr->has_more() && mdr->more()->peer_error) { + dout(10) << " got error from peers" << dendl; + respond_to_request(mdr, mdr->more()->peer_error); return; } @@ -2471,7 +2471,7 @@ void Server::dispatch_client_request(MDRequestRef& mdr) req->get_op() == CEPH_MDS_OP_MKSNAP || ((req->get_op() == CEPH_MDS_OP_LINK || req->get_op() == CEPH_MDS_OP_RENAME) && - (!mdr->has_more() || mdr->more()->witnessed.empty())) // haven't started slave request + (!mdr->has_more() || mdr->more()->witnessed.empty())) // haven't started peer request ) { dout(20) << __func__ << ": full, responding ENOSPC to op " << ceph_mds_op_name(req->get_op()) << dendl; @@ -2590,23 +2590,23 @@ void Server::dispatch_client_request(MDRequestRef& mdr) // --------------------------------------- -// SLAVE REQUESTS +// PEER REQUESTS -void Server::handle_slave_request(const cref_t &m) +void Server::handle_peer_request(const cref_t &m) { - dout(4) << "handle_slave_request " << m->get_reqid() << " from " << m->get_source() << dendl; + dout(4) << "handle_peer_request " << m->get_reqid() << " from " << m->get_source() << dendl; mds_rank_t from = mds_rank_t(m->get_source().num()); - if (logger) logger->inc(l_mdss_handle_slave_request); + if (logger) logger->inc(l_mdss_handle_peer_request); // reply? if (m->is_reply()) - return handle_slave_request_reply(m); + return handle_peer_request_reply(m); // the purpose of rename notify is enforcing causal message ordering. making sure // bystanders have received all messages from rename srcdn's auth MDS. 
- if (m->get_op() == MMDSSlaveRequest::OP_RENAMENOTIFY) { - auto reply = make_message(m->get_reqid(), m->get_attempt(), MMDSSlaveRequest::OP_RENAMENOTIFYACK); + if (m->get_op() == MMDSPeerRequest::OP_RENAMENOTIFY) { + auto reply = make_message(m->get_reqid(), m->get_attempt(), MMDSPeerRequest::OP_RENAMENOTIFYACK); mds->send_message(reply, m->get_connection()); return; } @@ -2624,7 +2624,7 @@ void Server::handle_slave_request(const cref_t &m) return; } - // am i a new slave? + // am i a new peer? MDRequestRef mdr; if (mdcache->have_request(m->get_reqid())) { // existing? @@ -2643,22 +2643,22 @@ void Server::handle_slave_request(const cref_t &m) << ", closing out" << dendl; mdcache->request_finish(mdr); mdr.reset(); - } else if (mdr->slave_to_mds != from) { - dout(10) << "local request " << *mdr << " not slave to mds." << from << dendl; + } else if (mdr->peer_to_mds != from) { + dout(10) << "local request " << *mdr << " not peer to mds." << from << dendl; return; } - // may get these while mdr->slave_request is non-null - if (m->get_op() == MMDSSlaveRequest::OP_DROPLOCKS) { + // may get these while mdr->peer_request is non-null + if (m->get_op() == MMDSPeerRequest::OP_DROPLOCKS) { mds->locker->drop_locks(mdr.get()); return; } - if (m->get_op() == MMDSSlaveRequest::OP_FINISH) { + if (m->get_op() == MMDSPeerRequest::OP_FINISH) { if (m->is_abort()) { mdr->aborted = true; - if (mdr->slave_request) { + if (mdr->peer_request) { // only abort on-going xlock, wrlock and auth pin - ceph_assert(!mdr->slave_did_prepare()); + ceph_assert(!mdr->peer_did_prepare()); } else { mdcache->request_finish(mdr); } @@ -2673,15 +2673,15 @@ void Server::handle_slave_request(const cref_t &m) } if (!mdr.get()) { // new? - if (m->get_op() == MMDSSlaveRequest::OP_FINISH) { - dout(10) << "missing slave request for " << m->get_reqid() + if (m->get_op() == MMDSPeerRequest::OP_FINISH) { + dout(10) << "missing peer request for " << m->get_reqid() << " OP_FINISH, must have lost race with a forward" << dendl; return; } - mdr = mdcache->request_start_slave(m->get_reqid(), m->get_attempt(), m); + mdr = mdcache->request_start_peer(m->get_reqid(), m->get_attempt(), m); mdr->set_op_stamp(m->op_stamp); } - ceph_assert(mdr->slave_request == 0); // only one at a time, please! + ceph_assert(mdr->peer_request == 0); // only one at a time, please! if (straydn) { mdr->pin(straydn); @@ -2695,19 +2695,19 @@ void Server::handle_slave_request(const cref_t &m) return; } - mdr->reset_slave_request(m); + mdr->reset_peer_request(m); - dispatch_slave_request(mdr); + dispatch_peer_request(mdr); } -void Server::handle_slave_request_reply(const cref_t &m) +void Server::handle_peer_request_reply(const cref_t &m) { mds_rank_t from = mds_rank_t(m->get_source().num()); if (!mds->is_clientreplay() && !mds->is_active() && !mds->is_stopping()) { metareqid_t r = m->get_reqid(); if (!mdcache->have_uncommitted_leader(r, from)) { - dout(10) << "handle_slave_request_reply ignoring slave reply from mds." + dout(10) << "handle_peer_request_reply ignoring peer reply from mds." 
<< from << " reqid " << r << dendl; return; } @@ -2716,45 +2716,45 @@ void Server::handle_slave_request_reply(const cref_t &m) return; } - if (m->get_op() == MMDSSlaveRequest::OP_COMMITTED) { + if (m->get_op() == MMDSPeerRequest::OP_COMMITTED) { metareqid_t r = m->get_reqid(); - mdcache->committed_leader_slave(r, from); + mdcache->committed_leader_peer(r, from); return; } MDRequestRef mdr = mdcache->request_get(m->get_reqid()); if (m->get_attempt() != mdr->attempt) { - dout(10) << "handle_slave_request_reply " << *mdr << " ignoring reply from other attempt " + dout(10) << "handle_peer_request_reply " << *mdr << " ignoring reply from other attempt " << m->get_attempt() << dendl; return; } switch (m->get_op()) { - case MMDSSlaveRequest::OP_XLOCKACK: + case MMDSPeerRequest::OP_XLOCKACK: { // identify lock, leader request SimpleLock *lock = mds->locker->get_lock(m->get_lock_type(), m->get_object_info()); - mdr->more()->slaves.insert(from); + mdr->more()->peers.insert(from); lock->decode_locked_state(m->get_lock_data()); dout(10) << "got remote xlock on " << *lock << " on " << *lock->get_parent() << dendl; mdr->emplace_lock(lock, MutationImpl::LockOp::XLOCK); mdr->finish_locking(lock); lock->get_xlock(mdr, mdr->get_client()); - ceph_assert(mdr->more()->waiting_on_slave.count(from)); - mdr->more()->waiting_on_slave.erase(from); - ceph_assert(mdr->more()->waiting_on_slave.empty()); + ceph_assert(mdr->more()->waiting_on_peer.count(from)); + mdr->more()->waiting_on_peer.erase(from); + ceph_assert(mdr->more()->waiting_on_peer.empty()); mdcache->dispatch_request(mdr); } break; - case MMDSSlaveRequest::OP_WRLOCKACK: + case MMDSPeerRequest::OP_WRLOCKACK: { // identify lock, leader request SimpleLock *lock = mds->locker->get_lock(m->get_lock_type(), m->get_object_info()); - mdr->more()->slaves.insert(from); + mdr->more()->peers.insert(from); dout(10) << "got remote wrlock on " << *lock << " on " << *lock->get_parent() << dendl; auto it = mdr->emplace_lock(lock, MutationImpl::LockOp::REMOTE_WRLOCK, from); ceph_assert(it->is_remote_wrlock()); @@ -2762,31 +2762,31 @@ void Server::handle_slave_request_reply(const cref_t &m) mdr->finish_locking(lock); - ceph_assert(mdr->more()->waiting_on_slave.count(from)); - mdr->more()->waiting_on_slave.erase(from); - ceph_assert(mdr->more()->waiting_on_slave.empty()); + ceph_assert(mdr->more()->waiting_on_peer.count(from)); + mdr->more()->waiting_on_peer.erase(from); + ceph_assert(mdr->more()->waiting_on_peer.empty()); mdcache->dispatch_request(mdr); } break; - case MMDSSlaveRequest::OP_AUTHPINACK: - handle_slave_auth_pin_ack(mdr, m); + case MMDSPeerRequest::OP_AUTHPINACK: + handle_peer_auth_pin_ack(mdr, m); break; - case MMDSSlaveRequest::OP_LINKPREPACK: - handle_slave_link_prep_ack(mdr, m); + case MMDSPeerRequest::OP_LINKPREPACK: + handle_peer_link_prep_ack(mdr, m); break; - case MMDSSlaveRequest::OP_RMDIRPREPACK: - handle_slave_rmdir_prep_ack(mdr, m); + case MMDSPeerRequest::OP_RMDIRPREPACK: + handle_peer_rmdir_prep_ack(mdr, m); break; - case MMDSSlaveRequest::OP_RENAMEPREPACK: - handle_slave_rename_prep_ack(mdr, m); + case MMDSPeerRequest::OP_RENAMEPREPACK: + handle_peer_rename_prep_ack(mdr, m); break; - case MMDSSlaveRequest::OP_RENAMENOTIFYACK: - handle_slave_rename_notify_ack(mdr, m); + case MMDSPeerRequest::OP_RENAMENOTIFYACK: + handle_peer_rename_notify_ack(mdr, m); break; default: @@ -2794,9 +2794,9 @@ void Server::handle_slave_request_reply(const cref_t &m) } } -void Server::dispatch_slave_request(MDRequestRef& mdr) +void 
Server::dispatch_peer_request(MDRequestRef& mdr) { - dout(7) << "dispatch_slave_request " << *mdr << " " << *mdr->slave_request << dendl; + dout(7) << "dispatch_peer_request " << *mdr << " " << *mdr->peer_request << dendl; if (mdr->aborted) { dout(7) << " abort flag set, finishing" << dendl; @@ -2804,22 +2804,22 @@ void Server::dispatch_slave_request(MDRequestRef& mdr) return; } - if (logger) logger->inc(l_mdss_dispatch_slave_request); + if (logger) logger->inc(l_mdss_dispatch_peer_request); - int op = mdr->slave_request->get_op(); + int op = mdr->peer_request->get_op(); switch (op) { - case MMDSSlaveRequest::OP_XLOCK: - case MMDSSlaveRequest::OP_WRLOCK: + case MMDSPeerRequest::OP_XLOCK: + case MMDSPeerRequest::OP_WRLOCK: { // identify object - SimpleLock *lock = mds->locker->get_lock(mdr->slave_request->get_lock_type(), - mdr->slave_request->get_object_info()); + SimpleLock *lock = mds->locker->get_lock(mdr->peer_request->get_lock_type(), + mdr->peer_request->get_object_info()); if (!lock) { dout(10) << "don't have object, dropping" << dendl; ceph_abort(); // can this happen, if we auth pinned properly. } - if (op == MMDSSlaveRequest::OP_XLOCK && !lock->get_parent()->is_auth()) { + if (op == MMDSPeerRequest::OP_XLOCK && !lock->get_parent()->is_auth()) { dout(10) << "not auth for remote xlock attempt, dropping on " << *lock << " on " << *lock->get_parent() << dendl; } else { @@ -2834,13 +2834,13 @@ void Server::dispatch_slave_request(MDRequestRef& mdr) int replycode = 0; switch (op) { - case MMDSSlaveRequest::OP_XLOCK: + case MMDSPeerRequest::OP_XLOCK: lov.add_xlock(lock); - replycode = MMDSSlaveRequest::OP_XLOCKACK; + replycode = MMDSPeerRequest::OP_XLOCKACK; break; - case MMDSSlaveRequest::OP_WRLOCK: + case MMDSPeerRequest::OP_WRLOCK: lov.add_wrlock(lock); - replycode = MMDSSlaveRequest::OP_WRLOCKACK; + replycode = MMDSPeerRequest::OP_WRLOCKACK; break; } @@ -2848,33 +2848,33 @@ void Server::dispatch_slave_request(MDRequestRef& mdr) return; // ack - auto r = make_message(mdr->reqid, mdr->attempt, replycode); + auto r = make_message(mdr->reqid, mdr->attempt, replycode); r->set_lock_type(lock->get_type()); lock->get_parent()->set_object_info(r->get_object_info()); - if (replycode == MMDSSlaveRequest::OP_XLOCKACK) + if (replycode == MMDSPeerRequest::OP_XLOCKACK) lock->encode_locked_state(r->get_lock_data()); - mds->send_message(r, mdr->slave_request->get_connection()); + mds->send_message(r, mdr->peer_request->get_connection()); } // done. - mdr->reset_slave_request(); + mdr->reset_peer_request(); } break; - case MMDSSlaveRequest::OP_UNXLOCK: - case MMDSSlaveRequest::OP_UNWRLOCK: + case MMDSPeerRequest::OP_UNXLOCK: + case MMDSPeerRequest::OP_UNWRLOCK: { - SimpleLock *lock = mds->locker->get_lock(mdr->slave_request->get_lock_type(), - mdr->slave_request->get_object_info()); + SimpleLock *lock = mds->locker->get_lock(mdr->peer_request->get_lock_type(), + mdr->peer_request->get_object_info()); ceph_assert(lock); auto it = mdr->locks.find(lock); ceph_assert(it != mdr->locks.end()); bool need_issue = false; switch (op) { - case MMDSSlaveRequest::OP_UNXLOCK: + case MMDSPeerRequest::OP_UNXLOCK: mds->locker->xlock_finish(it, mdr.get(), &need_issue); break; - case MMDSSlaveRequest::OP_UNWRLOCK: + case MMDSPeerRequest::OP_UNWRLOCK: mds->locker->wrlock_finish(it, mdr.get(), &need_issue); break; } @@ -2882,25 +2882,25 @@ void Server::dispatch_slave_request(MDRequestRef& mdr) mds->locker->issue_caps(static_cast(lock->get_parent())); // done. no ack necessary. 
- mdr->reset_slave_request(); + mdr->reset_peer_request(); } break; - case MMDSSlaveRequest::OP_AUTHPIN: - handle_slave_auth_pin(mdr); + case MMDSPeerRequest::OP_AUTHPIN: + handle_peer_auth_pin(mdr); break; - case MMDSSlaveRequest::OP_LINKPREP: - case MMDSSlaveRequest::OP_UNLINKPREP: - handle_slave_link_prep(mdr); + case MMDSPeerRequest::OP_LINKPREP: + case MMDSPeerRequest::OP_UNLINKPREP: + handle_peer_link_prep(mdr); break; - case MMDSSlaveRequest::OP_RMDIRPREP: - handle_slave_rmdir_prep(mdr); + case MMDSPeerRequest::OP_RMDIRPREP: + handle_peer_rmdir_prep(mdr); break; - case MMDSSlaveRequest::OP_RENAMEPREP: - handle_slave_rename_prep(mdr); + case MMDSPeerRequest::OP_RENAMEPREP: + handle_peer_rename_prep(mdr); break; default: @@ -2908,16 +2908,16 @@ void Server::dispatch_slave_request(MDRequestRef& mdr) } } -void Server::handle_slave_auth_pin(MDRequestRef& mdr) +void Server::handle_peer_auth_pin(MDRequestRef& mdr) { - dout(10) << "handle_slave_auth_pin " << *mdr << dendl; + dout(10) << "handle_peer_auth_pin " << *mdr << dendl; // build list of objects list objects; CInode *auth_pin_freeze = NULL; - bool nonblocking = mdr->slave_request->is_nonblocking(); + bool nonblocking = mdr->peer_request->is_nonblocking(); bool fail = false, wouldblock = false, readonly = false; - ref_t reply; + ref_t reply; if (mdcache->is_readonly()) { dout(10) << " read-only FS" << dendl; @@ -2926,7 +2926,7 @@ void Server::handle_slave_auth_pin(MDRequestRef& mdr) } if (!fail) { - for (const auto &oi : mdr->slave_request->get_authpins()) { + for (const auto &oi : mdr->peer_request->get_authpins()) { MDSCacheObject *object = mdcache->get_object(oi); if (!object) { dout(10) << " don't have " << oi << dendl; @@ -2935,7 +2935,7 @@ void Server::handle_slave_auth_pin(MDRequestRef& mdr) } objects.push_back(object); - if (oi == mdr->slave_request->get_authpin_freeze()) + if (oi == mdr->peer_request->get_authpin_freeze()) auth_pin_freeze = static_cast(object); } } @@ -2974,11 +2974,11 @@ void Server::handle_slave_auth_pin(MDRequestRef& mdr) mdr->more()->rename_inode != auth_pin_freeze) mdr->unfreeze_auth_pin(true); - /* handle_slave_rename_prep() call freeze_inode() to wait for all other operations + /* handle_peer_rename_prep() call freeze_inode() to wait for all other operations * on the source inode to complete. This happens after all locks for the rename * operation are acquired. But to acquire locks, we need auth pin locks' parent * objects first. So there is an ABBA deadlock if someone auth pins the source inode - * after locks are acquired and before Server::handle_slave_rename_prep() is called. + * after locks are acquired and before Server::handle_peer_rename_prep() is called. * The solution is freeze the inode and prevent other MDRequests from getting new * auth pins. 
*/ @@ -2992,7 +2992,7 @@ void Server::handle_slave_auth_pin(MDRequestRef& mdr) } } - reply = make_message(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK); + reply = make_message(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_AUTHPINACK); if (fail) { mdr->drop_local_auth_pins(); // just in case @@ -3018,30 +3018,30 @@ void Server::handle_slave_auth_pin(MDRequestRef& mdr) } } - mds->send_message_mds(reply, mdr->slave_to_mds); + mds->send_message_mds(reply, mdr->peer_to_mds); // clean up this request - mdr->reset_slave_request(); + mdr->reset_peer_request(); return; blocked: - if (mdr->slave_request->should_notify_blocking()) { - reply = make_message(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_AUTHPINACK); + if (mdr->peer_request->should_notify_blocking()) { + reply = make_message(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_AUTHPINACK); reply->mark_req_blocked(); - mds->send_message_mds(reply, mdr->slave_to_mds); - mdr->slave_request->clear_notify_blocking(); + mds->send_message_mds(reply, mdr->peer_to_mds); + mdr->peer_request->clear_notify_blocking(); } return; } -void Server::handle_slave_auth_pin_ack(MDRequestRef& mdr, const cref_t &ack) +void Server::handle_peer_auth_pin_ack(MDRequestRef& mdr, const cref_t &ack) { - dout(10) << "handle_slave_auth_pin_ack on " << *mdr << " " << *ack << dendl; + dout(10) << "handle_peer_auth_pin_ack on " << *mdr << " " << *ack << dendl; mds_rank_t from = mds_rank_t(ack->get_source().num()); if (ack->is_req_blocked()) { mdr->disable_lock_cache(); - // slave auth pin is blocked, drop locks to avoid deadlock + // peer auth pin is blocked, drop locks to avoid deadlock mds->locker->drop_locks(mdr.get(), nullptr); return; } @@ -3079,24 +3079,24 @@ void Server::handle_slave_auth_pin_ack(MDRequestRef& mdr, const cref_tmore()->slaves.insert(from); + // note peer + mdr->more()->peers.insert(from); // clear from waiting list - auto ret = mdr->more()->waiting_on_slave.erase(from); + auto ret = mdr->more()->waiting_on_peer.erase(from); ceph_assert(ret); if (ack->is_error_rofs()) { - mdr->more()->slave_error = -EROFS; + mdr->more()->peer_error = -EROFS; } else if (ack->is_error_wouldblock()) { - mdr->more()->slave_error = -EWOULDBLOCK; + mdr->more()->peer_error = -EWOULDBLOCK; } // go again? - if (mdr->more()->waiting_on_slave.empty()) + if (mdr->more()->waiting_on_peer.empty()) mdcache->dispatch_request(mdr); else - dout(10) << "still waiting on slaves " << mdr->more()->waiting_on_slave << dendl; + dout(10) << "still waiting on peers " << mdr->more()->waiting_on_peer << dendl; } @@ -6441,7 +6441,7 @@ void Server::_link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targ if (mds->is_cluster_degraded() && !mds->mdsmap->is_clientreplay_or_active_or_stopping(linkauth)) { dout(10) << " targeti auth mds." 
<< linkauth << " is not active" << dendl; - if (mdr->more()->waiting_on_slave.empty()) + if (mdr->more()->waiting_on_peer.empty()) mds->wait_for_active_peer(linkauth, new C_MDS_RetryRequest(mdcache, mdr)); return; } @@ -6449,18 +6449,18 @@ void Server::_link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targ dout(10) << " targeti auth must prepare nlink++/--" << dendl; int op; if (inc) - op = MMDSSlaveRequest::OP_LINKPREP; + op = MMDSPeerRequest::OP_LINKPREP; else - op = MMDSSlaveRequest::OP_UNLINKPREP; - auto req = make_message(mdr->reqid, mdr->attempt, op); + op = MMDSPeerRequest::OP_UNLINKPREP; + auto req = make_message(mdr->reqid, mdr->attempt, op); targeti->set_object_info(req->get_object_info()); req->op_stamp = mdr->get_op_stamp(); if (auto& desti_srnode = mdr->more()->desti_srnode) encode(*desti_srnode, req->desti_snapbl); mds->send_message_mds(req, linkauth); - ceph_assert(mdr->more()->waiting_on_slave.count(linkauth) == 0); - mdr->more()->waiting_on_slave.insert(linkauth); + ceph_assert(mdr->more()->waiting_on_peer.count(linkauth) == 0); + mdr->more()->waiting_on_peer.insert(linkauth); return; } dout(10) << " targeti auth has prepared nlink++/--" << dendl; @@ -6480,9 +6480,9 @@ void Server::_link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targ mdlog->start_entry(le); le->metablob.add_client_req(mdr->reqid, mdr->client_request->get_oldest_client_tid()); if (!mdr->more()->witnessed.empty()) { - dout(20) << " noting uncommitted_slaves " << mdr->more()->witnessed << dendl; + dout(20) << " noting uncommitted_peers " << mdr->more()->witnessed << dendl; le->reqid = mdr->reqid; - le->had_slaves = true; + le->had_peers = true; mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed); } @@ -6552,55 +6552,55 @@ void Server::_link_remote_finish(MDRequestRef& mdr, bool inc, // remote linking/unlinking -class C_MDS_SlaveLinkPrep : public ServerLogContext { +class C_MDS_PeerLinkPrep : public ServerLogContext { CInode *targeti; bool adjust_realm; public: - C_MDS_SlaveLinkPrep(Server *s, MDRequestRef& r, CInode *t, bool ar) : + C_MDS_PeerLinkPrep(Server *s, MDRequestRef& r, CInode *t, bool ar) : ServerLogContext(s, r), targeti(t), adjust_realm(ar) { } void finish(int r) override { ceph_assert(r == 0); - server->_logged_slave_link(mdr, targeti, adjust_realm); + server->_logged_peer_link(mdr, targeti, adjust_realm); } }; -class C_MDS_SlaveLinkCommit : public ServerContext { +class C_MDS_PeerLinkCommit : public ServerContext { MDRequestRef mdr; CInode *targeti; public: - C_MDS_SlaveLinkCommit(Server *s, MDRequestRef& r, CInode *t) : + C_MDS_PeerLinkCommit(Server *s, MDRequestRef& r, CInode *t) : ServerContext(s), mdr(r), targeti(t) { } void finish(int r) override { - server->_commit_slave_link(mdr, r, targeti); + server->_commit_peer_link(mdr, r, targeti); } }; -void Server::handle_slave_link_prep(MDRequestRef& mdr) +void Server::handle_peer_link_prep(MDRequestRef& mdr) { - dout(10) << "handle_slave_link_prep " << *mdr - << " on " << mdr->slave_request->get_object_info() + dout(10) << "handle_peer_link_prep " << *mdr + << " on " << mdr->peer_request->get_object_info() << dendl; ceph_assert(g_conf()->mds_kill_link_at != 4); - CInode *targeti = mdcache->get_inode(mdr->slave_request->get_object_info().ino); + CInode *targeti = mdcache->get_inode(mdr->peer_request->get_object_info().ino); ceph_assert(targeti); dout(10) << "targeti " << *targeti << dendl; CDentry *dn = targeti->get_parent_dn(); CDentry::linkage_t *dnl = dn->get_linkage(); 
ceph_assert(dnl->is_primary()); - mdr->set_op_stamp(mdr->slave_request->op_stamp); + mdr->set_op_stamp(mdr->peer_request->op_stamp); mdr->auth_pin(targeti); - //ceph_abort(); // test hack: make sure leader can handle a slave that fails to prepare... + //ceph_abort(); // test hack: make sure leader can handle a peer that fails to prepare... ceph_assert(g_conf()->mds_kill_link_at != 5); // journal it mdr->ls = mdlog->get_current_segment(); - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_link_prep", mdr->reqid, mdr->slave_to_mds, - ESlaveUpdate::OP_PREPARE, ESlaveUpdate::LINK); + EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_link_prep", mdr->reqid, mdr->peer_to_mds, + EPeerUpdate::OP_PREPARE, EPeerUpdate::LINK); mdlog->start_entry(le); auto &pi = dnl->get_inode()->project_inode(); @@ -6609,7 +6609,7 @@ void Server::handle_slave_link_prep(MDRequestRef& mdr) bool inc; bool adjust_realm = false; bool realm_projected = false; - if (mdr->slave_request->get_op() == MMDSSlaveRequest::OP_LINKPREP) { + if (mdr->peer_request->get_op() == MMDSPeerRequest::OP_LINKPREP) { inc = true; pi.inode.nlink++; if (!targeti->is_projected_snaprealm_global()) { @@ -6623,8 +6623,8 @@ void Server::handle_slave_link_prep(MDRequestRef& mdr) inc = false; pi.inode.nlink--; if (targeti->is_projected_snaprealm_global()) { - ceph_assert(mdr->slave_request->desti_snapbl.length()); - auto p = mdr->slave_request->desti_snapbl.cbegin(); + ceph_assert(mdr->peer_request->desti_snapbl.length()); + auto p = mdr->peer_request->desti_snapbl.cbegin(); sr_t *newsnap = targeti->project_snaprealm(); decode(*newsnap, p); @@ -6634,7 +6634,7 @@ void Server::handle_slave_link_prep(MDRequestRef& mdr) realm_projected = true; } else { - ceph_assert(mdr->slave_request->desti_snapbl.length() == 0); + ceph_assert(mdr->peer_request->desti_snapbl.length() == 0); } } @@ -6665,20 +6665,20 @@ void Server::handle_slave_link_prep(MDRequestRef& mdr) // commit case mdcache->predirty_journal_parents(mdr, &le->commit, dnl->get_inode(), 0, PREDIRTY_SHALLOW|PREDIRTY_PRIMARY); mdcache->journal_dirty_inode(mdr.get(), &le->commit, targeti); - mdcache->add_uncommitted_slave(mdr->reqid, mdr->ls, mdr->slave_to_mds); + mdcache->add_uncommitted_peer(mdr->reqid, mdr->ls, mdr->peer_to_mds); // set up commit waiter - mdr->more()->slave_commit = new C_MDS_SlaveLinkCommit(this, mdr, targeti); + mdr->more()->peer_commit = new C_MDS_PeerLinkCommit(this, mdr, targeti); - mdr->more()->slave_update_journaled = true; - submit_mdlog_entry(le, new C_MDS_SlaveLinkPrep(this, mdr, targeti, adjust_realm), + mdr->more()->peer_update_journaled = true; + submit_mdlog_entry(le, new C_MDS_PeerLinkPrep(this, mdr, targeti, adjust_realm), mdr, __func__); mdlog->flush(); } -void Server::_logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm) +void Server::_logged_peer_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm) { - dout(10) << "_logged_slave_link " << *mdr + dout(10) << "_logged_peer_link " << *mdr << " " << *targeti << dendl; ceph_assert(g_conf()->mds_kill_link_at != 6); @@ -6691,7 +6691,7 @@ void Server::_logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_ mds->balancer->hit_inode(targeti, META_POP_IWR); // done. 
- mdr->reset_slave_request(); + mdr->reset_peer_request(); if (adjust_realm) { int op = CEPH_SNAP_OP_SPLIT; @@ -6701,8 +6701,8 @@ void Server::_logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_ // ack if (!mdr->aborted) { - auto reply = make_message(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_LINKPREPACK); - mds->send_message_mds(reply, mdr->slave_to_mds); + auto reply = make_message(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_LINKPREPACK); + mds->send_message_mds(reply, mdr->peer_to_mds); } else { dout(10) << " abort flag set, finishing" << dendl; mdcache->request_finish(mdr); @@ -6710,16 +6710,16 @@ void Server::_logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_ } -struct C_MDS_CommittedSlave : public ServerLogContext { - C_MDS_CommittedSlave(Server *s, MDRequestRef& m) : ServerLogContext(s, m) {} +struct C_MDS_CommittedPeer : public ServerLogContext { + C_MDS_CommittedPeer(Server *s, MDRequestRef& m) : ServerLogContext(s, m) {} void finish(int r) override { - server->_committed_slave(mdr); + server->_committed_peer(mdr); } }; -void Server::_commit_slave_link(MDRequestRef& mdr, int r, CInode *targeti) +void Server::_commit_peer_link(MDRequestRef& mdr, int r, CInode *targeti) { - dout(10) << "_commit_slave_link " << *mdr + dout(10) << "_commit_peer_link " << *mdr << " r=" << r << " " << *targeti << dendl; @@ -6730,26 +6730,26 @@ void Server::_commit_slave_link(MDRequestRef& mdr, int r, CInode *targeti) mdr->cleanup(); // write a commit to the journal - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_link_commit", mdr->reqid, mdr->slave_to_mds, - ESlaveUpdate::OP_COMMIT, ESlaveUpdate::LINK); + EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_link_commit", mdr->reqid, mdr->peer_to_mds, + EPeerUpdate::OP_COMMIT, EPeerUpdate::LINK); mdlog->start_entry(le); - submit_mdlog_entry(le, new C_MDS_CommittedSlave(this, mdr), mdr, __func__); + submit_mdlog_entry(le, new C_MDS_CommittedPeer(this, mdr), mdr, __func__); mdlog->flush(); } else { - do_link_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr); + do_link_rollback(mdr->more()->rollback_bl, mdr->peer_to_mds, mdr); } } -void Server::_committed_slave(MDRequestRef& mdr) +void Server::_committed_peer(MDRequestRef& mdr) { - dout(10) << "_committed_slave " << *mdr << dendl; + dout(10) << "_committed_peer " << *mdr << dendl; ceph_assert(g_conf()->mds_kill_link_at != 8); - bool assert_exist = mdr->more()->slave_update_journaled; - mdcache->finish_uncommitted_slave(mdr->reqid, assert_exist); - auto req = make_message(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED); - mds->send_message_mds(req, mdr->slave_to_mds); + bool assert_exist = mdr->more()->peer_update_journaled; + mdcache->finish_uncommitted_peer(mdr->reqid, assert_exist); + auto req = make_message(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_COMMITTED); + mds->send_message_mds(req, mdr->peer_to_mds); mdcache->request_finish(mdr); } @@ -6787,7 +6787,7 @@ void Server::do_link_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& CInode *in = mdcache->get_inode(rollback.ino); ceph_assert(in); dout(10) << " target is " << *in << dendl; - ceph_assert(!in->is_projected()); // live slave request hold versionlock xlock. + ceph_assert(!in->is_projected()); // live peer request hold versionlock xlock. 
auto &pi = in->project_inode(); pi.inode.version = in->pre_dirty(); @@ -6835,8 +6835,8 @@ void Server::do_link_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& } // journal it - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_link_rollback", rollback.reqid, leader, - ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::LINK); + EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_link_rollback", rollback.reqid, leader, + EPeerUpdate::OP_ROLLBACK, EPeerUpdate::LINK); mdlog->start_entry(le); le->commit.add_dir_context(parent); le->commit.add_dir(parent, true); @@ -6868,28 +6868,28 @@ void Server::_link_rollback_finish(MutationRef& mut, MDRequestRef& mdr, } -void Server::handle_slave_link_prep_ack(MDRequestRef& mdr, const cref_t &m) +void Server::handle_peer_link_prep_ack(MDRequestRef& mdr, const cref_t &m) { - dout(10) << "handle_slave_link_prep_ack " << *mdr + dout(10) << "handle_peer_link_prep_ack " << *mdr << " " << *m << dendl; mds_rank_t from = mds_rank_t(m->get_source().num()); ceph_assert(g_conf()->mds_kill_link_at != 11); - // note slave - mdr->more()->slaves.insert(from); + // note peer + mdr->more()->peers.insert(from); // witnessed! ceph_assert(mdr->more()->witnessed.count(from) == 0); mdr->more()->witnessed.insert(from); ceph_assert(!m->is_not_journaled()); - mdr->more()->has_journaled_slaves = true; + mdr->more()->has_journaled_peers = true; // remove from waiting list - ceph_assert(mdr->more()->waiting_on_slave.count(from)); - mdr->more()->waiting_on_slave.erase(from); + ceph_assert(mdr->more()->waiting_on_peer.count(from)); + mdr->more()->waiting_on_peer.erase(from); - ceph_assert(mdr->more()->waiting_on_slave.empty()); + ceph_assert(mdr->more()->waiting_on_peer.empty()); dispatch_client_request(mdr); // go again! } @@ -7008,7 +7008,7 @@ void Server::handle_client_unlink(MDRequestRef& mdr) in->clear_snaprealm_global(new_srnode); mdr->more()->desti_srnode = new_srnode; } else if (dnl->is_primary()) { - // prepare snaprealm blob for slave request + // prepare snaprealm blob for peer request SnapRealm *realm = in->find_snaprealm(); snapid_t follows = realm->get_newest_seq(); if (in->snaprealm || follows + 1 > in->get_oldest_snap()) { @@ -7031,14 +7031,14 @@ void Server::handle_client_unlink(MDRequestRef& mdr) ++p) { if (mdr->more()->witnessed.count(*p)) { dout(10) << " already witnessed by mds." << *p << dendl; - } else if (mdr->more()->waiting_on_slave.count(*p)) { + } else if (mdr->more()->waiting_on_peer.count(*p)) { dout(10) << " already waiting on witness mds." << *p << dendl; } else { if (!_rmdir_prepare_witness(mdr, *p, mdr->dn[0], straydn)) return; } } - if (!mdr->more()->waiting_on_slave.empty()) + if (!mdr->more()->waiting_on_peer.empty()) return; // we're waiting for a witness. } @@ -7082,9 +7082,9 @@ void Server::_unlink_local(MDRequestRef& mdr, CDentry *dn, CDentry *straydn) mdlog->start_entry(le); le->metablob.add_client_req(mdr->reqid, mdr->client_request->get_oldest_client_tid()); if (!mdr->more()->witnessed.empty()) { - dout(20) << " noting uncommitted_slaves " << mdr->more()->witnessed << dendl; + dout(20) << " noting uncommitted_peers " << mdr->more()->witnessed << dendl; le->reqid = mdr->reqid; - le->had_slaves = true; + le->had_peers = true; mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed); } @@ -7227,13 +7227,13 @@ bool Server::_rmdir_prepare_witness(MDRequestRef& mdr, mds_rank_t who, vectoris_cluster_degraded() && !mds->mdsmap->is_clientreplay_or_active_or_stopping(who)) { dout(10) << "_rmdir_prepare_witness mds." 
<< who << " is not active" << dendl; - if (mdr->more()->waiting_on_slave.empty()) + if (mdr->more()->waiting_on_peer.empty()) mds->wait_for_active_peer(who, new C_MDS_RetryRequest(mdcache, mdr)); return false; } dout(10) << "_rmdir_prepare_witness mds." << who << dendl; - auto req = make_message(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREP); + auto req = make_message(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RMDIRPREP); req->srcdnpath = filepath(trace.front()->get_dir()->ino()); for (auto dn : trace) req->srcdnpath.push_dentry(dn->get_name()); @@ -7244,39 +7244,39 @@ bool Server::_rmdir_prepare_witness(MDRequestRef& mdr, mds_rank_t who, vectorop_stamp = mdr->get_op_stamp(); mds->send_message_mds(req, who); - ceph_assert(mdr->more()->waiting_on_slave.count(who) == 0); - mdr->more()->waiting_on_slave.insert(who); + ceph_assert(mdr->more()->waiting_on_peer.count(who) == 0); + mdr->more()->waiting_on_peer.insert(who); return true; } -struct C_MDS_SlaveRmdirPrep : public ServerLogContext { +struct C_MDS_PeerRmdirPrep : public ServerLogContext { CDentry *dn, *straydn; - C_MDS_SlaveRmdirPrep(Server *s, MDRequestRef& r, CDentry *d, CDentry *st) + C_MDS_PeerRmdirPrep(Server *s, MDRequestRef& r, CDentry *d, CDentry *st) : ServerLogContext(s, r), dn(d), straydn(st) {} void finish(int r) override { - server->_logged_slave_rmdir(mdr, dn, straydn); + server->_logged_peer_rmdir(mdr, dn, straydn); } }; -struct C_MDS_SlaveRmdirCommit : public ServerContext { +struct C_MDS_PeerRmdirCommit : public ServerContext { MDRequestRef mdr; CDentry *straydn; - C_MDS_SlaveRmdirCommit(Server *s, MDRequestRef& r, CDentry *sd) + C_MDS_PeerRmdirCommit(Server *s, MDRequestRef& r, CDentry *sd) : ServerContext(s), mdr(r), straydn(sd) { } void finish(int r) override { - server->_commit_slave_rmdir(mdr, r, straydn); + server->_commit_peer_rmdir(mdr, r, straydn); } }; -void Server::handle_slave_rmdir_prep(MDRequestRef& mdr) +void Server::handle_peer_rmdir_prep(MDRequestRef& mdr) { - dout(10) << "handle_slave_rmdir_prep " << *mdr - << " " << mdr->slave_request->srcdnpath - << " to " << mdr->slave_request->destdnpath + dout(10) << "handle_peer_rmdir_prep " << *mdr + << " " << mdr->peer_request->srcdnpath + << " to " << mdr->peer_request->destdnpath << dendl; vector trace; - filepath srcpath(mdr->slave_request->srcdnpath); + filepath srcpath(mdr->peer_request->srcdnpath); dout(10) << " src " << srcpath << dendl; CInode *in; CF_MDS_MDRContextFactory cf(mdcache, mdr, false); @@ -7286,7 +7286,7 @@ void Server::handle_slave_rmdir_prep(MDRequestRef& mdr) if (r > 0) return; if (r == -ESTALE) { mdcache->find_ino_peers(srcpath.get_ino(), new C_MDS_RetryRequest(mdcache, mdr), - mdr->slave_to_mds, true); + mdr->peer_to_mds, true); return; } ceph_assert(r == 0); @@ -7298,7 +7298,7 @@ void Server::handle_slave_rmdir_prep(MDRequestRef& mdr) CDentry *straydn = mdr->straydn; dout(10) << " straydn " << *straydn << dendl; - mdr->set_op_stamp(mdr->slave_request->op_stamp); + mdr->set_op_stamp(mdr->peer_request->op_stamp); rmdir_rollback rollback; rollback.reqid = mdr->reqid; @@ -7306,7 +7306,7 @@ void Server::handle_slave_rmdir_prep(MDRequestRef& mdr) rollback.src_dname = dn->get_name(); rollback.dest_dir = straydn->get_dir()->dirfrag(); rollback.dest_dname = straydn->get_name(); - if (mdr->slave_request->desti_snapbl.length()) { + if (mdr->peer_request->desti_snapbl.length()) { if (in->snaprealm) { encode(true, rollback.snapbl); in->encode_snap_blob(rollback.snapbl); @@ -7319,7 +7319,7 @@ void 
Server::handle_slave_rmdir_prep(MDRequestRef& mdr) dout(20) << " rollback is " << mdr->more()->rollback_bl.length() << " bytes" << dendl; // set up commit waiter - mdr->more()->slave_commit = new C_MDS_SlaveRmdirCommit(this, mdr, straydn); + mdr->more()->peer_commit = new C_MDS_PeerRmdirCommit(this, mdr, straydn); straydn->push_projected_linkage(in); dn->push_projected_linkage(); @@ -7329,41 +7329,41 @@ void Server::handle_slave_rmdir_prep(MDRequestRef& mdr) if (!in->has_subtree_root_dirfrag(mds->get_nodeid())) { dout(10) << " no auth subtree in " << *in << ", skipping journal" << dendl; - _logged_slave_rmdir(mdr, dn, straydn); + _logged_peer_rmdir(mdr, dn, straydn); return; } mdr->ls = mdlog->get_current_segment(); - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rmdir", mdr->reqid, mdr->slave_to_mds, - ESlaveUpdate::OP_PREPARE, ESlaveUpdate::RMDIR); + EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rmdir", mdr->reqid, mdr->peer_to_mds, + EPeerUpdate::OP_PREPARE, EPeerUpdate::RMDIR); mdlog->start_entry(le); le->rollback = mdr->more()->rollback_bl; le->commit.add_dir_context(straydn->get_dir()); le->commit.add_primary_dentry(straydn, in, true); - // slave: no need to journal original dentry + // peer: no need to journal original dentry dout(10) << " noting renamed (unlinked) dir ino " << in->ino() << " in metablob" << dendl; le->commit.renamed_dirino = in->ino(); mdcache->project_subtree_rename(in, dn->get_dir(), straydn->get_dir()); - mdcache->add_uncommitted_slave(mdr->reqid, mdr->ls, mdr->slave_to_mds); + mdcache->add_uncommitted_peer(mdr->reqid, mdr->ls, mdr->peer_to_mds); - mdr->more()->slave_update_journaled = true; - submit_mdlog_entry(le, new C_MDS_SlaveRmdirPrep(this, mdr, dn, straydn), + mdr->more()->peer_update_journaled = true; + submit_mdlog_entry(le, new C_MDS_PeerRmdirPrep(this, mdr, dn, straydn), mdr, __func__); mdlog->flush(); } -void Server::_logged_slave_rmdir(MDRequestRef& mdr, CDentry *dn, CDentry *straydn) +void Server::_logged_peer_rmdir(MDRequestRef& mdr, CDentry *dn, CDentry *straydn) { - dout(10) << "_logged_slave_rmdir " << *mdr << " on " << *dn << dendl; + dout(10) << "_logged_peer_rmdir " << *mdr << " on " << *dn << dendl; CInode *in = dn->get_linkage()->get_inode(); bool new_realm; - if (mdr->slave_request->desti_snapbl.length()) { + if (mdr->peer_request->desti_snapbl.length()) { new_realm = !in->snaprealm; - in->decode_snap_blob(mdr->slave_request->desti_snapbl); + in->decode_snap_blob(mdr->peer_request->desti_snapbl); ceph_assert(in->snaprealm); ceph_assert(in->snaprealm->have_past_parents_open()); } else { @@ -7376,54 +7376,54 @@ void Server::_logged_slave_rmdir(MDRequestRef& mdr, CDentry *dn, CDentry *strayd straydn->pop_projected_linkage(); dn->pop_projected_linkage(); - mdcache->adjust_subtree_after_rename(in, dn->get_dir(), mdr->more()->slave_update_journaled); + mdcache->adjust_subtree_after_rename(in, dn->get_dir(), mdr->more()->peer_update_journaled); if (new_realm) mdcache->do_realm_invalidate_and_update_notify(in, CEPH_SNAP_OP_SPLIT, false); // done. 
- mdr->reset_slave_request(); + mdr->reset_peer_request(); mdr->straydn = 0; if (!mdr->aborted) { - auto reply = make_message(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RMDIRPREPACK); - if (!mdr->more()->slave_update_journaled) + auto reply = make_message(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RMDIRPREPACK); + if (!mdr->more()->peer_update_journaled) reply->mark_not_journaled(); - mds->send_message_mds(reply, mdr->slave_to_mds); + mds->send_message_mds(reply, mdr->peer_to_mds); } else { dout(10) << " abort flag set, finishing" << dendl; mdcache->request_finish(mdr); } } -void Server::handle_slave_rmdir_prep_ack(MDRequestRef& mdr, const cref_t &ack) +void Server::handle_peer_rmdir_prep_ack(MDRequestRef& mdr, const cref_t &ack) { - dout(10) << "handle_slave_rmdir_prep_ack " << *mdr + dout(10) << "handle_peer_rmdir_prep_ack " << *mdr << " " << *ack << dendl; mds_rank_t from = mds_rank_t(ack->get_source().num()); - mdr->more()->slaves.insert(from); + mdr->more()->peers.insert(from); mdr->more()->witnessed.insert(from); if (!ack->is_not_journaled()) - mdr->more()->has_journaled_slaves = true; + mdr->more()->has_journaled_peers = true; // remove from waiting list - ceph_assert(mdr->more()->waiting_on_slave.count(from)); - mdr->more()->waiting_on_slave.erase(from); + ceph_assert(mdr->more()->waiting_on_peer.count(from)); + mdr->more()->waiting_on_peer.erase(from); - if (mdr->more()->waiting_on_slave.empty()) + if (mdr->more()->waiting_on_peer.empty()) dispatch_client_request(mdr); // go again! else - dout(10) << "still waiting on slaves " << mdr->more()->waiting_on_slave << dendl; + dout(10) << "still waiting on peers " << mdr->more()->waiting_on_peer << dendl; } -void Server::_commit_slave_rmdir(MDRequestRef& mdr, int r, CDentry *straydn) +void Server::_commit_peer_rmdir(MDRequestRef& mdr, int r, CDentry *straydn) { - dout(10) << "_commit_slave_rmdir " << *mdr << " r=" << r << dendl; + dout(10) << "_commit_peer_rmdir " << *mdr << " r=" << r << dendl; if (r == 0) { - if (mdr->more()->slave_update_journaled) { + if (mdr->more()->peer_update_journaled) { CInode *strayin = straydn->get_projected_linkage()->get_inode(); if (strayin && !strayin->snaprealm) mdcache->clear_dirty_bits_for_stray(strayin); @@ -7431,20 +7431,20 @@ void Server::_commit_slave_rmdir(MDRequestRef& mdr, int r, CDentry *straydn) mdr->cleanup(); - if (mdr->more()->slave_update_journaled) { + if (mdr->more()->peer_update_journaled) { // write a commit to the journal - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rmdir_commit", mdr->reqid, - mdr->slave_to_mds, ESlaveUpdate::OP_COMMIT, - ESlaveUpdate::RMDIR); + EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rmdir_commit", mdr->reqid, + mdr->peer_to_mds, EPeerUpdate::OP_COMMIT, + EPeerUpdate::RMDIR); mdlog->start_entry(le); - submit_mdlog_entry(le, new C_MDS_CommittedSlave(this, mdr), mdr, __func__); + submit_mdlog_entry(le, new C_MDS_CommittedPeer(this, mdr), mdr, __func__); mdlog->flush(); } else { - _committed_slave(mdr); + _committed_peer(mdr); } } else { // abort - do_rmdir_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr); + do_rmdir_rollback(mdr->more()->rollback_bl, mdr->peer_to_mds, mdr); } } @@ -7502,7 +7502,7 @@ void Server::do_rmdir_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& } } - if (mdr && !mdr->more()->slave_update_journaled) { + if (mdr && !mdr->more()->peer_update_journaled) { ceph_assert(!in->has_subtree_root_dirfrag(mds->get_nodeid())); _rmdir_rollback_finish(mdr, rollback.reqid, dn, straydn); @@ -7510,13 +7510,13 @@ void 
Server::do_rmdir_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& } - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rmdir_rollback", rollback.reqid, leader, - ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::RMDIR); + EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rmdir_rollback", rollback.reqid, leader, + EPeerUpdate::OP_ROLLBACK, EPeerUpdate::RMDIR); mdlog->start_entry(le); le->commit.add_dir_context(dn->get_dir()); le->commit.add_primary_dentry(dn, in, true); - // slave: no need to journal straydn + // peer: no need to journal straydn dout(10) << " noting renamed (unlinked) dir ino " << in->ino() << " in metablob" << dendl; le->commit.renamed_dirino = in->ino(); @@ -7540,7 +7540,7 @@ void Server::_rmdir_rollback_finish(MDRequestRef& mdr, metareqid_t reqid, CDentr CInode *in = dn->get_linkage()->get_inode(); mdcache->adjust_subtree_after_rename(in, straydn->get_dir(), - !mdr || mdr->more()->slave_update_journaled); + !mdr || mdr->more()->peer_update_journaled); if (mds->is_resolve()) { CDir *root = mdcache->get_subtree_root(straydn->get_dir()); @@ -8009,27 +8009,27 @@ void Server::handle_client_rename(MDRequestRef& mdr) if (*p == last) continue; // do it last! if (mdr->more()->witnessed.count(*p)) { dout(10) << " already witnessed by mds." << *p << dendl; - } else if (mdr->more()->waiting_on_slave.count(*p)) { + } else if (mdr->more()->waiting_on_peer.count(*p)) { dout(10) << " already waiting on witness mds." << *p << dendl; } else { if (!_rename_prepare_witness(mdr, *p, witnesses, srctrace, desttrace, straydn)) return; } } - if (!mdr->more()->waiting_on_slave.empty()) + if (!mdr->more()->waiting_on_peer.empty()) return; // we're waiting for a witness. if (last != MDS_RANK_NONE && mdr->more()->witnessed.count(last) == 0) { dout(10) << " preparing last witness (srcdn auth)" << dendl; - ceph_assert(mdr->more()->waiting_on_slave.count(last) == 0); + ceph_assert(mdr->more()->waiting_on_peer.count(last) == 0); _rename_prepare_witness(mdr, last, witnesses, srctrace, desttrace, straydn); return; } - // test hack: bail after slave does prepare, so we can verify it's _live_ rollback. - if (!mdr->more()->slaves.empty() && !srci->is_dir()) + // test hack: bail after peer does prepare, so we can verify it's _live_ rollback. 
+ if (!mdr->more()->peers.empty() && !srci->is_dir()) ceph_assert(g_conf()->mds_kill_rename_at != 3); - if (!mdr->more()->slaves.empty() && srci->is_dir()) + if (!mdr->more()->peers.empty() && srci->is_dir()) ceph_assert(g_conf()->mds_kill_rename_at != 4); // -- declare now -- @@ -8041,10 +8041,10 @@ void Server::handle_client_rename(MDRequestRef& mdr) mdlog->start_entry(le); le->metablob.add_client_req(mdr->reqid, mdr->client_request->get_oldest_client_tid()); if (!mdr->more()->witnessed.empty()) { - dout(20) << " noting uncommitted_slaves " << mdr->more()->witnessed << dendl; + dout(20) << " noting uncommitted_peers " << mdr->more()->witnessed << dendl; le->reqid = mdr->reqid; - le->had_slaves = true; + le->had_peers = true; mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed); // no need to send frozen auth pin to recovring auth MDS of srci @@ -8079,10 +8079,10 @@ void Server::_rename_finish(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CInode *in = destdnl->get_inode(); bool need_eval = mdr->more()->cap_imports.count(in); - // test hack: test slave commit - if (!mdr->more()->slaves.empty() && !in->is_dir()) + // test hack: test peer commit + if (!mdr->more()->peers.empty() && !in->is_dir()) ceph_assert(g_conf()->mds_kill_rename_at != 5); - if (!mdr->more()->slaves.empty() && in->is_dir()) + if (!mdr->more()->peers.empty() && in->is_dir()) ceph_assert(g_conf()->mds_kill_rename_at != 6); // bump popularity @@ -8117,13 +8117,13 @@ bool Server::_rename_prepare_witness(MDRequestRef& mdr, mds_rank_t who, setis_cluster_degraded() && !mds->mdsmap->is_clientreplay_or_active_or_stopping(who)) { dout(10) << "_rename_prepare_witness mds." << who << " is not active" << dendl; - if (mdr->more()->waiting_on_slave.empty()) + if (mdr->more()->waiting_on_peer.empty()) mds->wait_for_active_peer(who, new C_MDS_RetryRequest(mdcache, mdr)); return false; } dout(10) << "_rename_prepare_witness mds." 
<< who << dendl; - auto req = make_message(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREP); + auto req = make_message(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMEPREP); req->srcdnpath = filepath(srctrace.front()->get_dir()->ino()); for (auto dn : srctrace) @@ -8147,8 +8147,8 @@ bool Server::_rename_prepare_witness(MDRequestRef& mdr, mds_rank_t who, setop_stamp = mdr->get_op_stamp(); mds->send_message_mds(req, who); - ceph_assert(mdr->more()->waiting_on_slave.count(who) == 0); - mdr->more()->waiting_on_slave.insert(who); + ceph_assert(mdr->more()->waiting_on_peer.count(who) == 0); + mdr->more()->waiting_on_peer.insert(who); return true; } @@ -8432,10 +8432,10 @@ void Server::_rename_prepare(MDRequestRef& mdr, } else if (destdnl->is_remote()) { if (oldin->is_auth()) { sr_t *new_srnode = NULL; - if (mdr->slave_request) { - if (mdr->slave_request->desti_snapbl.length() > 0) { + if (mdr->peer_request) { + if (mdr->peer_request->desti_snapbl.length() > 0) { new_srnode = new sr_t(); - auto p = mdr->slave_request->desti_snapbl.cbegin(); + auto p = mdr->peer_request->desti_snapbl.cbegin(); decode(*new_srnode, p); } } else if (auto& desti_srnode = mdr->more()->desti_srnode) { @@ -8468,10 +8468,10 @@ void Server::_rename_prepare(MDRequestRef& mdr, metablob->add_remote_dentry(destdn, true, srcdnl->get_remote_ino(), srcdnl->get_remote_d_type()); if (srci->is_auth() ) { // it's remote - if (mdr->slave_request) { - if (mdr->slave_request->srci_snapbl.length() > 0) { + if (mdr->peer_request) { + if (mdr->peer_request->srci_snapbl.length() > 0) { sr_t *new_srnode = new sr_t(); - auto p = mdr->slave_request->srci_snapbl.cbegin(); + auto p = mdr->peer_request->srci_snapbl.cbegin(); decode(*new_srnode, p); srci->project_snaprealm(new_srnode); } @@ -8520,7 +8520,7 @@ void Server::_rename_prepare(MDRequestRef& mdr, if (srcdn->is_auth()) { dout(10) << " journaling srcdn " << *srcdn << dendl; mdcache->journal_cow_dentry(mdr.get(), metablob, srcdn, CEPH_NOSNAP, 0, srcdnl); - // also journal the inode in case we need do slave rename rollback. It is Ok to add + // also journal the inode in case we need do peer rename rollback. It is Ok to add // both primary and NULL dentries. Because during journal replay, null dentry is // processed after primary dentry. 
   if (srcdnl->is_primary() && !srci->is_dir() && !destdn->is_auth())
@@ -8581,10 +8581,10 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
         oldin->early_pop_projected_snaprealm();
         new_oldin_snaprealm = (oldin->snaprealm && !hadrealm);
       } else {
-        ceph_assert(mdr->slave_request);
-        if (mdr->slave_request->desti_snapbl.length()) {
+        ceph_assert(mdr->peer_request);
+        if (mdr->peer_request->desti_snapbl.length()) {
           new_oldin_snaprealm = !oldin->snaprealm;
-          oldin->decode_snap_blob(mdr->slave_request->desti_snapbl);
+          oldin->decode_snap_blob(mdr->peer_request->desti_snapbl);
           ceph_assert(oldin->snaprealm);
           ceph_assert(oldin->snaprealm->have_past_parents_open());
         }
@@ -8593,7 +8593,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
       destdn->get_dir()->unlink_inode(destdn, false);
       straydn->pop_projected_linkage();
-      if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
+      if (mdr->is_peer() && !mdr->more()->peer_update_journaled)
         ceph_assert(!straydn->is_projected()); // no other projected
 
       // nlink-- targeti
@@ -8605,10 +8605,10 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
       destdn->get_dir()->unlink_inode(destdn, false);
       if (oldin->is_auth()) {
         oldin->pop_and_dirty_projected_inode(mdr->ls);
-      } else if (mdr->slave_request) {
-        if (mdr->slave_request->desti_snapbl.length() > 0) {
+      } else if (mdr->peer_request) {
+        if (mdr->peer_request->desti_snapbl.length() > 0) {
           ceph_assert(oldin->snaprealm);
-          oldin->decode_snap_blob(mdr->slave_request->desti_snapbl);
+          oldin->decode_snap_blob(mdr->peer_request->desti_snapbl);
         }
       } else if (auto& desti_srnode = mdr->more()->desti_srnode) {
         delete desti_srnode;
@@ -8630,10 +8630,10 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
       in->early_pop_projected_snaprealm();
       new_in_snaprealm = (in->snaprealm && !hadrealm);
     } else {
-      ceph_assert(mdr->slave_request);
-      if (mdr->slave_request->srci_snapbl.length()) {
+      ceph_assert(mdr->peer_request);
+      if (mdr->peer_request->srci_snapbl.length()) {
         new_in_snaprealm = !in->snaprealm;
-        in->decode_snap_blob(mdr->slave_request->srci_snapbl);
+        in->decode_snap_blob(mdr->peer_request->srci_snapbl);
         ceph_assert(in->snaprealm);
         ceph_assert(in->snaprealm->have_past_parents_open());
       }
@@ -8647,7 +8647,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
   if (!linkmerge) {
     // destdn
     destdnl = destdn->pop_projected_linkage();
-    if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
+    if (mdr->is_peer() && !mdr->more()->peer_update_journaled)
       ceph_assert(!destdn->is_projected()); // no other projected
 
     destdn->link_remote(destdnl, in);
@@ -8656,10 +8656,10 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
     // in
     if (in->is_auth()) {
       in->pop_and_dirty_projected_inode(mdr->ls);
-    } else if (mdr->slave_request) {
-      if (mdr->slave_request->srci_snapbl.length() > 0) {
+    } else if (mdr->peer_request) {
+      if (mdr->peer_request->srci_snapbl.length() > 0) {
         ceph_assert(in->snaprealm);
-        in->decode_snap_blob(mdr->slave_request->srci_snapbl);
+        in->decode_snap_blob(mdr->peer_request->srci_snapbl);
       }
     } else if (auto& srci_srnode = mdr->more()->srci_srnode) {
       delete srci_srnode;
@@ -8675,7 +8675,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
       destdn->get_dir()->unlink_inode(destdn, false);
     }
     destdnl = destdn->pop_projected_linkage();
-    if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
+    if (mdr->is_peer() && !mdr->more()->peer_update_journaled)
       ceph_assert(!destdn->is_projected()); // no other projected
 
     // srcdn inode import?
@@ -8725,7 +8725,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
   if (srcdn->is_auth())
     srcdn->mark_dirty(mdr->more()->pvmap[srcdn], mdr->ls);
   srcdn->pop_projected_linkage();
-  if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
+  if (mdr->is_peer() && !mdr->more()->peer_update_journaled)
     ceph_assert(!srcdn->is_projected()); // no other projected
 
   // apply remaining projected inodes (nested)
@@ -8751,57 +8751,57 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
 
 // ------------
-// SLAVE
+// PEER
 
-class C_MDS_SlaveRenamePrep : public ServerLogContext {
+class C_MDS_PeerRenamePrep : public ServerLogContext {
   CDentry *srcdn, *destdn, *straydn;
 public:
-  C_MDS_SlaveRenamePrep(Server *s, MDRequestRef& m, CDentry *sr, CDentry *de, CDentry *st) :
+  C_MDS_PeerRenamePrep(Server *s, MDRequestRef& m, CDentry *sr, CDentry *de, CDentry *st) :
     ServerLogContext(s, m), srcdn(sr), destdn(de), straydn(st) {}
   void finish(int r) override {
-    server->_logged_slave_rename(mdr, srcdn, destdn, straydn);
+    server->_logged_peer_rename(mdr, srcdn, destdn, straydn);
   }
 };
 
-class C_MDS_SlaveRenameCommit : public ServerContext {
+class C_MDS_PeerRenameCommit : public ServerContext {
   MDRequestRef mdr;
   CDentry *srcdn, *destdn, *straydn;
 public:
-  C_MDS_SlaveRenameCommit(Server *s, MDRequestRef& m, CDentry *sr, CDentry *de, CDentry *st) :
+  C_MDS_PeerRenameCommit(Server *s, MDRequestRef& m, CDentry *sr, CDentry *de, CDentry *st) :
     ServerContext(s), mdr(m), srcdn(sr), destdn(de), straydn(st) {}
   void finish(int r) override {
-    server->_commit_slave_rename(mdr, r, srcdn, destdn, straydn);
+    server->_commit_peer_rename(mdr, r, srcdn, destdn, straydn);
   }
 };
 
-class C_MDS_SlaveRenameSessionsFlushed : public ServerContext {
+class C_MDS_PeerRenameSessionsFlushed : public ServerContext {
   MDRequestRef mdr;
 public:
-  C_MDS_SlaveRenameSessionsFlushed(Server *s, MDRequestRef& r) :
+  C_MDS_PeerRenameSessionsFlushed(Server *s, MDRequestRef& r) :
     ServerContext(s), mdr(r) {}
   void finish(int r) override {
-    server->_slave_rename_sessions_flushed(mdr);
+    server->_peer_rename_sessions_flushed(mdr);
   }
 };
 
-void Server::handle_slave_rename_prep(MDRequestRef& mdr)
+void Server::handle_peer_rename_prep(MDRequestRef& mdr)
 {
-  dout(10) << "handle_slave_rename_prep " << *mdr
-           << " " << mdr->slave_request->srcdnpath
-           << " to " << mdr->slave_request->destdnpath
+  dout(10) << "handle_peer_rename_prep " << *mdr
+           << " " << mdr->peer_request->srcdnpath
+           << " to " << mdr->peer_request->destdnpath
            << dendl;
 
-  if (mdr->slave_request->is_interrupted()) {
-    dout(10) << " slave request interrupted, sending noop reply" << dendl;
-    auto reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+  if (mdr->peer_request->is_interrupted()) {
+    dout(10) << " peer request interrupted, sending noop reply" << dendl;
+    auto reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMEPREPACK);
     reply->mark_interrupted();
-    mds->send_message_mds(reply, mdr->slave_to_mds);
-    mdr->reset_slave_request();
+    mds->send_message_mds(reply, mdr->peer_to_mds);
+    mdr->reset_peer_request();
     return;
   }
 
   // discover destdn
-  filepath destpath(mdr->slave_request->destdnpath);
+  filepath destpath(mdr->peer_request->destdnpath);
   dout(10) << " dest " << destpath << dendl;
   vector<CDentry*> trace;
   CF_MDS_MDRContextFactory cf(mdcache, mdr, false);
@@ -8811,7 +8811,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
   if (r > 0) return;
   if (r == -ESTALE) {
     mdcache->find_ino_peers(destpath.get_ino(), new C_MDS_RetryRequest(mdcache, mdr),
-                            mdr->slave_to_mds, true);
+                            mdr->peer_to_mds, true);
     return;
   }
   ceph_assert(r == 0);  // we shouldn't get an error here!
@@ -8822,7 +8822,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
   mdr->pin(destdn);
 
   // discover srcdn
-  filepath srcpath(mdr->slave_request->srcdnpath);
+  filepath srcpath(mdr->peer_request->srcdnpath);
   dout(10) << " src " << srcpath << dendl;
   CInode *srci = nullptr;
   r = mdcache->path_traverse(mdr, cf, srcpath,
@@ -8845,12 +8845,12 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
   if (destdnl->is_primary() && !linkmerge)
     ceph_assert(straydn);
 
-  mdr->set_op_stamp(mdr->slave_request->op_stamp);
+  mdr->set_op_stamp(mdr->peer_request->op_stamp);
   mdr->more()->srcdn_auth_mds = srcdn->authority().first;
 
   // set up commit waiter (early, to clean up any freezing etc we do)
-  if (!mdr->more()->slave_commit)
-    mdr->more()->slave_commit = new C_MDS_SlaveRenameCommit(this, mdr, srcdn, destdn, straydn);
+  if (!mdr->more()->peer_commit)
+    mdr->more()->peer_commit = new C_MDS_PeerRenameCommit(this, mdr, srcdn, destdn, straydn);
 
   // am i srcdn auth?
   if (srcdn->is_auth()) {
@@ -8880,27 +8880,27 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
       /*
        * set ambiguous auth for srci
        * NOTE: we don't worry about ambiguous cache expire as we do
-       * with subtree migrations because all slaves will pin
+       * with subtree migrations because all peers will pin
        * srcdn->get_inode() for duration of this rename.
        */
       mdr->set_ambiguous_auth(srcdnl->get_inode());
 
       // just mark the source inode as ambiguous auth if more than two MDS are involved.
-      // the leader will send another OP_RENAMEPREP slave request later.
-      if (mdr->slave_request->witnesses.size() > 1) {
+      // the leader will send another OP_RENAMEPREP peer request later.
+      if (mdr->peer_request->witnesses.size() > 1) {
         dout(10) << " set srci ambiguous auth; providing srcdn replica list" << dendl;
         reply_witness = true;
       }
 
       // make sure bystanders have received all lock related messages
       for (set<mds_rank_t>::iterator p = srcdnrep.begin(); p != srcdnrep.end(); ++p) {
-        if (*p == mdr->slave_to_mds ||
+        if (*p == mdr->peer_to_mds ||
             (mds->is_cluster_degraded() &&
              !mds->mdsmap->is_clientreplay_or_active_or_stopping(*p)))
           continue;
-        auto notify = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMENOTIFY);
+        auto notify = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMENOTIFY);
         mds->send_message_mds(notify, *p);
-        mdr->more()->waiting_on_slave.insert(*p);
+        mdr->more()->waiting_on_peer.insert(*p);
       }
 
       // make sure clients have received all cap related messages
@@ -8910,16 +8910,16 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
       MDSGatherBuilder gather(g_ceph_context);
       flush_client_sessions(export_client_set, gather);
       if (gather.has_subs()) {
-        mdr->more()->waiting_on_slave.insert(MDS_RANK_NONE);
-        gather.set_finisher(new C_MDS_SlaveRenameSessionsFlushed(this, mdr));
+        mdr->more()->waiting_on_peer.insert(MDS_RANK_NONE);
+        gather.set_finisher(new C_MDS_PeerRenameSessionsFlushed(this, mdr));
         gather.activate();
       }
     }
 
     // is witness list sufficient?
     for (set<mds_rank_t>::iterator p = srcdnrep.begin(); p != srcdnrep.end(); ++p) {
-      if (*p == mdr->slave_to_mds ||
-          mdr->slave_request->witnesses.count(*p)) continue;
+      if (*p == mdr->peer_to_mds ||
+          mdr->peer_request->witnesses.count(*p)) continue;
       dout(10) << " witness list insufficient; providing srcdn replica list" << dendl;
       reply_witness = true;
       break;
@@ -8927,16 +8927,16 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
 
     if (reply_witness) {
       ceph_assert(!srcdnrep.empty());
-      auto reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
+      auto reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMEPREPACK);
       reply->witnesses.swap(srcdnrep);
-      mds->send_message_mds(reply, mdr->slave_to_mds);
-      mdr->reset_slave_request();
+      mds->send_message_mds(reply, mdr->peer_to_mds);
+      mdr->reset_peer_request();
       return;
     }
     dout(10) << " witness list sufficient: includes all srcdn replicas" << dendl;
-    if (!mdr->more()->waiting_on_slave.empty()) {
+    if (!mdr->more()->waiting_on_peer.empty()) {
       dout(10) << " still waiting for rename notify acks from "
-               << mdr->more()->waiting_on_slave << dendl;
+               << mdr->more()->waiting_on_peer << dendl;
       return;
     }
   } else if (srcdnl->is_primary() && srcdn->authority() != destdn->authority()) {
@@ -8978,7 +8978,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
       rollback.stray.dirfrag_old_rctime = straydn->get_dir()->get_projected_fnode()->rstat.rctime;
       rollback.stray.dname = straydn->get_name();
     }
-    if (mdr->slave_request->desti_snapbl.length()) {
+    if (mdr->peer_request->desti_snapbl.length()) {
       CInode *oldin = destdnl->get_inode();
       if (oldin->snaprealm) {
         encode(true, rollback.desti_snapbl);
@@ -8987,7 +8987,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
         encode(false, rollback.desti_snapbl);
       }
     }
-    if (mdr->slave_request->srci_snapbl.length()) {
+    if (mdr->peer_request->srci_snapbl.length()) {
       if (srci->snaprealm) {
         encode(true, rollback.srci_snapbl);
         srci->encode_snap_blob(rollback.srci_snapbl);
@@ -9001,38 +9001,38 @@
   // journal.
   mdr->ls = mdlog->get_current_segment();
-  ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rename_prep", mdr->reqid, mdr->slave_to_mds,
-                                      ESlaveUpdate::OP_PREPARE, ESlaveUpdate::RENAME);
+  EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rename_prep", mdr->reqid, mdr->peer_to_mds,
+                                    EPeerUpdate::OP_PREPARE, EPeerUpdate::RENAME);
   mdlog->start_entry(le);
   le->rollback = mdr->more()->rollback_bl;
 
-  bufferlist blah;   // inode import data... obviously not used if we're the slave
+  bufferlist blah;   // inode import data... obviously not used if we're the peer
   _rename_prepare(mdr, &le->commit, &blah, srcdn, destdn, straydn);
 
   if (le->commit.empty()) {
     dout(10) << " empty metablob, skipping journal" << dendl;
     mdlog->cancel_entry(le);
     mdr->ls = NULL;
-    _logged_slave_rename(mdr, srcdn, destdn, straydn);
+    _logged_peer_rename(mdr, srcdn, destdn, straydn);
   } else {
-    mdcache->add_uncommitted_slave(mdr->reqid, mdr->ls, mdr->slave_to_mds);
-    mdr->more()->slave_update_journaled = true;
-    submit_mdlog_entry(le, new C_MDS_SlaveRenamePrep(this, mdr, srcdn, destdn, straydn),
+    mdcache->add_uncommitted_peer(mdr->reqid, mdr->ls, mdr->peer_to_mds);
+    mdr->more()->peer_update_journaled = true;
+    submit_mdlog_entry(le, new C_MDS_PeerRenamePrep(this, mdr, srcdn, destdn, straydn),
                        mdr, __func__);
     mdlog->flush();
   }
 }
 
-void Server::_logged_slave_rename(MDRequestRef& mdr,
+void Server::_logged_peer_rename(MDRequestRef& mdr,
                                   CDentry *srcdn, CDentry *destdn, CDentry *straydn)
 {
-  dout(10) << "_logged_slave_rename " << *mdr << dendl;
+  dout(10) << "_logged_peer_rename " << *mdr << dendl;
 
   // prepare ack
-  ref_t<MMDSSlaveRequest> reply;
+  ref_t<MMDSPeerRequest> reply;
   if (!mdr->aborted) {
-    reply = make_message<MMDSSlaveRequest>(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
-    if (!mdr->more()->slave_update_journaled)
+    reply = make_message<MMDSPeerRequest>(mdr->reqid, mdr->attempt, MMDSPeerRequest::OP_RENAMEPREPACK);
+    if (!mdr->more()->peer_update_journaled)
       reply->mark_not_journaled();
   }
@@ -9089,11 +9089,11 @@ void Server::_logged_slave_rename(MDRequestRef& mdr,
     mds->balancer->hit_inode(destdnl->get_inode(), META_POP_IWR);
 
   // done.
-  mdr->reset_slave_request();
+  mdr->reset_peer_request();
   mdr->straydn = 0;
 
   if (reply) {
-    mds->send_message_mds(reply, mdr->slave_to_mds);
+    mds->send_message_mds(reply, mdr->peer_to_mds);
   } else {
     ceph_assert(mdr->aborted);
     dout(10) << " abort flag set, finishing" << dendl;
@@ -9101,10 +9101,10 @@
   }
 }
 
-void Server::_commit_slave_rename(MDRequestRef& mdr, int r,
+void Server::_commit_peer_rename(MDRequestRef& mdr, int r,
                                   CDentry *srcdn, CDentry *destdn, CDentry *straydn)
 {
-  dout(10) << "_commit_slave_rename " << *mdr << " r=" << r << dendl;
+  dout(10) << "_commit_peer_rename " << *mdr << " r=" << r << dendl;
 
   CInode *in = destdn->get_linkage()->get_inode();
@@ -9137,7 +9137,7 @@ void Server::_commit_slave_rename(MDRequestRef& mdr, int r,
       decode(peer_imported, bp);
 
       dout(10) << " finishing inode export on " << *in << dendl;
-      mdcache->migrator->finish_export_inode(in, mdr->slave_to_mds, peer_imported, finished);
+      mdcache->migrator->finish_export_inode(in, mdr->peer_to_mds, peer_imported, finished);
       mds->queue_waiters(finished);   // this includes SINGLEAUTH waiters.
 
       // unfreeze
@@ -9151,7 +9151,7 @@ void Server::_commit_slave_rename(MDRequestRef& mdr, int r,
       mdr->more()->is_ambiguous_auth = false;
     }
 
-    if (straydn && mdr->more()->slave_update_journaled) {
+    if (straydn && mdr->more()->peer_update_journaled) {
       CInode *strayin = straydn->get_projected_linkage()->get_inode();
       if (strayin && !strayin->snaprealm)
         mdcache->clear_dirty_bits_for_stray(strayin);
@@ -9160,16 +9160,16 @@ void Server::_commit_slave_rename(MDRequestRef& mdr, int r,
     mds->queue_waiters(finished);
     mdr->cleanup();
 
-    if (mdr->more()->slave_update_journaled) {
+    if (mdr->more()->peer_update_journaled) {
       // write a commit to the journal
-      ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rename_commit", mdr->reqid,
-                                          mdr->slave_to_mds, ESlaveUpdate::OP_COMMIT,
-                                          ESlaveUpdate::RENAME);
+      EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rename_commit", mdr->reqid,
+                                        mdr->peer_to_mds, EPeerUpdate::OP_COMMIT,
+                                        EPeerUpdate::RENAME);
       mdlog->start_entry(le);
-      submit_mdlog_entry(le, new C_MDS_CommittedSlave(this, mdr), mdr, __func__);
+      submit_mdlog_entry(le, new C_MDS_CommittedPeer(this, mdr), mdr, __func__);
       mdlog->flush();
     } else {
-      _committed_slave(mdr);
+      _committed_peer(mdr);
    }
   } else {
@@ -9181,13 +9181,13 @@
        dout(10) << " reversing inode export of " << *in << dendl;
        in->abort_export();
      }
-      if (mdcache->is_ambiguous_slave_update(mdr->reqid, mdr->slave_to_mds)) {
-        mdcache->remove_ambiguous_slave_update(mdr->reqid, mdr->slave_to_mds);
-        // rollback but preserve the slave request
-        do_rename_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr, false);
+      if (mdcache->is_ambiguous_peer_update(mdr->reqid, mdr->peer_to_mds)) {
+        mdcache->remove_ambiguous_peer_update(mdr->reqid, mdr->peer_to_mds);
+        // rollback but preserve the peer request
+        do_rename_rollback(mdr->more()->rollback_bl, mdr->peer_to_mds, mdr, false);
         mdr->more()->rollback_bl.clear();
       } else
-        do_rename_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr, true);
+        do_rename_rollback(mdr->more()->rollback_bl, mdr->peer_to_mds, mdr, true);
     } else {
       dout(10) << " rollback_bl empty, not rollback back rename (leader failed after getting extra witnesses?)" << dendl;
       // singleauth
@@ -9334,7 +9334,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef
   // can't use is_auth() in the resolve stage
   mds_rank_t whoami = mds->get_nodeid();
-  // slave
+  // peer
   ceph_assert(!destdn || destdn->authority().first != whoami);
   ceph_assert(!straydn || straydn->authority().first != whoami);
@@ -9492,8 +9492,8 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef
     dout(0) << " desti back to " << *target << dendl;
 
   // journal it
-  ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rename_rollback", rollback.reqid, leader,
-                                      ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::RENAME);
+  EPeerUpdate *le = new EPeerUpdate(mdlog, "peer_rename_rollback", rollback.reqid, leader,
+                                    EPeerUpdate::OP_ROLLBACK, EPeerUpdate::RENAME);
   mdlog->start_entry(le);
 
   if (srcdn && (srcdn->authority().first == whoami || force_journal_src)) {
@@ -9516,7 +9516,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef
       le->commit.add_primary_dentry(destdn, 0, true);
   }
 
-  // slave: no need to journal straydn
+  // peer: no need to journal straydn
 
   if (target && target != in && target->authority().first == whoami) {
     ceph_assert(rollback.orig_dest.remote_ino);
@@ -9550,7 +9550,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef
       mdcache->project_subtree_rename(in, destdir, srcdir);
   }
 
-  if (mdr && !mdr->more()->slave_update_journaled) {
+  if (mdr && !mdr->more()->peer_update_journaled) {
     ceph_assert(le->commit.empty());
     mdlog->cancel_entry(le);
     mut->ls = NULL;
@@ -9558,7 +9558,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef
   } else {
     ceph_assert(!le->commit.empty());
     if (mdr)
-      mdr->more()->slave_update_journaled = false;
+      mdr->more()->peer_update_journaled = false;
     MDSLogContextBase *fin = new C_MDS_LoggedRenameRollback(this, mut, mdr,
                                                             srcdn, srcdnpv, destdn, straydn,
                                                             splits, finish_mdr);
@@ -9635,7 +9635,7 @@ void Server::_rename_rollback_finish(MutationRef& mut, MDRequestRef& mdr, CDentr
     if (finish_mdr || mdr->aborted)
       mdcache->request_finish(mdr);
     else
-      mdr->more()->slave_rolling_back = false;
+      mdr->more()->peer_rolling_back = false;
   }
 
   mdcache->finish_rollback(mut->reqid, mdr);
@@ -9643,15 +9643,15 @@ void Server::_rename_rollback_finish(MutationRef& mut, MDRequestRef& mdr, CDentr
   mut->cleanup();
 }
 
-void Server::handle_slave_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack)
+void Server::handle_peer_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack)
 {
-  dout(10) << "handle_slave_rename_prep_ack " << *mdr
+  dout(10) << "handle_peer_rename_prep_ack " << *mdr
            << " witnessed by " << ack->get_source()
           << " " << *ack << dendl;
   mds_rank_t from = mds_rank_t(ack->get_source().num());
 
-  // note slave
-  mdr->more()->slaves.insert(from);
+  // note peer
+  mdr->more()->peers.insert(from);
 
   if (mdr->more()->srcdn_auth_mds == from &&
       mdr->more()->is_remote_frozen_authpin &&
      !mdr->more()->is_ambiguous_auth) {
@@ -9661,11 +9661,11 @@ void Server::handle_slave_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSS
   ceph_assert(mdr->more()->witnessed.count(from) == 0);
   if (ack->is_interrupted()) {
-    dout(10) << " slave request interrupted, noop" << dendl;
+    dout(10) << " peer request interrupted, noop" << dendl;
   } else if (ack->witnesses.empty()) {
     mdr->more()->witnessed.insert(from);
     if (!ack->is_not_journaled())
-      mdr->more()->has_journaled_slaves = true;
+      mdr->more()->has_journaled_peers = true;
   } else {
     dout(10) << " extra witnesses (srcdn replicas) are " << ack->witnesses << dendl;
     mdr->more()->extra_witnesses = ack->witnesses;
@@ -9680,47 +9680,47 @@ void Server::handle_slave_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSS
-  ceph_assert(mdr->more()->waiting_on_slave.count(from));
-  mdr->more()->waiting_on_slave.erase(from);
+  ceph_assert(mdr->more()->waiting_on_peer.count(from));
+  mdr->more()->waiting_on_peer.erase(from);
 
-  if (mdr->more()->waiting_on_slave.empty())
+  if (mdr->more()->waiting_on_peer.empty())
     dispatch_client_request(mdr);  // go again!
   else
-    dout(10) << "still waiting on slaves " << mdr->more()->waiting_on_slave << dendl;
+    dout(10) << "still waiting on peers " << mdr->more()->waiting_on_peer << dendl;
 }
 
-void Server::handle_slave_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack)
+void Server::handle_peer_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack)
 {
-  dout(10) << "handle_slave_rename_notify_ack " << *mdr << " from mds."
+  dout(10) << "handle_peer_rename_notify_ack " << *mdr << " from mds."
            << ack->get_source() << dendl;
 
-  ceph_assert(mdr->is_slave());
+  ceph_assert(mdr->is_peer());
 
   mds_rank_t from = mds_rank_t(ack->get_source().num());
 
-  if (mdr->more()->waiting_on_slave.count(from)) {
-    mdr->more()->waiting_on_slave.erase(from);
+  if (mdr->more()->waiting_on_peer.count(from)) {
+    mdr->more()->waiting_on_peer.erase(from);
 
-    if (mdr->more()->waiting_on_slave.empty()) {
-      if (mdr->slave_request)
-        dispatch_slave_request(mdr);
+    if (mdr->more()->waiting_on_peer.empty()) {
+      if (mdr->peer_request)
+        dispatch_peer_request(mdr);
     } else
       dout(10) << " still waiting for rename notify acks from "
-               << mdr->more()->waiting_on_slave << dendl;
+               << mdr->more()->waiting_on_peer << dendl;
   }
 }
 
-void Server::_slave_rename_sessions_flushed(MDRequestRef& mdr)
+void Server::_peer_rename_sessions_flushed(MDRequestRef& mdr)
 {
-  dout(10) << "_slave_rename_sessions_flushed " << *mdr << dendl;
+  dout(10) << "_peer_rename_sessions_flushed " << *mdr << dendl;
 
-  if (mdr->more()->waiting_on_slave.count(MDS_RANK_NONE)) {
-    mdr->more()->waiting_on_slave.erase(MDS_RANK_NONE);
+  if (mdr->more()->waiting_on_peer.count(MDS_RANK_NONE)) {
+    mdr->more()->waiting_on_peer.erase(MDS_RANK_NONE);
 
-    if (mdr->more()->waiting_on_slave.empty()) {
-      if (mdr->slave_request)
-        dispatch_slave_request(mdr);
+    if (mdr->more()->waiting_on_peer.empty()) {
+      if (mdr->peer_request)
+        dispatch_peer_request(mdr);
     } else
       dout(10) << " still waiting for rename notify acks from "
-               << mdr->more()->waiting_on_slave << dendl;
+               << mdr->more()->waiting_on_peer << dendl;
   }
 }
diff --git a/src/mds/Server.h b/src/mds/Server.h
index 1ac34ce899ba1..30cbd19d8a4d5 100644
--- a/src/mds/Server.h
+++ b/src/mds/Server.h
@@ -45,10 +45,10 @@ class MetricsHandler;
 enum {
   l_mdss_first = 1000,
   l_mdss_dispatch_client_request,
-  l_mdss_dispatch_slave_request,
+  l_mdss_dispatch_peer_request,
   l_mdss_handle_client_request,
   l_mdss_handle_client_session,
-  l_mdss_handle_slave_request,
+  l_mdss_handle_peer_request,
   l_mdss_req_create_latency,
   l_mdss_req_getattr_latency,
   l_mdss_req_getfilelock_latency,
@@ -165,11 +165,11 @@ public:
   void set_trace_dist(const ref_t<MClientReply> &reply, CInode *in, CDentry *dn,
                       MDRequestRef& mdr);
 
-  void handle_slave_request(const cref_t<MMDSSlaveRequest> &m);
-  void handle_slave_request_reply(const cref_t<MMDSSlaveRequest> &m);
-  void dispatch_slave_request(MDRequestRef& mdr);
-  void handle_slave_auth_pin(MDRequestRef& mdr);
-  void handle_slave_auth_pin_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack);
+  void handle_peer_request(const cref_t<MMDSPeerRequest> &m);
+  void handle_peer_request_reply(const cref_t<MMDSPeerRequest> &m);
+  void dispatch_peer_request(MDRequestRef& mdr);
+  void handle_peer_auth_pin(MDRequestRef& mdr);
+  void handle_peer_auth_pin_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack);
 
   // some helpers
   bool check_fragment_space(MDRequestRef& mdr, CDir *in);
@@ -242,11 +242,11 @@ public:
   void _link_remote_finish(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targeti,
                            version_t);
 
-  void handle_slave_link_prep(MDRequestRef& mdr);
-  void _logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm);
-  void _commit_slave_link(MDRequestRef& mdr, int r, CInode *targeti);
-  void _committed_slave(MDRequestRef& mdr);  // use for rename, too
-  void handle_slave_link_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &m);
+  void handle_peer_link_prep(MDRequestRef& mdr);
+  void _logged_peer_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm);
+  void _commit_peer_link(MDRequestRef& mdr, int r, CInode *targeti);
+  void _committed_peer(MDRequestRef& mdr);  // use for rename, too
+  void handle_peer_link_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
   void do_link_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr);
   void _link_rollback_finish(MutationRef& mut, MDRequestRef& mdr,
                              map<client_t,ref_t<MClientSnap>>& split);
@@ -260,10 +260,10 @@ public:
                       CDentry *dn, CDentry *straydn, version_t);
   bool _rmdir_prepare_witness(MDRequestRef& mdr, mds_rank_t who, vector<CDentry*>& trace, CDentry *straydn);
-  void handle_slave_rmdir_prep(MDRequestRef& mdr);
-  void _logged_slave_rmdir(MDRequestRef& mdr, CDentry *srcdn, CDentry *straydn);
-  void _commit_slave_rmdir(MDRequestRef& mdr, int r, CDentry *straydn);
-  void handle_slave_rmdir_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &ack);
+  void handle_peer_rmdir_prep(MDRequestRef& mdr);
+  void _logged_peer_rmdir(MDRequestRef& mdr, CDentry *srcdn, CDentry *straydn);
+  void _commit_peer_rmdir(MDRequestRef& mdr, int r, CDentry *straydn);
+  void handle_peer_rmdir_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack);
   void do_rmdir_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr);
   void _rmdir_rollback_finish(MDRequestRef& mdr, metareqid_t reqid, CDentry *dn, CDentry *straydn);
@@ -294,12 +294,12 @@ public:
   void _rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
 
   // slaving
-  void handle_slave_rename_prep(MDRequestRef& mdr);
-  void handle_slave_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &m);
-  void handle_slave_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSSlaveRequest> &m);
-  void _slave_rename_sessions_flushed(MDRequestRef& mdr);
-  void _logged_slave_rename(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
-  void _commit_slave_rename(MDRequestRef& mdr, int r, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
+  void handle_peer_rename_prep(MDRequestRef& mdr);
+  void handle_peer_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
+  void handle_peer_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
+  void _peer_rename_sessions_flushed(MDRequestRef& mdr);
+  void _logged_peer_rename(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
+  void _commit_peer_rename(MDRequestRef& mdr, int r, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
   void do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr, bool finish_mdr=false);
   void _rename_rollback_finish(MutationRef& mut, MDRequestRef& mdr, CDentry *srcdn, version_t srcdnpv,
                                CDentry *destdn, CDentry *staydn, map<client_t,ref_t<MClientSnap>> splits[2],
diff --git a/src/mds/SimpleLock.h b/src/mds/SimpleLock.h
index bf8b4cee90b99..dd43679056799 100644
--- a/src/mds/SimpleLock.h
+++ b/src/mds/SimpleLock.h
@@ -398,7 +398,7 @@ public:
   void get_xlock(MutationRef who, client_t client) {
     ceph_assert(get_xlock_by() == MutationRef());
     ceph_assert(state == LOCK_XLOCK || is_locallock() ||
-                state == LOCK_LOCK /* if we are a slave */);
+                state == LOCK_LOCK /* if we are a peer */);
     parent->get(MDSCacheObject::PIN_LOCK);
     more()->num_xlock++;
     more()->xlock_by = who;
@@ -407,7 +407,7 @@ public:
   void set_xlock_done() {
     ceph_assert(more()->xlock_by);
     ceph_assert(state == LOCK_XLOCK || is_locallock() ||
-                state == LOCK_LOCK /* if we are a slave */);
+                state == LOCK_LOCK /* if we are a peer */);
     if (!is_locallock())
       state = LOCK_XLOCKDONE;
     more()->xlock_by.reset();
@@ -415,7 +415,7 @@ public:
   void put_xlock() {
     ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE ||
                 state == LOCK_XLOCKSNAP || state == LOCK_LOCK_XLOCK ||
-                state == LOCK_LOCK || /* if we are a leader of a slave */
+                state == LOCK_LOCK || /* if we are a leader of a peer */
                 is_locallock());
     --more()->num_xlock;
     parent->put(MDSCacheObject::PIN_LOCK);
diff --git a/src/mds/events/EMetaBlob.h b/src/mds/events/EMetaBlob.h
index 52bcce20e1983..29bff82128e9b 100644
--- a/src/mds/events/EMetaBlob.h
+++ b/src/mds/events/EMetaBlob.h
@@ -29,7 +29,7 @@ class MDSRank;
 class MDLog;
 class LogSegment;
-struct MDSlaveUpdate;
+struct MDPeerUpdate;
 
 /*
  * a bunch of metadata in the journal
@@ -590,7 +590,7 @@ private:
   }
 
   void update_segment(LogSegment *ls);
-  void replay(MDSRank *mds, LogSegment *ls, MDSlaveUpdate *su=NULL);
+  void replay(MDSRank *mds, LogSegment *ls, MDPeerUpdate *su=NULL);
 };
 WRITE_CLASS_ENCODER_FEATURES(EMetaBlob)
 WRITE_CLASS_ENCODER_FEATURES(EMetaBlob::fullbit)
diff --git a/src/mds/events/ESlaveUpdate.h b/src/mds/events/EPeerUpdate.h
similarity index 86%
rename from src/mds/events/ESlaveUpdate.h
rename to src/mds/events/EPeerUpdate.h
index e33692c49a5c2..38f53735eb1c7 100644
--- a/src/mds/events/ESlaveUpdate.h
+++ b/src/mds/events/EPeerUpdate.h
@@ -1,4 +1,4 @@
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- 
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
 /*
  * Ceph - scalable distributed file system
@@ -7,13 +7,13 @@
  *
  * This is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
- * License version 2.1, as published by the Free Software 
+ * License version 2.1, as published by the Free Software
  * Foundation.  See file COPYING.
- * 
+ *
  */
 
-#ifndef CEPH_MDS_ESLAVEUPDATE_H
-#define CEPH_MDS_ESLAVEUPDATE_H
+#ifndef CEPH_MDS_EPEERUPDATE_H
+#define CEPH_MDS_EPEERUPDATE_H
 
 #include <string_view>
 
@@ -21,10 +21,10 @@
 #include "EMetaBlob.h"
 
 /*
- * rollback records, for remote/slave updates, which may need to be manually
+ * rollback records, for remote/peer updates, which may need to be manually
  * rolled back during journal replay.  (or while active if leader fails, but in
  * that case these records aren't needed.)
- */ 
+ */
 struct link_rollback {
   metareqid_t reqid;
   inodeno_t ino;
@@ -73,7 +73,7 @@ struct rename_rollback {
     string dname;
     char remote_d_type;
     utime_t old_ctime;
-    
+
     drec() : remote_d_type((char)S_IFREG) {}
 
     void encode(bufferlist& bl) const;
@@ -99,19 +99,19 @@ WRITE_CLASS_ENCODER(rename_rollback::drec)
 WRITE_CLASS_ENCODER(rename_rollback)
 
-class ESlaveUpdate : public LogEvent {
+class EPeerUpdate : public LogEvent {
 public:
   const static int OP_PREPARE = 1;
   const static int OP_COMMIT = 2;
   const static int OP_ROLLBACK = 3;
-  
+
   const static int LINK = 1;
   const static int RENAME = 2;
   const static int RMDIR = 3;
 
   /*
    * we journal a rollback metablob that contains the unmodified metadata
-   * too, because we may be updating previously dirty metadata, which 
+   * too, because we may be updating previously dirty metadata, which
    * will allow old log segments to be trimmed.  if we end of rolling back,
    * those updates could be lost.. so we re-journal the unmodified metadata,
    * and replay will apply _either_ commit or rollback.
@@ -124,14 +124,14 @@ public:
   __u8 op;  // prepare, commit, abort
   __u8 origop;  // link | rename
 
-  ESlaveUpdate() : LogEvent(EVENT_SLAVEUPDATE), leader(0), op(0), origop(0) { }
-  ESlaveUpdate(MDLog *mdlog, std::string_view s, metareqid_t ri, int leadermds, int o, int oo) :
-    LogEvent(EVENT_SLAVEUPDATE),
+  EPeerUpdate() : LogEvent(EVENT_PEERUPDATE), leader(0), op(0), origop(0) { }
+  EPeerUpdate(MDLog *mdlog, std::string_view s, metareqid_t ri, int leadermds, int o, int oo) :
+    LogEvent(EVENT_PEERUPDATE),
     type(s), reqid(ri), leader(leadermds),
     op(o), origop(oo) { }
-  
+
   void print(ostream& out) const override {
     if (type.length())
       out << type << " ";
@@ -148,10 +148,10 @@ public:
   void encode(bufferlist& bl, uint64_t features) const override;
   void decode(bufferlist::const_iterator& bl) override;
   void dump(Formatter *f) const override;
-  static void generate_test_instances(std::list<ESlaveUpdate*>& ls);
+  static void generate_test_instances(std::list<EPeerUpdate*>& ls);
 
   void replay(MDSRank *mds) override;
 };
-WRITE_CLASS_ENCODER_FEATURES(ESlaveUpdate)
+WRITE_CLASS_ENCODER_FEATURES(EPeerUpdate)
 
 #endif
diff --git a/src/mds/events/EUpdate.h b/src/mds/events/EUpdate.h
index b9f173f82836e..d320014a1f036 100644
--- a/src/mds/events/EUpdate.h
+++ b/src/mds/events/EUpdate.h
@@ -27,12 +27,12 @@ public:
   bufferlist client_map;
   version_t cmapv;
   metareqid_t reqid;
-  bool had_slaves;
+  bool had_peers;
 
-  EUpdate() : LogEvent(EVENT_UPDATE), cmapv(0), had_slaves(false) { }
+  EUpdate() : LogEvent(EVENT_UPDATE), cmapv(0), had_peers(false) { }
   EUpdate(MDLog *mdlog, std::string_view s) :
     LogEvent(EVENT_UPDATE),
-    type(s), cmapv(0), had_slaves(false) { }
+    type(s), cmapv(0), had_peers(false) { }
 
   void print(ostream& out) const override {
     if (type.length())
diff --git a/src/mds/journal.cc b/src/mds/journal.cc
index 57eb0aca03ef0..1b18d9b044e9b 100644
--- a/src/mds/journal.cc
+++ b/src/mds/journal.cc
@@ -23,7 +23,7 @@
 
 #include "events/ENoOp.h"
 #include "events/EUpdate.h"
-#include "events/ESlaveUpdate.h"
+#include "events/EPeerUpdate.h"
 #include "events/EOpen.h"
 #include "events/ECommitted.h"
 #include "events/EPurged.h"
@@ -111,20 +111,20 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
     }
   }
 
-  // leader ops with possibly uncommitted slaves
+  // leader ops with possibly uncommitted peers
   for (set<metareqid_t>::iterator p = uncommitted_leaders.begin();
        p != uncommitted_leaders.end();
       ++p) {
-    dout(10) << "try_to_expire waiting for slaves to ack commit on " << *p << dendl;
+    dout(10) << "try_to_expire waiting for peers to ack commit on " << *p << dendl;
     mds->mdcache->wait_for_uncommitted_leader(*p, gather_bld.new_sub());
   }
 
-  // slave ops that haven't been committed
-  for (set<metareqid_t>::iterator p = uncommitted_slaves.begin();
-       p != uncommitted_slaves.end();
+  // peer ops that haven't been committed
+  for (set<metareqid_t>::iterator p = uncommitted_peers.begin();
+       p != uncommitted_peers.end();
       ++p) {
     dout(10) << "try_to_expire waiting for leader to ack OP_FINISH on " << *p << dendl;
-    mds->mdcache->wait_for_uncommitted_slave(*p, gather_bld.new_sub());
+    mds->mdcache->wait_for_uncommitted_peer(*p, gather_bld.new_sub());
   }
 
   // uncommitted fragments
@@ -1072,7 +1072,7 @@ void EMetaBlob::generate_test_instances(std::list<EMetaBlob*>& ls)
   ls.push_back(new EMetaBlob());
 }
 
-void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
+void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDPeerUpdate *peerup)
 {
   dout(10) << "EMetaBlob.replay " << lump_map.size() << " dirlumps by " << client_name << dendl;
@@ -1364,15 +1364,15 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
       if (olddir) {
        if (olddir->authority() != CDIR_AUTH_UNDEF &&
            renamed_diri->authority() == CDIR_AUTH_UNDEF) {
-          ceph_assert(slaveup); // auth to non-auth, must be slave prepare
+          ceph_assert(peerup); // auth to non-auth, must be peer prepare
           frag_vec_t leaves;
           renamed_diri->dirfragtree.get_leaves(leaves);
           for (const auto& leaf : leaves) {
             CDir *dir = renamed_diri->get_dirfrag(leaf);
             ceph_assert(dir);
             if (dir->get_dir_auth() == CDIR_AUTH_UNDEF)
-              // preserve subtree bound until slave commit
-              slaveup->olddirs.insert(dir->inode);
+              // preserve subtree bound until peer commit
+              peerup->olddirs.insert(dir->inode);
             else
               dir->state_set(CDir::STATE_AUTH);
@@ -1386,8 +1386,8 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
        // see if we can discard the subtree we renamed out of
        CDir *root = mds->mdcache->get_subtree_root(olddir);
        if (root->get_dir_auth() == CDIR_AUTH_UNDEF) {
-         if (slaveup) // preserve the old dir until slave commit
-           slaveup->olddirs.insert(olddir->inode);
+         if (peerup) // preserve the old dir until peer commit
+           peerup->olddirs.insert(olddir->inode);
          else
            mds->mdcache->try_trim_non_auth_subtree(root);
        }
@@ -1432,8 +1432,8 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
     dout(10) << " unlinked set contains " << unlinked << dendl;
     for (map<CInode*, CDir*>::iterator p = unlinked.begin(); p != unlinked.end(); ++p) {
       CInode *in = p->first;
-      if (slaveup) { // preserve unlinked inodes until slave commit
-        slaveup->unlinked.insert(in);
+      if (peerup) { // preserve unlinked inodes until peer commit
+        peerup->unlinked.insert(in);
        if (in->snaprealm)
          in->snaprealm->adjust_parent();
       } else
@@ -2074,7 +2074,7 @@ void EUpdate::encode(bufferlist &bl, uint64_t features) const
   encode(client_map, bl);
   encode(cmapv, bl);
   encode(reqid, bl);
-  encode(had_slaves, bl);
+  encode(had_peers, bl);
   ENCODE_FINISH(bl);
 }
 
@@ -2089,7 +2089,7 @@ void EUpdate::decode(bufferlist::const_iterator &bl)
   if (struct_v >= 3)
     decode(cmapv, bl);
   decode(reqid, bl);
-  decode(had_slaves, bl);
+  decode(had_peers, bl);
   DECODE_FINISH(bl);
 }
 
@@ -2103,7 +2103,7 @@ void EUpdate::dump(Formatter *f) const
   f->dump_int("client map length", client_map.length());
   f->dump_int("client map version", cmapv);
   f->dump_stream("reqid") << reqid;
-  f->dump_string("had slaves", had_slaves ? "true" : "false");
+  f->dump_string("had peers", had_peers ? "true" : "false");
 }
 
 void EUpdate::generate_test_instances(std::list<EUpdate*>& ls)
@@ -2120,7 +2120,7 @@ void EUpdate::update_segment()
   if (client_map.length())
     segment->sessionmapv = cmapv;
-  if (had_slaves)
+  if (had_peers)
     segment->uncommitted_leaders.insert(reqid);
 }
 
@@ -2129,11 +2129,11 @@ void EUpdate::replay(MDSRank *mds)
   auto&& segment = get_segment();
   metablob.replay(mds, segment);
 
-  if (had_slaves) {
-    dout(10) << "EUpdate.replay " << reqid << " had slaves, expecting a matching ECommitted" << dendl;
+  if (had_peers) {
+    dout(10) << "EUpdate.replay " << reqid << " had peers, expecting a matching ECommitted" << dendl;
     segment->uncommitted_leaders.insert(reqid);
-    set<mds_rank_t> slaves;
-    mds->mdcache->add_uncommitted_leader(reqid, segment, slaves, true);
+    set<mds_rank_t> peers;
+    mds->mdcache->add_uncommitted_leader(reqid, segment, peers, true);
   }
 
   if (client_map.length()) {
@@ -2277,7 +2277,7 @@ void ECommitted::generate_test_instances(std::list<ECommitted*>& ls)
 }
 
 // -----------------------
-// ESlaveUpdate
+// EPeerUpdate
 
 void link_rollback::encode(bufferlist &bl) const
 {
@@ -2469,7 +2469,7 @@ void rename_rollback::generate_test_instances(std::list<rename_rollback*>& ls)
   ls.back()->stray.remote_d_type = IFTODT(S_IFREG);
 }
 
-void ESlaveUpdate::encode(bufferlist &bl, uint64_t features) const
+void EPeerUpdate::encode(bufferlist &bl, uint64_t features) const
 {
   ENCODE_START(3, 3, bl);
   encode(stamp, bl);
@@ -2483,7 +2483,7 @@ void ESlaveUpdate::encode(bufferlist &bl, uint64_t features) const
   ENCODE_FINISH(bl);
 }
 
-void ESlaveUpdate::decode(bufferlist::const_iterator &bl)
+void EPeerUpdate::decode(bufferlist::const_iterator &bl)
 {
   DECODE_START_LEGACY_COMPAT_LEN(3, 3, 3, bl);
   if (struct_v >= 2)
@@ -2498,7 +2498,7 @@ void ESlaveUpdate::decode(bufferlist::const_iterator &bl)
   DECODE_FINISH(bl);
 }
 
-void ESlaveUpdate::dump(Formatter *f) const
+void EPeerUpdate::dump(Formatter *f) const
 {
   f->open_object_section("metablob");
   commit.dump(f);
@@ -2512,38 +2512,38 @@ void ESlaveUpdate::dump(Formatter *f) const
   f->dump_int("original op", origop);
 }
 
-void ESlaveUpdate::generate_test_instances(std::list<ESlaveUpdate*>& ls)
+void EPeerUpdate::generate_test_instances(std::list<EPeerUpdate*>& ls)
 {
-  ls.push_back(new ESlaveUpdate());
+  ls.push_back(new EPeerUpdate());
 }
 
-void ESlaveUpdate::replay(MDSRank *mds)
+void EPeerUpdate::replay(MDSRank *mds)
 {
-  MDSlaveUpdate *su;
+  MDPeerUpdate *su;
   auto&& segment = get_segment();
   switch (op) {
-  case ESlaveUpdate::OP_PREPARE:
-    dout(10) << "ESlaveUpdate.replay prepare " << reqid << " for mds." << leader
+  case EPeerUpdate::OP_PREPARE:
+    dout(10) << "EPeerUpdate.replay prepare " << reqid << " for mds." << leader
             << ": applying commit, saving rollback info" << dendl;
-    su = new MDSlaveUpdate(origop, rollback);
+    su = new MDPeerUpdate(origop, rollback);
     commit.replay(mds, segment, su);
-    mds->mdcache->add_uncommitted_slave(reqid, segment, leader, su);
+    mds->mdcache->add_uncommitted_peer(reqid, segment, leader, su);
     break;
 
-  case ESlaveUpdate::OP_COMMIT:
-    dout(10) << "ESlaveUpdate.replay commit " << reqid << " for mds." << leader << dendl;
-    mds->mdcache->finish_uncommitted_slave(reqid, false);
+  case EPeerUpdate::OP_COMMIT:
+    dout(10) << "EPeerUpdate.replay commit " << reqid << " for mds." << leader << dendl;
+    mds->mdcache->finish_uncommitted_peer(reqid, false);
    break;
 
-  case ESlaveUpdate::OP_ROLLBACK:
-    dout(10) << "ESlaveUpdate.replay abort " << reqid << " for mds."
+  case EPeerUpdate::OP_ROLLBACK:
+    dout(10) << "EPeerUpdate.replay abort " << reqid << " for mds."
             << leader << ": applying rollback commit blob" << dendl;
     commit.replay(mds, segment);
-    mds->mdcache->finish_uncommitted_slave(reqid, false);
+    mds->mdcache->finish_uncommitted_peer(reqid, false);
    break;
 
   default:
-    mds->clog->error() << "invalid op in ESlaveUpdate";
+    mds->clog->error() << "invalid op in EPeerUpdate";
     mds->damaged();
     ceph_abort();  // Should be unreachable because damaged() calls respawn()
   }
diff --git a/src/messages/MMDSCacheRejoin.h b/src/messages/MMDSCacheRejoin.h
index b0a87af1e3b00..09f304ae34803 100644
--- a/src/messages/MMDSCacheRejoin.h
+++ b/src/messages/MMDSCacheRejoin.h
@@ -155,11 +155,11 @@ public:
   WRITE_CLASS_ENCODER(lock_bls)
 
   // authpins, xlocks
-  struct slave_reqid {
+  struct peer_reqid {
     metareqid_t reqid;
     __u32 attempt;
-    slave_reqid() : attempt(0) {}
-    slave_reqid(const metareqid_t& r, __u32 a)
+    peer_reqid() : attempt(0) {}
+    peer_reqid(const metareqid_t& r, __u32 a)
       : reqid(r), attempt(a) {}
     void encode(ceph::buffer::list& bl) const {
       using ceph::encode;
@@ -202,16 +202,16 @@ public:
     encode(bl, inode_base);
   }
   void add_inode_authpin(vinodeno_t ino, const metareqid_t& ri, __u32 attempt) {
-    authpinned_inodes[ino].push_back(slave_reqid(ri, attempt));
+    authpinned_inodes[ino].push_back(peer_reqid(ri, attempt));
   }
   void add_inode_frozen_authpin(vinodeno_t ino, const metareqid_t& ri, __u32 attempt) {
-    frozen_authpin_inodes[ino] = slave_reqid(ri, attempt);
+    frozen_authpin_inodes[ino] = peer_reqid(ri, attempt);
   }
   void add_inode_xlock(vinodeno_t ino, int lt, const metareqid_t& ri, __u32 attempt) {
-    xlocked_inodes[ino][lt] = slave_reqid(ri, attempt);
+    xlocked_inodes[ino][lt] = peer_reqid(ri, attempt);
   }
   void add_inode_wrlock(vinodeno_t ino, int lt, const metareqid_t& ri, __u32 attempt) {
-    wrlocked_inodes[ino][lt].push_back(slave_reqid(ri, attempt));
+    wrlocked_inodes[ino][lt].push_back(peer_reqid(ri, attempt));
   }
 
   void add_scatterlock_state(CInode *in) {
@@ -246,11 +246,11 @@ public:
   }
   void add_dentry_authpin(dirfrag_t df, std::string_view dname, snapid_t last,
                           const metareqid_t& ri, __u32 attempt) {
-    authpinned_dentries[df][string_snap_t(dname, last)].push_back(slave_reqid(ri, attempt));
+    authpinned_dentries[df][string_snap_t(dname, last)].push_back(peer_reqid(ri, attempt));
   }
   void add_dentry_xlock(dirfrag_t df, std::string_view dname, snapid_t last,
                         const metareqid_t& ri, __u32 attempt) {
-    xlocked_dentries[df][string_snap_t(dname, last)] = slave_reqid(ri, attempt);
+    xlocked_dentries[df][string_snap_t(dname, last)] = peer_reqid(ri, attempt);
   }
 
   // -- encoding --
@@ -330,12 +330,12 @@ public:
   ceph::buffer::list inode_locks;
   std::map<dirfrag_t, ceph::buffer::list> dirfrag_bases;
 
-  std::map<vinodeno_t, std::list<slave_reqid> > authpinned_inodes;
-  std::map<vinodeno_t, slave_reqid> frozen_authpin_inodes;
-  std::map<vinodeno_t, std::map<__s32, slave_reqid> > xlocked_inodes;
-  std::map<vinodeno_t, std::map<__s32, std::list<slave_reqid> > > wrlocked_inodes;
-  std::map<dirfrag_t, std::map<string_snap_t, std::list<slave_reqid> > > authpinned_dentries;
-  std::map<dirfrag_t, std::map<string_snap_t, slave_reqid> > xlocked_dentries;
+  std::map<vinodeno_t, std::list<peer_reqid> > authpinned_inodes;
+  std::map<vinodeno_t, peer_reqid> frozen_authpin_inodes;
+  std::map<vinodeno_t, std::map<__s32, peer_reqid> > xlocked_inodes;
+  std::map<vinodeno_t, std::map<__s32, std::list<peer_reqid> > > wrlocked_inodes;
+  std::map<dirfrag_t, std::map<string_snap_t, std::list<peer_reqid> > > authpinned_dentries;
+  std::map<dirfrag_t, std::map<string_snap_t, peer_reqid> > xlocked_dentries;
 
 private:
   template<class T, typename... Args>
@@ -354,9 +354,9 @@ WRITE_CLASS_ENCODER(MMDSCacheRejoin::dirfrag_strong)
 WRITE_CLASS_ENCODER(MMDSCacheRejoin::dn_strong)
 WRITE_CLASS_ENCODER(MMDSCacheRejoin::dn_weak)
 WRITE_CLASS_ENCODER(MMDSCacheRejoin::lock_bls)
-WRITE_CLASS_ENCODER(MMDSCacheRejoin::slave_reqid)
+WRITE_CLASS_ENCODER(MMDSCacheRejoin::peer_reqid)
 
-inline std::ostream& operator<<(std::ostream& out, const MMDSCacheRejoin::slave_reqid& r) {
+inline std::ostream& operator<<(std::ostream& out, const MMDSCacheRejoin::peer_reqid& r) {
   return out << r.reqid << '.' << r.attempt;
 }
diff --git a/src/messages/MMDSSlaveRequest.h b/src/messages/MMDSPeerRequest.h
similarity index 92%
rename from src/messages/MMDSSlaveRequest.h
rename to src/messages/MMDSPeerRequest.h
index 4c1f2a6c287cb..11b90247d3c8c 100644
--- a/src/messages/MMDSSlaveRequest.h
+++ b/src/messages/MMDSPeerRequest.h
@@ -1,4 +1,4 @@
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- 
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
 /*
  * Ceph - scalable distributed file system
@@ -7,19 +7,19 @@
  *
  * This is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
- * License version 2.1, as published by the Free Software 
+ * License version 2.1, as published by the Free Software
  * Foundation.  See file COPYING.
- * 
+ *
 */
 
-#ifndef CEPH_MMDSSLAVEREQUEST_H
-#define CEPH_MMDSSLAVEREQUEST_H
+#ifndef CEPH_MMDSPEERREQUEST_H
+#define CEPH_MMDSPEERREQUEST_H
 
 #include "mds/mdstypes.h"
 #include "messages/MMDSOp.h"
 
-class MMDSSlaveRequest : public MMDSOp {
+class MMDSPeerRequest : public MMDSOp {
   static constexpr int HEAD_VERSION = 1;
   static constexpr int COMPAT_VERSION = 1;
 public:
@@ -56,7 +56,7 @@ public:
 
   static const char *get_opname(int o) {
-    switch (o) { 
+    switch (o) {
     case OP_XLOCK: return "xlock";
     case OP_XLOCKACK: return "xlock_ack";
     case OP_UNXLOCK: return "unxlock";
@@ -110,7 +110,7 @@ public:
   // for locking
   __u16 lock_type;  // lock object type
   MDSCacheObjectInfo object_info;
-  
+
   // for authpins
   std::vector<MDSCacheObjectInfo> authpins;
@@ -165,12 +165,12 @@ public:
   ceph::buffer::list& get_lock_data() { return inode_export; }
 
 protected:
-  MMDSSlaveRequest() : MMDSOp{MSG_MDS_SLAVE_REQUEST, HEAD_VERSION, COMPAT_VERSION} { }
-  MMDSSlaveRequest(metareqid_t ri, __u32 att, int o) :
-    MMDSOp{MSG_MDS_SLAVE_REQUEST, HEAD_VERSION, COMPAT_VERSION},
+  MMDSPeerRequest() : MMDSOp{MSG_MDS_PEER_REQUEST, HEAD_VERSION, COMPAT_VERSION} { }
+  MMDSPeerRequest(metareqid_t ri, __u32 att, int o) :
+    MMDSOp{MSG_MDS_PEER_REQUEST, HEAD_VERSION, COMPAT_VERSION},
     reqid(ri), attempt(att), op(o), flags(0), lock_type(0),
     inode_export_v(0), srcdn_auth(MDS_RANK_NONE) { }
-  ~MMDSSlaveRequest() override {}
+  ~MMDSPeerRequest() override {}
 
 public:
   void encode_payload(uint64_t features) override {
@@ -215,16 +215,16 @@ public:
     decode(desti_snapbl, p);
   }
 
-  std::string_view get_type_name() const override { return "slave_request"; }
+  std::string_view get_type_name() const override { return "peer_request"; }
   void print(std::ostream& out) const override {
-    out << "slave_request(" << reqid
+    out << "peer_request(" << reqid
        << "." << attempt
-       << " " << get_opname(op) 
+       << " " << get_opname(op)
        << ")";
-  }  
+  }
 private:
   template<class T, typename... Args>
-  friend boost::intrusive_ptr<T> ceph::make_message(Args&&... args); 
+  friend boost::intrusive_ptr<T> ceph::make_message(Args&&... args);
 };
diff --git a/src/messages/MMDSResolve.h b/src/messages/MMDSResolve.h
index 0de6d1150cd97..2fa890ec3c6a8 100644
--- a/src/messages/MMDSResolve.h
+++ b/src/messages/MMDSResolve.h
@@ -27,12 +27,12 @@ public:
   std::map<dirfrag_t, std::vector<dirfrag_t>> subtrees;
   std::map<dirfrag_t, std::vector<dirfrag_t>> ambiguous_imports;
 
-  class slave_inode_cap {
+  class peer_inode_cap {
   public:
     inodeno_t ino;
     std::map<client_t, Capability::Export> cap_exports;
-    slave_inode_cap() {}
-    slave_inode_cap(inodeno_t a, map<client_t, Capability::Export> b) : ino(a), cap_exports(b) {}
+    peer_inode_cap() {}
+    peer_inode_cap(inodeno_t a, map<client_t, Capability::Export> b) : ino(a), cap_exports(b) {}
     void encode(ceph::buffer::list &bl) const
     {
       ENCODE_START(1, 1, bl);
@@ -48,12 +48,12 @@ public:
       DECODE_FINISH(blp);
     }
   };
-  WRITE_CLASS_ENCODER(slave_inode_cap)
+  WRITE_CLASS_ENCODER(peer_inode_cap)
 
-  struct slave_request {
+  struct peer_request {
     ceph::buffer::list inode_caps;
     bool committing;
-    slave_request() : committing(false) {}
+    peer_request() : committing(false) {}
     void encode(ceph::buffer::list &bl) const {
       ENCODE_START(1, 1, bl);
       encode(inode_caps, bl);
@@ -68,7 +68,7 @@ public:
     }
   };
 
-  std::map<metareqid_t, slave_request> slave_requests;
+  std::map<metareqid_t, peer_request> peer_requests;
 
   // table client information
   struct table_client {
@@ -104,7 +104,7 @@ public:
   void print(std::ostream& out) const override {
     out << "mds_resolve(" << subtrees.size()
        << "+" << ambiguous_imports.size()
-       << " subtrees +" << slave_requests.size() << " slave requests)";
+       << " subtrees +" << peer_requests.size() << " peer requests)";
   }
 
   void add_subtree(dirfrag_t im) {
@@ -118,12 +118,12 @@ public:
     ambiguous_imports[im] = m;
   }
 
-  void add_slave_request(metareqid_t reqid, bool committing) {
-    slave_requests[reqid].committing = committing;
+  void add_peer_request(metareqid_t reqid, bool committing) {
+    peer_requests[reqid].committing = committing;
   }
 
-  void add_slave_request(metareqid_t reqid, ceph::buffer::list& bl) {
-    slave_requests[reqid].inode_caps = std::move(bl);
+  void add_peer_request(metareqid_t reqid, ceph::buffer::list& bl) {
+    peer_requests[reqid].inode_caps = std::move(bl);
   }
 
   void add_table_commits(int table, const std::set<version_t>& pending_commits) {
@@ -134,7 +134,7 @@ public:
     using ceph::encode;
     encode(subtrees, payload);
     encode(ambiguous_imports, payload);
-    encode(slave_requests, payload);
+    encode(peer_requests, payload);
     encode(table_clients, payload);
   }
   void decode_payload() override {
@@ -142,7 +142,7 @@ public:
     auto p = payload.cbegin();
     decode(subtrees, p);
     decode(ambiguous_imports, p);
-    decode(slave_requests, p);
+    decode(peer_requests, p);
     decode(table_clients, p);
   }
 private:
@@ -150,11 +150,11 @@ private:
   template<class T, typename... Args>
   friend boost::intrusive_ptr<T> ceph::make_message(Args&&... args);
 };
 
-inline std::ostream& operator<<(std::ostream& out, const MMDSResolve::slave_request&) {
+inline std::ostream& operator<<(std::ostream& out, const MMDSResolve::peer_request&) {
   return out;
 }
 
-WRITE_CLASS_ENCODER(MMDSResolve::slave_request)
+WRITE_CLASS_ENCODER(MMDSResolve::peer_request)
 WRITE_CLASS_ENCODER(MMDSResolve::table_client)
-WRITE_CLASS_ENCODER(MMDSResolve::slave_inode_cap)
+WRITE_CLASS_ENCODER(MMDSResolve::peer_inode_cap)
 
 #endif
diff --git a/src/messages/MMDSResolveAck.h b/src/messages/MMDSResolveAck.h
index de92d30f8bc04..4af32fb29c303 100644
--- a/src/messages/MMDSResolveAck.h
+++ b/src/messages/MMDSResolveAck.h
@@ -35,7 +35,7 @@ public:
 
   /*void print(ostream& out) const {
     out << "resolve_ack.size() << "+" << ambiguous_imap.size()
-       << " imports +" << slave_requests.size() << " slave requests)";
+       << " imports +" << peer_requests.size() << " peer requests)";
   }
   */
diff --git a/src/msg/Message.cc b/src/msg/Message.cc
index efd33ddafc4bc..3dd7c02ee7ac9 100644
--- a/src/msg/Message.cc
+++ b/src/msg/Message.cc
@@ -130,7 +130,7 @@
 #include "messages/MClientQuota.h"
 #include "messages/MClientMetrics.h"
 
-#include "messages/MMDSSlaveRequest.h"
+#include "messages/MMDSPeerRequest.h"
 
 #include "messages/MMDSMap.h"
 #include "messages/MFSMap.h"
@@ -694,8 +694,8 @@ Message *decode_message(CephContext *cct,
     break;
 
     // mds
-  case MSG_MDS_SLAVE_REQUEST:
-    m = make_message<MMDSSlaveRequest>();
+  case MSG_MDS_PEER_REQUEST:
+    m = make_message<MMDSPeerRequest>();
     break;
 
   case CEPH_MSG_MDS_MAP:
diff --git a/src/msg/Message.h b/src/msg/Message.h
index d1554ff9dfe03..59700bbb7f9e8 100644
--- a/src/msg/Message.h
+++ b/src/msg/Message.h
@@ -146,7 +146,7 @@
 
 // *** MDS ***
 
 #define MSG_MDS_BEACON             100  // to monitor
-#define MSG_MDS_SLAVE_REQUEST      101
+#define MSG_MDS_PEER_REQUEST       101
 #define MSG_MDS_TABLE_REQUEST      102
 
                                 // 150 already in use (MSG_OSD_RECOVERY_RESERVE)
diff --git a/src/msg/MessageRef.h b/src/msg/MessageRef.h
index 4f30f0b352a91..2c11aced52b34 100644
--- a/src/msg/MessageRef.h
+++ b/src/msg/MessageRef.h
@@ -88,7 +88,7 @@ class MMDSOpenIno;
 class MMDSOpenInoReply;
 class MMDSResolveAck;
 class MMDSResolve;
-class MMDSSlaveRequest;
+class MMDSPeerRequest;
 class MMDSSnapUpdate;
 class MMDSTableRequest;
 class MMgrBeacon;
diff --git a/src/tools/ceph-dencoder/common_types.h b/src/tools/ceph-dencoder/common_types.h
index 36ff42e81a17c..fdad619a62688 100644
--- a/src/tools/ceph-dencoder/common_types.h
+++ b/src/tools/ceph-dencoder/common_types.h
@@ -273,8 +273,8 @@ MESSAGE(MMDSResolve)
 #include "messages/MMDSResolveAck.h"
 MESSAGE(MMDSResolveAck)
 
-#include "messages/MMDSSlaveRequest.h"
-MESSAGE(MMDSSlaveRequest)
+#include "messages/MMDSPeerRequest.h"
+MESSAGE(MMDSPeerRequest)
 
 #include "messages/MMDSSnapUpdate.h"
 MESSAGE(MMDSSnapUpdate)
diff --git a/src/tools/ceph-dencoder/mds_types.h b/src/tools/ceph-dencoder/mds_types.h
index 583d5fc7c87c2..9406bf88bc740 100644
--- a/src/tools/ceph-dencoder/mds_types.h
+++ b/src/tools/ceph-dencoder/mds_types.h
@@ -90,12 +90,12 @@ TYPE_FEATUREFUL_NOCOPY(ESession)
 #include "mds/events/ESessions.h"
 TYPE_FEATUREFUL_NOCOPY(ESessions)
 
-#include "mds/events/ESlaveUpdate.h"
+#include "mds/events/EPeerUpdate.h"
 TYPE(link_rollback)
 TYPE(rmdir_rollback)
 TYPE(rename_rollback::drec)
 TYPE(rename_rollback)
-TYPE_FEATUREFUL_NOCOPY(ESlaveUpdate)
+TYPE_FEATUREFUL_NOCOPY(EPeerUpdate)
 
 #include "mds/events/ESubtreeMap.h"
 TYPE_FEATUREFUL_NOCOPY(ESubtreeMap)
-- 
2.39.5