From 5e2130262e53ce47fc05985c296342044431b698 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Mon, 13 Jul 2020 16:36:12 -0500
Subject: [PATCH] mds: master -> leader

Signed-off-by: Sage Weil
---
 src/mds/Locker.cc             |  16 ++--
 src/mds/LogSegment.h          |   2 +-
 src/mds/MDCache.cc            | 134 +++++++++++++++++-----------------
 src/mds/MDCache.h             |  60 +++++++--------
 src/mds/Mutation.cc           |   6 +-
 src/mds/Mutation.h            |   4 +-
 src/mds/Server.cc             |  52 ++++++-------
 src/mds/Server.h              |   6 +-
 src/mds/SimpleLock.h          |   2 +-
 src/mds/events/ESlaveUpdate.h |  12 +--
 src/mds/journal.cc            |  36 ++++-----
 11 files changed, 165 insertions(+), 165 deletions(-)

diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc
index 07ec858a4af4e..909a570819245 100644
--- a/src/mds/Locker.cc
+++ b/src/mds/Locker.cc
@@ -242,7 +242,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
     if ((lock->get_type() == CEPH_LOCK_ISNAP ||
          lock->get_type() == CEPH_LOCK_IPOLICY) &&
         mds->is_cluster_degraded() &&
-        mdr->is_master() &&
+        mdr->is_leader() &&
         !mdr->is_queued_for_replay()) {
       // waiting for recovering mds, to guarantee replayed requests and mksnap/setlayout
       // get processed in proper order.
@@ -283,8 +283,8 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
       CDentry *dn = static_cast<CDentry*>(object);
       if (!dn->is_auth())
         continue;
-      if (mdr->is_master()) {
-        // master. wrlock versionlock so we can pipeline dentry updates to journal.
+      if (mdr->is_leader()) {
+        // leader. wrlock versionlock so we can pipeline dentry updates to journal.
         lov.add_wrlock(&dn->versionlock, i + 1);
       } else {
         // slave. exclusively lock the dentry version (i.e. block other journal updates).
@@ -297,8 +297,8 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
       CInode *in = static_cast<CInode*>(object);
       if (!in->is_auth())
         continue;
-      if (mdr->is_master()) {
-        // master. wrlock versionlock so we can pipeline inode updates to journal.
+      if (mdr->is_leader()) {
+        // leader. wrlock versionlock so we can pipeline inode updates to journal.
         lov.add_wrlock(&in->versionlock, i + 1);
       } else {
         // slave. exclusively lock the inode version (i.e. block other journal updates).
@@ -313,7 +313,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
       mustpin.insert(object);
     } else if (!object->is_auth() &&
               !lock->can_wrlock(_client) &&  // we might have to request a scatter
-              !mdr->is_slave()) {            // if we are slave (remote_wrlock), the master already authpinned
+              !mdr->is_slave()) {            // if we are slave (remote_wrlock), the leader already authpinned
       dout(15) << " will also auth_pin " << *object
               << " in case we need to request a scatter" << dendl;
       mustpin.insert(object);
@@ -556,7 +556,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
         continue;
       }
 
-      ceph_assert(mdr->is_master());
+      ceph_assert(mdr->is_leader());
       if (lock->needs_recover()) {
         if (mds->is_cluster_degraded()) {
           if (!mdr->is_queued_for_replay()) {
@@ -1414,7 +1414,7 @@ void Locker::try_eval(SimpleLock *lock, bool *pneed_issue)
    *
    * We can defer while freezing without causing a deadlock. Honor
    * scatter_wanted flag here. This will never get deferred by the
-   * checks above due to the auth_pin held by the master.
+   * checks above due to the auth_pin held by the leader.
    */
   if (lock->is_scatterlock()) {
     ScatterLock *slock = static_cast<ScatterLock*>(lock);
diff --git a/src/mds/LogSegment.h b/src/mds/LogSegment.h
index 3aad27bf29ee7..d07f9bd6eb14a 100644
--- a/src/mds/LogSegment.h
+++ b/src/mds/LogSegment.h
@@ -88,7 +88,7 @@ class LogSegment {
   MDSContext* purged_cb = nullptr;
 
   map > pending_commit_tids;  // mdstable
-  set<metareqid_t> uncommitted_masters;
+  set<metareqid_t> uncommitted_leaders;
   set<metareqid_t> uncommitted_slaves;
   set uncommitted_fragments;
 
diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc
index caf7cf1c10e37..845839f367ae3 100644
--- a/src/mds/MDCache.cc
+++ b/src/mds/MDCache.cc
@@ -2448,75 +2448,75 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob,
 
 /*
- * some handlers for master requests with slaves. we need to make
- * sure slaves journal commits before we forget we mastered them and
- * remove them from the uncommitted_masters map (used during recovery
+ * some handlers for leader requests with slaves. we need to make
+ * sure slaves journal commits before we forget we leadered them and
+ * remove them from the uncommitted_leaders map (used during recovery
  * to commit|abort slaves).
  */
 
-struct C_MDC_CommittedMaster : public MDCacheLogContext {
+struct C_MDC_CommittedLeader : public MDCacheLogContext {
   metareqid_t reqid;
-  C_MDC_CommittedMaster(MDCache *s, metareqid_t r) : MDCacheLogContext(s), reqid(r) {}
+  C_MDC_CommittedLeader(MDCache *s, metareqid_t r) : MDCacheLogContext(s), reqid(r) {}
   void finish(int r) override {
-    mdcache->_logged_master_commit(reqid);
+    mdcache->_logged_leader_commit(reqid);
   }
 };
 
-void MDCache::log_master_commit(metareqid_t reqid)
+void MDCache::log_leader_commit(metareqid_t reqid)
 {
-  dout(10) << "log_master_commit " << reqid << dendl;
-  uncommitted_masters[reqid].committing = true;
+  dout(10) << "log_leader_commit " << reqid << dendl;
+  uncommitted_leaders[reqid].committing = true;
   mds->mdlog->start_submit_entry(new ECommitted(reqid),
-                                 new C_MDC_CommittedMaster(this, reqid));
+                                 new C_MDC_CommittedLeader(this, reqid));
 }
 
-void MDCache::_logged_master_commit(metareqid_t reqid)
+void MDCache::_logged_leader_commit(metareqid_t reqid)
 {
-  dout(10) << "_logged_master_commit " << reqid << dendl;
-  ceph_assert(uncommitted_masters.count(reqid));
-  uncommitted_masters[reqid].ls->uncommitted_masters.erase(reqid);
-  mds->queue_waiters(uncommitted_masters[reqid].waiters);
-  uncommitted_masters.erase(reqid);
+  dout(10) << "_logged_leader_commit " << reqid << dendl;
+  ceph_assert(uncommitted_leaders.count(reqid));
+  uncommitted_leaders[reqid].ls->uncommitted_leaders.erase(reqid);
+  mds->queue_waiters(uncommitted_leaders[reqid].waiters);
+  uncommitted_leaders.erase(reqid);
 }
 
 // while active...
 
-void MDCache::committed_master_slave(metareqid_t r, mds_rank_t from)
+void MDCache::committed_leader_slave(metareqid_t r, mds_rank_t from)
 {
-  dout(10) << "committed_master_slave mds." << from << " on " << r << dendl;
-  ceph_assert(uncommitted_masters.count(r));
-  uncommitted_masters[r].slaves.erase(from);
-  if (!uncommitted_masters[r].recovering && uncommitted_masters[r].slaves.empty())
-    log_master_commit(r);
+  dout(10) << "committed_leader_slave mds." << from << " on " << r << dendl;
+  ceph_assert(uncommitted_leaders.count(r));
+  uncommitted_leaders[r].slaves.erase(from);
+  if (!uncommitted_leaders[r].recovering && uncommitted_leaders[r].slaves.empty())
+    log_leader_commit(r);
 }
 
-void MDCache::logged_master_update(metareqid_t reqid)
+void MDCache::logged_leader_update(metareqid_t reqid)
 {
-  dout(10) << "logged_master_update " << reqid << dendl;
-  ceph_assert(uncommitted_masters.count(reqid));
-  uncommitted_masters[reqid].safe = true;
-  auto p = pending_masters.find(reqid);
-  if (p != pending_masters.end()) {
-    pending_masters.erase(p);
-    if (pending_masters.empty())
+  dout(10) << "logged_leader_update " << reqid << dendl;
+  ceph_assert(uncommitted_leaders.count(reqid));
+  uncommitted_leaders[reqid].safe = true;
+  auto p = pending_leaders.find(reqid);
+  if (p != pending_leaders.end()) {
+    pending_leaders.erase(p);
+    if (pending_leaders.empty())
       process_delayed_resolve();
   }
 }
 
 /*
- * Master may crash after receiving all slaves' commit acks, but before journalling
+ * Leader may crash after receiving all slaves' commit acks, but before journalling
  * the final commit. Slaves may crash after journalling the slave commit, but before
- * sending commit ack to the master. Commit masters with no uncommitted slave when
+ * sending commit ack to the leader. Commit leaders with no uncommitted slave when
  * resolve finishes.
  */
-void MDCache::finish_committed_masters()
+void MDCache::finish_committed_leaders()
 {
-  for (map<metareqid_t, umaster>::iterator p = uncommitted_masters.begin();
-       p != uncommitted_masters.end();
+  for (map<metareqid_t, uleader>::iterator p = uncommitted_leaders.begin();
+       p != uncommitted_leaders.end();
        ++p) {
     p->second.recovering = false;
     if (!p->second.committing && p->second.slaves.empty()) {
-      dout(10) << "finish_committed_masters " << p->first << dendl;
-      log_master_commit(p->first);
+      dout(10) << "finish_committed_leaders " << p->first << dendl;
+      log_leader_commit(p->first);
     }
   }
 }
@@ -2525,8 +2525,8 @@ void MDCache::finish_committed_masters()
  * at end of resolve... we must journal a commit|abort for all slave
  * updates, before moving on.
  *
- * this is so that the master can safely journal ECommitted on ops it
- * masters when it reaches up:active (all other recovering nodes must
+ * this is so that the leader can safely journal ECommitted on ops it
+ * leaders when it reaches up:active (all other recovering nodes must
  * complete resolve before that happens).
*/ struct C_MDC_SlaveCommit : public MDCacheLogContext { @@ -2793,8 +2793,8 @@ void MDCache::send_slave_resolves() for (map::iterator p = uncommitted_slaves.begin(); p != uncommitted_slaves.end(); ++p) { - mds_rank_t master = p->second.master; - auto &m = resolves[master]; + mds_rank_t leader = p->second.leader; + auto &m = resolves[leader]; if (!m) m = make_message(); m->add_slave_request(p->first, false); } @@ -2810,11 +2810,11 @@ void MDCache::send_slave_resolves() if (!mdr->slave_did_prepare() && !mdr->committing) { continue; } - mds_rank_t master = mdr->slave_to_mds; - if (resolve_set.count(master) || is_ambiguous_slave_update(p->first, master)) { + mds_rank_t leader = mdr->slave_to_mds; + if (resolve_set.count(leader) || is_ambiguous_slave_update(p->first, leader)) { dout(10) << " including uncommitted " << *mdr << dendl; - if (!resolves.count(master)) - resolves[master] = make_message(); + if (!resolves.count(leader)) + resolves[leader] = make_message(); if (!mdr->committing && mdr->has_more() && mdr->more()->is_inode_exporter) { // re-send cap exports @@ -2824,9 +2824,9 @@ void MDCache::send_slave_resolves() bufferlist bl; MMDSResolve::slave_inode_cap inode_caps(in->ino(), cap_map); encode(inode_caps, bl); - resolves[master]->add_slave_request(p->first, bl); + resolves[leader]->add_slave_request(p->first, bl); } else { - resolves[master]->add_slave_request(p->first, mdr->committing); + resolves[leader]->add_slave_request(p->first, mdr->committing); } } } @@ -3039,29 +3039,29 @@ void MDCache::handle_mds_failure(mds_rank_t who) } // failed node is slave? - if (mdr->is_master() && !mdr->committing) { + if (mdr->is_leader() && !mdr->committing) { if (mdr->more()->srcdn_auth_mds == who) { - dout(10) << " master request " << *mdr << " waiting for rename srcdn's auth mds." + dout(10) << " leader request " << *mdr << " waiting for rename srcdn's auth mds." << who << " to recover" << dendl; ceph_assert(mdr->more()->witnessed.count(who) == 0); if (mdr->more()->is_ambiguous_auth) mdr->clear_ambiguous_auth(); // rename srcdn's auth mds failed, all witnesses will rollback mdr->more()->witnessed.clear(); - pending_masters.erase(p->first); + pending_leaders.erase(p->first); } if (mdr->more()->witnessed.count(who)) { mds_rank_t srcdn_auth = mdr->more()->srcdn_auth_mds; if (srcdn_auth >= 0 && mdr->more()->waiting_on_slave.count(srcdn_auth)) { - dout(10) << " master request " << *mdr << " waiting for rename srcdn's auth mds." + dout(10) << " leader request " << *mdr << " waiting for rename srcdn's auth mds." << mdr->more()->srcdn_auth_mds << " to reply" << dendl; // waiting for the slave (rename srcdn's auth mds), delay sending resolve ack // until either the request is committing or the slave also fails. ceph_assert(mdr->more()->waiting_on_slave.size() == 1); - pending_masters.insert(p->first); + pending_leaders.insert(p->first); } else { - dout(10) << " master request " << *mdr << " no longer witnessed by slave mds." + dout(10) << " leader request " << *mdr << " no longer witnessed by slave mds." << who << " to recover" << dendl; if (srcdn_auth >= 0) ceph_assert(mdr->more()->witnessed.count(srcdn_auth) == 0); @@ -3072,7 +3072,7 @@ void MDCache::handle_mds_failure(mds_rank_t who) } if (mdr->more()->waiting_on_slave.count(who)) { - dout(10) << " master request " << *mdr << " waiting for slave mds." << who + dout(10) << " leader request " << *mdr << " waiting for slave mds." 
<< who << " to recover" << dendl; // retry request when peer recovers mdr->more()->waiting_on_slave.erase(who); @@ -3085,8 +3085,8 @@ void MDCache::handle_mds_failure(mds_rank_t who) } } - for (map::iterator p = uncommitted_masters.begin(); - p != uncommitted_masters.end(); + for (map::iterator p = uncommitted_leaders.begin(); + p != uncommitted_leaders.end(); ++p) { // The failed MDS may have already committed the slave update if (p->second.slaves.count(who)) { @@ -3229,13 +3229,13 @@ void MDCache::handle_resolve(const cref_t &m) if (!m->slave_requests.empty()) { if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) { for (auto p = m->slave_requests.begin(); p != m->slave_requests.end(); ++p) { - if (uncommitted_masters.count(p->first) && !uncommitted_masters[p->first].safe) { + if (uncommitted_leaders.count(p->first) && !uncommitted_leaders[p->first].safe) { ceph_assert(!p->second.committing); - pending_masters.insert(p->first); + pending_leaders.insert(p->first); } } - if (!pending_masters.empty()) { + if (!pending_leaders.empty()) { dout(10) << " still have pending updates, delay processing slave resolve" << dendl; delayed_resolve[from] = m; return; @@ -3244,7 +3244,7 @@ void MDCache::handle_resolve(const cref_t &m) auto ack = make_message(); for (const auto &p : m->slave_requests) { - if (uncommitted_masters.count(p.first)) { //mds->sessionmap.have_completed_request(p.first)) { + if (uncommitted_leaders.count(p.first)) { //mds->sessionmap.have_completed_request(p.first)) { // COMMIT if (p.second.committing) { // already committing, waiting for the OP_COMMITTED slave reply @@ -3253,7 +3253,7 @@ void MDCache::handle_resolve(const cref_t &m) dout(10) << " ambiguous slave request " << p << " will COMMIT" << dendl; ack->add_commit(p.first); } - uncommitted_masters[p.first].slaves.insert(from); // wait for slave OP_COMMITTED before we log ECommitted + uncommitted_leaders[p.first].slaves.insert(from); // wait for slave OP_COMMITTED before we log ECommitted if (p.second.inode_caps.length() > 0) { // slave wants to export caps (rename) @@ -3417,7 +3417,7 @@ void MDCache::maybe_resolve_finish() dout(10) << "maybe_resolve_finish got all resolves+resolve_acks, done." 
<< dendl; disambiguate_my_imports(); - finish_committed_masters(); + finish_committed_leaders(); if (resolve_done) { ceph_assert(mds->is_resolve()); @@ -3467,7 +3467,7 @@ void MDCache::handle_resolve_ack(const cref_t &ack) finish_uncommitted_slave(p.first); } else { MDRequestRef mdr = request_get(p.first); - // information about master imported caps + // information about leader imported caps if (p.second.length() > 0) mdr->more()->inode_import.share(p.second); @@ -3517,7 +3517,7 @@ void MDCache::handle_resolve_ack(const cref_t &ack) } } -void MDCache::add_uncommitted_slave(metareqid_t reqid, LogSegment *ls, mds_rank_t master, MDSlaveUpdate *su) +void MDCache::add_uncommitted_slave(metareqid_t reqid, LogSegment *ls, mds_rank_t leader, MDSlaveUpdate *su) { auto const &ret = uncommitted_slaves.emplace(std::piecewise_construct, std::forward_as_tuple(reqid), @@ -3525,7 +3525,7 @@ void MDCache::add_uncommitted_slave(metareqid_t reqid, LogSegment *ls, mds_rank_ ceph_assert(ret.second); ls->uncommitted_slaves.insert(reqid); uslave &u = ret.first->second; - u.master = master; + u.leader = leader; u.ls = ls; u.su = su; if (su == nullptr) { @@ -3592,13 +3592,13 @@ void MDCache::finish_uncommitted_slave(metareqid_t reqid, bool assert_exist) delete su; } -MDSlaveUpdate* MDCache::get_uncommitted_slave(metareqid_t reqid, mds_rank_t master) +MDSlaveUpdate* MDCache::get_uncommitted_slave(metareqid_t reqid, mds_rank_t leader) { MDSlaveUpdate* su = nullptr; auto it = uncommitted_slaves.find(reqid); if (it != uncommitted_slaves.end() && - it->second.master == master) { + it->second.leader == leader) { su = it->second.su; } return su; diff --git a/src/mds/MDCache.h b/src/mds/MDCache.h index 11fe5b9e3a6b3..bfa22d688283f 100644 --- a/src/mds/MDCache.h +++ b/src/mds/MDCache.h @@ -425,30 +425,30 @@ class MDCache { snapid_t follows=CEPH_NOSNAP); // slaves - void add_uncommitted_master(metareqid_t reqid, LogSegment *ls, set &slaves, bool safe=false) { - uncommitted_masters[reqid].ls = ls; - uncommitted_masters[reqid].slaves = slaves; - uncommitted_masters[reqid].safe = safe; + void add_uncommitted_leader(metareqid_t reqid, LogSegment *ls, set &slaves, bool safe=false) { + uncommitted_leaders[reqid].ls = ls; + uncommitted_leaders[reqid].slaves = slaves; + uncommitted_leaders[reqid].safe = safe; } - void wait_for_uncommitted_master(metareqid_t reqid, MDSContext *c) { - uncommitted_masters[reqid].waiters.push_back(c); + void wait_for_uncommitted_leader(metareqid_t reqid, MDSContext *c) { + uncommitted_leaders[reqid].waiters.push_back(c); } - bool have_uncommitted_master(metareqid_t reqid, mds_rank_t from) { - auto p = uncommitted_masters.find(reqid); - return p != uncommitted_masters.end() && p->second.slaves.count(from) > 0; + bool have_uncommitted_leader(metareqid_t reqid, mds_rank_t from) { + auto p = uncommitted_leaders.find(reqid); + return p != uncommitted_leaders.end() && p->second.slaves.count(from) > 0; } - void log_master_commit(metareqid_t reqid); - void logged_master_update(metareqid_t reqid); - void _logged_master_commit(metareqid_t reqid); - void committed_master_slave(metareqid_t r, mds_rank_t from); - void finish_committed_masters(); + void log_leader_commit(metareqid_t reqid); + void logged_leader_update(metareqid_t reqid); + void _logged_leader_commit(metareqid_t reqid); + void committed_leader_slave(metareqid_t r, mds_rank_t from); + void finish_committed_leaders(); void add_uncommitted_slave(metareqid_t reqid, LogSegment*, mds_rank_t, MDSlaveUpdate *su=nullptr); void 
wait_for_uncommitted_slave(metareqid_t reqid, MDSContext *c) { uncommitted_slaves.at(reqid).waiters.push_back(c); } void finish_uncommitted_slave(metareqid_t reqid, bool assert_exist=true); - MDSlaveUpdate* get_uncommitted_slave(metareqid_t reqid, mds_rank_t master); + MDSlaveUpdate* get_uncommitted_slave(metareqid_t reqid, mds_rank_t leader); void _logged_slave_commit(mds_rank_t from, metareqid_t reqid); void set_recovery_set(set& s); @@ -458,15 +458,15 @@ class MDCache { void recalc_auth_bits(bool replay); void remove_inode_recursive(CInode *in); - bool is_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) { - auto p = ambiguous_slave_updates.find(master); + bool is_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) { + auto p = ambiguous_slave_updates.find(leader); return p != ambiguous_slave_updates.end() && p->second.count(reqid); } - void add_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) { - ambiguous_slave_updates[master].insert(reqid); + void add_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) { + ambiguous_slave_updates[leader].insert(reqid); } - void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) { - auto p = ambiguous_slave_updates.find(master); + void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t leader) { + auto p = ambiguous_slave_updates.find(leader); auto q = p->second.find(reqid); ceph_assert(q != p->second.end()); p->second.erase(q); @@ -474,8 +474,8 @@ class MDCache { ambiguous_slave_updates.erase(p); } - void add_rollback(metareqid_t reqid, mds_rank_t master) { - resolve_need_rollback[reqid] = master; + void add_rollback(metareqid_t reqid, mds_rank_t leader) { + resolve_need_rollback[reqid] = leader; } void finish_rollback(metareqid_t reqid, MDRequestRef& mdr); @@ -943,7 +943,7 @@ class MDCache { void repair_dirfrag_stats(CDir *dir); void upgrade_inode_snaprealm(CInode *in); - // my master + // my leader MDSRank *mds; // -- my cache -- @@ -1004,9 +1004,9 @@ class MDCache { double export_ephemeral_random_max = 0.0; protected: - // track master requests whose slaves haven't acknowledged commit - struct umaster { - umaster() {} + // track leader requests whose slaves haven't acknowledged commit + struct uleader { + uleader() {} set slaves; LogSegment *ls = nullptr; MDSContext::vec waiters; @@ -1017,7 +1017,7 @@ class MDCache { struct uslave { uslave() {} - mds_rank_t master; + mds_rank_t leader; LogSegment *ls = nullptr; MDSlaveUpdate *su = nullptr; MDSContext::vec waiters; @@ -1168,10 +1168,10 @@ class MDCache { map uncommitted_slave_rename_olddir; // slave: preserve the non-auth dir until seeing commit. map uncommitted_slave_unlink; // slave: preserve the unlinked inode until seeing commit. - map uncommitted_masters; // master: req -> slave set + map uncommitted_leaders; // leader: req -> slave set map uncommitted_slaves; // slave: preserve the slave req until seeing commit. 
- set pending_masters; + set pending_leaders; map > ambiguous_slave_updates; bool resolves_pending = false; diff --git a/src/mds/Mutation.cc b/src/mds/Mutation.cc index 16f1a7383ab60..db4fb206422aa 100644 --- a/src/mds/Mutation.cc +++ b/src/mds/Mutation.cc @@ -510,9 +510,9 @@ void MDRequestImpl::_dump(Formatter *f) const f->close_section(); // client_info } else if (is_slave() && _slave_request) { // replies go to an existing mdr f->dump_string("op_type", "slave_request"); - f->open_object_section("master_info"); - f->dump_stream("master") << _slave_request->get_orig_source(); - f->close_section(); // master_info + f->open_object_section("leader_info"); + f->dump_stream("leader") << _slave_request->get_orig_source(); + f->close_section(); // leader_info f->open_object_section("request_info"); f->dump_int("attempt", _slave_request->get_attempt()); diff --git a/src/mds/Mutation.h b/src/mds/Mutation.h index c1f9aff4375eb..84138010f2811 100644 --- a/src/mds/Mutation.h +++ b/src/mds/Mutation.h @@ -159,7 +159,7 @@ public: return lock == last_locked; } - bool is_master() const { return slave_to_mds == MDS_RANK_NONE; } + bool is_leader() const { return slave_to_mds == MDS_RANK_NONE; } bool is_slave() const { return slave_to_mds != MDS_RANK_NONE; } client_t get_client() const { @@ -399,7 +399,7 @@ struct MDRequestImpl : public MutationImpl { Session *session = nullptr; elist::item item_session_request; // if not on list, op is aborted. - // -- i am a client (master) request + // -- i am a client (leader) request ceph::cref_t client_request; // client request (if any) // tree and depth info of path1 and path2 diff --git a/src/mds/Server.cc b/src/mds/Server.cc index 7e65e9957c5e3..2ea24f7c46f0b 100644 --- a/src/mds/Server.cc +++ b/src/mds/Server.cc @@ -2706,7 +2706,7 @@ void Server::handle_slave_request_reply(const cref_t &m) if (!mds->is_clientreplay() && !mds->is_active() && !mds->is_stopping()) { metareqid_t r = m->get_reqid(); - if (!mdcache->have_uncommitted_master(r, from)) { + if (!mdcache->have_uncommitted_leader(r, from)) { dout(10) << "handle_slave_request_reply ignoring slave reply from mds." 
<< from << " reqid " << r << dendl; return; @@ -2718,7 +2718,7 @@ void Server::handle_slave_request_reply(const cref_t &m) if (m->get_op() == MMDSSlaveRequest::OP_COMMITTED) { metareqid_t r = m->get_reqid(); - mdcache->committed_master_slave(r, from); + mdcache->committed_leader_slave(r, from); return; } @@ -2732,7 +2732,7 @@ void Server::handle_slave_request_reply(const cref_t &m) switch (m->get_op()) { case MMDSSlaveRequest::OP_XLOCKACK: { - // identify lock, master request + // identify lock, leader request SimpleLock *lock = mds->locker->get_lock(m->get_lock_type(), m->get_object_info()); mdr->more()->slaves.insert(from); @@ -2751,7 +2751,7 @@ void Server::handle_slave_request_reply(const cref_t &m) case MMDSSlaveRequest::OP_WRLOCKACK: { - // identify lock, master request + // identify lock, leader request SimpleLock *lock = mds->locker->get_lock(m->get_lock_type(), m->get_object_info()); mdr->more()->slaves.insert(from); @@ -6483,7 +6483,7 @@ void Server::_link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targ dout(20) << " noting uncommitted_slaves " << mdr->more()->witnessed << dendl; le->reqid = mdr->reqid; le->had_slaves = true; - mdcache->add_uncommitted_master(mdr->reqid, mdr->ls, mdr->more()->witnessed); + mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed); } if (inc) { @@ -6514,7 +6514,7 @@ void Server::_link_remote_finish(MDRequestRef& mdr, bool inc, ceph_assert(g_conf()->mds_kill_link_at != 3); if (!mdr->more()->witnessed.empty()) - mdcache->logged_master_update(mdr->reqid); + mdcache->logged_leader_update(mdr->reqid); if (inc) { // link the new dentry @@ -6594,7 +6594,7 @@ void Server::handle_slave_link_prep(MDRequestRef& mdr) mdr->auth_pin(targeti); - //ceph_abort(); // test hack: make sure master can handle a slave that fails to prepare... + //ceph_abort(); // test hack: make sure leader can handle a slave that fails to prepare... 
ceph_assert(g_conf()->mds_kill_link_at != 5); // journal it @@ -6765,7 +6765,7 @@ struct C_MDS_LoggedLinkRollback : public ServerLogContext { } }; -void Server::do_link_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& mdr) +void Server::do_link_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr) { link_rollback rollback; auto p = rbl.cbegin(); @@ -6778,7 +6778,7 @@ void Server::do_link_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& ceph_assert(g_conf()->mds_kill_link_at != 9); - mdcache->add_rollback(rollback.reqid, master); // need to finish this update before resolve finishes + mdcache->add_rollback(rollback.reqid, leader); // need to finish this update before resolve finishes ceph_assert(mdr || mds->is_resolve()); MutationRef mut(new MutationImpl(nullptr, utime_t(), rollback.reqid)); @@ -6835,7 +6835,7 @@ void Server::do_link_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& } // journal it - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_link_rollback", rollback.reqid, master, + ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_link_rollback", rollback.reqid, leader, ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::LINK); mdlog->start_entry(le); le->commit.add_dir_context(parent); @@ -7085,7 +7085,7 @@ void Server::_unlink_local(MDRequestRef& mdr, CDentry *dn, CDentry *straydn) dout(20) << " noting uncommitted_slaves " << mdr->more()->witnessed << dendl; le->reqid = mdr->reqid; le->had_slaves = true; - mdcache->add_uncommitted_master(mdr->reqid, mdr->ls, mdr->more()->witnessed); + mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed); } if (straydn) { @@ -7164,7 +7164,7 @@ void Server::_unlink_local_finish(MDRequestRef& mdr, dout(10) << "_unlink_local_finish " << *dn << dendl; if (!mdr->more()->witnessed.empty()) - mdcache->logged_master_update(mdr->reqid); + mdcache->logged_leader_update(mdr->reqid); CInode *strayin = NULL; bool hadrealm = false; @@ -7459,7 +7459,7 @@ struct C_MDS_LoggedRmdirRollback : public ServerLogContext { } }; -void Server::do_rmdir_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& mdr) +void Server::do_rmdir_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr) { // unlink the other rollback methods, the rmdir rollback is only // needed to record the subtree changes in the journal for inode @@ -7471,7 +7471,7 @@ void Server::do_rmdir_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& decode(rollback, p); dout(10) << "do_rmdir_rollback on " << rollback.reqid << dendl; - mdcache->add_rollback(rollback.reqid, master); // need to finish this update before resolve finishes + mdcache->add_rollback(rollback.reqid, leader); // need to finish this update before resolve finishes ceph_assert(mdr || mds->is_resolve()); CDir *dir = mdcache->get_dirfrag(rollback.src_dir); @@ -7510,7 +7510,7 @@ void Server::do_rmdir_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& } - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rmdir_rollback", rollback.reqid, master, + ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rmdir_rollback", rollback.reqid, leader, ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::RMDIR); mdlog->start_entry(le); @@ -7635,15 +7635,15 @@ public: /** handle_client_rename * - * rename master is the destdn auth. this is because cached inodes + * rename leader is the destdn auth. this is because cached inodes * must remain connected. 
thus, any replica of srci, must also * replicate destdn, and possibly straydn, so that srci (and * destdn->inode) remain connected during the rename. * - * to do this, we freeze srci, then master (destdn auth) verifies that + * to do this, we freeze srci, then leader (destdn auth) verifies that * all other nodes have also replciated destdn and straydn. note that * destdn replicas need not also replicate srci. this only works when - * destdn is master. + * destdn is leader. * * This function takes responsibility for the passed mdr. */ @@ -8046,7 +8046,7 @@ void Server::handle_client_rename(MDRequestRef& mdr) le->reqid = mdr->reqid; le->had_slaves = true; - mdcache->add_uncommitted_master(mdr->reqid, mdr->ls, mdr->more()->witnessed); + mdcache->add_uncommitted_leader(mdr->reqid, mdr->ls, mdr->more()->witnessed); // no need to send frozen auth pin to recovring auth MDS of srci mdr->more()->is_remote_frozen_authpin = false; } @@ -8068,7 +8068,7 @@ void Server::_rename_finish(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, dout(10) << "_rename_finish " << *mdr << dendl; if (!mdr->more()->witnessed.empty()) - mdcache->logged_master_update(mdr->reqid); + mdcache->logged_leader_update(mdr->reqid); // apply _rename_apply(mdr, srcdn, destdn, straydn); @@ -8886,7 +8886,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr) mdr->set_ambiguous_auth(srcdnl->get_inode()); // just mark the source inode as ambiguous auth if more than two MDS are involved. - // the master will send another OP_RENAMEPREP slave request later. + // the leader will send another OP_RENAMEPREP slave request later. if (mdr->slave_request->witnesses.size() > 1) { dout(10) << " set srci ambiguous auth; providing srcdn replica list" << dendl; reply_witness = true; @@ -9175,7 +9175,7 @@ void Server::_commit_slave_rename(MDRequestRef& mdr, int r, // abort // rollback_bl may be empty if we froze the inode but had to provide an expanded - // witness list from the master, and they failed before we tried prep again. + // witness list from the leader, and they failed before we tried prep again. 
if (mdr->more()->rollback_bl.length()) { if (mdr->more()->is_inode_exporter) { dout(10) << " reversing inode export of " << *in << dendl; @@ -9189,7 +9189,7 @@ void Server::_commit_slave_rename(MDRequestRef& mdr, int r, } else do_rename_rollback(mdr->more()->rollback_bl, mdr->slave_to_mds, mdr, true); } else { - dout(10) << " rollback_bl empty, not rollback back rename (master failed after getting extra witnesses?)" << dendl; + dout(10) << " rollback_bl empty, not rollback back rename (leader failed after getting extra witnesses?)" << dendl; // singleauth if (mdr->more()->is_ambiguous_auth) { if (srcdn->is_auth()) @@ -9257,7 +9257,7 @@ struct C_MDS_LoggedRenameRollback : public ServerLogContext { } }; -void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& mdr, +void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr, bool finish_mdr) { rename_rollback rollback; @@ -9266,7 +9266,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef dout(10) << "do_rename_rollback on " << rollback.reqid << dendl; // need to finish this update before sending resolve to claim the subtree - mdcache->add_rollback(rollback.reqid, master); + mdcache->add_rollback(rollback.reqid, leader); MutationRef mut(new MutationImpl(nullptr, utime_t(), rollback.reqid)); mut->ls = mds->mdlog->get_current_segment(); @@ -9492,7 +9492,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef dout(0) << " desti back to " << *target << dendl; // journal it - ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rename_rollback", rollback.reqid, master, + ESlaveUpdate *le = new ESlaveUpdate(mdlog, "slave_rename_rollback", rollback.reqid, leader, ESlaveUpdate::OP_ROLLBACK, ESlaveUpdate::RENAME); mdlog->start_entry(le); diff --git a/src/mds/Server.h b/src/mds/Server.h index 054090e18ddc8..1ac34ce899ba1 100644 --- a/src/mds/Server.h +++ b/src/mds/Server.h @@ -247,7 +247,7 @@ public: void _commit_slave_link(MDRequestRef& mdr, int r, CInode *targeti); void _committed_slave(MDRequestRef& mdr); // use for rename, too void handle_slave_link_prep_ack(MDRequestRef& mdr, const cref_t &m); - void do_link_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& mdr); + void do_link_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr); void _link_rollback_finish(MutationRef& mut, MDRequestRef& mdr, map>& split); @@ -264,7 +264,7 @@ public: void _logged_slave_rmdir(MDRequestRef& mdr, CDentry *srcdn, CDentry *straydn); void _commit_slave_rmdir(MDRequestRef& mdr, int r, CDentry *straydn); void handle_slave_rmdir_prep_ack(MDRequestRef& mdr, const cref_t &ack); - void do_rmdir_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& mdr); + void do_rmdir_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr); void _rmdir_rollback_finish(MDRequestRef& mdr, metareqid_t reqid, CDentry *dn, CDentry *straydn); // rename @@ -300,7 +300,7 @@ public: void _slave_rename_sessions_flushed(MDRequestRef& mdr); void _logged_slave_rename(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn); void _commit_slave_rename(MDRequestRef& mdr, int r, CDentry *srcdn, CDentry *destdn, CDentry *straydn); - void do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef& mdr, bool finish_mdr=false); + void do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr, bool finish_mdr=false); void _rename_rollback_finish(MutationRef& mut, MDRequestRef& mdr, CDentry *srcdn, version_t srcdnpv, CDentry *destdn, 
CDentry *staydn, map> splits[2], bool finish_mdr); diff --git a/src/mds/SimpleLock.h b/src/mds/SimpleLock.h index bef9dd76dc9a4..bf8b4cee90b99 100644 --- a/src/mds/SimpleLock.h +++ b/src/mds/SimpleLock.h @@ -415,7 +415,7 @@ public: void put_xlock() { ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE || state == LOCK_XLOCKSNAP || state == LOCK_LOCK_XLOCK || - state == LOCK_LOCK || /* if we are a master of a slave */ + state == LOCK_LOCK || /* if we are a leader of a slave */ is_locallock()); --more()->num_xlock; parent->put(MDSCacheObject::PIN_LOCK); diff --git a/src/mds/events/ESlaveUpdate.h b/src/mds/events/ESlaveUpdate.h index 674166fbdfe89..e33692c49a5c2 100644 --- a/src/mds/events/ESlaveUpdate.h +++ b/src/mds/events/ESlaveUpdate.h @@ -22,7 +22,7 @@ /* * rollback records, for remote/slave updates, which may need to be manually - * rolled back during journal replay. (or while active if master fails, but in + * rolled back during journal replay. (or while active if leader fails, but in * that case these records aren't needed.) */ struct link_rollback { @@ -120,16 +120,16 @@ public: bufferlist rollback; string type; metareqid_t reqid; - mds_rank_t master; + mds_rank_t leader; __u8 op; // prepare, commit, abort __u8 origop; // link | rename - ESlaveUpdate() : LogEvent(EVENT_SLAVEUPDATE), master(0), op(0), origop(0) { } - ESlaveUpdate(MDLog *mdlog, std::string_view s, metareqid_t ri, int mastermds, int o, int oo) : + ESlaveUpdate() : LogEvent(EVENT_SLAVEUPDATE), leader(0), op(0), origop(0) { } + ESlaveUpdate(MDLog *mdlog, std::string_view s, metareqid_t ri, int leadermds, int o, int oo) : LogEvent(EVENT_SLAVEUPDATE), type(s), reqid(ri), - master(mastermds), + leader(leadermds), op(o), origop(oo) { } void print(ostream& out) const override { @@ -139,7 +139,7 @@ public: if (origop == LINK) out << " link"; if (origop == RENAME) out << " rename"; out << " " << reqid; - out << " for mds." << master; + out << " for mds." 
<< leader; out << commit; } diff --git a/src/mds/journal.cc b/src/mds/journal.cc index 42139d96f2002..57eb0aca03ef0 100644 --- a/src/mds/journal.cc +++ b/src/mds/journal.cc @@ -111,19 +111,19 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o } } - // master ops with possibly uncommitted slaves - for (set::iterator p = uncommitted_masters.begin(); - p != uncommitted_masters.end(); + // leader ops with possibly uncommitted slaves + for (set::iterator p = uncommitted_leaders.begin(); + p != uncommitted_leaders.end(); ++p) { dout(10) << "try_to_expire waiting for slaves to ack commit on " << *p << dendl; - mds->mdcache->wait_for_uncommitted_master(*p, gather_bld.new_sub()); + mds->mdcache->wait_for_uncommitted_leader(*p, gather_bld.new_sub()); } // slave ops that haven't been committed for (set::iterator p = uncommitted_slaves.begin(); p != uncommitted_slaves.end(); ++p) { - dout(10) << "try_to_expire waiting for master to ack OP_FINISH on " << *p << dendl; + dout(10) << "try_to_expire waiting for leader to ack OP_FINISH on " << *p << dendl; mds->mdcache->wait_for_uncommitted_slave(*p, gather_bld.new_sub()); } @@ -2121,7 +2121,7 @@ void EUpdate::update_segment() segment->sessionmapv = cmapv; if (had_slaves) - segment->uncommitted_masters.insert(reqid); + segment->uncommitted_leaders.insert(reqid); } void EUpdate::replay(MDSRank *mds) @@ -2131,9 +2131,9 @@ void EUpdate::replay(MDSRank *mds) if (had_slaves) { dout(10) << "EUpdate.replay " << reqid << " had slaves, expecting a matching ECommitted" << dendl; - segment->uncommitted_masters.insert(reqid); + segment->uncommitted_leaders.insert(reqid); set slaves; - mds->mdcache->add_uncommitted_master(reqid, segment, slaves, true); + mds->mdcache->add_uncommitted_leader(reqid, segment, slaves, true); } if (client_map.length()) { @@ -2237,10 +2237,10 @@ void EOpen::replay(MDSRank *mds) void ECommitted::replay(MDSRank *mds) { - if (mds->mdcache->uncommitted_masters.count(reqid)) { + if (mds->mdcache->uncommitted_leaders.count(reqid)) { dout(10) << "ECommitted.replay " << reqid << dendl; - mds->mdcache->uncommitted_masters[reqid].ls->uncommitted_masters.erase(reqid); - mds->mdcache->uncommitted_masters.erase(reqid); + mds->mdcache->uncommitted_leaders[reqid].ls->uncommitted_leaders.erase(reqid); + mds->mdcache->uncommitted_leaders.erase(reqid); } else { dout(10) << "ECommitted.replay " << reqid << " -- didn't see original op" << dendl; } @@ -2475,7 +2475,7 @@ void ESlaveUpdate::encode(bufferlist &bl, uint64_t features) const encode(stamp, bl); encode(type, bl); encode(reqid, bl); - encode(master, bl); + encode(leader, bl); encode(op, bl); encode(origop, bl); encode(commit, bl, features); @@ -2490,7 +2490,7 @@ void ESlaveUpdate::decode(bufferlist::const_iterator &bl) decode(stamp, bl); decode(type, bl); decode(reqid, bl); - decode(master, bl); + decode(leader, bl); decode(op, bl); decode(origop, bl); decode(commit, bl); @@ -2507,7 +2507,7 @@ void ESlaveUpdate::dump(Formatter *f) const f->dump_int("rollback length", rollback.length()); f->dump_string("type", type); f->dump_stream("metareqid") << reqid; - f->dump_int("master", master); + f->dump_int("leader", leader); f->dump_int("op", op); f->dump_int("original op", origop); } @@ -2523,20 +2523,20 @@ void ESlaveUpdate::replay(MDSRank *mds) auto&& segment = get_segment(); switch (op) { case ESlaveUpdate::OP_PREPARE: - dout(10) << "ESlaveUpdate.replay prepare " << reqid << " for mds." << master + dout(10) << "ESlaveUpdate.replay prepare " << reqid << " for mds." 
 << leader
             << ": applying commit, saving rollback info" << dendl;
     su = new MDSlaveUpdate(origop, rollback);
     commit.replay(mds, segment, su);
-    mds->mdcache->add_uncommitted_slave(reqid, segment, master, su);
+    mds->mdcache->add_uncommitted_slave(reqid, segment, leader, su);
     break;
 
   case ESlaveUpdate::OP_COMMIT:
-    dout(10) << "ESlaveUpdate.replay commit " << reqid << " for mds." << master << dendl;
+    dout(10) << "ESlaveUpdate.replay commit " << reqid << " for mds." << leader << dendl;
     mds->mdcache->finish_uncommitted_slave(reqid, false);
     break;
 
   case ESlaveUpdate::OP_ROLLBACK:
-    dout(10) << "ESlaveUpdate.replay abort " << reqid << " for mds." << master
+    dout(10) << "ESlaveUpdate.replay abort " << reqid << " for mds." << leader
             << ": applying rollback commit blob" << dendl;
     commit.replay(mds, segment);
     mds->mdcache->finish_uncommitted_slave(reqid, false);
-- 
2.39.5