From c7de92df601ae7b74634e4b8438d81dfb88c2290 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng"
Date: Mon, 29 Oct 2018 11:03:00 +0800
Subject: [PATCH] mds: handle state change race

In a multi-mds cluster, a recovering mds may receive the mdsmap that
changes its state later than the other mds do. Furthermore, the
recovering mds may receive messages triggered by its state change from
other mds before it receives the corresponding mdsmap.

Fixes: http://tracker.ceph.com/issues/37594
Signed-off-by: "Yan, Zheng"
(cherry picked from commit d3a444473abc98e5ce8121af24538a141a292777)

Conflicts:
    src/mds/Locker.cc
    src/mds/MDCache.cc
    src/mds/MDSRank.h
---
 src/mds/Locker.cc  | 8 +++++++-
 src/mds/MDCache.cc | 8 +++++++-
 src/mds/MDSRank.cc | 1 +
 src/mds/MDSRank.h  | 6 +++++-
 4 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc
index ae95f025743..7bf717ed4b9 100644
--- a/src/mds/Locker.cc
+++ b/src/mds/Locker.cc
@@ -2273,7 +2273,13 @@ void Locker::request_inode_file_caps(CInode *in)
 void Locker::handle_inode_file_caps(MInodeFileCaps *m)
 {
   // nobody should be talking to us during recovery.
-  assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
+  if (mds->get_state() < MDSMap::STATE_CLIENTREPLAY) {
+    if (mds->get_want_state() >= MDSMap::STATE_CLIENTREPLAY) {
+      mds->wait_for_replay(new C_MDS_RetryMessage(mds, m));
+      return;
+    }
+    ceph_assert(!"got unexpected message during recovery");
+  }
 
   // ok
   CInode *in = mdcache->get_inode(m->get_ino());
diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc
index 9ec17384adb..124bf6d3f4b 100644
--- a/src/mds/MDCache.cc
+++ b/src/mds/MDCache.cc
@@ -4729,7 +4729,13 @@ void MDCache::handle_cache_rejoin_strong(MMDSCacheRejoin *strong)
   mds_rank_t from = mds_rank_t(strong->get_source().num());
 
   // only a recovering node will get a strong rejoin.
-  assert(mds->is_rejoin());
+  if (!mds->is_rejoin()) {
+    if (mds->get_want_state() == MDSMap::STATE_REJOIN) {
+      mds->wait_for_rejoin(new C_MDS_RetryMessage(mds, strong));
+      return;
+    }
+    ceph_assert(!"got unexpected rejoin message during recovery");
+  }
 
   // assimilate any potentially dirty scatterlock state
   for (map<inodeno_t, MMDSCacheRejoin::lock_bls>::iterator p = strong->inode_scatterlocks.begin();
diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc
index 9ef53b14f50..00e82041a86 100644
--- a/src/mds/MDSRank.cc
+++ b/src/mds/MDSRank.cc
@@ -1881,6 +1881,7 @@ void MDSRank::rejoin_start()
 {
   dout(1) << "rejoin_start" << dendl;
   mdcache->rejoin_start(new C_MDS_VoidFn(this, &MDSRank::rejoin_done));
+  finish_contexts(g_ceph_context, waiting_for_rejoin);
 }
 void MDSRank::rejoin_done()
 {
diff --git a/src/mds/MDSRank.h b/src/mds/MDSRank.h
index 93fd933b23c..3837536ddfa 100644
--- a/src/mds/MDSRank.h
+++ b/src/mds/MDSRank.h
@@ -273,7 +273,8 @@ class MDSRank {
 
   ceph_tid_t last_tid;    // for mds-initiated requests (e.g. stray rename)
 
-  list<MDSInternalContextBase*> waiting_for_active, waiting_for_replay, waiting_for_reconnect, waiting_for_resolve;
+  list<MDSInternalContextBase*> waiting_for_active, waiting_for_replay, waiting_for_rejoin,
+    waiting_for_reconnect, waiting_for_resolve;
   list<MDSInternalContextBase*> waiting_for_any_client_connection;
   list<Message*> replay_queue;
   map<mds_rank_t, list<MDSInternalContextBase*> > waiting_for_active_peer;
@@ -413,6 +414,9 @@ class MDSRank {
   void wait_for_replay(MDSInternalContextBase *c) {
     waiting_for_replay.push_back(c);
   }
+  void wait_for_rejoin(MDSInternalContextBase *c) {
+    waiting_for_rejoin.push_back(c);
+  }
   void wait_for_reconnect(MDSInternalContextBase *c) {
     waiting_for_reconnect.push_back(c);
   }
-- 
2.47.3
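
Note (not part of the patch): below is a minimal, hypothetical C++ sketch of
the defer-and-retry pattern the patch introduces, for readers unfamiliar with
the ceph MDS code. The MiniRank, State, handle_strong_rejoin and set_want_state
names are invented for this illustration; the real code uses MDSRank,
MDSMap::STATE_*, wait_for_rejoin(), C_MDS_RetryMessage and finish_contexts().

// mini_rank_sketch.cc -- illustrative only, not ceph code.
#include <cassert>
#include <cstdio>
#include <functional>
#include <list>
#include <utility>

enum class State { Replay, Resolve, Reconnect, Rejoin, ClientReplay, Active };

class MiniRank {
  State state = State::Replay;       // state from the last map we processed
  State want_state = State::Replay;  // state we have already asked the mons for
  std::list<std::function<void()>> waiting_for_rejoin;  // parked retries

public:
  void set_want_state(State s) { want_state = s; }

  // Analogue of MDCache::handle_cache_rejoin_strong(): the message is only
  // valid while we are in rejoin.
  void handle_strong_rejoin(std::function<void()> retry) {
    if (state != State::Rejoin) {
      if (want_state == State::Rejoin) {
        // The peer saw the new mdsmap before we did: park a retry instead of
        // asserting (wait_for_rejoin(new C_MDS_RetryMessage(...)) in ceph).
        waiting_for_rejoin.push_back(std::move(retry));
        return;
      }
      assert(!"got unexpected rejoin message during recovery");
    }
    std::puts("processing strong rejoin");
  }

  // Analogue of MDSRank::rejoin_start(): once the local state change really
  // happens, replay whatever was parked (finish_contexts() in ceph).
  void rejoin_start() {
    state = State::Rejoin;
    auto q = std::move(waiting_for_rejoin);
    for (auto &c : q)
      c();
  }
};

int main() {
  MiniRank rank;
  rank.set_want_state(State::Rejoin);   // mons already moved us toward rejoin
  rank.handle_strong_rejoin([&rank] {   // message arrives "too early"
    rank.handle_strong_rejoin([] {});   // the retry re-dispatches the message
  });
  rank.rejoin_start();                  // mdsmap arrives; deferred message runs
  return 0;
}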