git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/tasks/cephfs: add test for discontinuous mdsmap 22977/head
authorYan, Zheng <zyan@redhat.com>
Thu, 19 Jul 2018 04:36:31 +0000 (12:36 +0800)
committerYan, Zheng <zyan@redhat.com>
Thu, 19 Jul 2018 06:34:29 +0000 (14:34 +0800)
Signed-off-by: "Yan, Zheng" <zyan@redhat.com>
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_failover.py
qa/tasks/vstart_runner.py

index 4b3cffa9ad075622f22f80373acd7567a69f8f32..b2837c2eee82da6592557539052a5c6bc5e85111 100644 (file)
@@ -269,6 +269,12 @@ class MDSCluster(CephCluster):
 
         self._one_or_all(mds_id, _fail_restart)
 
+    def mds_signal(self, mds_id, sig, silent=False):
+        """
+        signal a MDS daemon
+        """
+        self.mds_daemons[mds_id].signal(sig, silent);
+
     def newfs(self, name='cephfs', create=True):
         return Filesystem(self._ctx, name=name, create=create)
 
index 97f848ba1b970386e5c13ad9ca387e35de36a04e..48a4327058388b2c03dcbe1029150f395e404a7a 100644 (file)
@@ -1,3 +1,5 @@
+import time
+import signal
 import json
 import logging
 from unittest import case, SkipTest
@@ -283,8 +285,52 @@ class TestFailover(CephFSTestCase):
         self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
         self.wait_for_health_clear(timeout=30)
 
+    def test_discontinuous_mdsmap(self):
+        """
+        That discontinuous mdsmap does not affect failover.
+        See http://tracker.ceph.com/issues/24856.
+        """
+        mds_ids = sorted(self.mds_cluster.mds_ids)
+        mds_a, mds_b = mds_ids[0:2]
+        # Assign mds to fixed ranks. To prevent standby mds from replacing frozen mds
+        rank = 0;
+        for mds_id in mds_ids:
+            self.set_conf("mds.{0}".format(mds_id), "mds_standby_for_rank", str(rank))
+            rank += 1
+        self.mds_cluster.mds_restart()
+        self.fs.wait_for_daemons()
+
+        self.fs.set_max_mds(2)
+        self.fs.wait_for_state('up:active', rank=1)
+
+        self.mount_a.umount_wait()
+
+        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
+        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))
 
+        # Freeze mds_a
+        self.mds_cluster.mds_signal(mds_a, signal.SIGSTOP)
+        self.wait_until_true(
+            lambda: "laggy_since" in self.fs.status().get_mds(mds_a),
+            timeout=grace * 2
+        )
+
+        self.mds_cluster.mds_restart(mds_b)
+        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)
+
+        # Make sure of mds_a's monitor connection gets reset
+        time.sleep(monc_timeout * 2)
+
+        # Unfreeze mds_a, it will get discontinuous mdsmap
+        self.mds_cluster.mds_signal(mds_a, signal.SIGCONT)
+        self.wait_until_true(
+            lambda: "laggy_since" not in self.fs.status().get_mds(mds_a),
+            timeout=grace * 2
+        )
 
+        # mds.b will be stuck at 'reconnect' state if snapserver gets confused
+        # by discontinuous mdsmap
+        self.fs.wait_for_state('up:active', rank=1, timeout=30)
 
 class TestStandbyReplay(CephFSTestCase):
     MDSS_REQUIRED = 4
index 87e45e0f840bc75b225930ea2fbb9907d585eac8..0462514b1bbe7013fda1ba244fd39e0fde8c019e 100644 (file)
@@ -373,6 +373,14 @@ class LocalDaemon(object):
 
         self.proc = self.controller.run([os.path.join(BIN_PREFIX, "./ceph-{0}".format(self.daemon_type)), "-i", self.daemon_id])
 
+    def signal(self, sig, silent=False):
+        if not self.running():
+            raise RuntimeError("Can't send signal to non-running daemon")
+
+        os.kill(self._get_pid(), sig)
+        if not silent:
+            log.info("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))
+
 
 def safe_kill(pid):
     """