qa: add health warning test for insufficient standbys
author     Patrick Donnelly <pdonnell@redhat.com>
           Wed, 10 May 2017 03:31:29 +0000 (23:31 -0400)
committer  Patrick Donnelly <pdonnell@redhat.com>
           Wed, 10 May 2017 15:05:09 +0000 (11:05 -0400)
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/cephfs/test_failover.py

index 85ef2b509fba427547eaac07616a15e2011c5e98..faefec458d6107bece8dca1bad283e149e13035a 100644
@@ -94,6 +94,47 @@ class TestFailover(CephFSTestCase):
         with self.assertRaises(CommandFailedError):
             self.mounts[0].mount()
 
+    def test_standby_count_wanted(self):
+        """
+        That cluster health warnings are generated when insufficient standby daemons are available.
+        """
+
+        # Need all my standbys up as well as the active daemons
+        self.wait_for_daemon_start()
+
+        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))
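+        # The mon needs roughly mds_beacon_grace seconds to notice a dead MDS,
+        # hence the grace*2 waits below.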
+
+        standbys = self.mds_cluster.get_standby_daemons()
+        self.assertGreaterEqual(len(standbys), 1)
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
+
+        # Kill a standby and check for warning
+        victim = standbys.pop()
+        self.fs.mds_stop(victim)
+        log.info("waiting for insufficient standby daemon warning")
+        self.wait_for_health("insufficient standby daemons available", grace*2)
+
+        # restart the standby, see that it becomes a standby again, check health clears
+        self.fs.mds_restart(victim)
+        self.wait_until_true(
+            lambda: victim in self.mds_cluster.get_standby_daemons(),
+            timeout=60  # Approximately long enough for MDS to start and mon to notice
+        )
+        self.wait_for_health_clear(timeout=30)
+
+        # Set standby_count_wanted to one more than the number of standbys currently available
+        standbys = self.mds_cluster.get_standby_daemons()
+        self.assertGreaterEqual(len(standbys), 1)
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
+        log.info("waiting for insufficient standby daemon warning")
+        self.wait_for_health("insufficient standby daemons available", grace*2)
+
+        # Set it to 0
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
+        self.wait_for_health_clear(timeout=30)
+
 
 class TestStandbyReplay(CephFSTestCase):
     MDSS_REQUIRED = 4
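
For reference, roughly the same scenario can be driven by hand against a running cluster. The sketch below is not part of the commit: it assumes the `ceph` CLI is on the PATH and a filesystem named "cephfs" (both assumptions), and it mirrors what wait_for_health() does inside the CephFSTestCase framework by polling `ceph health detail` for the warning string the test waits on.

    #!/usr/bin/env python
    # Hypothetical standalone sketch, not part of this commit.
    import subprocess
    import time

    def wait_for_health_warning(pattern, timeout):
        """Poll `ceph health detail` until `pattern` appears or `timeout` elapses."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.check_output(['ceph', 'health', 'detail']).decode()
            if pattern in out:
                return
            time.sleep(5)
        raise RuntimeError("timed out waiting for health warning: %s" % pattern)

    # Ask for more standbys than the cluster has (the fs name "cephfs" and the
    # count of 2 are assumptions), then expect the warning the test checks for.
    subprocess.check_call(['ceph', 'fs', 'set', 'cephfs', 'standby_count_wanted', '2'])
    wait_for_health_warning("insufficient standby daemons available", timeout=120)

    # Setting standby_count_wanted back to 0 disables the check, as the test does at the end.
    subprocess.check_call(['ceph', 'fs', 'set', 'cephfs', 'standby_count_wanted', '0'])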