qa/tasks/mgr: move test initialization to setUpClass method
author    Ricardo Dias <rdias@suse.com>
          Wed, 28 Feb 2018 22:11:34 +0000 (22:11 +0000)
committer Ricardo Dias <rdias@suse.com>
          Mon, 5 Mar 2018 13:07:18 +0000 (13:07 +0000)
With this change, we avoid disabling and re-enabling the ceph-mgr
module under test for every test function declared in a test case.
The module under test is now disabled/enabled only once per test case.

Signed-off-by: Ricardo Dias <rdias@suse.com>
qa/tasks/ceph_test_case.py
qa/tasks/mgr/mgr_test_case.py
qa/tasks/mgr/test_dashboard.py
qa/tasks/mgr/test_failover.py
qa/tasks/mgr/test_module_selftest.py
qa/tasks/mgr/test_prometheus.py
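
For reference, the unittest lifecycle this commit exploits: setUpClass
runs once per test case class, while setUp runs before every test
method. A minimal, self-contained sketch of that behavior (illustrative
code, not part of this commit):

    import unittest

    class ExampleTest(unittest.TestCase):
        setup_runs = 0

        @classmethod
        def setUpClass(cls):
            # Runs once for the whole class -- the right place for
            # expensive shared initialization, such as restarting mgr
            # daemons and enabling the module under test.
            cls.setup_runs += 1

        def test_one(self):
            self.assertEqual(self.setup_runs, 1)

        def test_two(self):
            # setUpClass did not run again between test methods.
            self.assertEqual(self.setup_runs, 1)

    if __name__ == "__main__":
        unittest.main()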

diff --git a/qa/tasks/ceph_test_case.py b/qa/tasks/ceph_test_case.py
index 5767df4611de9727763f44a30b004c8d6c20fe4b..1c6bc04ef8a3e94265f3f5b189ebecc9d47db283 100644
@@ -132,7 +132,8 @@ class CephTestCase(unittest.TestCase):
 
         log.debug("wait_until_equal: success")
 
-    def wait_until_true(self, condition, timeout):
+    @classmethod
+    def wait_until_true(cls, condition, timeout):
         period = 5
         elapsed = 0
         while True:
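
The hunk above is truncated at the top of the polling loop. For
context, a plausible shape of the whole helper after this change (a
sketch consistent with the visible fragment, not a verbatim copy of
qa/tasks/ceph_test_case.py; CephTestCaseSketch is an illustrative
stand-in for CephTestCase):

    import logging
    import time

    log = logging.getLogger(__name__)

    class CephTestCaseSketch(object):
        @classmethod
        def wait_until_true(cls, condition, timeout):
            # Poll `condition` every `period` seconds until it returns
            # True; give up with an error once `timeout` seconds pass.
            period = 5
            elapsed = 0
            while True:
                if condition():
                    log.debug("wait_until_true: success in {0}s".format(elapsed))
                    return
                if elapsed >= timeout:
                    raise RuntimeError("Timed out after {0}s".format(elapsed))
                log.debug("wait_until_true: waiting ({0}s so far)...".format(elapsed))
                time.sleep(period)
                elapsed += period

Converting the helper to a classmethod is what allows setup_mgrs(),
itself a classmethod in the next file, to call it without an instance.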
diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py
index cc0222b487d5d1b35480933b7bbe96e58dc1ab45..ea9d6a3b15d43c8b58da87cbae4cca9aa998a6e6 100644
@@ -63,81 +63,86 @@ class MgrCluster(CephCluster):
 class MgrTestCase(CephTestCase):
     MGRS_REQUIRED = 1
 
-    def setUp(self):
-        super(MgrTestCase, self).setUp()
-
-        # The test runner should have populated this
-        assert self.mgr_cluster is not None
-
-        if len(self.mgr_cluster.mgr_ids) < self.MGRS_REQUIRED:
-            raise case.SkipTest("Only have {0} manager daemons, "
-                                "{1} are required".format(
-                len(self.mgr_cluster.mgr_ids), self.MGRS_REQUIRED))
-
+    @classmethod
+    def setup_mgrs(cls):
         # Stop all the daemons
-        for daemon in self.mgr_cluster.mgr_daemons.values():
+        for daemon in cls.mgr_cluster.mgr_daemons.values():
             daemon.stop()
 
-        for mgr_id in self.mgr_cluster.mgr_ids:
-            self.mgr_cluster.mgr_fail(mgr_id)
+        for mgr_id in cls.mgr_cluster.mgr_ids:
+            cls.mgr_cluster.mgr_fail(mgr_id)
 
         # Unload all non-default plugins
-        loaded = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
+        loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                    "mgr", "module", "ls"))['enabled_modules']
         unload_modules = set(loaded) - {"status", "restful"}
 
         for m in unload_modules:
-            self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                 "mgr", "module", "disable", m)
 
         # Start all the daemons
-        for daemon in self.mgr_cluster.mgr_daemons.values():
+        for daemon in cls.mgr_cluster.mgr_daemons.values():
             daemon.restart()
 
         # Wait for an active to come up
-        self.wait_until_true(lambda: self.mgr_cluster.get_active_id() != "",
+        cls.wait_until_true(lambda: cls.mgr_cluster.get_active_id() != "",
                              timeout=20)
 
-        expect_standbys = set(self.mgr_cluster.mgr_ids) \
-                          - {self.mgr_cluster.get_active_id()}
-        self.wait_until_true(
-            lambda: set(self.mgr_cluster.get_standby_ids()) == expect_standbys,
+        expect_standbys = set(cls.mgr_cluster.mgr_ids) \
+                          - {cls.mgr_cluster.get_active_id()}
+        cls.wait_until_true(
+            lambda: set(cls.mgr_cluster.get_standby_ids()) == expect_standbys,
             timeout=20)
 
-    def _load_module(self, module_name):
-        loaded = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
+    @classmethod
+    def setUpClass(cls):
+        # The test runner should have populated this
+        assert cls.mgr_cluster is not None
+
+        if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
+            raise case.SkipTest("Only have {0} manager daemons, "
+                                "{1} are required".format(
+                len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
+
+        cls.setup_mgrs()
+
+    @classmethod
+    def _load_module(cls, module_name):
+        loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                    "mgr", "module", "ls"))['enabled_modules']
         if module_name in loaded:
             # The enable command is idempotent, but our wait for a restart
             # isn't, so let's return now if it's already loaded
             return
 
-        initial_gid = self.mgr_cluster.get_mgr_map()['active_gid']
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable",
+        initial_gid = cls.mgr_cluster.get_mgr_map()['active_gid']
+        cls.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable",
                                          module_name)
 
         # Wait for the module to load
         def has_restarted():
-            mgr_map = self.mgr_cluster.get_mgr_map()
+            mgr_map = cls.mgr_cluster.get_mgr_map()
             done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
             if done:
                 log.info("Restarted after module load (new active {0}/{1})".format(
                     mgr_map['active_name'] , mgr_map['active_gid']))
             return done
-        self.wait_until_true(has_restarted, timeout=30)
+        cls.wait_until_true(has_restarted, timeout=30)
 
 
-    def _get_uri(self, service_name):
+    @classmethod
+    def _get_uri(cls, service_name):
         # Little dict hack so that I can assign into this from
         # the get_or_none function
         mgr_map = {'x': None}
 
         def _get_or_none():
-            mgr_map['x'] = self.mgr_cluster.get_mgr_map()
+            mgr_map['x'] = cls.mgr_cluster.get_mgr_map()
             result = mgr_map['x']['services'].get(service_name, None)
             return result
 
-        self.wait_until_true(lambda: _get_or_none() is not None, 30)
+        cls.wait_until_true(lambda: _get_or_none() is not None, 30)
 
         uri = mgr_map['x']['services'][service_name]
 
@@ -147,8 +152,8 @@ class MgrTestCase(CephTestCase):
 
         return uri
 
-
-    def _assign_ports(self, module_name, config_name, min_port=7789):
+    @classmethod
+    def _assign_ports(cls, module_name, config_name, min_port=7789):
         """
         To avoid the need to run lots of hosts in teuthology tests to
         get different URLs per mgr, we will hand out different ports
@@ -160,27 +165,27 @@ class MgrTestCase(CephTestCase):
         # Start handing out ports well above Ceph's range.
         assign_port = min_port
 
-        for mgr_id in self.mgr_cluster.mgr_ids:
-            self.mgr_cluster.mgr_stop(mgr_id)
-            self.mgr_cluster.mgr_fail(mgr_id)
+        for mgr_id in cls.mgr_cluster.mgr_ids:
+            cls.mgr_cluster.mgr_stop(mgr_id)
+            cls.mgr_cluster.mgr_fail(mgr_id)
 
-        for mgr_id in self.mgr_cluster.mgr_ids:
+        for mgr_id in cls.mgr_cluster.mgr_ids:
             log.info("Using port {0} for {1} on mgr.{2}".format(
                 assign_port, module_name, mgr_id
             ))
-            self.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
-                                                       config_name,
-                                                       str(assign_port))
+            cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
+                                                      config_name,
+                                                      str(assign_port))
             assign_port += 1
 
-        for mgr_id in self.mgr_cluster.mgr_ids:
-            self.mgr_cluster.mgr_restart(mgr_id)
+        for mgr_id in cls.mgr_cluster.mgr_ids:
+            cls.mgr_cluster.mgr_restart(mgr_id)
 
         def is_available():
-            mgr_map = self.mgr_cluster.get_mgr_map()
+            mgr_map = cls.mgr_cluster.get_mgr_map()
             done = mgr_map['available']
             if done:
                 log.info("Available after assign ports (new active {0}/{1})".format(
                     mgr_map['active_name'] , mgr_map['active_gid']))
             return done
-        self.wait_until_true(is_available, timeout=30)
+        cls.wait_until_true(is_available, timeout=30)
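
With the initialization factored into the setup_mgrs() classmethod, a
subclass can rely on the inherited setUpClass for a one-time reset per
test case, or call setup_mgrs() from setUp to reset before every test,
as the test modules below do. A hedged usage sketch (TestMyModule and
"mymodule" are illustrative, assuming the MgrTestCase above):

    class TestMyModule(MgrTestCase):
        # The inherited setUpClass skips the whole class if the cluster
        # has fewer manager daemons than this.
        MGRS_REQUIRED = 2

        def setUp(self):
            # Optional: repeat the daemon reset before each test, as
            # test_dashboard.py and the other test modules below do.
            self.setup_mgrs()

        def test_module_loads(self):
            # _load_module enables the module and waits for the active
            # mgr to restart with it loaded.
            self._load_module("mymodule")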
diff --git a/qa/tasks/mgr/test_dashboard.py b/qa/tasks/mgr/test_dashboard.py
index 60c1e334ee44283730cd8b582256d4dea36b3f56..c87b52347608f01ec1a440a3cfd1ae9d766174e8 100644
@@ -12,6 +12,9 @@ log = logging.getLogger(__name__)
 class TestDashboard(MgrTestCase):
     MGRS_REQUIRED = 3
 
+    def setUp(self):
+        self.setup_mgrs()
+
     def test_standby(self):
         self._assign_ports("dashboard", "server_port")
         self._load_module("dashboard")
diff --git a/qa/tasks/mgr/test_failover.py b/qa/tasks/mgr/test_failover.py
index 0dd9cb7e8bacb02e9c522020468ce927401bd716..c66cec1a9d857c978abc986a1b4572d9e0c1400f 100644
@@ -11,6 +11,9 @@ log = logging.getLogger(__name__)
 class TestFailover(MgrTestCase):
     MGRS_REQUIRED = 2
 
+    def setUp(self):
+        self.setup_mgrs()
+
     def test_timeout(self):
         """
         That when an active mgr stops responding, a standby is promoted
diff --git a/qa/tasks/mgr/test_module_selftest.py b/qa/tasks/mgr/test_module_selftest.py
index 7e0a035ec52af9bc74b5cab5eb84b2c6f370ea67..c34cce3b782918fe1fe0c7afa9ba701ee740577c 100644
@@ -19,6 +19,9 @@ class TestModuleSelftest(MgrTestCase):
     """
     MGRS_REQUIRED = 1
 
+    def setUp(self):
+        self.setup_mgrs()
+
     def _selftest_plugin(self, module_name):
         self._load_module(module_name)
 
diff --git a/qa/tasks/mgr/test_prometheus.py b/qa/tasks/mgr/test_prometheus.py
index 8ca18a1c5856fc1f7448ac5477d12a4c80802f28..13e4a0b3ce3d2441a5b0a60568ae52c6d1a43ead 100644
@@ -12,6 +12,9 @@ log = logging.getLogger(__name__)
 class TestPrometheus(MgrTestCase):
     MGRS_REQUIRED = 3
 
+    def setUp(self):
+        self.setup_mgrs()
+
     def test_standby(self):
         self._assign_ports("prometheus", "server_port")
         self._load_module("prometheus")