git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa: add tests for cephfs-top 49154/head
authorJos Collin <jcollin@redhat.com>
Wed, 1 Feb 2023 12:14:47 +0000 (17:44 +0530)
committerJos Collin <jcollin@redhat.com>
Thu, 16 Feb 2023 09:43:19 +0000 (15:13 +0530)
* Updated the existing tests
* Added new qa tests for --dump and --dumpfs options
* Updated the cluster configuration to 2 mds, 2 clients

Fixes: https://tracker.ceph.com/issues/57014
Signed-off-by: Jos Collin <jcollin@redhat.com>
qa/suites/fs/top/cluster/1-node.yaml
qa/tasks/cephfs/test_fstop.py

index c5313bb0492a86580a147743fdd48c4f758168d3..48c4996e77f8a167b9ac142145a7ca64e6111ec6 100644 (file)
@@ -1,10 +1,12 @@
 meta:
-- desc: 1 ceph cluster with 1 mon, 1 mgr, 3 osds, 1 mds
+- desc: 1 ceph cluster with 1 mon, 1 mgr, 3 osds, 2 mds, 2 clients
 roles:
 - - mon.a
   - mgr.x
   - mds.a
+  - mds.b
   - osd.0
   - osd.1
   - osd.2
   - client.0
+  - client.1
index 08617807eb665268abb979951c4f6bb08830639f..ed76eaac2cfb47064628fb598c4eaf319724a3ac 100644 (file)
@@ -1,13 +1,48 @@
 import logging
+import json
 
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.exceptions import CommandFailedError
+from teuthology.contextutil import safe_while
 
 log = logging.getLogger(__name__)
 
+
 class TestFSTop(CephFSTestCase):
+    CLIENTS_REQUIRED = 2
+
+    def setUp(self):
+        super(TestFSTop, self).setUp()
+        self._enable_mgr_stats_plugin()
+
+    def tearDown(self):
+        self._disable_mgr_stats_plugin()
+        super(TestFSTop, self).tearDown()
+
+    def _enable_mgr_stats_plugin(self):
+        return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
+
+    def _disable_mgr_stats_plugin(self):
+        return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
+
+    def _fstop_dump(self, *args):
+        return self.mount_a.run_shell(['cephfs-top',
+                                       '--id=admin',
+                                       *args]).stdout.getvalue()
+
+    def _get_metrics(self, verifier_callback, trials, *args):
+        metrics = None
+        done = False
+        with safe_while(sleep=1, tries=trials, action='wait for metrics') as proceed:
+            while proceed():
+                metrics = json.loads(self._fstop_dump(*args))
+                done = verifier_callback(metrics)
+                if done:
+                    break
+        return done, metrics
+
+    # TESTS
     def test_fstop_non_existent_cluster(self):
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
         try:
             self.mount_a.run_shell(['cephfs-top',
                                     '--cluster=hpec',
@@ -17,11 +52,63 @@ class TestFSTop(CephFSTestCase):
             pass
         else:
             raise RuntimeError('expected cephfs-top command to fail.')
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
 
     def test_fstop(self):
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
-        self.mount_a.run_shell(['cephfs-top',
-                                '--id=admin',
-                                '--selftest'])
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
+        try:
+            self.mount_a.run_shell(['cephfs-top',
+                                    '--id=admin',
+                                    '--selftest'])
+        except CommandFailedError:
+            raise RuntimeError('cephfs-top --selftest failed')
+
+    def test_dump(self):
+        """
+        Tests 'cephfs-top --dump' output is valid
+        """
+        def verify_fstop_metrics(metrics):
+            clients = metrics.get(self.fs.name, {})
+            if str(self.mount_a.get_global_id()) in clients and \
+               str(self.mount_b.get_global_id()) in clients:
+                return True
+            return False
+
+        # validate
+        valid, metrics = self._get_metrics(verify_fstop_metrics, 30, '--dump')
+        log.debug("metrics={0}".format(metrics))
+        self.assertTrue(valid)
+
+    def test_dumpfs(self):
+        """
+        Tests 'cephfs-top --dumpfs' output is valid
+        """
+        newfs_name = "cephfs_b"
+
+        def verify_fstop_metrics(metrics):
+            clients = metrics.get(newfs_name, {})
+            if self.fs.name not in metrics and \
+               str(self.mount_b.get_global_id()) in clients:
+                return True
+            return False
+
+        # umount mount_b, mount another filesystem on it and use --dumpfs filter
+        self.mount_b.umount_wait()
+
+        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set", "enable_multiple", "true",
+                                                     "--yes-i-really-mean-it")
+
+        # create a new filesystem
+        fs_b = self.mds_cluster.newfs(name=newfs_name)
+
+        # mount cephfs_b on mount_b
+        self.mount_b.mount_wait(cephfs_name=fs_b.name)
+
+        # validate
+        valid, metrics = self._get_metrics(verify_fstop_metrics, 30,
+                                           '--dumpfs={}'.format(newfs_name))
+        log.debug("metrics={0}".format(metrics))
+
+        # restore mount_b
+        self.mount_b.umount_wait()
+        self.mount_b.mount_wait(cephfs_name=self.fs.name)
+
+        self.assertTrue(valid)