From 6d27be9241a6e2eb014954eec45ac73a85b367f7 Mon Sep 17 00:00:00 2001
From: Rishabh Dave
Date: Mon, 13 Mar 2023 18:35:50 +0530
Subject: [PATCH] qa/cephfs: create CephManager instance in CephFSTestCase

To run a Ceph command conveniently, run_cluster_cmd(), raw_cluster_cmd()
or raw_cluster_cmd_result() must be called. These methods live in class
CephManager, which in turn is available only when an instance of
Filesystem, MDSCluster, CephCluster or MgrCluster has been initialized.

Having an instance of CephManager directly on CephFSTestCase provides
easy access to these methods. For example, in CephFS tests writing
"self.mon_manager.raw_cluster_cmd()" instead of
"self.mds_cluster.mon_manager.raw_cluster_cmd()" will suffice.

This commit provides a basis for the upcoming commits in this patch
series. With the next patches, running a Ceph command will be simplified
further: just writing self.run_ceph_cmd() will suffice for running a
CephFS command.

Signed-off-by: Rishabh Dave
(cherry picked from commit 0c0041005ee127e3a488b111181c1da8035d199c)
---
 qa/tasks/cephfs/cephfs_test_case.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index d2688929cc37f..ee52dab8755da 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -55,7 +55,7 @@ class MountDetails():
         mntobj.hostfs_mntpt = self.hostfs_mntpt
 
 
-class CephFSTestCase(CephTestCase):
+class CephFSTestCase(CephTestCase, RunCephCmd):
     """
     Test case for Ceph FS, requires caller to populate Filesystem and Mounts,
     into the fs, mount_a, mount_b class attributes (setting mount_b is optional)
@@ -113,8 +113,20 @@ class CephFSTestCase(CephTestCase):
         for addr, blocklisted_at in blacklist.items():
             self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)
 
+    def _init_mon_manager(self):
+        # if vstart_runner.py has invoked this code
+        if 'Local' in str(type(self.ceph_cluster)):
+            from tasks.vstart_runner import LocalCephManager
+            self.mon_manager = LocalCephManager(ctx=self.ctx)
+        # else teuthology has invoked this code
+        else:
+            from tasks.ceph_manager import CephManager
+            self.mon_manager = CephManager(self.ceph_cluster.admin_remote,
+                ctx=self.ctx, logger=log.getChild('ceph_manager'))
+
     def setUp(self):
         super(CephFSTestCase, self).setUp()
+        self._init_mon_manager()
 
         self.config_set('mon', 'mon_allow_pool_delete', True)
 
-- 
2.39.5
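
A minimal sketch of the usage this patch enables, for illustration only;
the test class TestCmdAccess and the choice of the "fs status" command
are hypothetical examples, not part of the patch. Only mon_manager,
_init_mon_manager() and raw_cluster_cmd() come from the change above:

    from tasks.cephfs.cephfs_test_case import CephFSTestCase

    class TestCmdAccess(CephFSTestCase):  # hypothetical test, for illustration
        def test_raw_cluster_cmd_access(self):
            # Before this patch: reach CephManager through an intermediate
            # object (here self.mds_cluster) that must already exist.
            out_old = self.mds_cluster.mon_manager.raw_cluster_cmd('fs', 'status')

            # After this patch: CephFSTestCase carries its own mon_manager,
            # created in setUp() via _init_mon_manager(), so no intermediate
            # object is needed.
            out_new = self.mon_manager.raw_cluster_cmd('fs', 'status')

            # Both paths reach the same monitor, so the output matches.
            self.assertEqual(out_old, out_new)

The patch only shortens the access path rather than changing behaviour,
which is why the later patches in the series can collapse it further
into self.run_ceph_cmd().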