git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/tasks/cephfs: don't create a new CephManager if there is one in the context
author Leonid Usov <leonid.usov@ibm.com>
Thu, 25 Apr 2024 16:41:57 +0000 (19:41 +0300)
committer Leonid Usov <leonid.usov@ibm.com>
Thu, 25 Apr 2024 18:15:05 +0000 (21:15 +0300)
Signed-off-by: Leonid Usov <leonid.usov@ibm.com>
qa/tasks/cephfs/filesystem.py
qa/tasks/vstart_runner.py

index 4a3eafdf0a54b3c6bad340107881281666df1818..190e365c5cbce0bd57d1bed202920d8a5a328ca8 100644 (file)
@@ -233,10 +233,15 @@ class CephClusterBase(RunCephCmd):
         (result,) = self._ctx.cluster.only(first_mon).remotes.keys()
         return result
 
-    def __init__(self, ctx) -> None:
+    def __init__(self, ctx, cluster_name='ceph') -> None:
         self._ctx = ctx
-        self.mon_manager = CephManager(self.admin_remote, ctx=ctx,
-                                       logger=log.getChild('ceph_manager'))
+        try:
+            manager = ctx.managers[cluster_name]
+        except (AttributeError, KeyError) as e:
+            log.warning(f"Couldn't get a manager for cluster {cluster_name} from the context; exception: {e}")
+            manager = CephManager(self.admin_remote, ctx=ctx,
+                                  logger=log.getChild('ceph_manager'))
+        self.mon_manager = manager
 
     def get_config(self, key, service_type=None):
         """
@@ -308,8 +313,8 @@ class MDSClusterBase(CephClusterBase):
     as a separate instance outside of your (multiple) Filesystem instances.
     """
 
-    def __init__(self, ctx):
-        super(MDSClusterBase, self).__init__(ctx)
+    def __init__(self, ctx, cluster_name='ceph'):
+        super(MDSClusterBase, self).__init__(ctx, cluster_name=cluster_name)
 
     @property
     def mds_ids(self):
@@ -534,13 +539,13 @@ class FilesystemBase(MDSClusterBase):
     This object is for driving a CephFS filesystem.  The MDS daemons driven by
     MDSCluster may be shared with other Filesystems.
     """
-    def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False,
+    def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False, cluster_name='ceph',
                  **kwargs):
         """
         kwargs accepts recover: bool, allow_dangerous_metadata_overlay: bool,
         yes_i_really_really_mean_it: bool and fs_ops: list[str]
         """
-        super(FilesystemBase, self).__init__(ctx)
+        super(FilesystemBase, self).__init__(ctx, cluster_name=cluster_name)
 
         self.name = name
         self.id = None
index 262d385cc251a876fb903fbb718fb5a6cedaa0ab..c1648dd7ccc55a9a03522e97110e46b2d9f4afb5 100644 (file)
@@ -783,10 +783,9 @@ tasks.cephfs.fuse_mount.FuseMount = LocalFuseMount
 # XXX: this class has nothing to do with the Ceph daemon (ceph-mgr) of
 # the same name.
 class LocalCephManager(CephManager):
-    def __init__(self, ctx=None):
+    def __init__(self, ctx=None, cluster_name=None):
         self.ctx = ctx
-        if self.ctx:
-            self.cluster = self.ctx.config['cluster']
+        self.cluster = cluster_name
 
         # Deliberately skip parent init, only inheriting from it to get
         # util methods like osd_dump that sit on top of raw_cluster_cmd
@@ -832,10 +831,10 @@ class LocalCephManager(CephManager):
 
 
 class LocalCephCluster(tasks.cephfs.filesystem.CephClusterBase):
-    def __init__(self, ctx):
+    def __init__(self, ctx, cluster_name='ceph'):
         # Deliberately skip calling CephCluster constructor
         self._ctx = ctx
-        self.mon_manager = LocalCephManager(ctx=self._ctx)
+        self.mon_manager = LocalCephManager(ctx=self._ctx, cluster_name=cluster_name)
         self._conf = defaultdict(dict)
 
     @property
@@ -904,8 +903,8 @@ class LocalCephCluster(tasks.cephfs.filesystem.CephClusterBase):
 tasks.cephfs.filesystem.CephCluster = LocalCephCluster
 
 class LocalMDSCluster(LocalCephCluster, tasks.cephfs.filesystem.MDSClusterBase):
-    def __init__(self, ctx):
-        LocalCephCluster.__init__(self, ctx)
+    def __init__(self, ctx, cluster_name='ceph'):
+        LocalCephCluster.__init__(self, ctx, cluster_name=cluster_name)
         # Deliberately skip calling MDSCluster constructor
         self._mds_ids = ctx.daemons.daemons['ceph.mds'].keys()
         log.debug("Discovered MDS IDs: {0}".format(self._mds_ids))
@@ -945,10 +944,10 @@ class LocalMgrCluster(LocalCephCluster, tasks.mgr.mgr_test_case.MgrClusterBase):
 tasks.mgr.mgr_test_case.MgrCluster = LocalMgrCluster
 
 class LocalFilesystem(LocalMDSCluster, tasks.cephfs.filesystem.FilesystemBase):
-    def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False,
+    def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False, cluster_name='ceph',
                  **kwargs):
         # Deliberately skip calling Filesystem constructor
-        LocalMDSCluster.__init__(self, ctx)
+        LocalMDSCluster.__init__(self, ctx, cluster_name=cluster_name)
 
         self.id = None
         self.name = name
@@ -959,7 +958,7 @@ class LocalFilesystem(LocalMDSCluster, tasks.cephfs.filesystem.FilesystemBase):
         self.fs_config = fs_config
         self.ec_profile = fs_config.get('ec_profile')
 
-        self.mon_manager = LocalCephManager(ctx=self._ctx)
+        self.mon_manager = LocalCephManager(ctx=self._ctx, cluster_name=cluster_name)
 
         self.client_remote = LocalRemote()
 
@@ -1031,7 +1030,7 @@ class LocalContext(object):
         self.summary = get_summary("vstart_runner", None)
         if not hasattr(self, 'managers'):
             self.managers = {}
-        self.managers[self.config['cluster']] = LocalCephManager(ctx=self)
+        self.managers[cluster_name] = LocalCephManager(ctx=self, cluster_name=cluster_name)
 
         # Shove some LocalDaemons into the ctx.daemons DaemonGroup instance so that any
         # tests that want to look these up via ctx can do so.