]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/cephadm: use spec to grab nvmeof service pool and group
authorAdam King <adking@redhat.com>
Wed, 22 May 2024 15:18:14 +0000 (11:18 -0400)
committerAlexander Indenbaum <aindenba@redhat.com>
Thu, 20 Nov 2025 08:55:27 +0000 (10:55 +0200)
Rather than trying to set it as a class attribute during
the config function, which doesn't work consistently.

Resolves: rhbz#2282364

Signed-off-by: Adam King <adking@redhat.com>
src/pybind/mgr/cephadm/services/nvmeof.py

index f23f64b69f6552d6642bb9038b05055ff295894a..068219e559832ab017e484ff86ffe03caf9c7ab3 100644 (file)
@@ -22,11 +22,9 @@ class NvmeofService(CephService):
         if not spec.pool:
             self.mgr.log.error(f"nvmeof config pool should be defined: {spec.pool}")
             return False
-        self.pool = spec.pool
         if spec.group is None:
             self.mgr.log.error(f"nvmeof config group should not be None: {spec.group}")
             return False
-        self.group = spec.group
         # unlike some other config funcs, if this fails we can't
         # go forward deploying the daemon and then retry later. For
         # that reason we make no attempt to catch the OrchestratorError
@@ -73,17 +71,13 @@ class NvmeofService(CephService):
         """ Overrides the daemon_check_post to add nvmeof gateways safely
         """
         self.mgr.log.info(f"nvmeof daemon_check_post {daemon_descrs}")
-        # Assert configured
-        if not self.pool or self.group is None:
-            self.mgr.log.error(f"nvmeof daemon_check_post: invalid pool {self.pool} or group {self.group}")
-        if not hasattr(self, 'pool'):
-            err_msg = ('Trying to daemon_check_post nvmeof but no pool is defined')
-            logger.error(err_msg)
-            raise OrchestratorError(err_msg)
-        if not hasattr(self, 'group') or self.group is None:
-            err_msg = ('Trying to daemon_check_post nvmeof but no group is defined')
-            logger.error(err_msg)
-            raise OrchestratorError(err_msg)
+        spec = cast(NvmeofServiceSpec,
+                    self.mgr.spec_store.all_specs.get(daemon_descrs[0].service_name(), None))
+        if not spec:
+            self.mgr.log.error(f'Failed to find spec for {daemon_descrs[0].name()}')
+            return
+        pool = spec.pool
+        group = spec.group
         for dd in daemon_descrs:
             self.mgr.log.info(f"nvmeof daemon_descr {dd}")
             if dd.daemon_id not in self.gws:
@@ -96,8 +90,8 @@ class NvmeofService(CephService):
             cmd = {
                 'prefix': 'nvme-gw create',
                 'id': name,
-                'group': self.group,
-                'pool': self.pool
+                'group': group,
+                'pool': pool
             }
             self.mgr.log.info(f"create gateway: monitor command {cmd}")
             _, _, err = self.mgr.mon_command(cmd)
@@ -181,18 +175,27 @@ class NvmeofService(CephService):
         if not ret:
             logger.info(f'{daemon.hostname} removed from nvmeof gateways dashboard config')
 
-        # Assert configured
-        assert self.pool
-        assert self.group is not None
-        assert daemon.daemon_id in self.gws
+        spec = cast(NvmeofServiceSpec,
+                    self.mgr.spec_store.all_specs.get(daemon.service_name(), None))
+        if not spec:
+            self.mgr.log.error(f'Failed to find spec for {daemon.name()}')
+            return
+        pool = spec.pool
+        group = spec.group
+
+        if daemon.daemon_id not in self.gws:
+            err_msg = (f'Trying to remove nvmeof but {daemon.daemon_id} '
+                       'not in gws list')
+            logger.error(err_msg)
+            raise OrchestratorError(err_msg)
         name = self.gws[daemon.daemon_id]
         self.gws.pop(daemon.daemon_id)
         # Notify monitor about this gateway deletion
         cmd = {
             'prefix': 'nvme-gw delete',
             'id': name,
-            'group': self.group,
-            'pool': self.pool
+            'group': group,
+            'pool': pool
         }
         self.mgr.log.info(f"delete gateway: monitor command {cmd}")
         _, _, err = self.mgr.mon_command(cmd)
@@ -203,14 +206,12 @@ class NvmeofService(CephService):
         """Make sure no zombie gateway is left behind
         """
         # Assert configured
-        if not hasattr(self, 'pool'):
-            err_msg = ('Trying to purge nvmeof but no pool is defined')
-            logger.error(err_msg)
-            raise OrchestratorError(err_msg)
-        if not hasattr(self, 'group') or self.group is None:
-            err_msg = ('Trying to purge nvmeof but no group is defined')
-            logger.error(err_msg)
-            raise OrchestratorError(err_msg)
+        spec = cast(NvmeofServiceSpec, self.mgr.spec_store.all_specs.get(service_name, None))
+        if not spec:
+            self.mgr.log.error(f'Failed to find spec for {service_name}')
+            return
+        pool = spec.pool
+        group = spec.group
         for daemon_id in self.gws:
             name = self.gws[daemon_id]
             self.gws.pop(daemon_id)
@@ -218,12 +219,12 @@ class NvmeofService(CephService):
             cmd = {
                 'prefix': 'nvme-gw delete',
                 'id': name,
-                'group': self.group,
-                'pool': self.pool
+                'group': group,
+                'pool': pool
             }
             self.mgr.log.info(f"purge delete gateway: monitor command {cmd}")
             _, _, err = self.mgr.mon_command(cmd)
             if err:
-                err_msg = (f"Unable to send monitor command {cmd}, error {err}")
+                err_msg = (f'Monitor command "{cmd}" failed: {err}')
                 logger.error(err_msg)
                 raise OrchestratorError(err_msg)