]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
cephadm: Check for pool existence for iscsi and NFS 34698/head
authorMatthew Oliver <moliver@suse.com>
Thu, 23 Apr 2020 01:07:19 +0000 (11:07 +1000)
committerMatthew Oliver <moliver@suse.com>
Fri, 24 Apr 2020 00:44:54 +0000 (00:44 +0000)
Currently both iscsi and NFS require pools to be specified when they are
deployed. However, we don't actually check that these pools exist, which
leads to broken containers.

This patch uses the rados client that is part of the mgrmodule to check
that the specified pool exists. As we need to check in 2 different
daemons, a helper method:

   def _check_pool_exists(self, pool, service_name):

was added to `cephadm/module.py`.

Fixes: https://tracker.ceph.com/issues/45161
Signed-off-by: Matthew Oliver <moliver@suse.com>
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/test_cephadm.py

index 2a1ff2db6421500a88c169ccc8b63c13f8759a19..20add28eca3fe5d17a1f186bf6528f0dd2291928 100644 (file)
@@ -2567,6 +2567,12 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
                     spec.service_name(), spec, e))
         return r
 
+    def _check_pool_exists(self, pool, service_name):
+        logger.info(f'Checking pool "{pool}" exists for service {service_name}')
+        if not self.rados.pool_exists(pool):
+            raise OrchestratorError(f'Cannot find pool "{pool}" for '
+                                    f'service {service_name}')
+
     def _check_daemons(self):
         # get monmap mtime so we can refresh configs when mons change
         monmap = self.get('mon_map')
@@ -2899,6 +2905,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
         return self._add_daemon('iscsi', spec, self._create_iscsi, self._config_iscsi)
 
     def _config_iscsi(self, spec):
+        self._check_pool_exists(spec.pool, spec.service_name())
+
         logger.info('Saving service %s spec with placement %s' % (
             spec.service_name(), spec.placement.pretty_str()))
         self.spec_store.save(spec)
@@ -2991,6 +2999,8 @@ api_secure = {api_secure}
         return self._add_daemon('nfs', spec, self._create_nfs, self._config_nfs)
 
     def _config_nfs(self, spec):
+        self._check_pool_exists(spec.pool, spec.service_name())
+
         logger.info('Saving service %s spec with placement %s' % (
             spec.service_name(), spec.placement.pretty_str()))
         self.spec_store.save(spec)
index 21cf5afe4f5366f615732581560f6d475eb2a21e..7be3e950608c8ac5bc43a90abdbe0eee61d186d8 100644 (file)
@@ -429,6 +429,7 @@ class TestCephadm(object):
             match_glob(out, "Deployed nfs.name.* on host 'test'")
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
     def test_iscsi(self, cephadm_module):
         with self._with_host(cephadm_module, 'test'):
             ps = PlacementSpec(hosts=['test'], count=1)