]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
cephadm: Check for pool existence for iscsi and NFS
authorMatthew Oliver <moliver@suse.com>
Thu, 23 Apr 2020 01:07:19 +0000 (11:07 +1000)
committerSebastian Wagner <sebastian.wagner@suse.com>
Mon, 4 May 2020 16:03:02 +0000 (18:03 +0200)
Currently both iscsi and NFS require pools to be specified when they are
deployed. However, we don't actually check that these pools exist, leading
to broken containers.

This patch uses the rados client that is part of the MgrModule to check
that the specified pool exists. As we need to check in 2 different
daemons, a helper method:

   def _check_pool_exists(self, pool, service_name):

was added to `cephadm/module.py`.

Fixes: https://tracker.ceph.com/issues/45161
Signed-off-by: Matthew Oliver <moliver@suse.com>
(cherry picked from commit d3de8697e6d389c514d26f5eb62675ac061a56c6)

src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/test_cephadm.py

index 751460b67faa5bc56c097f1eb0083fc016796579..89af9d14b044bc3e4d125e15dc64447422698897 100644 (file)
@@ -2521,6 +2521,12 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
                     spec.service_name(), spec, e))
         return r
 
+    def _check_pool_exists(self, pool, service_name):
+        logger.info(f'Checking pool "{pool}" exists for service {service_name}')
+        if not self.rados.pool_exists(pool):
+            raise OrchestratorError(f'Cannot find pool "{pool}" for '
+                                    f'service {service_name}')
+
     def _check_daemons(self):
         # get monmap mtime so we can refresh configs when mons change
         monmap = self.get('mon_map')
@@ -2852,6 +2858,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
         return self._add_daemon('iscsi', spec, self._create_iscsi, self._config_iscsi)
 
     def _config_iscsi(self, spec):
+        self._check_pool_exists(spec.pool, spec.service_name())
+
         logger.info('Saving service %s spec with placement %s' % (
             spec.service_name(), spec.placement.pretty_str()))
         self.spec_store.save(spec)
@@ -2944,6 +2952,8 @@ api_secure = {api_secure}
         return self._add_daemon('nfs', spec, self._create_nfs, self._config_nfs)
 
     def _config_nfs(self, spec):
+        self._check_pool_exists(spec.pool, spec.service_name())
+
         logger.info('Saving service %s spec with placement %s' % (
             spec.service_name(), spec.placement.pretty_str()))
         self.spec_store.save(spec)
index f13680cab26441c0da6b8ef1c8ffae586d12f530..a8bd0ae0f93b413e95ba4c611326f02f1c80d4fe 100644 (file)
@@ -438,6 +438,7 @@ class TestCephadm(object):
             assert_rm_service(cephadm_module, 'nfs.name')
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
     def test_iscsi(self, cephadm_module):
         with self._with_host(cephadm_module, 'test'):
             ps = PlacementSpec(hosts=['test'], count=1)