mgr/volumes: adapt to new orch interface
author    Sebastian Wagner <sebastian.wagner@suse.com>
          Wed, 10 Feb 2021 12:39:44 +0000 (13:39 +0100)
committer Sebastian Wagner <sebastian.wagner@suse.com>
          Mon, 1 Mar 2021 15:50:42 +0000 (16:50 +0100)
Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
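
Every hunk in this commit removes the same call: the explicit mgr._orchestrator_wait([completion]) that the old orchestrator interface required between issuing a request and checking its outcome. As a minimal sketch of the new calling convention, assuming (as the removals imply) that orchestrator.raise_if_exception() now resolves the completion itself, a standalone version of the create_mds() helper from fs_util.py would reduce to:

    import orchestrator
    from ceph.deployment.service_spec import ServiceSpec, PlacementSpec

    def create_mds_sketch(mgr, fs_name, placement):
        # Hypothetical standalone variant of fs_util.py's create_mds();
        # `mgr` is assumed to expose the orchestrator mixin (apply_mds).
        spec = ServiceSpec(service_type='mds', service_id=fs_name,
                           placement=PlacementSpec.from_string(placement))
        completion = mgr.apply_mds(spec)
        # New interface: no mgr._orchestrator_wait([completion]) here;
        # raise_if_exception() is assumed to block on the completion and
        # re-raise any exception stored in it.
        orchestrator.raise_if_exception(completion)

The same two-line pattern (issue the request, then raise_if_exception) replaces the old three-line pattern in every hunk below.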
src/pybind/mgr/volumes/fs/fs_util.py
src/pybind/mgr/volumes/fs/nfs.py
src/pybind/mgr/volumes/fs/operations/volume.py

diff --git a/src/pybind/mgr/volumes/fs/fs_util.py b/src/pybind/mgr/volumes/fs/fs_util.py
index 2adec83f5aafc4d0b6adcbf525bb407c6439576b..7f8734f42e1c58fce9909bf03c512bad77a425a2 100644
--- a/src/pybind/mgr/volumes/fs/fs_util.py
+++ b/src/pybind/mgr/volumes/fs/fs_util.py
@@ -41,7 +41,6 @@ def create_mds(mgr, fs_name, placement):
                                     placement=PlacementSpec.from_string(placement))
     try:
         completion = mgr.apply_mds(spec)
-        mgr._orchestrator_wait([completion])
         orchestrator.raise_if_exception(completion)
     except (ImportError, orchestrator.OrchestratorError):
         return 0, "", "Volume created successfully (no MDS daemons created)"
diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py
index a6f21c137e4e5092ee0b69ca93316755d8a0084b..699e4852316fd76578a070c2ce58242c3fa1fd44 100644
--- a/src/pybind/mgr/volumes/fs/nfs.py
+++ b/src/pybind/mgr/volumes/fs/nfs.py
@@ -27,7 +27,6 @@ def available_clusters(mgr):
     '''
     # TODO check cephadm cluster list with rados pool conf objects
     completion = mgr.describe_service(service_type='nfs')
-    mgr._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return [cluster.spec.service_id for cluster in completion.result
             if cluster.spec.service_id]
@@ -662,7 +661,6 @@ class NFSCluster:
                               pool=self.pool_name, namespace=self.pool_ns,
                               placement=PlacementSpec.from_string(placement))
         completion = self.mgr.apply_nfs(spec)
-        self.mgr._orchestrator_wait([completion])
         orchestrator.raise_if_exception(completion)
 
     def create_empty_rados_obj(self):
@@ -678,7 +676,6 @@ class NFSCluster:
     def _restart_nfs_service(self):
         completion = self.mgr.service_action(action='restart',
                                              service_name='nfs.'+self.cluster_id)
-        self.mgr._orchestrator_wait([completion])
         orchestrator.raise_if_exception(completion)
 
     @cluster_setter
@@ -725,7 +722,6 @@ class NFSCluster:
             if cluster_id in cluster_list:
                 self.mgr.fs_export.delete_all_exports(cluster_id)
                 completion = self.mgr.remove_service('nfs.' + self.cluster_id)
-                self.mgr._orchestrator_wait([completion])
                 orchestrator.raise_if_exception(completion)
                 self.delete_config_obj()
                 return 0, "NFS Cluster Deleted Successfully", ""
@@ -744,7 +740,6 @@ class NFSCluster:
     def _show_nfs_cluster_info(self, cluster_id):
         self._set_cluster_id(cluster_id)
         completion = self.mgr.list_daemons(daemon_type='nfs')
-        self.mgr._orchestrator_wait([completion])
         orchestrator.raise_if_exception(completion)
         host_ip = []
        # Here completion.result is a list of DaemonDescription objects
diff --git a/src/pybind/mgr/volumes/fs/operations/volume.py b/src/pybind/mgr/volumes/fs/operations/volume.py
index 2bc68667ae7fca28d34eb6fa65fa01b33acaea69..e809f264d791930cae44e996fe8eb2304882b8b1 100644
--- a/src/pybind/mgr/volumes/fs/operations/volume.py
+++ b/src/pybind/mgr/volumes/fs/operations/volume.py
@@ -87,7 +87,6 @@ def delete_volume(mgr, volname, metadata_pool, data_pools):
     # Tear down MDS daemons
     try:
         completion = mgr.remove_service('mds.' + volname)
-        mgr._orchestrator_wait([completion])
         orchestrator.raise_if_exception(completion)
     except (ImportError, orchestrator.OrchestratorError):
         log.warning("OrchestratorError, not tearing down MDS daemons")