mgr/dashboard: fix duplicated rows when creating NFS export.
author     Alfonso Martínez <almartin@redhat.com>
Thu, 22 Apr 2021 12:09:52 +0000 (14:09 +0200)
committer  Alfonso Martínez <almartin@redhat.com>
Thu, 22 Apr 2021 12:09:52 +0000 (14:09 +0200)
- Show an error message if the same pool & namespace is used by more than one cluster.
- Fix error handling when no RGW daemons are found.

Fixes: https://tracker.ceph.com/issues/50440
Signed-off-by: Alfonso Martínez <almartin@redhat.com>
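
For illustration, a minimal sketch of the collision check this commit adds to
Ganesha._get_clusters_locations() in services/ganesha.py (the helper name
check_collision and the sample dict below are hypothetical; the real check
iterates over both the Orchestrator-deployed and user-defined cluster results):

    # Hypothetical stand-alone sketch of the new pool/namespace collision check.
    class NFSException(Exception):
        pass

    def check_collision(known_clusters, pool, namespace):
        # known_clusters: dict mapping cluster_id -> {'pool': ..., 'namespace': ...}
        for cluster_name, cluster_data in known_clusters.items():
            if cluster_data['pool'] == pool and cluster_data['namespace'] == namespace:
                raise NFSException(
                    f'Pool `{pool}` and namespace `{namespace}` are already in use by '
                    f'NFS-Ganesha cluster called `{cluster_name}`. '
                    'Please update GANESHA_RADOS_POOL_NAMESPACE setting.')

    # A second cluster pointing at the same ganesha/ns location is rejected:
    try:
        check_collision({'foo': {'pool': 'ganesha', 'namespace': 'ns'}}, 'ganesha', 'ns')
    except NFSException as e:
        print(e)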
src/pybind/mgr/dashboard/controllers/nfsganesha.py
src/pybind/mgr/dashboard/services/ganesha.py
src/pybind/mgr/dashboard/tests/test_ganesha.py
src/vstart.sh

index b9eb79b0240bf763d21bde36410541372b0fab7b..91d079234e4608b3c9dc7b6a550471ed7812e29d 100644 (file)
@@ -13,7 +13,8 @@ from ..services.cephfs import CephFS
 from ..services.cephx import CephX
 from ..services.exception import DashboardException, serialize_dashboard_exception
 from ..services.ganesha import Ganesha, GaneshaConf, NFSException
-from ..services.rgw_client import RgwClient
+from ..services.rgw_client import NoCredentialsException, \
+    NoRgwDaemonsException, RequestException, RgwClient
 from . import ApiController, BaseController, ControllerDoc, Endpoint, \
     EndpointDoc, ReadPermission, RESTController, Task, UiApiController
 
@@ -307,7 +308,11 @@ class NFSGaneshaUi(BaseController):
     @Endpoint('GET', '/rgw/buckets')
     @ReadPermission
     def buckets(self, user_id=None):
-        return RgwClient.instance(user_id).get_buckets()
+        try:
+            return RgwClient.instance(user_id).get_buckets()
+        except (DashboardException, NoCredentialsException, RequestException,
+                NoRgwDaemonsException):
+            return []
 
     @Endpoint('GET', '/clusters')
     @ReadPermission
index b4af48f415d1849bb97dcba45c943bd2961881b8..9799f837a74f9b976a84362d42edc2a9f525e66c 100644 (file)
@@ -16,7 +16,7 @@ from ..settings import Settings
 from .cephfs import CephFS
 from .cephx import CephX
 from .orchestrator import OrchClient
-from .rgw_client import NoCredentialsException, RequestException, RgwClient
+from .rgw_client import NoCredentialsException, NoRgwDaemonsException, RequestException, RgwClient
 
 logger = logging.getLogger('ganesha')
 
@@ -43,9 +43,6 @@ class Ganesha(object):
         location_list = [loc.strip() for loc in location_list_str.split(
             ",")] if location_list_str else []
         for location in location_list:
-            cluster = None
-            pool = None
-            namespace = None
             if not location:
                 raise NFSException("Invalid Ganesha cluster RADOS "
                                    "[cluster_id:]pool/namespace setting: {}"
@@ -69,6 +66,18 @@ class Ganesha(object):
                 else:
                     pool, namespace = pool_nm.split('/', 1)
 
+            # Check pool/namespace collision.
+            for clusters in [orch_result, result]:
+                for cluster_name, cluster_data in clusters.items():
+                    if cluster_data['pool'] == pool and cluster_data['namespace'] == namespace:
+                        raise NFSException(
+                            f'Pool `{pool}` and namespace `{namespace}` are already in use by '
+                            f"""NFS-Ganesha cluster called `{cluster_name}`{" that is deployed by "
+                            "the Orchestrator" if cluster_data['type'] == ClusterType.ORCHESTRATOR
+                            else ''}. """
+                            'Please update GANESHA_RADOS_POOL_NAMESPACE setting.'
+                        )
+
             if cluster in orch_result:
                 # cephadm might have set same cluster settings, ask the user to remove it.
                 raise NFSException(
@@ -154,7 +163,8 @@ class Ganesha(object):
             if RgwClient.admin_instance().is_service_online() and \
                     RgwClient.admin_instance().is_system_user():
                 result.append("RGW")
-        except (DashboardException, NoCredentialsException, RequestException, LookupError):
+        except (DashboardException, NoCredentialsException, RequestException,
+                NoRgwDaemonsException):
             pass
         return result
 
index 90f45471e8d4142c2d13ff1f919169d21eea6070..f9cc44aa2738bbafa6f3b8d8779611e6aac76466 100644 (file)
@@ -156,22 +156,14 @@ EXPORT
     def _ioctx_set_namespace_mock(self, namespace):
         self.temp_store_namespace = namespace
 
+    @staticmethod
+    def _set_user_defined_clusters_location(clusters_pool_namespace='ganesha/ns'):
+        Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = clusters_pool_namespace
+
     def setUp(self):
         self.mock_kv_store()
 
         self.clusters = {
-            '_default_': {
-                'pool': 'ganesha',
-                'namespace': 'ns',
-                'type': ClusterType.USER,
-                'daemon_conf': None,
-                'daemons': ['nodea', 'nodeb'],
-                'exports': {
-                    1: ['nodea', 'nodeb'],
-                    2: ['nodea'],
-                    3: ['nodea', 'nodeb']  # for new-added export
-                }
-            },
             'foo': {
                 'pool': 'ganesha2',
                 'namespace': 'ns2',
@@ -186,8 +178,8 @@ EXPORT
             }
         }
 
-        Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = '{pool}/{namespace}'.format_map(
-            self.clusters['_default_'])
+        # Unset user-defined location.
+        self._set_user_defined_clusters_location('')
 
         self.temp_store_namespace = None
         self._reset_temp_store()
@@ -860,6 +852,7 @@ EXPORT
             self.io_mock.reset_mock()
 
         # User-defined cluster: reload daemons in the parameter
+        self._set_user_defined_clusters_location()
         conf = GaneshaConf.instance('_default_')
         calls = [mock_call('conf-{}'.format(daemon)) for daemon in ['nodea', 'nodeb']]
         conf.reload_daemons(['nodea', 'nodeb'])
@@ -897,13 +890,14 @@ EXPORT
                 instance.validate(export)
 
     def test_validate_user(self):
+        self._set_user_defined_clusters_location()
         cluster_id = '_default_'
-        cluster_info = self.clusters[cluster_id]
         instance = GaneshaConf.instance(cluster_id)
         export = MagicMock()
 
         # export can be linked to none, partial, or all daemons
-        export_daemons = [[], cluster_info['daemons'][:1], cluster_info['daemons']]
+        fake_daemons = ['nodea', 'nodeb']
+        export_daemons = [[], fake_daemons[:1], fake_daemons]
         for daemons in export_daemons:
             export.daemons = daemons
             instance.validate(export)
@@ -919,25 +913,15 @@ EXPORT
         for cluster_id in cluster_ids:
             self.assertIn(cluster_id, locations)
             cluster = locations.pop(cluster_id)
-            expected_info = self.clusters[cluster_id]
-            self.assertDictEqual(cluster, {key: expected_info[key] for key in [
+            self.assertDictEqual(cluster, {key: cluster[key] for key in [
                 'pool', 'namespace', 'type', 'daemon_conf']})
         self.assertDictEqual(locations, {})
 
     def test_get_cluster_locations(self):
         # pylint: disable=protected-access
-        # There are both Orchstrator cluster and user-defined cluster.
-        locations = ganesha.Ganesha._get_clusters_locations()
-        self._verify_locations(locations, self.clusters.keys())
 
-        # There is only a user-defined cluster.
-        self._mock_orchestrator(False)
-        locations = ganesha.Ganesha._get_clusters_locations()
-        self._verify_locations(locations, ['_default_'])
+        # There is only an Orchestrator cluster.
         self._mock_orchestrator(True)
-
-        # There is only a Orchstrator cluster.
-        Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = ''
         locations = ganesha.Ganesha._get_clusters_locations()
         self._verify_locations(locations, ['foo'])
 
@@ -946,16 +930,38 @@ EXPORT
         with self.assertRaises(NFSException):
             ganesha.Ganesha._get_clusters_locations()
 
+        # There is only a user-defined cluster.
+        self._set_user_defined_clusters_location()
+        self._mock_orchestrator(False)
+        locations = ganesha.Ganesha._get_clusters_locations()
+        self._verify_locations(locations, ['_default_'])
+
+        # There are both Orchestrator cluster and user-defined cluster.
+        self._set_user_defined_clusters_location()
+        self._mock_orchestrator(True)
+        locations = ganesha.Ganesha._get_clusters_locations()
+        self._verify_locations(locations, ['foo', '_default_'])
+
     def test_get_cluster_locations_conflict(self):
         # pylint: disable=protected-access
-        # Raise an exception when there is a user-defined cluster that conlicts
-        # with Orchestrator clusters.
-        old_setting = Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE
-        conflicted_location = ',foo:{pool}/{namespace}'.format_map(
-            self.clusters['foo'])
-        Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = old_setting + conflicted_location
-        with self.assertRaises(NFSException):
+
+        # Pool/namespace collision.
+        self._set_user_defined_clusters_location('ganesha2/ns2')
+        with self.assertRaises(NFSException) as ctx:
+            ganesha.Ganesha._get_clusters_locations()
+        self.assertIn('already in use', str(ctx.exception))
+
+        # Cluster name collision with orch. cluster.
+        self._set_user_defined_clusters_location('foo:ganesha/ns')
+        with self.assertRaises(NFSException) as ctx:
+            ganesha.Ganesha._get_clusters_locations()
+        self.assertIn('Detected a conflicting NFS-Ganesha cluster', str(ctx.exception))
+
+        # Cluster name collision with user-defined cluster.
+        self._set_user_defined_clusters_location('cluster1:ganesha/ns,cluster1:fake-pool/fake-ns')
+        with self.assertRaises(NFSException) as ctx:
             ganesha.Ganesha._get_clusters_locations()
+        self.assertIn('Duplicate Ganesha cluster definition', str(ctx.exception))
 
 
 class NFSGaneshaUiControllerTest(ControllerTestCase):
index 7f0dfef731a3bfd001f70ae6a9139e63a486342f..96a52d869be760c916b5cacad20d5bd416b308b4 100755 (executable)
@@ -1202,15 +1202,11 @@ EOF
 
         prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace
 
-        if $with_mgr_dashboard; then
-            $CEPH_BIN/rados -p $pool_name put "conf-$name" "$ganesha_dir/ganesha-$name.conf"
-        fi
-
         echo "$test_user ganesha daemon $name started on port: $port"
     done
 
     if $with_mgr_dashboard; then
-        ceph_adm dashboard set-ganesha-clusters-rados-pool-namespace $pool_name
+        ceph_adm dashboard set-ganesha-clusters-rados-pool-namespace "$cluster_id:$pool_name/$cluster_id"
     fi
 }
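
For context, the GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE value that vstart.sh now
sets is a comma-separated list of "[cluster_id:]pool[/namespace]" entries, as
parsed by Ganesha._get_clusters_locations(). A rough, illustrative parser for a
single entry (the example values and the empty-namespace fallback are
assumptions, not the exact dashboard code):

    # Illustrative parser for one "[cluster_id:]pool[/namespace]" location entry.
    def parse_location(location):
        # Entries without a "cluster_id:" prefix belong to the '_default_' cluster.
        if ':' in location:
            cluster_id, pool_nm = location.split(':', 1)
        else:
            cluster_id, pool_nm = '_default_', location
        # Assumed fallback: no '/' means an empty namespace.
        if '/' in pool_nm:
            pool, namespace = pool_nm.split('/', 1)
        else:
            pool, namespace = pool_nm, ''
        return cluster_id, pool, namespace

    # Hypothetical values mirroring "$cluster_id:$pool_name/$cluster_id":
    print(parse_location('mycluster:ganesha_pool/mycluster'))
    # -> ('mycluster', 'ganesha_pool', 'mycluster')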