From 8d53d7b8e528a08358132032d11f96c774deedb8 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Alfonso=20Mart=C3=ADnez?=
Date: Thu, 22 Apr 2021 14:09:52 +0200
Subject: [PATCH] mgr/dashboard: fix duplicated rows when creating NFS export.
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

- Show an error message if the same pool & namespace is used by more than
  1 cluster.
- Fix error handling when no rgw daemons found.

Fixes: https://tracker.ceph.com/issues/50440
Signed-off-by: Alfonso Martínez
---
 .../mgr/dashboard/controllers/nfsganesha.py  |  9 ++-
 src/pybind/mgr/dashboard/services/ganesha.py | 20 +++--
 .../mgr/dashboard/tests/test_ganesha.py      | 76 ++++++++++---------
 src/vstart.sh                                |  6 +--
 4 files changed, 64 insertions(+), 47 deletions(-)

diff --git a/src/pybind/mgr/dashboard/controllers/nfsganesha.py b/src/pybind/mgr/dashboard/controllers/nfsganesha.py
index b9eb79b0240..91d079234e4 100644
--- a/src/pybind/mgr/dashboard/controllers/nfsganesha.py
+++ b/src/pybind/mgr/dashboard/controllers/nfsganesha.py
@@ -13,7 +13,8 @@ from ..services.cephfs import CephFS
 from ..services.cephx import CephX
 from ..services.exception import DashboardException, serialize_dashboard_exception
 from ..services.ganesha import Ganesha, GaneshaConf, NFSException
-from ..services.rgw_client import RgwClient
+from ..services.rgw_client import NoCredentialsException, \
+    NoRgwDaemonsException, RequestException, RgwClient
 from . import ApiController, BaseController, ControllerDoc, Endpoint, \
     EndpointDoc, ReadPermission, RESTController, Task, UiApiController

@@ -307,7 +308,11 @@ class NFSGaneshaUi(BaseController):
     @Endpoint('GET', '/rgw/buckets')
     @ReadPermission
     def buckets(self, user_id=None):
-        return RgwClient.instance(user_id).get_buckets()
+        try:
+            return RgwClient.instance(user_id).get_buckets()
+        except (DashboardException, NoCredentialsException, RequestException,
+                NoRgwDaemonsException):
+            return []

     @Endpoint('GET', '/clusters')
     @ReadPermission
diff --git a/src/pybind/mgr/dashboard/services/ganesha.py b/src/pybind/mgr/dashboard/services/ganesha.py
index b4af48f415d..9799f837a74 100644
--- a/src/pybind/mgr/dashboard/services/ganesha.py
+++ b/src/pybind/mgr/dashboard/services/ganesha.py
@@ -16,7 +16,7 @@ from ..settings import Settings
 from .cephfs import CephFS
 from .cephx import CephX
 from .orchestrator import OrchClient
-from .rgw_client import NoCredentialsException, RequestException, RgwClient
+from .rgw_client import NoCredentialsException, NoRgwDaemonsException, RequestException, RgwClient

 logger = logging.getLogger('ganesha')

@@ -43,9 +43,6 @@ class Ganesha(object):
         location_list = [loc.strip() for loc in location_list_str.split(
             ",")] if location_list_str else []
         for location in location_list:
-            cluster = None
-            pool = None
-            namespace = None
             if not location:
                 raise NFSException("Invalid Ganesha cluster RADOS "
                                    "[cluster_id:]pool/namespace setting: {}"
@@ -69,6 +66,18 @@
             else:
                 pool, namespace = pool_nm.split('/', 1)

+            # Check pool/namespace collision.
+            for clusters in [orch_result, result]:
+                for cluster_name, cluster_data in clusters.items():
+                    if cluster_data['pool'] == pool and cluster_data['namespace'] == namespace:
+                        raise NFSException(
+                            f'Pool `{pool}` and namespace `{namespace}` are already in use by '
+                            f"""NFS-Ganesha cluster called `{cluster_name}`{" that is deployed by "
+                            "the Orchestrator" if cluster_data['type'] == ClusterType.ORCHESTRATOR
+                            else ''}. """
+                            'Please update GANESHA_RADOS_POOL_NAMESPACE setting.'
+                        )
+
             if cluster in orch_result:
                 # cephadm might have set same cluster settings, ask the user to remove it.
                 raise NFSException(
@@ -154,7 +163,8 @@ class Ganesha(object):
                 if RgwClient.admin_instance().is_service_online() and \
                         RgwClient.admin_instance().is_system_user():
                     result.append("RGW")
-        except (DashboardException, NoCredentialsException, RequestException, LookupError):
+        except (DashboardException, NoCredentialsException, RequestException,
+                NoRgwDaemonsException):
             pass
         return result

diff --git a/src/pybind/mgr/dashboard/tests/test_ganesha.py b/src/pybind/mgr/dashboard/tests/test_ganesha.py
index 90f45471e8d..f9cc44aa273 100644
--- a/src/pybind/mgr/dashboard/tests/test_ganesha.py
+++ b/src/pybind/mgr/dashboard/tests/test_ganesha.py
@@ -156,22 +156,14 @@ EXPORT
     def _ioctx_set_namespace_mock(self, namespace):
         self.temp_store_namespace = namespace

+    @staticmethod
+    def _set_user_defined_clusters_location(clusters_pool_namespace='ganesha/ns'):
+        Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = clusters_pool_namespace
+
     def setUp(self):
         self.mock_kv_store()

         self.clusters = {
-            '_default_': {
-                'pool': 'ganesha',
-                'namespace': 'ns',
-                'type': ClusterType.USER,
-                'daemon_conf': None,
-                'daemons': ['nodea', 'nodeb'],
-                'exports': {
-                    1: ['nodea', 'nodeb'],
-                    2: ['nodea'],
-                    3: ['nodea', 'nodeb']  # for new-added export
-                }
-            },
             'foo': {
                 'pool': 'ganesha2',
                 'namespace': 'ns2',
@@ -186,8 +178,8 @@ EXPORT
             }
         }

-        Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = '{pool}/{namespace}'.format_map(
-            self.clusters['_default_'])
+        # Unset user-defined location.
+        self._set_user_defined_clusters_location('')

         self.temp_store_namespace = None
         self._reset_temp_store()

@@ -860,6 +852,7 @@ EXPORT
         self.io_mock.reset_mock()

         # User-defined cluster: reload daemons in the parameter
+        self._set_user_defined_clusters_location()
         conf = GaneshaConf.instance('_default_')
         calls = [mock_call('conf-{}'.format(daemon)) for daemon in ['nodea', 'nodeb']]
         conf.reload_daemons(['nodea', 'nodeb'])

@@ -897,13 +890,14 @@ EXPORT
             instance.validate(export)

     def test_validate_user(self):
+        self._set_user_defined_clusters_location()
         cluster_id = '_default_'
-        cluster_info = self.clusters[cluster_id]
         instance = GaneshaConf.instance(cluster_id)
         export = MagicMock()

         # export can be linked to none, partial, or all daemons
-        export_daemons = [[], cluster_info['daemons'][:1], cluster_info['daemons']]
+        fake_daemons = ['nodea', 'nodeb']
+        export_daemons = [[], fake_daemons[:1], fake_daemons]
         for daemons in export_daemons:
             export.daemons = daemons
             instance.validate(export)

@@ -919,25 +913,15 @@ EXPORT
         for cluster_id in cluster_ids:
             self.assertIn(cluster_id, locations)
             cluster = locations.pop(cluster_id)
-            expected_info = self.clusters[cluster_id]
-            self.assertDictEqual(cluster, {key: expected_info[key] for key in [
+            self.assertDictEqual(cluster, {key: cluster[key] for key in [
                 'pool', 'namespace', 'type', 'daemon_conf']})
         self.assertDictEqual(locations, {})

     def test_get_cluster_locations(self):
         # pylint: disable=protected-access
-        # There are both Orchstrator cluster and user-defined cluster.
-        locations = ganesha.Ganesha._get_clusters_locations()
-        self._verify_locations(locations, self.clusters.keys())
-
-        # There is only a user-defined cluster.
-        self._mock_orchestrator(False)
-        locations = ganesha.Ganesha._get_clusters_locations()
-        self._verify_locations(locations, ['_default_'])
+        # There is only a Orchestrator cluster.
         self._mock_orchestrator(True)
-
-        # There is only a Orchstrator cluster.
-        Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = ''
         locations = ganesha.Ganesha._get_clusters_locations()
         self._verify_locations(locations, ['foo'])

@@ -946,16 +930,38 @@ EXPORT
         with self.assertRaises(NFSException):
             ganesha.Ganesha._get_clusters_locations()

+        # There is only a user-defined cluster.
+        self._set_user_defined_clusters_location()
+        self._mock_orchestrator(False)
+        locations = ganesha.Ganesha._get_clusters_locations()
+        self._verify_locations(locations, ['_default_'])
+
+        # There are both Orchestrator cluster and user-defined cluster.
+        self._set_user_defined_clusters_location()
+        self._mock_orchestrator(True)
+        locations = ganesha.Ganesha._get_clusters_locations()
+        self._verify_locations(locations, ['foo', '_default_'])
+
     def test_get_cluster_locations_conflict(self):
         # pylint: disable=protected-access
-        # Raise an exception when there is a user-defined cluster that conlicts
-        # with Orchestrator clusters.
-        old_setting = Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE
-        conflicted_location = ',foo:{pool}/{namespace}'.format_map(
-            self.clusters['foo'])
-        Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = old_setting + conflicted_location
-        with self.assertRaises(NFSException):
+
+        # Pool/namespace collision.
+        self._set_user_defined_clusters_location('ganesha2/ns2')
+        with self.assertRaises(NFSException) as ctx:
+            ganesha.Ganesha._get_clusters_locations()
+        self.assertIn('already in use', str(ctx.exception))
+
+        # Cluster name collision with orch. cluster.
+        self._set_user_defined_clusters_location('foo:ganesha/ns')
+        with self.assertRaises(NFSException) as ctx:
+            ganesha.Ganesha._get_clusters_locations()
+        self.assertIn('Detected a conflicting NFS-Ganesha cluster', str(ctx.exception))
+
+        # Cluster name collision with user-defined cluster.
+        self._set_user_defined_clusters_location('cluster1:ganesha/ns,cluster1:fake-pool/fake-ns')
+        with self.assertRaises(NFSException) as ctx:
             ganesha.Ganesha._get_clusters_locations()
+        self.assertIn('Duplicate Ganesha cluster definition', str(ctx.exception))


 class NFSGaneshaUiControllerTest(ControllerTestCase):
diff --git a/src/vstart.sh b/src/vstart.sh
index 7f0dfef731a..96a52d869be 100755
--- a/src/vstart.sh
+++ b/src/vstart.sh
@@ -1202,15 +1202,11 @@ EOF

         prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

-        if $with_mgr_dashboard; then
-            $CEPH_BIN/rados -p $pool_name put "conf-$name" "$ganesha_dir/ganesha-$name.conf"
-        fi
-
         echo "$test_user ganesha daemon $name started on port: $port"
     done

     if $with_mgr_dashboard; then
-        ceph_adm dashboard set-ganesha-clusters-rados-pool-namespace $pool_name
+        ceph_adm dashboard set-ganesha-clusters-rados-pool-namespace "$cluster_id:$pool_name/$cluster_id"
     fi
 }
-- 
2.39.5
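
Note (not part of the patch): the GANESHA_RADOS_POOL_NAMESPACE setting parsed in services/ganesha.py takes comma-separated `[cluster_id:]pool/namespace` entries, and the new check rejects any entry whose pool/namespace pair is already claimed by another cluster, whether Orchestrator-deployed or user-defined. The sketch below is a minimal, standalone illustration of that rule only; the find_collision helper and the dictionaries are hypothetical and merely mirror the test fixture (cluster `foo` on `ganesha2/ns2`), they are not the actual Ceph code.

    # Hypothetical sketch of the collision rule: a cluster location may not reuse
    # a pool/namespace pair that another known cluster already occupies.
    from typing import Dict, Optional


    def find_collision(known_clusters: Dict[str, Dict[str, str]],
                       pool: str, namespace: str) -> Optional[str]:
        """Return the name of the cluster already using pool/namespace, if any."""
        for cluster_name, cluster_data in known_clusters.items():
            if cluster_data['pool'] == pool and cluster_data['namespace'] == namespace:
                return cluster_name
        return None


    # Mirrors the test fixture: 'foo' is the Orchestrator-managed cluster on ganesha2/ns2,
    # so a user-defined entry pointing at ganesha2/ns2 must be rejected, while ganesha/ns is fine.
    clusters = {'foo': {'pool': 'ganesha2', 'namespace': 'ns2'}}
    assert find_collision(clusters, 'ganesha2', 'ns2') == 'foo'
    assert find_collision(clusters, 'ganesha', 'ns') is None

This is the same conflict exercised by the new test_get_cluster_locations_conflict case, which expects the "already in use" error for a user-defined location of `ganesha2/ns2`.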