from ..services.cephx import CephX
from ..services.exception import DashboardException, serialize_dashboard_exception
from ..services.ganesha import Ganesha, GaneshaConf, NFSException
-from ..services.rgw_client import RgwClient
+from ..services.rgw_client import NoCredentialsException, \
+ NoRgwDaemonsException, RequestException, RgwClient
from . import ApiController, BaseController, ControllerDoc, Endpoint, \
EndpointDoc, ReadPermission, RESTController, Task, UiApiController
@Endpoint('GET', '/rgw/buckets')
@ReadPermission
def buckets(self, user_id=None):
- return RgwClient.instance(user_id).get_buckets()
+ try:
+ return RgwClient.instance(user_id).get_buckets()
+ except (DashboardException, NoCredentialsException, RequestException,
+ NoRgwDaemonsException):
+ return []
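# Aside (illustrative sketch, not part of the patch): the fallback pattern applied above,
# in isolation. `RgwUnavailable` is a stand-in for the imported RGW exceptions; the intent,
# presumably, is that the dashboard's bucket listing degrades to an empty list instead of
# surfacing an error when RGW is unavailable or misconfigured.
class RgwUnavailable(Exception):
    pass

def _list_buckets_or_empty(fetch):
    try:
        return fetch()
    except RgwUnavailable:
        return []

def _fetch_fails():
    raise RgwUnavailable('RGW not reachable')

assert _list_buckets_or_empty(lambda: ['bucket1', 'bucket2']) == ['bucket1', 'bucket2']
assert _list_buckets_or_empty(_fetch_fails) == []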
@Endpoint('GET', '/clusters')
@ReadPermission
from .cephfs import CephFS
from .cephx import CephX
from .orchestrator import OrchClient
-from .rgw_client import NoCredentialsException, RequestException, RgwClient
+from .rgw_client import NoCredentialsException, NoRgwDaemonsException, RequestException, RgwClient
logger = logging.getLogger('ganesha')
location_list = [loc.strip() for loc in location_list_str.split(
",")] if location_list_str else []
for location in location_list:
- cluster = None
- pool = None
- namespace = None
if not location:
raise NFSException("Invalid Ganesha cluster RADOS "
"[cluster_id:]pool/namespace setting: {}"
else:
pool, namespace = pool_nm.split('/', 1)
+ # Check pool/namespace collision.
+ for clusters in [orch_result, result]:
+ for cluster_name, cluster_data in clusters.items():
+ if cluster_data['pool'] == pool and cluster_data['namespace'] == namespace:
+ raise NFSException(
+ f'Pool `{pool}` and namespace `{namespace}` are already in use by '
+ f"""NFS-Ganesha cluster called `{cluster_name}`{" that is deployed by "
+ "the Orchestrator" if cluster_data['type'] == ClusterType.ORCHESTRATOR
+ else ''}. """
+ 'Please update GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE setting.'
+ )
+
if cluster in orch_result:
# cephadm might have set same cluster settings, ask the user to remove it.
raise NFSException(
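# Aside (illustrative sketch, not part of the patch): the user-defined clusters setting that
# the collision check above guards is a comma-separated list of `[cluster_id:]pool/namespace`
# entries (see the "Invalid Ganesha cluster RADOS" error message). A simplified, self-contained
# parse of that format; the real parsing (presumably in the cluster-locations code above) is
# stricter about validation:
def _parse_clusters_setting_sketch(value):
    clusters = {}
    for entry in (e.strip() for e in value.split(',') if e.strip()):
        cluster_id, _, pool_nm = entry.rpartition(':')
        pool, _, namespace = pool_nm.partition('/')
        clusters[cluster_id or '_default_'] = {'pool': pool, 'namespace': namespace}
    return clusters

# _parse_clusters_setting_sketch('ganesha/ns,foo:ganesha2/ns2') ->
#   {'_default_': {'pool': 'ganesha', 'namespace': 'ns'},
#    'foo': {'pool': 'ganesha2', 'namespace': 'ns2'}}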
if RgwClient.admin_instance().is_service_online() and \
RgwClient.admin_instance().is_system_user():
result.append("RGW")
- except (DashboardException, NoCredentialsException, RequestException, LookupError):
+ except (DashboardException, NoCredentialsException, RequestException,
+ NoRgwDaemonsException):
pass
return result
def _ioctx_set_namespace_mock(self, namespace):
self.temp_store_namespace = namespace
+ @staticmethod
+ def _set_user_defined_clusters_location(clusters_pool_namespace='ganesha/ns'):
+ Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = clusters_pool_namespace
+
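# Aside (derived from the tests below, not part of the patch): how the helper's argument maps
# to user-defined clusters; an entry without a `cluster_id:` prefix becomes the `_default_`
# cluster, and an empty string unsets the user-defined location entirely.
#   _set_user_defined_clusters_location('')               -> no user-defined cluster
#   _set_user_defined_clusters_location('ganesha/ns')     -> '_default_' in pool 'ganesha', namespace 'ns'
#   _set_user_defined_clusters_location('foo:ganesha/ns') -> 'foo' in pool 'ganesha', namespace 'ns'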
def setUp(self):
self.mock_kv_store()
self.clusters = {
- '_default_': {
- 'pool': 'ganesha',
- 'namespace': 'ns',
- 'type': ClusterType.USER,
- 'daemon_conf': None,
- 'daemons': ['nodea', 'nodeb'],
- 'exports': {
- 1: ['nodea', 'nodeb'],
- 2: ['nodea'],
- 3: ['nodea', 'nodeb'] # for new-added export
- }
- },
'foo': {
'pool': 'ganesha2',
'namespace': 'ns2',
}
}
- Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = '{pool}/{namespace}'.format_map(
- self.clusters['_default_'])
+ # Unset user-defined location.
+ self._set_user_defined_clusters_location('')
self.temp_store_namespace = None
self._reset_temp_store()
self.io_mock.reset_mock()
# User-defined cluster: reload daemons in the parameter
+ self._set_user_defined_clusters_location()
conf = GaneshaConf.instance('_default_')
calls = [mock_call('conf-{}'.format(daemon)) for daemon in ['nodea', 'nodeb']]
conf.reload_daemons(['nodea', 'nodeb'])
instance.validate(export)
def test_validate_user(self):
+ self._set_user_defined_clusters_location()
cluster_id = '_default_'
- cluster_info = self.clusters[cluster_id]
instance = GaneshaConf.instance(cluster_id)
export = MagicMock()
# export can be linked to none, partial, or all daemons
- export_daemons = [[], cluster_info['daemons'][:1], cluster_info['daemons']]
+ fake_daemons = ['nodea', 'nodeb']
+ export_daemons = [[], fake_daemons[:1], fake_daemons]
for daemons in export_daemons:
export.daemons = daemons
instance.validate(export)
for cluster_id in cluster_ids:
self.assertIn(cluster_id, locations)
cluster = locations.pop(cluster_id)
- expected_info = self.clusters[cluster_id]
- self.assertDictEqual(cluster, {key: expected_info[key] for key in [
+ self.assertDictEqual(cluster, {key: cluster[key] for key in [
'pool', 'namespace', 'type', 'daemon_conf']})
self.assertDictEqual(locations, {})
def test_get_cluster_locations(self):
# pylint: disable=protected-access
- # There are both Orchstrator cluster and user-defined cluster.
- locations = ganesha.Ganesha._get_clusters_locations()
- self._verify_locations(locations, self.clusters.keys())
- # There is only a user-defined cluster.
- self._mock_orchestrator(False)
- locations = ganesha.Ganesha._get_clusters_locations()
- self._verify_locations(locations, ['_default_'])
+ # There is only an Orchestrator cluster.
self._mock_orchestrator(True)
-
- # There is only a Orchstrator cluster.
- Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = ''
locations = ganesha.Ganesha._get_clusters_locations()
self._verify_locations(locations, ['foo'])
with self.assertRaises(NFSException):
ganesha.Ganesha._get_clusters_locations()
+ # There is only a user-defined cluster.
+ self._set_user_defined_clusters_location()
+ self._mock_orchestrator(False)
+ locations = ganesha.Ganesha._get_clusters_locations()
+ self._verify_locations(locations, ['_default_'])
+
+ # There are both an Orchestrator cluster and a user-defined cluster.
+ self._set_user_defined_clusters_location()
+ self._mock_orchestrator(True)
+ locations = ganesha.Ganesha._get_clusters_locations()
+ self._verify_locations(locations, ['foo', '_default_'])
+
def test_get_cluster_locations_conflict(self):
# pylint: disable=protected-access
- # Raise an exception when there is a user-defined cluster that conlicts
- # with Orchestrator clusters.
- old_setting = Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE
- conflicted_location = ',foo:{pool}/{namespace}'.format_map(
- self.clusters['foo'])
- Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = old_setting + conflicted_location
- with self.assertRaises(NFSException):
+
+ # Pool/namespace collision.
+ self._set_user_defined_clusters_location('ganesha2/ns2')
+ with self.assertRaises(NFSException) as ctx:
+ ganesha.Ganesha._get_clusters_locations()
+ self.assertIn('already in use', str(ctx.exception))
+
+ # Cluster name collision with orch. cluster.
+ self._set_user_defined_clusters_location('foo:ganesha/ns')
+ with self.assertRaises(NFSException) as ctx:
+ ganesha.Ganesha._get_clusters_locations()
+ self.assertIn('Detected a conflicting NFS-Ganesha cluster', str(ctx.exception))
+
+ # Cluster name collision with user-defined cluster.
+ self._set_user_defined_clusters_location('cluster1:ganesha/ns,cluster1:fake-pool/fake-ns')
+ with self.assertRaises(NFSException) as ctx:
ganesha.Ganesha._get_clusters_locations()
+ self.assertIn('Duplicate Ganesha cluster definition', str(ctx.exception))
class NFSGaneshaUiControllerTest(ControllerTestCase):