From: Ramana Raja
Date: Thu, 3 May 2018 22:09:09 +0000 (+0530)
Subject: ceph_volume_client: allow volumes without namespace isolation
X-Git-Tag: v14.0.0~186^2~1
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=3a7fdb8b052fee2ff1bff24c8de745278314c78c;p=ceph-ci.git

ceph_volume_client: allow volumes without namespace isolation

Fixes: https://tracker.ceph.com/issues/23695
Signed-off-by: Ramana Raja
---

diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 76de57fefc1..f69a04f0cfd 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -343,6 +343,19 @@ vc.disconnect()
             vc.delete_volume(vp, data_isolated=True)
             vc.purge_volume(vp, data_isolated=True)
             vc.purge_volume(vp, data_isolated=True)
+
+            vc.create_volume(vp, 10, namespace_isolated=False)
+            vc.create_volume(vp, 10, namespace_isolated=False)
+            vc.authorize(vp, "{guest_entity}")
+            vc.authorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.evict("{guest_entity}")
+            vc.evict("{guest_entity}")
+            vc.delete_volume(vp)
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+            vc.purge_volume(vp)
         """.format(
             group_id=group_id,
             volume_id=volume_id,
@@ -1014,3 +1027,43 @@ vc.disconnect()
         # Mount the volume in the guest using the auth ID to assert that the
         # auth caps are valid
         guest_mount.mount(mount_path=mount_path)
+
+    def test_volume_without_namespace_isolation(self):
+        """
+        That volume client can create volumes that do not have separate RADOS
+        namespace layouts.
+        """
+        vc_mount = self.mounts[1]
+        vc_mount.umount_wait()
+
+        # Configure vc_mount as the handle for driving volumeclient
+        self._configure_vc_auth(vc_mount, "manila")
+
+        # Create a volume
+        volume_prefix = "/myprefix"
+        group_id = "grpid"
+        volume_id = "volid"
+        mount_path = self._volume_client_python(vc_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
+            print create_result['mount_path']
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id
+        )), volume_prefix)
+
+        # The CephFS volume should be created
+        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
+        vol_namespace = self.mounts[0].getfattr(
+            os.path.join("myprefix", group_id, volume_id),
+            "ceph.dir.layout.pool_namespace")
+        assert not vol_namespace
+
+        self._volume_client_python(vc_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )), volume_prefix)
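For reference, the same flow the test drives through _volume_client_python()
can be driven directly against the library. A minimal sketch, assuming a
reachable cluster, a standard /etc/ceph/ceph.conf, and a "manila" auth ID
carrying the volume-client caps that _configure_vc_auth() grants (all names
here are illustrative, not part of this patch):

    from ceph_volume_client import CephFSVolumeClient, VolumePath

    vc = CephFSVolumeClient("manila", "/etc/ceph/ceph.conf", "ceph")
    vc.connect()
    try:
        vp = VolumePath("grpid", "volid")
        # namespace_isolated=False is the new knob; the default (True)
        # keeps the old per-volume RADOS namespace behaviour.
        result = vc.create_volume(vp, size=10 * 1024 * 1024,
                                  namespace_isolated=False)
        print(result['mount_path'])
        vc.delete_volume(vp)
        vc.purge_volume(vp)
    finally:
        vc.disconnect()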
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index 8e21e13fffa..627bf50f00a 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -204,7 +204,7 @@ CEPHFSVOLUMECLIENT_VERSION_HISTORY = """
 
     * 1 - Initial version
     * 2 - Added get_object, put_object, delete_object methods to
           CephFSVolumeClient
-
+    * 3 - Allow volumes to be created without RADOS namespace isolation
 
 """
@@ -228,7 +228,7 @@ class CephFSVolumeClient(object):
     """
 
     # Current version
-    version = 2
+    version = 3
 
     # Where shall we create our volumes?
     POOL_PREFIX = "fsvolume_"
@@ -600,7 +600,7 @@ class CephFSVolumeClient(object):
         except cephfs.ObjectNotFound:
             self.fs.mkdir(subpath, 0o755)
 
-    def create_volume(self, volume_path, size=None, data_isolated=False):
+    def create_volume(self, volume_path, size=None, data_isolated=False, namespace_isolated=True):
         """
         Set up metadata, pools and auth for a volume.
 
@@ -610,6 +610,7 @@ class CephFSVolumeClient(object):
         :param volume_path: VolumePath instance
         :param size: In bytes, or None for no size limit
         :param data_isolated: If true, create a separate OSD pool for this volume
+        :param namespace_isolated: If true, use separate RADOS namespace for this volume
        :return:
         """
         path = self._get_path(volume_path)
@@ -633,10 +634,17 @@ class CephFSVolumeClient(object):
             })
             self.fs.setxattr(path, 'ceph.dir.layout.pool', pool_name, 0)
 
-        # enforce security isolation, use seperate namespace for this volume
-        namespace = "{0}{1}".format(self.pool_ns_prefix, volume_path.volume_id)
-        log.info("create_volume: {0}, using rados namespace {1} to isolate data.".format(volume_path, namespace))
-        self.fs.setxattr(path, 'ceph.dir.layout.pool_namespace', namespace, 0)
+        # enforce security isolation, use separate namespace for this volume
+        if namespace_isolated:
+            namespace = "{0}{1}".format(self.pool_ns_prefix, volume_path.volume_id)
+            log.info("create_volume: {0}, using rados namespace {1} to isolate data.".format(volume_path, namespace))
+            self.fs.setxattr(path, 'ceph.dir.layout.pool_namespace', namespace, 0)
+        else:
+            # If volume's namespace layout is not set, then the volume's pool
+            # layout remains unset and will undesirably change with ancestor's
+            # pool layout changes.
+            pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
+            self.fs.setxattr(path, 'ceph.dir.layout.pool', pool_name, 0)
 
         # Create a volume meta file, if it does not already exist, to store
         # data about auth ids having access to the volume
@@ -1025,15 +1033,23 @@ class CephFSVolumeClient(object):
         # First I need to work out what the data pool is for this share:
         # read the layout
         pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
-        namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+
+        try:
+            namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        except cephfs.NoData:
+            namespace = None
 
         # Now construct auth capabilities that give the guest just enough
         # permissions to access the share
         client_entity = "client.{0}".format(auth_id)
         want_access_level = 'r' if readonly else 'rw'
         want_mds_cap = 'allow {0} path={1}'.format(want_access_level, path)
-        want_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
-            want_access_level, pool_name, namespace)
+        if namespace:
+            want_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
+                want_access_level, pool_name, namespace)
+        else:
+            want_osd_cap = 'allow {0} pool={1}'.format(want_access_level,
+                                                       pool_name)
 
         try:
             existing = self._rados_command(
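A note on the fallback above: a volume created with namespace_isolated=False
never gets the "ceph.dir.layout.pool_namespace" xattr, so libcephfs raises
cephfs.NoData rather than returning an empty string, and the new try/except
maps that to namespace = None, which the cap construction above reads as
"grant pool-wide access". The probe in isolation, as a sketch (the helper name
is hypothetical; `fs` is a connected cephfs.LibCephFS handle):

    import cephfs

    def get_volume_namespace(fs, path):
        # None => the volume shares the pool's default namespace,
        # i.e. it was created with namespace_isolated=False.
        try:
            return fs.getxattr(path, "ceph.dir.layout.pool_namespace")
        except cephfs.NoData:
            return None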
@@ -1061,26 +1077,41 @@ class CephFSVolumeClient(object):
             # auth caps.
             unwanted_access_level = 'r' if want_access_level is 'rw' else 'rw'
             unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path)
-            unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
-                unwanted_access_level, pool_name, namespace)
+            if namespace:
+                unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
+                    unwanted_access_level, pool_name, namespace)
+            else:
+                unwanted_osd_cap = 'allow {0} pool={1}'.format(
+                    unwanted_access_level, pool_name)
+
+            def cap_update(
+                    orig_mds_caps, orig_osd_caps, want_mds_cap,
+                    want_osd_cap, unwanted_mds_cap, unwanted_osd_cap):
 
-            def cap_update(orig, want, unwanted):
-                # Updates the existing auth caps such that there is a single
-                # occurrence of wanted auth caps and no occurrence of
-                # conflicting auth caps.
+                if not orig_mds_caps:
+                    return want_mds_cap, want_osd_cap
 
-                if not orig:
-                    return want
+                mds_cap_tokens = orig_mds_caps.split(",")
+                osd_cap_tokens = orig_osd_caps.split(",")
 
-                cap_tokens = set(orig.split(","))
+                if want_mds_cap in mds_cap_tokens:
+                    return orig_mds_caps, orig_osd_caps
 
-                cap_tokens.discard(unwanted)
-                cap_tokens.add(want)
+                if unwanted_mds_cap in mds_cap_tokens:
+                    mds_cap_tokens.remove(unwanted_mds_cap)
+                    osd_cap_tokens.remove(unwanted_osd_cap)
 
-                return ",".join(cap_tokens)
+                mds_cap_tokens.append(want_mds_cap)
+                osd_cap_tokens.append(want_osd_cap)
 
-            osd_cap_str = cap_update(cap['caps'].get('osd', ""), want_osd_cap, unwanted_osd_cap)
-            mds_cap_str = cap_update(cap['caps'].get('mds', ""), want_mds_cap, unwanted_mds_cap)
+                return ",".join(mds_cap_tokens), ",".join(osd_cap_tokens)
+
+            orig_mds_caps = cap['caps'].get('mds', "")
+            orig_osd_caps = cap['caps'].get('osd', "")
+
+            mds_cap_str, osd_cap_str = cap_update(
+                orig_mds_caps, orig_osd_caps, want_mds_cap, want_osd_cap,
+                unwanted_mds_cap, unwanted_osd_cap)
 
             caps = self._rados_command(
                 'auth caps',
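The rewritten cap_update above treats the MDS and OSD caps as position-paired
lists instead of two independent sets, so dropping a conflicting MDS cap also
drops the OSD cap that was granted alongside it; this assumes the volume
client is the only writer of these entries, keeping the two lists in lockstep.
An illustrative trace (paths and pool names invented), upgrading a guest from
read-only to read-write on one volume:

    orig_mds = 'allow r path=/volumes/grpid/volid'
    orig_osd = 'allow r pool=fsvolume_data'
    want_mds = 'allow rw path=/volumes/grpid/volid'
    want_osd = 'allow rw pool=fsvolume_data'

    # The read-only pair is the conflicting ("unwanted") pair here.
    mds_str, osd_str = cap_update(orig_mds, orig_osd, want_mds, want_osd,
                                  orig_mds, orig_osd)
    # mds_str == 'allow rw path=/volumes/grpid/volid'
    # osd_str == 'allow rw pool=fsvolume_data'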
@@ -1187,16 +1218,23 @@ class CephFSVolumeClient(object):
         client_entity = "client.{0}".format(auth_id)
         path = self._get_path(volume_path)
         pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
-        namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        try:
+            namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        except cephfs.NoData:
+            namespace = None
 
         # The auth_id might have read-only or read-write mount access for the
         # volume path.
         access_levels = ('r', 'rw')
-        want_mds_caps = {'allow {0} path={1}'.format(access_level, path)
-                         for access_level in access_levels}
-        want_osd_caps = {'allow {0} pool={1} namespace={2}'.format(
-            access_level, pool_name, namespace)
-                         for access_level in access_levels}
+        want_mds_caps = ['allow {0} path={1}'.format(access_level, path)
+                         for access_level in access_levels]
+        if namespace:
+            want_osd_caps = ['allow {0} pool={1} namespace={2}'.format(access_level, pool_name, namespace)
+                             for access_level in access_levels]
+        else:
+            want_osd_caps = ['allow {0} pool={1}'.format(access_level, pool_name)
+                             for access_level in access_levels]
+
 
         try:
             existing = self._rados_command(
@@ -1206,14 +1244,25 @@ class CephFSVolumeClient(object):
                 }
             )
 
-            def cap_remove(orig, want):
-                cap_tokens = set(orig.split(","))
-                return ",".join(cap_tokens.difference(want))
+            def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps):
+                mds_cap_tokens = orig_mds_caps.split(",")
+                osd_cap_tokens = orig_osd_caps.split(",")
+
+                for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps):
+                    if want_mds_cap in mds_cap_tokens:
+                        mds_cap_tokens.remove(want_mds_cap)
+                        osd_cap_tokens.remove(want_osd_cap)
+                        break
+
+                return ",".join(mds_cap_tokens), ",".join(osd_cap_tokens)
 
             cap = existing[0]
-            osd_cap_str = cap_remove(cap['caps'].get('osd', ""), want_osd_caps)
-            mds_cap_str = cap_remove(cap['caps'].get('mds', ""), want_mds_caps)
-            if (not osd_cap_str) and (not mds_cap_str):
+            orig_mds_caps = cap['caps'].get('mds', "")
+            orig_osd_caps = cap['caps'].get('osd', "")
+            mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps,
+                                                  want_mds_caps, want_osd_caps)
+
+            if not mds_cap_str:
                 self._rados_command('auth del', {'entity': client_entity}, decode=False)
             else:
                 self._rados_command(
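cap_remove mirrors that pairing on the deauthorize side: it removes the first
wanted MDS/OSD pair it finds, and the guard above now keys off the remaining
MDS caps alone, so an entity whose last volume grant is removed is deleted
outright even if stray OSD caps remain. An illustrative trace (names invented)
for a guest whose only grant is read-write on one non-isolated volume:

    orig_mds = 'allow rw path=/volumes/grpid/volid'
    orig_osd = 'allow rw pool=fsvolume_data'
    want_mds = ['allow r path=/volumes/grpid/volid',
                'allow rw path=/volumes/grpid/volid']
    want_osd = ['allow r pool=fsvolume_data',
                'allow rw pool=fsvolume_data']

    mds_str, osd_str = cap_remove(orig_mds, orig_osd, want_mds, want_osd)
    # Both come back empty, so deauthorize falls through to 'auth del'
    # and removes the guest's auth ID entirely.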