python-cephfs: use rados namespace for data isolation.
author Xiaoxi Chen <xiaoxchen@ebay.com>
Wed, 6 Apr 2016 14:45:02 +0000 (22:45 +0800)
committer Xiaoxi Chen <xiaoxchen@ebay.com>
Wed, 27 Apr 2016 17:26:40 +0000 (12:26 -0500)
CephFS dir layouts support rados namespaces as of jewel, so isolate volume
data with a rados namespace instead of a separate pool. Namespaces are free,
so we always enforce namespace isolation for volumes.

The meaning of the "data_isolated" flag has changed slightly: it is no longer
used for security isolation; it now refers to physical isolation
(i.e. potentially using different OSDs).

Also, (de)authorize based on namespace.
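
A rados namespace is a per-object tag inside a pool; a client whose OSD caps
are restricted to one namespace cannot see objects in any other, and no extra
placement groups are consumed. A minimal sketch with the python rados binding
(pool and namespace names here are hypothetical, not from this commit):

    import rados

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    ioctx = cluster.open_ioctx('cephfs_data')   # one shared data pool
    ioctx.set_namespace('fsvolumens_vol1')      # subsequent I/O stays in this namespace
    ioctx.write_full('greeting', b'hello')      # invisible to clients capped to other namespaces
    ioctx.close()
    cluster.shutdown()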

Signed-off-by: Xiaoxi Chen <xiaoxchen@ebay.com>
(cherry picked from commit 0e1d013ea690c18f7fa6e2a19500f854a9949091)

src/pybind/ceph_volume_client.py

index 8add31114c0281796fd19b6b53605c4f079d35f4..c035871216c4744b432a18ec589d8038f5245af2 100644
@@ -204,6 +204,7 @@ class CephFSVolumeClient(object):
     # Where shall we create our volumes?
     VOLUME_PREFIX = "/volumes"
     POOL_PREFIX = "fsvolume_"
+    POOL_NS_PREFIX = "fsvolumens_"
 
     def __init__(self, auth_id, conf_path, cluster_name):
         self.fs = None
@@ -443,7 +444,9 @@ class CephFSVolumeClient(object):
         if size is not None:
             self.fs.setxattr(path, 'ceph.quota.max_bytes', size.__str__(), 0)
 
+        # data_isolated means create a separate pool for this volume
         if data_isolated:
             pool_name = "{0}{1}".format(self.POOL_PREFIX, volume_path.volume_id)
+            log.info("create_volume: {0}, create pool {1} as data_isolated=True.".format(volume_path, pool_name))
             pool_id = self._create_volume_pool(pool_name)
             mds_map = self._rados_command("mds dump", {})
@@ -453,6 +456,11 @@ class CephFSVolumeClient(object):
                 })
             self.fs.setxattr(path, 'ceph.dir.layout.pool', pool_name, 0)
 
+        # enforce security isolation: use a separate rados namespace for this volume
+        namespace = "{0}{1}".format(self.POOL_NS_PREFIX, volume_path.volume_id)
+        log.info("create_volume: {0}, using rados namespace {1} to isolate data.".format(volume_path, namespace))
+        self.fs.setxattr(path, 'ceph.dir.layout.pool_namespace', namespace, 0)
+
         return {
             'mount_path': path
         }
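
With this hunk every volume gets the pool_namespace layout xattr at create
time; data_isolated only adds a dedicated pool on top of that. A hedged usage
sketch, assuming the VolumePath(group_id, volume_id) helper and the
create_volume(volume_path, size=None, data_isolated=False) signature from
this file (group/volume names are hypothetical):

    vc = CephFSVolumeClient("manila", "/etc/ceph/ceph.conf", "ceph")
    vc.connect()
    vol = vc.create_volume(VolumePath("grp1", "vol1"), size=10 * 2**30)
    # files under vol['mount_path'] (/volumes/grp1/vol1) now store their data
    # in namespace fsvolumens_vol1 of the shared data pool
    iso = vc.create_volume(VolumePath("grp1", "vol2"), data_isolated=True)
    # vol2 additionally gets its own pool, fsvolume_vol2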
@@ -568,12 +576,13 @@ class CephFSVolumeClient(object):
         # read the layout
         path = self._get_path(volume_path)
         pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
+        namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
 
         # Now construct auth capabilities that give the guest just enough
         # permissions to access the share
         client_entity = "client.{0}".format(auth_id)
         want_mds_cap = 'allow rw path={0}'.format(path)
-        want_osd_cap = 'allow rw pool={0}'.format(pool_name)
+        want_osd_cap = 'allow rw pool={0} namespace={1}'.format(pool_name, namespace)
         try:
             existing = self._rados_command(
                 'auth get',
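
The OSD cap a guest is granted is now pinned to both the pool and the
volume's namespace. Evaluating the format strings from this hunk with
hypothetical values shows the resulting caps:

    path = "/volumes/grp1/vol1"
    pool_name = "cephfs_data"
    namespace = "fsvolumens_vol1"
    want_mds_cap = 'allow rw path={0}'.format(path)
    # -> 'allow rw path=/volumes/grp1/vol1'
    want_osd_cap = 'allow rw pool={0} namespace={1}'.format(pool_name, namespace)
    # -> 'allow rw pool=cephfs_data namespace=fsvolumens_vol1'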
@@ -648,9 +657,10 @@ class CephFSVolumeClient(object):
         client_entity = "client.{0}".format(auth_id)
         path = self._get_path(volume_path)
         pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
+        namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
 
         want_mds_cap = 'allow rw path={0}'.format(path)
-        want_osd_cap = 'allow rw pool={0}'.format(pool_name)
+        want_osd_cap = 'allow rw pool={0} namespace={1}'.format(pool_name, namespace)
 
         try:
             existing = self._rados_command(
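
Deauthorize reads back the same layout xattrs and strips the identical
namespace-qualified cap, so grant and revoke stay symmetric. A hedged sketch,
assuming the authorize/deauthorize signatures elsewhere in this file (the
auth_id "guest" is hypothetical):

    vp = VolumePath("grp1", "vol1")
    vc.authorize(vp, "guest")      # grants the pool+namespace cap shown above
    vc.deauthorize(vp, "guest")    # removes exactly that cap again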