git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tasks/cephfs: mds allow
author     Sage Weil <sage@redhat.com>
           Mon, 28 Sep 2015 16:38:04 +0000 (12:38 -0400)
committer  Sage Weil <sage@redhat.com>
           Mon, 28 Sep 2015 16:38:04 +0000 (12:38 -0400)
Signed-off-by: Sage Weil <sage@redhat.com>
tasks/cephfs/cephfs_test_case.py
tasks/cephfs/test_pool_perm.py
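
Both files reset a test client's capabilities with `ceph auth caps`, which replaces the entity's entire capability set rather than merging into it; a reset that names only mon and osd caps therefore strips whatever mds cap the client had, and the CephFS mount that follows can no longer authenticate to the MDS. The patch adds 'mds', 'allow' to each invocation so the MDS capability survives the reset. A minimal sketch of roughly the command the updated helper issues, assuming an illustrative client id of 0 and a data pool named cephfs_data (neither name comes from this patch):

    # Sketch only: client.0 and cephfs_data are illustrative placeholders,
    # not values taken from the patch.
    import subprocess

    subprocess.check_call([
        'ceph', 'auth', 'caps', 'client.0',
        'mds', 'allow',                      # newly added: keep MDS access after the reset
        'mon', 'allow r',
        'osd', 'allow rw pool=cephfs_data',
    ])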

index 3b2d7c23a3d6b816317b97f35066840192db79d6..6608fa2ee22e96c9ce333101ef5a7def272b938b 100644
--- a/tasks/cephfs/cephfs_test_case.py
+++ b/tasks/cephfs/cephfs_test_case.py
@@ -106,9 +106,11 @@ class CephFSTestCase(unittest.TestCase):
 
         # In case some test messed with auth caps, reset them
         for mount in self.mounts:
-            self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', "client.{0}".format(mount.client_id),
-                                                       'mon', 'allow r', 'osd',
-                                                       'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
+            self.fs.mon_manager.raw_cluster_cmd_result(
+                'auth', 'caps', "client.{0}".format(mount.client_id),
+                'mds', 'allow',
+                'mon', 'allow r',
+                'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
index d1bd6644be0917d5c5df1da5e6fdabdd3111ea93..7f0163fefce17d86fd5a055409a8b9a9c3ed2bb7 100644
--- a/tasks/cephfs/test_pool_perm.py
+++ b/tasks/cephfs/test_pool_perm.py
@@ -29,8 +29,9 @@ class TestPoolPerm(CephFSTestCase):
         client_name = "client.{0}".format(self.mount_a.client_id)
 
         # set data pool read only
-        self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', client_name, 'mon', 'allow r', 'osd',
-                                                   'allow r pool={0}'.format(self.fs.get_data_pool_name()))
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
+            'allow r pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.mount_a.umount_wait()
         self.mount_a.mount()
@@ -40,8 +41,9 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
 
         # set data pool write only
-        self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', client_name, 'mon', 'allow r', 'osd',
-                                                   'allow w pool={0}'.format(self.fs.get_data_pool_name()))
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
+            'allow w pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.mount_a.umount_wait()
         self.mount_a.mount()
@@ -51,8 +53,9 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(True)))
 
     def tearDown(self):
-        self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', "client.{0}".format(self.mount_a.client_id),
-                                                   'mon', 'allow r', 'osd',
-                                                   'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'caps', "client.{0}".format(self.mount_a.client_id),
+            'mds', 'allow', 'mon', 'allow r', 'osd',
+            'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
 
         super(TestPoolPerm, self).tearDown()
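
Each test narrows only the osd cap ('allow r' or 'allow w' on the data pool) while tearDown restores 'allow rw'; in every case the mds and mon caps are passed again because the replacement is wholesale. To confirm a reset took effect, the stored caps can be inspected with `ceph auth get`; a minimal sketch, again using the illustrative client id:

    # Sketch only: print the key and caps recorded for the (illustrative)
    # test client to verify the mds/mon/osd entries after a reset.
    import subprocess

    print(subprocess.check_output(['ceph', 'auth', 'get', 'client.0']).decode())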