From 28d2aec24cd896338f0497ef751edb53da0429b0 Mon Sep 17 00:00:00 2001
From: Mykola Golub
Date: Wed, 1 Mar 2023 12:45:00 +0200
Subject: [PATCH] qa/tasks/cephfs: use cephfs tags when recreating osd caps

to make it work when an extra data pool is added

Signed-off-by: Mykola Golub
(cherry picked from commit 6a384b5d7ff73de95821231b0d1476bd84956131)
---
 qa/tasks/cephfs/cephfs_test_case.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index ded3fa379de2..e15658c7d378 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -163,7 +163,7 @@ class CephFSTestCase(CephTestCase):
         # In case some test messed with auth caps, reset them
         for client_id in client_mount_ids:
             cmd = ['auth', 'caps', f'client.{client_id}', 'mon','allow r',
-                   'osd', f'allow rw pool={self.fs.get_data_pool_name()}',
+                   'osd', f'allow rw tag cephfs data={self.fs.name}',
                    'mds', 'allow']

             if self.run_cluster_cmd_result(cmd) == 0:
-- 
2.47.3
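
For reference, the cap string assembled by the updated code corresponds to a
"ceph auth caps" invocation along the following lines; the client id (0) and
filesystem name (cephfs) are illustrative placeholders, not values taken from
the patch:

    # Illustrative sketch: client.0 and the fs name "cephfs" are assumed.
    ceph auth caps client.0 \
        mon 'allow r' \
        osd 'allow rw tag cephfs data=cephfs' \
        mds 'allow'

Unlike the old "allow rw pool=<pool>" form, which names a single pool, the
tag-based cap matches every OSD pool tagged with the cephfs application for
that filesystem. A data pool added later (e.g. with "ceph fs add_data_pool")
is therefore covered automatically, without regenerating the client's caps.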