From 330f0c7cba12412a4782947a76be158830f941c9 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Fri, 7 Aug 2020 13:26:47 -0700
Subject: [PATCH] qa: use real entity for config_get

So this changed in Octopus/Pacific to allow

    ceph config get mds foo

but it doesn't work in Nautilus, which expects a real entity:

https://pulpito.ceph.com/yuriw-2020-08-07_15:03:29-kcephfs-wip-yuri4-testing-2020-08-07-1350-nautilus-distro-basic-smithi/5308647/

2020-08-07T18:56:49.830 INFO:teuthology.orchestra.run.smithi106.stderr:2020-08-07 18:56:49.833 7fbe0d7fa700 1 -- 172.21.15.106:0/369883181 <== mon.1 v2:172.21.15.106:3300/0 7 ==== mon_command_ack([{"prefix": "config get", "who": "mds", "key": "mds_min_caps_per_client"}]=-22 unrecognized entity 'mds' v1) v1 ==== 131+0+0 (secure 0 0 0) 0x7fbdf8000940 con 0x7fbdf40064c0
2020-08-07T18:56:49.830 INFO:teuthology.orchestra.run.smithi106.stderr:Error EINVAL: unrecognized entity 'mds'

Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/test_client_limits.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index 0120dea46349f..5786cad384d62 100644
--- a/qa/tasks/cephfs/test_client_limits.py
+++ b/qa/tasks/cephfs/test_client_limits.py
@@ -44,9 +44,9 @@ class TestClientLimits(CephFSTestCase):
         self.config_set('mds', 'mds_recall_max_caps', int(open_files/2))
         self.config_set('mds', 'mds_recall_warning_threshold', open_files)
 
-        mds_min_caps_per_client = int(self.config_get('mds', "mds_min_caps_per_client"))
+        mds_min_caps_per_client = int(self.config_get('mds.a', "mds_min_caps_per_client"))
         self.config_set('mds', 'mds_min_caps_working_set', mds_min_caps_per_client)
-        mds_recall_warning_decay_rate = float(self.config_get('mds', "mds_recall_warning_decay_rate"))
+        mds_recall_warning_decay_rate = float(self.config_get('mds.a', "mds_recall_warning_decay_rate"))
         self.assertGreaterEqual(open_files, mds_min_caps_per_client)
 
         mount_a_client_id = self.mount_a.get_global_id()
@@ -122,8 +122,8 @@ class TestClientLimits(CephFSTestCase):
         self.config_set('mds', 'mds_recall_warning_threshold', open_files)
         self.config_set('mds', 'mds_min_caps_working_set', open_files*2)
 
-        mds_min_caps_per_client = int(self.config_get('mds', "mds_min_caps_per_client"))
-        mds_recall_warning_decay_rate = float(self.config_get('mds', "mds_recall_warning_decay_rate"))
+        mds_min_caps_per_client = int(self.config_get('mds.a', "mds_min_caps_per_client"))
+        mds_recall_warning_decay_rate = float(self.config_get('mds.a', "mds_recall_warning_decay_rate"))
         self.assertGreaterEqual(open_files, mds_min_caps_per_client)
 
         mount_a_client_id = self.mount_a.get_global_id()
@@ -285,7 +285,7 @@ class TestClientLimits(CephFSTestCase):
         That the MDS will not let a client sit above mds_max_caps_per_client caps.
         """
 
-        mds_min_caps_per_client = int(self.config_get('mds', "mds_min_caps_per_client"))
+        mds_min_caps_per_client = int(self.config_get('mds.a', "mds_min_caps_per_client"))
         mds_max_caps_per_client = 2*mds_min_caps_per_client
         self.config_set('mds', 'mds_max_caps_per_client', mds_max_caps_per_client)
 
--
2.39.5
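
Note (editor's addition, not part of the patch): the change only switches the queried entity from the bare "mds" section to a concrete daemon name ("mds.a"), because Nautilus' `ceph config get` rejects a section name. Below is a minimal standalone sketch of the same pattern outside the teuthology harness; it assumes a reachable cluster, the `ceph` CLI on PATH, and an MDS daemon named "a", none of which come from the patch itself.

    # Minimal sketch (assumptions: running cluster, `ceph` CLI available,
    # MDS daemon named "a"). Nautilus requires a real entity such as "mds.a";
    # the bare "mds" section is only accepted on Octopus/Pacific and later.
    import subprocess

    def config_get(entity, key):
        """Return the value of `key` for `entity` via `ceph config get`."""
        out = subprocess.check_output(["ceph", "config", "get", entity, key])
        return out.decode().strip()

    if __name__ == "__main__":
        # e.g. a real entity ("mds.a"), not the generic "mds" section
        print(config_get("mds.a", "mds_min_caps_per_client"))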