From 538834171fe4524b4bb7cffdcb08c5b13fe7689f Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Wed, 27 Sep 2017 10:39:01 -0700
Subject: [PATCH] mds: cap client recall to min caps per client

Fixes: http://tracker.ceph.com/issues/21575
Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/test_client_limits.py | 37 ++++++++++++++++++---------
 src/mds/Server.cc                     | 14 +++++-----
 2 files changed, 31 insertions(+), 20 deletions(-)

diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index b06d2a1d233..14ac2c40826 100644
--- a/qa/tasks/cephfs/test_client_limits.py
+++ b/qa/tasks/cephfs/test_client_limits.py
@@ -29,7 +29,7 @@ class TestClientLimits(CephFSTestCase):
     REQUIRE_KCLIENT_REMOTE = True
     CLIENTS_REQUIRED = 2
 
-    def _test_client_pin(self, use_subdir):
+    def _test_client_pin(self, use_subdir, open_files):
         """
         When a client pins an inode in its cache, for example because the file is held open,
         it should reject requests from the MDS to trim these caps.  The MDS should complain
@@ -39,13 +39,16 @@ class TestClientLimits(CephFSTestCase):
         :param use_subdir: whether to put test files in a subdir or use root
         """
 
-        cache_size = 100
-        open_files = 200
+        cache_size = open_files/2
 
         self.set_conf('mds', 'mds cache size', cache_size)
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
+        mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
+        self.assertTrue(open_files >= mds_min_caps_per_client)
+        mds_max_ratio_caps_per_client = float(self.fs.get_config("mds_max_ratio_caps_per_client"))
+
         mount_a_client_id = self.mount_a.get_global_id()
         path = "subdir/mount_a" if use_subdir else "mount_a"
         open_proc = self.mount_a.open_n_background(path, open_files)
@@ -62,8 +65,7 @@ class TestClientLimits(CephFSTestCase):
         # MDS should not be happy about that, as the client is failing to comply
         # with the SESSION_RECALL messages it is being sent
         mds_recall_state_timeout = float(self.fs.get_config("mds_recall_state_timeout"))
-        self.wait_for_health("MDS_CLIENT_RECALL",
-                mds_recall_state_timeout + 10)
+        self.wait_for_health("MDS_CLIENT_RECALL", mds_recall_state_timeout+10)
 
         # We can also test that the MDS health warning for oversized
         # cache is functioning as intended.
@@ -82,19 +84,30 @@ class TestClientLimits(CephFSTestCase):
 
         # The remaining caps should comply with the numbers sent from MDS in SESSION_RECALL message,
         # which depend on the caps outstanding, cache size and overall ratio
-        self.wait_until_equal(
-            lambda: self.get_session(mount_a_client_id)['num_caps'],
-            int(open_files * 0.2),
-            timeout=30,
-            reject_fn=lambda x: x < int(open_files*0.2))
+        def expected_caps():
+            num_caps = self.get_session(mount_a_client_id)['num_caps']
+            if num_caps < mds_min_caps_per_client:
+                raise
+            elif num_caps == mds_min_caps_per_client:
+                return True
+            elif num_caps == int((1.0-mds_max_ratio_caps_per_client)*(open_files+2)):
+                return True
+            else:
+                return False
+
+        self.wait_until_true(expected_caps, timeout=60)
 
     @needs_trimming
     def test_client_pin_root(self):
-        self._test_client_pin(False)
+        self._test_client_pin(False, 400)
 
     @needs_trimming
     def test_client_pin(self):
-        self._test_client_pin(True)
+        self._test_client_pin(True, 800)
+
+    @needs_trimming
+    def test_client_pin_mincaps(self):
+        self._test_client_pin(True, 200)
 
     def test_client_release_bug(self):
         """
diff --git a/src/mds/Server.cc b/src/mds/Server.cc
index 3c339210fc9..1b95991d5d6 100644
--- a/src/mds/Server.cc
+++ b/src/mds/Server.cc
@@ -1113,14 +1113,12 @@ void Server::recall_client_state(void)
 	     << ", leases " << session->leases.size()
 	     << dendl;
 
-    if (session->caps.size() > min_caps_per_client) {
-      uint64_t newlim = MIN((session->caps.size() * ratio), max_caps_per_client);
-      if (session->caps.size() > newlim) {
-          MClientSession *m = new MClientSession(CEPH_SESSION_RECALL_STATE);
-          m->head.max_caps = newlim;
-          mds->send_message_client(m, session);
-          session->notify_recall_sent(newlim);
-      }
+    uint64_t newlim = MAX(MIN((session->caps.size() * ratio), max_caps_per_client), min_caps_per_client);
+    if (session->caps.size() > newlim) {
+      MClientSession *m = new MClientSession(CEPH_SESSION_RECALL_STATE);
+      m->head.max_caps = newlim;
+      mds->send_message_client(m, session);
+      session->notify_recall_sent(newlim);
     }
   }
 }
-- 
2.39.5
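
Note (not part of the patch above): the Server.cc change replaces the old "only recall if above the minimum" guard with a clamp, so the recall target sent in CEPH_SESSION_RECALL_STATE never drops below min_caps_per_client and never exceeds max_caps_per_client. The sketch below illustrates that clamping under the assumption that the caller has already computed the recall ratio; recall_cap_limit and its parameters are illustrative stand-ins, not the actual Ceph API.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Sketch only: compute the cap limit a recall message would carry.
// num_caps stands in for session->caps.size(); ratio, min_caps_per_client
// and max_caps_per_client stand in for the config-derived values used by
// Server::recall_client_state().
static uint64_t recall_cap_limit(uint64_t num_caps, double ratio,
                                 uint64_t min_caps_per_client,
                                 uint64_t max_caps_per_client)
{
  // Scale the client's current cap count by the recall ratio, but never
  // request more than max_caps_per_client or fewer than min_caps_per_client.
  uint64_t scaled = static_cast<uint64_t>(num_caps * ratio);
  uint64_t newlim = std::min(scaled, max_caps_per_client);
  return std::max(newlim, min_caps_per_client);
}

int main()
{
  // Hypothetical numbers: 800 caps held, ratio 0.8, floor 100, ceiling 4096.
  // Prints 640; with only 100 caps held it would print 100 (the floor).
  std::printf("%llu\n",
              (unsigned long long)recall_cap_limit(800, 0.8, 100, 4096));
  return 0;
}

Because of the MAX(..., min_caps_per_client) floor, a client already at the minimum is not asked to shed further caps, which is what the new test_client_pin_mincaps case (the smallest open_files value, 200) appears intended to exercise.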