From 3321cc7b375a5e0ea1da4ab197ab447639ca4db3 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 24 Oct 2017 08:49:27 -0400 Subject: [PATCH] mds: fold mds_revoke_cap_timeout into mds_session_timeout Right now, we have two different timeout settings -- one for when the client is just not responding at all (mds_session_timeout), and one for when the client is otherwise responding but isn't returning caps in a timely fashion (mds_revoke_cap_timeout). The default settings on them are equivalent (60s), but only the mds_session_timeout is communicated via the mdsmap. The mds_revoke_cap_timeout is known only to the MDS. Neither timeout results in anything other than warnings in the current codebase. There is also a third setting (mds_session_autoclose) that is also communicated via the MDSmap. Exceeding that value (default of 300s) could eventually result in the client being blacklisted from the cluster. The code to implement that doesn't exist yet, however. The current codebase doesn't do any real sanity checking of these timeouts, so the potential for admins to get them wrong is rather high. It's hard to concoct a use-case where we'd want to warn about these events at different intervals. Simplify this by just removing the mds_revoke_cap_timeout setting, and replace its use in the code with the mds_session_timeout. With that, the client can at least determine when warnings might start showing up in the MDS' logs. Signed-off-by: Jeff Layton --- doc/cephfs/health-messages.rst | 2 +- qa/tasks/cephfs/test_client_limits.py | 6 +++--- src/common/legacy_config_opts.h | 3 +-- src/common/options.cc | 4 ---- src/mds/Locker.cc | 10 +++++----- 5 files changed, 10 insertions(+), 15 deletions(-) diff --git a/doc/cephfs/health-messages.rst b/doc/cephfs/health-messages.rst index adaafb842e2..057eea491ad 100644 --- a/doc/cephfs/health-messages.rst +++ b/doc/cephfs/health-messages.rst @@ -69,7 +69,7 @@ are like locks. 
Sometimes, for example when another client needs access, the MDS will request clients release their capabilities. If the client is unresponsive or buggy, it might fail to do so promptly or fail to do so at all. This message appears if a client has taken longer than -``mds_revoke_cap_timeout`` (default 60s) to comply. +``mds_session_timeout`` (default 60s) to comply. Message: "Client *name* failing to respond to cache pressure" Code: MDS_HEALTH_CLIENT_RECALL, MDS_HEALTH_CLIENT_RECALL_MANY diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py index cb5e3a46252..b06d5123d8f 100644 --- a/qa/tasks/cephfs/test_client_limits.py +++ b/qa/tasks/cephfs/test_client_limits.py @@ -134,10 +134,10 @@ class TestClientLimits(CephFSTestCase): # Client B tries to stat the file that client A created rproc = self.mount_b.write_background("file1") - # After mds_revoke_cap_timeout, we should see a health warning (extra lag from + # After mds_session_timeout, we should see a health warning (extra lag from # MDS beacon period) - mds_revoke_cap_timeout = float(self.fs.get_config("mds_revoke_cap_timeout")) - self.wait_for_health("MDS_CLIENT_LATE_RELEASE", mds_revoke_cap_timeout + 10) + mds_session_timeout = float(self.fs.get_config("mds_session_timeout")) + self.wait_for_health("MDS_CLIENT_LATE_RELEASE", mds_session_timeout + 10) # Client B should still be stuck self.assertFalse(rproc.finished) diff --git a/src/common/legacy_config_opts.h b/src/common/legacy_config_opts.h index c9429134ba1..c00681b475e 100644 --- a/src/common/legacy_config_opts.h +++ b/src/common/legacy_config_opts.h @@ -435,12 +435,11 @@ OPTION(mds_beacon_grace, OPT_FLOAT) OPTION(mds_enforce_unique_name, OPT_BOOL) OPTION(mds_blacklist_interval, OPT_FLOAT) // how long to blacklist failed nodes -OPTION(mds_session_timeout, OPT_FLOAT) // cap bits and leases time out if client idle +OPTION(mds_session_timeout, OPT_FLOAT) // cap bits and leases time out if client unresponsive or not returning 
its caps OPTION(mds_session_blacklist_on_timeout, OPT_BOOL) // whether to blacklist clients whose sessions are dropped due to timeout OPTION(mds_session_blacklist_on_evict, OPT_BOOL) // whether to blacklist clients whose sessions are dropped via admin commands OPTION(mds_sessionmap_keys_per_op, OPT_U32) // how many sessions should I try to load/store in a single OMAP operation? -OPTION(mds_revoke_cap_timeout, OPT_FLOAT) // detect clients which aren't revoking caps OPTION(mds_recall_state_timeout, OPT_FLOAT) // detect clients which aren't trimming caps OPTION(mds_freeze_tree_timeout, OPT_FLOAT) // detecting freeze tree deadlock OPTION(mds_session_autoclose, OPT_FLOAT) // autoclose idle session diff --git a/src/common/options.cc b/src/common/options.cc index 344518efc02..d3a83c1e1ef 100644 --- a/src/common/options.cc +++ b/src/common/options.cc @@ -5479,10 +5479,6 @@ std::vector