From def177ff3ba32a9441cb3bfb05a8dd993b27d994 Mon Sep 17 00:00:00 2001
From: Xiubo Li
Date: Sun, 18 Oct 2020 21:57:09 -0400
Subject: [PATCH] qa/tasks: switch to _kill_background() helper to terminate
 the daemons

Fixes: https://tracker.ceph.com/issues/46883
Signed-off-by: Xiubo Li
---
 qa/tasks/cephfs/test_client_limits.py   |  8 +-------
 qa/tasks/cephfs/test_client_recovery.py | 23 ++++-------------------
 qa/tasks/cephfs/test_misc.py            |  9 ++-------
 3 files changed, 7 insertions(+), 33 deletions(-)

diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index 51c3048bd63a8..6b5d1a2791fc8 100644
--- a/qa/tasks/cephfs/test_client_limits.py
+++ b/qa/tasks/cephfs/test_client_limits.py
@@ -6,7 +6,6 @@ exceed the limits of how many caps/inodes they should hold.
 
 import logging
 from textwrap import dedent
-from teuthology.orchestra.run import CommandFailedError
 from tasks.ceph_test_case import TestTimeoutError
 from tasks.cephfs.cephfs_test_case import CephFSTestCase, needs_trimming
 from tasks.cephfs.fuse_mount import FuseMount
@@ -77,12 +76,7 @@ class TestClientLimits(CephFSTestCase):
         # When the client closes the files, it should retain only as many caps as allowed
         # under the SESSION_RECALL policy
         log.info("Terminating process holding files open")
-        open_proc.stdin.close()
-        try:
-            open_proc.wait()
-        except CommandFailedError:
-            # We killed it, so it raises an error
-            pass
+        self.mount_a._kill_background(open_proc)
 
         # The remaining caps should comply with the numbers sent from MDS in SESSION_RECALL message,
         # which depend on the caps outstanding, cache size and overall ratio
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index 0aeb36a07d465..65ae312d4ec70 100644
--- a/qa/tasks/cephfs/test_client_recovery.py
+++ b/qa/tasks/cephfs/test_client_recovery.py
@@ -11,7 +11,7 @@
 import re
 import os
 from teuthology.orchestra import run
-from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
+from teuthology.orchestra.run import CommandFailedError
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.packaging import get_package_version
@@ -263,12 +263,7 @@ class TestClientRecovery(CephFSTestCase):
                 cap_waited, session_timeout
             ))
 
-            cap_holder.stdin.close()
-            try:
-                cap_holder.wait()
-            except (CommandFailedError, ConnectionLostError):
-                # We killed it (and possibly its node), so it raises an error
-                pass
+            self.mount_a._kill_background(cap_holder)
         finally:
             # teardown() doesn't quite handle this case cleanly, so help it out
             self.mount_a.resume_netns()
@@ -322,12 +317,7 @@ class TestClientRecovery(CephFSTestCase):
                 cap_waited, session_timeout / 2.0
             ))
 
-            cap_holder.stdin.close()
-            try:
-                cap_holder.wait()
-            except (CommandFailedError, ConnectionLostError):
-                # We killed it (and possibly its node), so it raises an error
-                pass
+            self.mount_a._kill_background(cap_holder)
         finally:
             self.mount_a.resume_netns()
 
@@ -396,12 +386,7 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_b.check_filelock(do_flock=flockable)
 
         # Tear down the background process
-        lock_holder.stdin.close()
-        try:
-            lock_holder.wait()
-        except (CommandFailedError, ConnectionLostError):
-            # We killed it, so it raises an error
-            pass
+        self.mount_a._kill_background(lock_holder)
 
     def test_filelock_eviction(self):
         """
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index 8ae64a36dbe09..6a295bbfdf1ac 100644
--- a/qa/tasks/cephfs/test_misc.py
+++ b/qa/tasks/cephfs/test_misc.py
@@ -1,7 +1,7 @@
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
-from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
+from teuthology.orchestra.run import CommandFailedError
 import errno
 import time
 import json
@@ -166,12 +166,7 @@ class TestMisc(CephFSTestCase):
             ))
 
             self.assertTrue(self.mount_a.is_blocklisted())
-            cap_holder.stdin.close()
-            try:
-                cap_holder.wait()
-            except (CommandFailedError, ConnectionLostError):
-                # We killed it (and possibly its node), so it raises an error
-                pass
+            self.mount_a._kill_background(cap_holder)
         finally:
             self.mount_a.resume_netns()
 
-- 
2.39.5
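
Note (editorial aside, not part of the patch and ignored by git am): the patch
relies on a _kill_background() helper on the mount object, whose implementation
is not shown here. Based on the open-coded blocks being removed, it presumably
looks roughly like the sketch below; the exact location (likely the mount class
in qa/tasks/cephfs/mount.py) and signature are assumptions and should be
verified against the tree the patch is applied to.

    from teuthology.orchestra.run import CommandFailedError, ConnectionLostError

    class CephFSMount(object):
        # ... existing mount class ...

        def _kill_background(self, p):
            # Close stdin so the background process (a cap/lock holder blocked
            # reading from stdin) exits, then reap it. The process is being
            # killed on purpose, so a command failure or a lost connection is
            # expected and deliberately ignored.
            p.stdin.close()
            try:
                p.wait()
            except (CommandFailedError, ConnectionLostError):
                pass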