git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/cephfs: change default timeout from 900 secs to 300 44958/head
author: Rishabh Dave <ridave@redhat.com>
Wed, 9 Feb 2022 18:16:27 +0000 (23:46 +0530)
committer: Rishabh Dave <ridave@redhat.com>
Tue, 26 Apr 2022 13:04:42 +0000 (18:34 +0530)
15 minutes is unnecessarily large as the default timeout for a command.
Not having to wait that long when a command hangs or crashes will shorten
teuthology's testing queue and save individual developers' time when
running tests locally.

Lines modified for this purpose are also updated to follow the style
guideline, specifically wrapping at 80 characters.

Fixes: https://tracker.ceph.com/issues/54236
Signed-off-by: Rishabh Dave <ridave@redhat.com>
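
For orientation, a minimal sketch (not part of the commit) of the pattern the
diff below introduces: a single module-level UMOUNT_TIMEOUT constant in
qa/tasks/cephfs/mount.py that the FUSE and kernel mount helpers import instead
of hard-coding 900 (15*60); the class body here is abbreviated.

    # qa/tasks/cephfs/mount.py (sketch)
    UMOUNT_TIMEOUT = 300  # seconds; replaces the old 900-second default


    class CephFSMount(object):
        def umount_wait(self, force=False, require_clean=False,
                        timeout=UMOUNT_TIMEOUT):
            # Overridden by FuseMount and KernelMount; the default timeout
            # now comes from the shared constant above.
            raise NotImplementedError()

    # fuse_mount.py and kernel_mount.py then import the shared value:
    # from tasks.cephfs.mount import CephFSMount, UMOUNT_TIMEOUT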
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/fuse_mount.py
qa/tasks/cephfs/kernel_mount.py
qa/tasks/cephfs/mount.py

index 65af4adaa2f0154f477e699c28b4b383f445e630..5e02ea07d05f62052a0edbea017cee0861fda49b 100644 (file)
@@ -250,7 +250,7 @@ class CephCluster(object):
 
     def json_asok(self, command, service_type, service_id, timeout=None):
         if timeout is None:
-            timeout = 15*60
+            timeout = 300
         command.insert(0, '--format=json')
         proc = self.mon_manager.admin_socket(service_type, service_id, command, timeout=timeout)
         response_data = proc.stdout.getvalue().strip()
index 6c82379e26ed885f0d7ca63d21ac9a674ed05190..d774580ec149433d6e3b02089c9264e685b4c14a 100644 (file)
@@ -10,7 +10,7 @@ from teuthology.contextutil import safe_while
 from teuthology.orchestra import run
 from teuthology.exceptions import CommandFailedError
 from tasks.ceph_manager import get_valgrind_args
-from tasks.cephfs.mount import CephFSMount
+from tasks.cephfs.mount import CephFSMount, UMOUNT_TIMEOUT
 
 log = logging.getLogger(__name__)
 
@@ -146,7 +146,7 @@ class FuseMount(CephFSMount):
         try:
             ls_str = self.client_remote.sh("ls " + conn_dir,
                                            stdout=StringIO(),
-                                           timeout=(15*60)).strip()
+                                           timeout=300).strip()
         except CommandFailedError:
             return []
 
@@ -239,7 +239,7 @@ class FuseMount(CephFSMount):
             stdout=StringIO(),
             stderr=StringIO(),
             wait=False,
-            timeout=(15*60)
+            timeout=300
         )
         try:
             proc.wait()
@@ -286,7 +286,7 @@ class FuseMount(CephFSMount):
                 stderr = StringIO()
                 self.client_remote.run(args=['sudo', 'chmod', '1777',
                                              self.hostfs_mntpt],
-                                       timeout=(15*60),
+                                       timeout=300,
                                        stderr=stderr, omit_sudo=False)
                 break
             except run.CommandFailedError:
@@ -299,7 +299,9 @@ class FuseMount(CephFSMount):
                     raise
 
     def _mountpoint_exists(self):
-        return self.client_remote.run(args=["ls", "-d", self.hostfs_mntpt], check_status=False, timeout=(15*60)).exitstatus == 0
+        return self.client_remote.run(args=["ls", "-d", self.hostfs_mntpt],
+                                      check_status=False,
+                                      timeout=300).exitstatus == 0
 
     def umount(self, cleanup=True):
         """
@@ -314,10 +316,9 @@ class FuseMount(CephFSMount):
         try:
             log.info('Running fusermount -u on {name}...'.format(name=self.client_remote.name))
             stderr = StringIO()
-            self.client_remote.run(args=['sudo', 'fusermount', '-u',
-                                         self.hostfs_mntpt],
-                                   stderr=stderr,
-                                   timeout=(30*60), omit_sudo=False)
+            self.client_remote.run(
+                args=['sudo', 'fusermount', '-u', self.hostfs_mntpt],
+                stderr=stderr, timeout=UMOUNT_TIMEOUT, omit_sudo=False)
         except run.CommandFailedError:
             if "mountpoint not found" in stderr.getvalue():
                 # This happens if the mount directory doesn't exist
@@ -331,7 +332,7 @@ class FuseMount(CephFSMount):
                 self.client_remote.run(
                     args=['sudo', run.Raw('PATH=/usr/sbin:$PATH'), 'lsof',
                     run.Raw(';'), 'ps', 'auxf'],
-                    timeout=(60*15), omit_sudo=False)
+                    timeout=UMOUNT_TIMEOUT, omit_sudo=False)
 
                 # abort the fuse mount, killing all hung processes
                 if self._fuse_conn:
@@ -346,9 +347,9 @@ class FuseMount(CephFSMount):
                 stderr = StringIO()
                 # make sure its unmounted
                 try:
-                    self.client_remote.run(args=['sudo', 'umount', '-l', '-f',
-                                                 self.hostfs_mntpt],
-                                           stderr=stderr, timeout=(60*15), omit_sudo=False)
+                    self.client_remote.run(
+                        args=['sudo', 'umount', '-l', '-f', self.hostfs_mntpt],
+                        stderr=stderr, timeout=UMOUNT_TIMEOUT, omit_sudo=False)
                 except CommandFailedError:
                     if self.is_mounted():
                         raise
@@ -361,7 +362,8 @@ class FuseMount(CephFSMount):
         if cleanup:
             self.cleanup()
 
-    def umount_wait(self, force=False, require_clean=False, timeout=900):
+    def umount_wait(self, force=False, require_clean=False,
+                    timeout=UMOUNT_TIMEOUT):
         """
         :param force: Complete cleanly even if the MDS is offline
         """
@@ -475,7 +477,7 @@ print(_find_admin_socket("{client_name}"))
                     p = self.client_remote.run(args=
                         ['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
                         stdout=StringIO(), stderr=StringIO(), wait=False,
-                        timeout=(15*60))
+                        timeout=300)
                     p.wait()
                     break
                 except CommandFailedError:
index 9eef819633d1f6e34872abecd64120be81229b53..8d11b01028cd5597c465e252ea7a42f9ecff04d2 100644 (file)
@@ -10,12 +10,11 @@ from teuthology.exceptions import CommandFailedError
 from teuthology.orchestra import run
 from teuthology.contextutil import MaxWhileTries
 
-from tasks.cephfs.mount import CephFSMount
+from tasks.cephfs.mount import CephFSMount, UMOUNT_TIMEOUT
 
 log = logging.getLogger(__name__)
 
 
-UMOUNT_TIMEOUT = 300
 # internal metadata directory
 DEBUGFS_META_DIR = 'meta'
 
@@ -70,7 +69,7 @@ class KernelMount(CephFSMount):
         mountcmd_stdout, mountcmd_stderr = StringIO(), StringIO()
 
         try:
-            self.client_remote.run(args=mount_cmd, timeout=(30*60),
+            self.client_remote.run(args=mount_cmd, timeout=300,
                                    stdout=mountcmd_stdout,
                                    stderr=mountcmd_stderr, omit_sudo=False)
         except CommandFailedError as e:
@@ -136,13 +135,13 @@ class KernelMount(CephFSMount):
             cmd=['sudo', 'umount', self.hostfs_mntpt]
             if force:
                 cmd.append('-f')
-            self.client_remote.run(args=cmd, timeout=(15*60), omit_sudo=False)
+            self.client_remote.run(args=cmd, timeout=UMOUNT_TIMEOUT, omit_sudo=False)
         except Exception as e:
             log.debug('Killing processes on client.{id}...'.format(id=self.client_id))
             self.client_remote.run(
                 args=['sudo', run.Raw('PATH=/usr/sbin:$PATH'), 'lsof',
                       run.Raw(';'), 'ps', 'auxf'],
-                timeout=(15*60), omit_sudo=False)
+                timeout=UMOUNT_TIMEOUT, omit_sudo=False)
             raise e
 
         if self.dynamic_debug:
@@ -155,7 +154,8 @@ class KernelMount(CephFSMount):
         self.mounted = False
         self.cleanup()
 
-    def umount_wait(self, force=False, require_clean=False, timeout=900):
+    def umount_wait(self, force=False, require_clean=False,
+                    timeout=UMOUNT_TIMEOUT):
         """
         Unlike the fuse client, the kernel client's umount is immediate
         """
@@ -172,8 +172,8 @@ class KernelMount(CephFSMount):
             # force delete the netns and umount
             log.debug('Force/lazy unmounting on client.{id}...'.format(id=self.client_id))
             self.client_remote.run(args=['sudo', 'umount', '-f', '-l',
-                                         self.mountpoint],
-                                   timeout=(15*60), omit_sudo=False)
+                                         self.mountpoint], timeout=timeout,
+                                   omit_sudo=False)
 
             self.mounted = False
             self.cleanup()
index 41ebcba8587f2c19658fd9afd4173c0f9dbf08a0..9674bea0d988592eb008c6d7307a03615eb77eb6 100644 (file)
@@ -21,6 +21,10 @@ from tasks.cephfs.filesystem import Filesystem
 
 log = logging.getLogger(__name__)
 
+
+UMOUNT_TIMEOUT = 300
+
+
 class CephFSMount(object):
     def __init__(self, ctx, test_dir, client_id, client_remote,
                  client_keyring_path=None, hostfs_mntpt=None,
@@ -452,7 +456,8 @@ class CephFSMount(object):
     def umount(self):
         raise NotImplementedError()
 
-    def umount_wait(self, force=False, require_clean=False, timeout=None):
+    def umount_wait(self, force=False, require_clean=False,
+                    timeout=UMOUNT_TIMEOUT):
         """
 
         :param force: Expect that the mount will not shutdown cleanly: kill
@@ -695,7 +700,7 @@ class CephFSMount(object):
         p.wait()
         return p.stdout.getvalue().strip()
 
-    def run_shell(self, args, timeout=900, **kwargs):
+    def run_shell(self, args, timeout=300, **kwargs):
         args = args.split() if isinstance(args, str) else args
         kwargs.pop('omit_sudo', False)
         sudo = kwargs.pop('sudo', False)