qa/tasks/cephfs: get rid of StringIO for py3
author    Kyr Shatskyy <kyrylo.shatskyy@suse.com>
          Mon, 16 Dec 2019 01:20:11 +0000 (02:20 +0100)
committer Kefu Chai <kchai@redhat.com>
          Tue, 2 Jun 2020 02:32:22 +0000 (10:32 +0800)
Use io.BytesIO and six.ensure_str for py3 compatibility

Signed-off-by: Kyr Shatskyy <kyrylo.shatskyy@suse.com>
(cherry picked from commit 9f6c764f10f99cbf5ca164c2681b001d29505d9d)

Conflicts:
	qa/tasks/cephfs/fuse_mount.py
	qa/tasks/cephfs/mount.py
	qa/tasks/cephfs/test_acls.py
	qa/tasks/cephfs/test_cephfs_shell.py
	qa/tasks/cephfs/test_journal_migration.py
	qa/tasks/cephfs/xfstests_dev.py: trivial resolution
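
For reference, a minimal, self-contained sketch (not taken from the diff itself) of the py3 pattern this commit applies: remote command output arrives as bytes under Python 3, so StringIO capture buffers become io.BytesIO and the captured value is normalized back to text with six.ensure_str():

    # Sketch only: the BytesIO + six.ensure_str pattern used throughout the hunks below.
    from io import BytesIO

    import six

    stderr = BytesIO()
    stderr.write(b"fuse: bad mount point: Transport endpoint is not connected\n")

    # Normalize the captured bytes to str before substring checks ...
    error = six.ensure_str(stderr.getvalue())
    assert "endpoint is not connected" in error

    # ... or compare against a bytes literal without decoding at all.
    assert b"endpoint is not connected" in stderr.getvalue()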

qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/fuse_mount.py
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_cephfs_shell.py
qa/tasks/cephfs/test_exports.py
qa/tasks/cephfs/test_journal_migration.py
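
Several hunks below also replace client_remote.run(..., stdout=StringIO()) with teuthology's Remote.sh(), which runs the command and returns its stdout as a str. A hedged sketch of the two call styles; the helper name kernel_release and the remote argument (a connected teuthology Remote) are illustrative, not part of this commit:

    # Sketch only: old vs. new way of reading a command's stdout from a remote.
    from io import BytesIO

    def kernel_release(remote):
        # Old pattern (removed below): capture stdout into a buffer, then decode it.
        proc = remote.run(args=['uname', '-r'], stdout=BytesIO())
        old_style = proc.stdout.getvalue().decode().strip()

        # New pattern (used below): Remote.sh() waits and returns stdout as a str.
        new_style = remote.sh('uname -r').strip()

        assert old_style == new_style
        return new_style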

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 1884d78f759349b35806cdc71e876563f023c6f2..f5aef8d747d2eb8a5d13fccd29194204a2804770 100644
@@ -5,7 +5,6 @@ from unittest import case
 from tasks.ceph_test_case import CephTestCase
 import os
 import re
-from StringIO import StringIO
 
 from tasks.cephfs.fuse_mount import FuseMount
 
@@ -261,21 +260,21 @@ class CephFSTestCase(CephTestCase):
     def delete_mds_coredump(self, daemon_id):
         # delete coredump file, otherwise teuthology.internal.coredump will
         # catch it later and treat it as a failure.
-        p = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
-            "sudo", "sysctl", "-n", "kernel.core_pattern"], stdout=StringIO())
-        core_dir = os.path.dirname(p.stdout.getvalue().strip())
+        core_pattern = self.mds_cluster.mds_daemons[daemon_id].remote.sh(
+            "sudo sysctl -n kernel.core_pattern")
+        core_dir = os.path.dirname(core_pattern.strip())
         if core_dir:  # Non-default core_pattern with a directory in it
             # We have seen a core_pattern that looks like it's from teuthology's coredump
             # task, so proceed to clear out the core file
             log.info("Clearing core from directory: {0}".format(core_dir))
 
             # Verify that we see the expected single coredump
-            ls_proc = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
+            ls_output = self.mds_cluster.mds_daemons[daemon_id].remote.sh([
                 "cd", core_dir, run.Raw('&&'),
                 "sudo", "ls", run.Raw('|'), "sudo", "xargs", "file"
-            ], stdout=StringIO())
+            ])
             cores = [l.partition(":")[0]
-                     for l in ls_proc.stdout.getvalue().strip().split("\n")
+                     for l in ls_output.strip().split("\n")
                      if re.match(r'.*ceph-mds.* -i +{0}'.format(daemon_id), l)]
 
             log.info("Enumerated cores: {0}".format(cores))
diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py
index d0665270aba25fe2c8059b58e13625c5456135d1..56a39790759c2579599c788921f11b383d5f483f 100644
@@ -1,14 +1,17 @@
-from StringIO import StringIO
+from io import BytesIO
 import json
 import time
 import logging
+
+import six
+
 from textwrap import dedent
 
 from teuthology import misc
 from teuthology.contextutil import MaxWhileTries
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError
-from .mount import CephFSMount
+from tasks.cephfs.mount import CephFSMount
 
 log = logging.getLogger(__name__)
 
@@ -96,16 +99,12 @@ class FuseMount(CephFSMount):
                 check_status=False,
                 timeout=(15*60)
             )
-            p = self.client_remote.run(
-                args=["ls", "/sys/fs/fuse/connections"],
-                stdout=StringIO(),
-                check_status=False,
-                timeout=(15*60)
-            )
-            if p.exitstatus != 0:
+            try:
+                ls_str = self.client_remote.sh("ls /sys/fs/fuse/connections",
+                                               timeout=(15*60)).strip()
+            except CommandFailedError:
                 return []
 
-            ls_str = p.stdout.getvalue().strip()
             if ls_str:
                 return [int(n) for n in ls_str.split("\n")]
             else:
@@ -186,16 +185,17 @@ class FuseMount(CephFSMount):
                 '--',
                 self.mountpoint,
             ],
-            stdout=StringIO(),
-            stderr=StringIO(),
+            stdout=BytesIO(),
+            stderr=BytesIO(),
             wait=False,
             timeout=(15*60)
         )
         try:
             proc.wait()
         except CommandFailedError:
-            if ("endpoint is not connected" in proc.stderr.getvalue()
-            or "Software caused connection abort" in proc.stderr.getvalue()):
+            error = six.ensure_str(proc.stderr.getvalue())
+            if ("endpoint is not connected" in error
+            or "Software caused connection abort" in error):
                 # This happens if fuse is killed without unmount
                 log.warn("Found stale mount point at {0}".format(self.mountpoint))
                 return True
@@ -204,7 +204,7 @@ class FuseMount(CephFSMount):
                 log.info('mount point does not exist: %s', self.mountpoint)
                 return False
 
-        fstype = proc.stdout.getvalue().rstrip('\n')
+        fstype = six.ensure_str(proc.stdout.getvalue()).rstrip('\n')
         if fstype == 'fuseblk':
             log.info('ceph-fuse is mounted on %s', self.mountpoint)
             return True
@@ -229,11 +229,11 @@ class FuseMount(CephFSMount):
         # Now that we're mounted, set permissions so that the rest of the test will have
         # unrestricted access to the filesystem mount.
         try:
-            stderr = StringIO()
+            stderr = BytesIO()
             self.client_remote.run(args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(15*60), stderr=stderr)
         except run.CommandFailedError:
             stderr = stderr.getvalue()
-            if "Read-only file system".lower() in stderr.lower():
+            if b"Read-only file system".lower() in stderr.lower():
                 pass
             else:
                 raise
@@ -275,7 +275,7 @@ class FuseMount(CephFSMount):
                 """).format(self._fuse_conn))
                 self._fuse_conn = None
 
-            stderr = StringIO()
+            stderr = BytesIO()
             try:
                 # make sure its unmounted
                 self.client_remote.run(
@@ -337,7 +337,7 @@ class FuseMount(CephFSMount):
 
         Prerequisite: the client is not mounted.
         """
-        stderr = StringIO()
+        stderr = BytesIO()
         try:
             self.client_remote.run(
                 args=[
@@ -349,7 +349,7 @@ class FuseMount(CephFSMount):
                 timeout=(60*5)
             )
         except CommandFailedError:
-            if "No such file or directory" in stderr.getvalue():
+            if b"No such file or directory" in stderr.getvalue():
                 pass
             else:
                 raise
@@ -433,17 +433,16 @@ print find_socket("{client_name}")
             client_name="client.{0}".format(self.client_id))
 
         # Find the admin socket
-        p = self.client_remote.run(args=[
-            'sudo', 'python2', '-c', pyscript
-        ], stdout=StringIO(), timeout=(15*60))
-        asok_path = p.stdout.getvalue().strip()
+        asok_path = self.client_remote.sh([
+            'sudo', 'python3', '-c', pyscript
+        ], timeout=(15*60)).strip()
         log.info("Found client admin socket at {0}".format(asok_path))
 
         # Query client ID from admin socket
-        p = self.client_remote.run(
-            args=['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
-            stdout=StringIO(), timeout=(15*60))
-        return json.loads(p.stdout.getvalue())
+        json_data = self.client_remote.sh(
+            ['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
+            timeout=(15*60))
+        return json.loads(json_data)
 
     def get_global_id(self):
         """
diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
index 625d3aec44599b1326285bb181b31bb2bcf5de6e..bbaf0e510d9493eace949f4adbd37d829164628b 100644
@@ -1,11 +1,12 @@
 from contextlib import contextmanager
+from io import BytesIO
 import json
 import logging
 import datetime
+import six
 import time
 from textwrap import dedent
 import os
-from StringIO import StringIO
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
 from tasks.cephfs.filesystem import Filesystem
@@ -148,17 +149,17 @@ class CephFSMount(object):
         return self.client_remote.run(
                args=['sudo', 'adjust-ulimits', 'daemon-helper', 'kill',
                      py_version, '-c', pyscript], wait=False, stdin=run.PIPE,
-               stdout=StringIO())
+               stdout=BytesIO())
 
     def run_python(self, pyscript, py_version='python'):
         p = self._run_python(pyscript, py_version)
         p.wait()
-        return p.stdout.getvalue().strip()
+        return six.ensure_str(p.stdout.getvalue().strip())
 
     def run_shell(self, args, wait=True, check_status=True, omit_sudo=True):
         args = ["cd", self.mountpoint, run.Raw('&&'), "sudo"] + args
-        return self.client_remote.run(args=args, stdout=StringIO(),
-                                      stderr=StringIO(), wait=wait,
+        return self.client_remote.run(args=args, stdout=BytesIO(),
+                                      stderr=BytesIO(), wait=wait,
                                       check_status=check_status,
                                       omit_sudo=omit_sudo)
 
diff --git a/qa/tasks/cephfs/test_cephfs_shell.py b/qa/tasks/cephfs/test_cephfs_shell.py
index 8cf8474e2699d559dbbcbb88e3b0640f90a5d04f..8ddbaedb35260414311bda2668a22a34399a4009 100644
@@ -1,7 +1,7 @@
 import os
 import crypt
 import logging
-from StringIO import StringIO
+from six import StringIO
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 
 log = logging.getLogger(__name__)
@@ -16,8 +16,7 @@ class TestCephFSShell(CephFSTestCase):
             args.extend(opts)
         args.extend(("--", cmd))
         log.info("Running command: {}".format(" ".join(args)))
-        status = self.mount_a.client_remote.run(args=args,
-                                                stdout=StringIO(),
+        status = self.mount_a.client_remote.run(args=args, stdout=StringIO(),
                                                 stdin=stdin)
         return status.stdout.getvalue().strip()
 
diff --git a/qa/tasks/cephfs/test_exports.py b/qa/tasks/cephfs/test_exports.py
index 7d2a3425a894b74933f3595748d7e9ff34613ae1..70e55060684161a8055fc3682e97a2366bbbfa55 100644
@@ -1,6 +1,5 @@
 import logging
 import time
-from StringIO import StringIO
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 
@@ -129,7 +128,7 @@ class TestExports(CephFSTestCase):
             self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])
 
         if not isinstance(self.mount_a, FuseMount):
-            p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
+            p = self.mount_a.client_remote.sh('uname -r', wait=True)
             dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
             log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
-            if str(p.stdout.getvalue()) < "5" and not(dir_pin):
+            if str(p) < "5" and not(dir_pin):
diff --git a/qa/tasks/cephfs/test_journal_migration.py b/qa/tasks/cephfs/test_journal_migration.py
index 9d1d399caa8ed67d4b56d474a36887b0f9c804a2..8863b371f39662937144d9cf03ae17365d2ac160 100644
@@ -1,5 +1,4 @@
 
-from StringIO import StringIO
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from tasks.workunit import task as workunit
 
@@ -76,14 +75,12 @@ class TestJournalMigration(CephFSTestCase):
 
         self.fs.journal_tool(["event", "get", "json",
                               "--path", "/tmp/journal.json"], 0)
-        p = self.fs.tool_remote.run(
-            args=[
+        p = self.fs.tool_remote.sh([
                 "python3",
                 "-c",
                 "import json; print(len(json.load(open('/tmp/journal.json'))))"
-            ],
-            stdout=StringIO())
-        event_count = int(p.stdout.getvalue().strip())
+            ])
+        event_count = int(p.strip())
         if event_count < 1000:
             # Approximate value of "lots", expected from having run fsstress
             raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count))