from tasks.ceph_test_case import CephTestCase
import os
import re
-from StringIO import StringIO
from tasks.cephfs.fuse_mount import FuseMount
def delete_mds_coredump(self, daemon_id):
# delete coredump file, otherwise teuthology.internal.coredump will
# catch it later and treat it as a failure.
- p = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
- "sudo", "sysctl", "-n", "kernel.core_pattern"], stdout=StringIO())
- core_dir = os.path.dirname(p.stdout.getvalue().strip())
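+ # remote.sh() runs the command, waits for it, and returns its stdout as a
+ # str, so no StringIO buffer is needed.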
+ core_pattern = self.mds_cluster.mds_daemons[daemon_id].remote.sh(
+ "sudo sysctl -n kernel.core_pattern")
+ core_dir = os.path.dirname(core_pattern.strip())
if core_dir: # Non-default core_pattern with a directory in it
# We have seen a core_pattern that looks like it's from teuthology's coredump
# task, so proceed to clear out the core file
log.info("Clearing core from directory: {0}".format(core_dir))
# Verify that we see the expected single coredump
- ls_proc = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
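+ # .sh() captures the piped "ls | xargs file" output as a str; each line is
+ # "<core name>: <file type>", which the comprehension below parses.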
+ ls_output = self.mds_cluster.mds_daemons[daemon_id].remote.sh([
"cd", core_dir, run.Raw('&&'),
"sudo", "ls", run.Raw('|'), "sudo", "xargs", "file"
- ], stdout=StringIO())
+ ])
cores = [l.partition(":")[0]
- for l in ls_proc.stdout.getvalue().strip().split("\n")
+ for l in ls_output.strip().split("\n")
if re.match(r'.*ceph-mds.* -i +{0}'.format(daemon_id), l)]
log.info("Enumerated cores: {0}".format(cores))
-from StringIO import StringIO
+from io import BytesIO
import json
import time
import logging
+
+import six
+
from textwrap import dedent
from teuthology import misc
from teuthology.contextutil import MaxWhileTries
from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError
-from .mount import CephFSMount
+from tasks.cephfs.mount import CephFSMount
log = logging.getLogger(__name__)
check_status=False,
timeout=(15*60)
)
- p = self.client_remote.run(
- args=["ls", "/sys/fs/fuse/connections"],
- stdout=StringIO(),
- check_status=False,
- timeout=(15*60)
- )
- if p.exitstatus != 0:
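+ # .sh() raises CommandFailedError on a non-zero exit instead of relying on
+ # check_status=False, so a failed listing means there are no fuse connections.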
+ try:
+ ls_str = self.client_remote.sh("ls /sys/fs/fuse/connections",
+ timeout=(15*60)).strip()
+ except CommandFailedError:
return []
- ls_str = p.stdout.getvalue().strip()
if ls_str:
return [int(n) for n in ls_str.split("\n")]
else:
'--',
self.mountpoint,
],
- stdout=StringIO(),
- stderr=StringIO(),
+ stdout=BytesIO(),
+ stderr=BytesIO(),
wait=False,
timeout=(15*60)
)
try:
proc.wait()
except CommandFailedError:
- if ("endpoint is not connected" in proc.stderr.getvalue()
- or "Software caused connection abort" in proc.stderr.getvalue()):
+ error = six.ensure_str(proc.stderr.getvalue())
+ if ("endpoint is not connected" in error
+ or "Software caused connection abort" in error):
# This happens if fuse is killed without unmounting
log.warn("Found stale mount point at {0}".format(self.mountpoint))
return True
log.info('mount point does not exist: %s', self.mountpoint)
return False
- fstype = proc.stdout.getvalue().rstrip('\n')
+ fstype = six.ensure_str(proc.stdout.getvalue()).rstrip('\n')
if fstype == 'fuseblk':
log.info('ceph-fuse is mounted on %s', self.mountpoint)
return True
# Now that we're mounted, set permissions so that the rest of the test will have
# unrestricted access to the filesystem mount.
try:
- stderr = StringIO()
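+ # The stderr buffer now holds bytes, hence the b"..." literal in the
+ # read-only check below.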
+ stderr = BytesIO()
self.client_remote.run(args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(15*60), stderr=stderr)
except run.CommandFailedError:
stderr = stderr.getvalue()
- if "Read-only file system".lower() in stderr.lower():
+ if b"Read-only file system".lower() in stderr.lower():
pass
else:
raise
""").format(self._fuse_conn))
self._fuse_conn = None
- stderr = StringIO()
+ stderr = BytesIO()
try:
# make sure it's unmounted
self.client_remote.run(
Prerequisite: the client is not mounted.
"""
- stderr = StringIO()
+ stderr = BytesIO()
try:
self.client_remote.run(
args=[
timeout=(60*5)
)
except CommandFailedError:
- if "No such file or directory" in stderr.getvalue():
+ if b"No such file or directory" in stderr.getvalue():
pass
else:
raise
client_name="client.{0}".format(self.client_id))
# Find the admin socket
- p = self.client_remote.run(args=[
- 'sudo', 'python2', '-c', pyscript
- ], stdout=StringIO(), timeout=(15*60))
- asok_path = p.stdout.getvalue().strip()
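+ # The discovery script runs under python3 now; .sh() hands back the printed
+ # socket path as a str.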
+ asok_path = self.client_remote.sh([
+ 'sudo', 'python3', '-c', pyscript
+ ], timeout=(15*60)).strip()
log.info("Found client admin socket at {0}".format(asok_path))
# Query client ID from admin socket
- p = self.client_remote.run(
- args=['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
- stdout=StringIO(), timeout=(15*60))
- return json.loads(p.stdout.getvalue())
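+ # .sh() returns the admin socket's JSON reply as a str, which json.loads()
+ # accepts directly.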
+ json_data = self.client_remote.sh(
+ ['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
+ timeout=(15*60))
+ return json.loads(json_data)
def get_global_id(self):
"""
from contextlib import contextmanager
+from io import BytesIO
import json
import logging
import datetime
+import six
import time
from textwrap import dedent
import os
-from StringIO import StringIO
from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
from tasks.cephfs.filesystem import Filesystem
return self.client_remote.run(
args=['sudo', 'adjust-ulimits', 'daemon-helper', 'kill',
py_version, '-c', pyscript], wait=False, stdin=run.PIPE,
- stdout=StringIO())
+ stdout=BytesIO())
def run_python(self, pyscript, py_version='python'):
p = self._run_python(pyscript, py_version)
p.wait()
- return p.stdout.getvalue().strip()
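+ # stdout is a BytesIO now, so decode the script's output before returning it.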
+ return six.ensure_str(p.stdout.getvalue().strip())
def run_shell(self, args, wait=True, check_status=True, omit_sudo=True):
args = ["cd", self.mountpoint, run.Raw('&&'), "sudo"] + args
- return self.client_remote.run(args=args, stdout=StringIO(),
- stderr=StringIO(), wait=wait,
+ return self.client_remote.run(args=args, stdout=BytesIO(),
+ stderr=BytesIO(), wait=wait,
check_status=check_status,
omit_sudo=omit_sudo)
import os
import crypt
import logging
-from StringIO import StringIO
+from six import StringIO
from tasks.cephfs.cephfs_test_case import CephFSTestCase
log = logging.getLogger(__name__)
args.extend(opts)
args.extend(("--", cmd))
log.info("Running command: {}".format(" ".join(args)))
- status = self.mount_a.client_remote.run(args=args,
- stdout=StringIO(),
+ status = self.mount_a.client_remote.run(args=args, stdout=StringIO(),
stdin=stdin)
return status.stdout.getvalue().strip()
import logging
import time
-from StringIO import StringIO
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])
if not isinstance(self.mount_a, FuseMount):
- p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
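+ # .sh() returns the kernel release as a str; this branch only runs for
+ # kernel (non-FUSE) clients.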
+ p = self.mount_a.client_remote.sh('uname -r', wait=True)
dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
- if str(p.stdout.getvalue()) < "5" and not(dir_pin):
+ if str(p) < "5" and not(dir_pin):
-from StringIO import StringIO
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.workunit import task as workunit
self.fs.journal_tool(["event", "get", "json",
"--path", "/tmp/journal.json"], 0)
- p = self.fs.tool_remote.run(
- args=[
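+ # .sh() returns the counter script's stdout, so the event count is parsed
+ # straight from the returned str.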
+ p = self.fs.tool_remote.sh([
"python3",
"-c",
"import json; print(len(json.load(open('/tmp/journal.json'))))"
- ],
- stdout=StringIO())
- event_count = int(p.stdout.getvalue().strip())
+ ])
+ event_count = int(p.strip())
if event_count < 1000:
# Approximate value of "lots", expected from having run fsstress
raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count))