from tasks.ceph_test_case import CephTestCase
import os
import re
-from StringIO import StringIO
from tasks.cephfs.fuse_mount import FuseMount
def delete_mds_coredump(self, daemon_id):
# delete coredump file, otherwise teuthology.internal.coredump will
# catch it later and treat it as a failure.
- p = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
- "sudo", "sysctl", "-n", "kernel.core_pattern"], stdout=StringIO())
- core_dir = os.path.dirname(p.stdout.getvalue().strip())
+ core_pattern = self.mds_cluster.mds_daemons[daemon_id].remote.sh(
+ "sudo sysctl -n kernel.core_pattern")
+ core_dir = os.path.dirname(core_pattern.strip())
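+ # Remote.sh() runs the command and returns its stdout as a decoded
+ # str, replacing the run() + StringIO capture pattern removed above.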
if core_dir: # Non-default core_pattern with a directory in it
# We have seen a core_pattern that looks like it's from teuthology's coredump
# task, so proceed to clear out the core file
log.info("Clearing core from directory: {0}".format(core_dir))
# Verify that we see the expected single coredump
- ls_proc = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
+ ls_output = self.mds_cluster.mds_daemons[daemon_id].remote.sh([
"cd", core_dir, run.Raw('&&'),
"sudo", "ls", run.Raw('|'), "sudo", "xargs", "file"
- ], stdout=StringIO())
+ ])
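+ # `file` prints "name: description"; keep the filename part of the
+ # entries that mention a ceph-mds started with "-i <daemon_id>".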
cores = [l.partition(":")[0]
- for l in ls_proc.stdout.getvalue().strip().split("\n")
+ for l in ls_output.strip().split("\n")
if re.match(r'.*ceph-mds.* -i +{0}'.format(daemon_id), l)]
log.info("Enumerated cores: {0}".format(cores))
-from StringIO import StringIO
+from io import BytesIO
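+# Under py3 teuthology captures remote output as bytes, so BytesIO
+# replaces py2's StringIO for the stdout/stderr buffers below.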
import json
import time
import logging
+
+import six
+
from textwrap import dedent
from teuthology import misc
from teuthology.contextutil import MaxWhileTries
from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError
-from .mount import CephFSMount
+from tasks.cephfs.mount import CephFSMount
log = logging.getLogger(__name__)
check_status=False,
timeout=(15*60)
)
- p = self.client_remote.run(
- args=["ls", "/sys/fs/fuse/connections"],
- stdout=StringIO(),
- check_status=False,
- timeout=(15*60)
- )
- if p.exitstatus != 0:
+ try:
+ ls_str = self.client_remote.sh("ls /sys/fs/fuse/connections",
+ timeout=(15*60)).strip()
+ except CommandFailedError:
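+ # sh() raises CommandFailedError on a nonzero exit status, which
+ # replaces the old explicit p.exitstatus check.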
return []
- ls_str = p.stdout.getvalue().strip()
if ls_str:
return [int(n) for n in ls_str.split("\n")]
else:
self.mountpoint,
],
cwd=self.test_dir,
- stdout=StringIO(),
- stderr=StringIO(),
+ stdout=BytesIO(),
+ stderr=BytesIO(),
wait=False,
timeout=(15*60)
)
try:
proc.wait()
except CommandFailedError:
- if ("endpoint is not connected" in proc.stderr.getvalue()
- or "Software caused connection abort" in proc.stderr.getvalue()):
+ error = six.ensure_str(proc.stderr.getvalue())
+ if ("endpoint is not connected" in error
+ or "Software caused connection abort" in error):
# This happens if fuse is killed without unmount
log.warning("Found stale mount point at {0}".format(self.mountpoint))
return True
log.info('mount point does not exist: %s', self.mountpoint)
return False
- fstype = proc.stdout.getvalue().rstrip('\n')
+ fstype = six.ensure_str(proc.stdout.getvalue()).rstrip('\n')
if fstype == 'fuseblk':
log.info('ceph-fuse is mounted on %s', self.mountpoint)
return True
# Now that we're mounted, set permissions so that the rest of the test will have
# unrestricted access to the filesystem mount.
try:
- stderr = StringIO()
+ stderr = BytesIO()
self.client_remote.run(args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(15*60), cwd=self.test_dir, stderr=stderr)
except run.CommandFailedError:
stderr = stderr.getvalue()
- if "Read-only file system".lower() in stderr.lower():
+ if b"Read-only file system".lower() in stderr.lower():
pass
else:
raise
""").format(self._fuse_conn))
self._fuse_conn = None
- stderr = StringIO()
+ stderr = BytesIO()
try:
# make sure its unmounted
self.client_remote.run(
Prerequisite: the client is not mounted.
"""
- stderr = StringIO()
+ stderr = BytesIO()
try:
self.client_remote.run(
args=[
check_status=False,
)
except CommandFailedError:
- if "No such file or directory" in stderr.getvalue():
+ if b"No such file or directory" in stderr.getvalue():
pass
else:
raise
client_name="client.{0}".format(self.client_id))
# Find the admin socket
- p = self.client_remote.run(args=[
+ asok_path = self.client_remote.sh([
'sudo', 'python3', '-c', pyscript
- ], stdout=StringIO(), timeout=(15*60))
- asok_path = p.stdout.getvalue().strip()
+ ], timeout=(15*60)).strip()
log.info("Found client admin socket at {0}".format(asok_path))
# Query client ID from admin socket
- p = self.client_remote.run(
- args=['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
- stdout=StringIO(), timeout=(15*60))
- return json.loads(p.stdout.getvalue())
+ json_data = self.client_remote.sh(
+ ['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
+ timeout=(15*60))
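+ # json.loads() accepts the str that sh() returns directly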
+ return json.loads(json_data)
def get_global_id(self):
"""
from contextlib import contextmanager
+from io import BytesIO
import json
import logging
import datetime
+import six
import time
from textwrap import dedent
import os
-from StringIO import StringIO
from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
from tasks.cephfs.filesystem import Filesystem
return self.client_remote.run(
args=['sudo', 'adjust-ulimits', 'daemon-helper', 'kill',
py_version, '-c', pyscript], wait=False, stdin=run.PIPE,
- stdout=StringIO())
+ stdout=BytesIO())
def run_python(self, pyscript, py_version='python3'):
p = self._run_python(pyscript, py_version)
p.wait()
- return p.stdout.getvalue().strip()
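+ # getvalue() returns bytes from the BytesIO, so decode for callers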
+ return six.ensure_str(p.stdout.getvalue().strip())
def run_shell(self, args, wait=True, stdin=None, check_status=True,
omit_sudo=True):
args = args.split()
args = ["cd", self.mountpoint, run.Raw('&&'), "sudo"] + args
- return self.client_remote.run(args=args, stdout=StringIO(),
- stderr=StringIO(), wait=wait,
+ return self.client_remote.run(args=args, stdout=BytesIO(),
+ stderr=BytesIO(), wait=wait,
stdin=stdin, check_status=check_status,
omit_sudo=omit_sudo)
import logging
-from StringIO import StringIO
+from io import BytesIO
from xfstests_dev import XFSTestsDev
log = logging.getLogger(__name__)
log.info('client is kernel mounted')
self.mount_a.client_remote.run(args=['sudo', './check',
- 'generic/099'], cwd=self.repo_path, stdout=StringIO(),
- stderr=StringIO(), timeout=30, check_status=True,
+ 'generic/099'], cwd=self.repo_path, stdout=BytesIO(),
+ stderr=BytesIO(), timeout=30, check_status=True,
label='running tests for ACLs from xfstests-dev')
Before running this testsuite, add the path to the cephfs-shell module to
$PATH and export PATH.
"""
+from io import BytesIO
from os import path
import crypt
import logging
from tempfile import mkstemp as tempfile_mkstemp
import math
+from six import ensure_str
from sys import version_info as sys_version_info
from re import search as re_search
from time import sleep
-from StringIO import StringIO
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.misc import sudo_write_file
from teuthology.orchestra.run import CommandFailedError
args.extend(("--", cmd))
log.info("Running command: {}".format(" ".join(args)))
- return mount_x.client_remote.run(args=args, stdout=StringIO(),
- stderr=StringIO(), stdin=stdin)
+ return mount_x.client_remote.run(args=args, stdout=BytesIO(),
+ stderr=BytesIO(), stdin=stdin)
def get_cephfs_shell_cmd_error(self, cmd, mount_x=None, opts=None,
stdin=None):
- return self.run_cephfs_shell_cmd(cmd, mount_x, opts, stdin).stderr.\
- getvalue().strip()
+ return ensure_str(self.run_cephfs_shell_cmd(cmd, mount_x, opts, stdin).stderr.\
+ getvalue().strip())
def get_cephfs_shell_cmd_output(self, cmd, mount_x=None, opts=None,
stdin=None, config_path=None):
- return self.run_cephfs_shell_cmd(cmd, mount_x, opts, stdin,
+ return ensure_str(self.run_cephfs_shell_cmd(cmd, mount_x, opts, stdin,
config_path).\
- stdout.getvalue().strip()
+ stdout.getvalue().strip())
def get_cephfs_shell_script_output(self, script, mount_x=None, stdin=None):
- return self.run_cephfs_shell_script(script, mount_x, stdin).stdout.\
- getvalue().strip()
+ return ensure_str(self.run_cephfs_shell_script(script, mount_x, stdin).stdout.\
+ getvalue().strip())
def run_cephfs_shell_script(self, script, mount_x=None, stdin=None):
if mount_x is None:
args = ["cephfs-shell", "-c", mount_x.config_path, '-b', scriptpath]
log.info('Running script \"' + scriptpath + '\"')
- return mount_x.client_remote.run(args=args, stdout=StringIO(),
- stderr=StringIO(), stdin=stdin)
+ return mount_x.client_remote.run(args=args, stdout=BytesIO(),
+ stderr=BytesIO(), stdin=stdin)
class TestMkdir(TestCephFSShell):
def test_mkdir(self):
def test_df_for_invalid_directory(self):
dir_abspath = path.join(self.mount_a.mountpoint, 'non-existent-dir')
proc = self.run_cephfs_shell_cmd('df ' + dir_abspath)
- assert proc.stderr.getvalue().find('error in stat') != -1
+ assert proc.stderr.getvalue().find(b'error in stat') != -1
def test_df_for_valid_file(self):
s = 'df test' * 14145016
dirname = 'somedirectory'
self.run_cephfs_shell_cmd(['mkdir', dirname])
- output = self.mount_a.client_remote.run(args=['cephfs-shell', '-c',
- self.mount_a.config_path, 'ls'],
- stdout=StringIO()).stdout.getvalue().strip()
+ output = self.mount_a.client_remote.sh([
+ 'cephfs-shell', '-c', self.mount_a.config_path, 'ls'
+ ]).strip()
if sys_version_info.major >= 3:
self.assertRegex(output, dirname)
import logging
import time
-from StringIO import StringIO
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])
if not isinstance(self.mount_a, FuseMount):
- p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
+ p = self.mount_a.client_remote.sh('uname -r', wait=True)
dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
if str(p) < "5" and not(dir_pin):
-from StringIO import StringIO
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.workunit import task as workunit
self.fs.journal_tool(["event", "get", "json",
"--path", "/tmp/journal.json"], 0)
- p = self.fs.tool_remote.run(
- args=[
+ p = self.fs.tool_remote.sh([
"python3",
"-c",
"import json; print(len(json.load(open('/tmp/journal.json'))))"
- ],
- stdout=StringIO())
- event_count = int(p.stdout.getvalue().strip())
+ ])
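+ # the one-line python3 script prints the event count; parse it as int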
+ event_count = int(p.strip())
if event_count < 1000:
# Approximate value of "lots", expected from having run fsstress
raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count))
+from io import BytesIO
import six
import logging
-from StringIO import StringIO
from tasks.cephfs.cephfs_test_case import CephFSTestCase
logger = logging.getLogger(__name__)
# NOTE: On teuthology machines it's necessary to run "make" as
# superuser since the repo is cloned somewhere in /tmp.
self.mount_a.client_remote.run(args=['sudo', 'make'],
- cwd=self.repo_path, stdout=StringIO(),
- stderr=StringIO())
+ cwd=self.repo_path, stdout=BytesIO(),
+ stderr=BytesIO())
self.mount_a.client_remote.run(args=['sudo', 'make', 'install'],
cwd=self.repo_path, omit_sudo=False,
- stdout=StringIO(), stderr=StringIO())
+ stdout=BytesIO(), stderr=BytesIO())
def get_repo(self):
"""