From 204ceee156cbb8a20bdf56efb0cd0610ee4c107e Mon Sep 17 00:00:00 2001
From: Kyr Shatskyy
Date: Sat, 14 Dec 2019 00:32:07 +0100
Subject: [PATCH] qa/tasks/ceph_manager: get rid of CStringIO for py3

Use io.BytesIO instead of cStringIO.StringIO for py3 compatibility

Signed-off-by: Kefu Chai
Signed-off-by: Kyr Shatskyy
---
 qa/tasks/ceph_manager.py | 49 ++++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 24 deletions(-)

diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index cdb80d398a99a..3f090ea33e7b3 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -1,7 +1,6 @@
 """
 ceph manager -- Thrasher and CephManager objects
 """
-from cStringIO import StringIO
 from functools import wraps
 import contextlib
 import random
@@ -14,6 +13,8 @@ import logging
 import threading
 import traceback
 import os
+
+from io import BytesIO
 from teuthology import misc as teuthology
 from tasks.scrub import Scrubber
 from util.rados import cmd_erasure_code_profile
@@ -57,7 +58,7 @@ def shell(ctx, cluster_name, remote, args, name=None, **kwargs):
     )

 def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'):
-    conf_fp = StringIO()
+    conf_fp = BytesIO()
     ctx.ceph[cluster].conf.write(conf_fp)
     conf_fp.seek(0)
     writes = ctx.cluster.run(
@@ -212,8 +213,8 @@ class OSDThrasher(Thrasher):
         allremotes = list(set(allremotes))
         for remote in allremotes:
             proc = remote.run(args=['type', cmd], wait=True,
-                              check_status=False, stdout=StringIO(),
-                              stderr=StringIO())
+                              check_status=False, stdout=BytesIO(),
+                              stderr=BytesIO())
             if proc.exitstatus != 0:
                 return False;
         return True;
@@ -225,13 +226,13 @@ class OSDThrasher(Thrasher):
                 args=['ceph-objectstore-tool'] + cmd,
                 name=osd,
                 wait=True, check_status=False,
-                stdout=StringIO(),
-                stderr=StringIO())
+                stdout=BytesIO(),
+                stderr=BytesIO())
         else:
             return remote.run(
                 args=['sudo', 'adjust-ulimits', 'ceph-objectstore-tool'] + cmd,
-                wait=True, check_status=False, stdout=StringIO(),
-                stderr=StringIO())
+                wait=True, check_status=False, stdout=BytesIO(),
+                stderr=BytesIO())

     def kill_osd(self, osd=None, mark_down=False, mark_out=False):
         """
@@ -275,8 +276,8 @@ class OSDThrasher(Thrasher):
         with safe_while(sleep=15, tries=40, action="type ceph-objectstore-tool") as proceed:
             while proceed():
                 proc = exp_remote.run(args=['type', 'ceph-objectstore-tool'],
-                           wait=True, check_status=False, stdout=StringIO(),
-                           stderr=StringIO())
+                           wait=True, check_status=False, stdout=BytesIO(),
+                           stderr=BytesIO())
                 if proc.exitstatus == 0:
                     break
                 log.debug("ceph-objectstore-tool binary not present, trying again")
@@ -366,7 +367,7 @@ class OSDThrasher(Thrasher):
                     raise Exception("ceph-objectstore-tool: "
                                     "imp list-pgs failure with status {ret}".
                                     format(ret=proc.exitstatus))
-                pgs = proc.stdout.getvalue().split('\n')[:-1]
+                pgs = six.ensure_str(proc.stdout.getvalue()).split('\n')[:-1]
                 if pg not in pgs:
                     self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}".
                              format(pg=pg, fosd=exp_osd, tosd=imp_osd))
@@ -441,8 +442,7 @@ class OSDThrasher(Thrasher):
                                   '--journal-path', JPATH.format(id=imp_osd),
                               ]) + " --op apply-layout-settings --pool " + pool).format(id=osd)
-            proc = imp_remote.run(args=cmd, wait=True, check_status=False, stderr=StringIO())
-            output = proc.stderr.getvalue()
+            output = imp_remote.sh(cmd, wait=True, check_status=False)
             if 'Couldn\'t find pool' in output:
                 continue
             if proc.exitstatus:
@@ -1266,7 +1266,7 @@ class ObjectStoreTool:

     def run(self, options, args, stdin=None, stdout=None):
         if stdout is None:
-            stdout = StringIO()
+            stdout = BytesIO()
         self.manager.kill_osd(self.osd)
         cmd = self.build_cmd(options, args, stdin)
         self.manager.log(cmd)
@@ -1274,11 +1274,12 @@ class ObjectStoreTool:
             proc = self.remote.run(args=['bash', '-e', '-x', '-c', cmd],
                                    check_status=False,
                                    stdout=stdout,
-                                   stderr=StringIO())
+                                   stderr=BytesIO())
             proc.wait()
             if proc.exitstatus != 0:
                 self.manager.log("failed with " + str(proc.exitstatus))
-                error = proc.stdout.getvalue() + " " + proc.stderr.getvalue()
+                error = six.ensure_str(proc.stdout.getvalue()) + " " + \
+                        six.ensure_str(proc.stderr.getvalue())
                 raise Exception(error)
         finally:
             if self.do_revive:
@@ -1337,7 +1338,7 @@ class CephManager:
         if self.cephadm:
             proc = shell(self.ctx, self.cluster, self.controller,
                          args=['ceph'] + list(args),
-                         stdout=StringIO())
+                         stdout=BytesIO())
         else:
             testdir = teuthology.get_testdir(self.ctx)
             ceph_args = [
@@ -1355,9 +1356,9 @@ class CephManager:
         ceph_args.extend(args)
         proc = self.controller.run(
             args=ceph_args,
-            stdout=StringIO(),
+            stdout=BytesIO(),
         )
-        return proc.stdout.getvalue()
+        return six.ensure_str(proc.stdout.getvalue())

     def raw_cluster_cmd_result(self, *args, **kwargs):
         """
@@ -1388,7 +1389,7 @@ class CephManager:

     def run_ceph_w(self, watch_channel=None):
         """
-        Execute "ceph -w" in the background with stdout connected to a StringIO,
+        Execute "ceph -w" in the background with stdout connected to a BytesIO,
         and return the RemoteProcess.

         :param watch_channel: Specifies the channel to be watched. This can be
@@ -1405,7 +1406,7 @@ class CephManager:
         if watch_channel is not None:
             args.append("--watch-channel")
             args.append(watch_channel)
-        return self.controller.run(args=args, wait=False, stdout=StringIO(), stdin=run.PIPE)
+        return self.controller.run(args=args, wait=False, stdout=BytesIO(), stdin=run.PIPE)

     def get_mon_socks(self):
         """
@@ -1588,7 +1589,7 @@ class CephManager:
     def osd_admin_socket(self, osd_id, command, check_status=True, timeout=0,
                          stdout=None):
         if stdout is None:
-            stdout = StringIO()
+            stdout = BytesIO()
         return self.admin_socket('osd', osd_id, command, check_status, timeout, stdout)

     def find_remote(self, service_type, service_id):
@@ -1612,7 +1613,7 @@ class CephManager:
         to the admin socket
         """
         if stdout is None:
-            stdout = StringIO()
+            stdout = BytesIO()

         remote = self.find_remote(service_type, service_id)

@@ -1741,7 +1742,7 @@ class CephManager:
         five seconds and try again.
         """
         if stdout is None:
-            stdout = StringIO()
+            stdout = BytesIO()
         tries = 0
         while True:
             proc = self.admin_socket(service_type, service_id,
--
2.39.5
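
For readers following the conversion: the behavioral difference this patch
works around is that, under Python 3, output captured into an io.BytesIO
arrives as bytes, so it must be coerced to str before text operations such
as split(). Below is a minimal standalone sketch of that pattern; the
buffer merely stands in for what a remote.run(..., stdout=BytesIO()) call
would capture and is not part of the patch itself:

    from io import BytesIO

    import six

    # Stand-in for output captured via remote.run(..., stdout=BytesIO());
    # on Python 3 the buffer yields bytes, not str.
    stdout = BytesIO(b"1.0\n1.1\n")

    # bytes.split('\n') raises TypeError on Python 3, so the value is
    # normalized first: six.ensure_str() decodes bytes on py3 and returns
    # py2 str unchanged, which keeps the same call site working on both.
    pgs = six.ensure_str(stdout.getvalue()).split('\n')[:-1]
    assert pgs == ['1.0', '1.1']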