From 51ecc1b922fabb17db6b2ea7c7179a325eede83c Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Wed, 11 Dec 2019 13:55:08 -0600
Subject: [PATCH] qa/tasks: ceph-daemon -> cephadm throughout var names and comments

Signed-off-by: Sage Weil
---
 qa/tasks/ceph2.py        | 31 +++++++++++++++----------------
 qa/tasks/ceph_manager.py | 12 ++++++------
 src/cephadm/cephadm      |  2 +-
 3 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/qa/tasks/ceph2.py b/qa/tasks/ceph2.py
index d1134a43109..3ae49ab7739 100644
--- a/qa/tasks/ceph2.py
+++ b/qa/tasks/ceph2.py
@@ -1,5 +1,5 @@
 """
-Ceph cluster task, deployed via ceph-daemon and ssh orchestrator
+Ceph cluster task, deployed via cephadm and ssh orchestrator
 """
 
 from cStringIO import StringIO
@@ -42,7 +42,7 @@ def _shell(ctx, cluster_name, remote, args, **kwargs):
     return remote.run(
         args=[
             'sudo',
-            ctx.ceph_daemon,
+            ctx.cephadm,
             '--image', ctx.ceph[cluster_name].image,
             'shell', '-c', '{}/{}.conf'.format(testdir, cluster_name),
@@ -76,7 +76,7 @@ def build_initial_config(ctx, config):
 def normalize_hostnames(ctx):
     """
     Ensure we have short hostnames throughout, for consistency between
-    remote.shortname and socket.gethostname() in ceph-daemon.
+    remote.shortname and socket.gethostname() in cephadm.
     """
     log.info('Normalizing hostnames...')
     ctx.cluster.run(args=[
@@ -91,7 +91,7 @@ def normalize_hostnames(ctx):
         pass

 @contextlib.contextmanager
-def download_ceph_daemon(ctx, config, ref):
+def download_cephadm(ctx, config, ref):
     cluster_name = config['cluster']
     testdir = teuthology.get_testdir(ctx)
@@ -108,13 +108,13 @@ def download_ceph_daemon(ctx, config, ref):
             run.Raw('|'),
             'tar', '-xO', 'src/cephadm/cephadm',
             run.Raw('>'),
-            ctx.ceph_daemon,
+            ctx.cephadm,
             run.Raw('&&'),
             'test', '-s',
-            ctx.ceph_daemon,
+            ctx.cephadm,
             run.Raw('&&'),
             'chmod', '+x',
-            ctx.ceph_daemon,
+            ctx.cephadm,
         ],
     )
@@ -124,7 +124,7 @@
         log.info('Removing cluster...')
         ctx.cluster.run(args=[
             'sudo',
-            ctx.ceph_daemon,
+            ctx.cephadm,
             'rm-cluster',
             '--fsid', ctx.ceph[cluster_name].fsid,
             '--force',
@@ -136,7 +136,7 @@
             args=[
                 'rm',
                 '-rf',
-                ctx.ceph_daemon,
+                ctx.cephadm,
             ],
         )
@@ -263,7 +263,7 @@ def ceph_bootstrap(ctx, config):
     log.info('Bootstrapping...')
     cmd = [
         'sudo',
-        ctx.ceph_daemon,
+        ctx.cephadm,
         '--image', ctx.ceph[cluster_name].image,
         'bootstrap',
         '--fsid', fsid,
@@ -784,14 +784,14 @@ def task(ctx, config):
         config['cephadm_mode'] = 'root'
     assert config['cephadm_mode'] in ['root', 'cephadm-package']
     if config['cephadm_mode'] == 'root':
-        ctx.ceph_daemon = testdir + '/cephadm'
+        ctx.cephadm = testdir + '/cephadm'
     else:
-        ctx.ceph_daemon = 'cephadm' # in the path
+        ctx.cephadm = 'cephadm' # in the path

     if first_ceph_cluster:
         # FIXME: this is global for all clusters
         ctx.daemons = DaemonGroup(
-            use_ceph_daemon=ctx.ceph_daemon)
+            use_cephadm=ctx.cephadm)

     # image
     ctx.ceph[cluster_name].image = config.get('image')
@@ -833,8 +833,7 @@ def task(ctx, config):
     with contextutil.nested(
             lambda: ceph_initial(),
             lambda: normalize_hostnames(ctx=ctx),
-            lambda: download_ceph_daemon(ctx=ctx, config=config,
-                                         ref=ref),
+            lambda: download_cephadm(ctx=ctx, config=config, ref=ref),
             lambda: ceph_log(ctx=ctx, config=config),
             lambda: ceph_crash(ctx=ctx, config=config),
             lambda: ceph_bootstrap(ctx=ctx, config=config),
@@ -849,7 +848,7 @@ def task(ctx, config):
             ctx=ctx,
             logger=log.getChild('ceph_manager.' + cluster_name),
             cluster=cluster_name,
-            ceph_daemon=True,
+            cephadm=True,
         )

         try:
diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index 91463e929f4..6d8bebeb07d 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -33,13 +33,13 @@ DEFAULT_CONF_PATH = '/etc/ceph/ceph.conf'

 log = logging.getLogger(__name__)

-# this is for ceph-daemon clusters
+# this is for cephadm clusters
 def shell(ctx, cluster_name, remote, args, **kwargs):
     testdir = teuthology.get_testdir(ctx)
     return remote.run(
         args=[
             'sudo',
-            ctx.ceph_daemon,
+            ctx.cephadm,
             '--image', ctx.ceph[cluster_name].image,
             'shell', '-c', '{}/{}.conf'.format(testdir, cluster_name),
@@ -1223,14 +1223,14 @@ class CephManager:
     """
     def __init__(self, controller, ctx=None, config=None, logger=None,
-                 cluster='ceph', ceph_daemon=False):
+                 cluster='ceph', cephadm=False):
         self.lock = threading.RLock()
         self.ctx = ctx
         self.config = config
         self.controller = controller
         self.next_pool_id = 0
         self.cluster = cluster
-        self.ceph_daemon = ceph_daemon
+        self.cephadm = cephadm
         if (logger):
             self.log = lambda x: logger.info(x)
         else:
@@ -1255,7 +1255,7 @@ class CephManager:
         """
         Start ceph on a raw cluster. Return count
         """
-        if self.ceph_daemon:
+        if self.cephadm:
             proc = shell(self.ctx, self.cluster, self.controller,
                          args=['ceph'] + list(args),
                          stdout=StringIO())
@@ -1283,7 +1283,7 @@ class CephManager:
         """
         Start ceph on a cluster. Return success or failure information.
         """
-        if self.ceph_daemon:
+        if self.cephadm:
             proc = shell(self.ctx, self.cluster, self.controller,
                          args=['ceph'] + list(args),
                          check_status=False)
diff --git a/src/cephadm/cephadm b/src/cephadm/cephadm
index 2a9b0e0f4c3..1dcf615e1f0 100755
--- a/src/cephadm/cephadm
+++ b/src/cephadm/cephadm
@@ -11,7 +11,7 @@ CONTAINER_PREFERENCE = ['podman', 'docker'] # prefer podman to docker
 CUSTOM_PS1=r'[ceph: \u@\h \W]\$ '

 """
-You can invoke ceph-daemon in two ways:
+You can invoke cephadm in two ways:

 1. The normal way, at the command line.
-- 
2.39.5
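
For reference, a minimal sketch (not part of the patch; make_manager and
bootstrap_remote are illustrative names) of how a teuthology caller would
construct a CephManager after this rename, mirroring the task() hunk above.
With cephadm=True, cluster commands are routed through the shell() helper,
i.e. 'sudo cephadm shell' on the controller remote:

    import logging

    from tasks.ceph_manager import CephManager

    log = logging.getLogger(__name__)

    def make_manager(ctx, cluster_name, bootstrap_remote):
        # The keyword is now cephadm=..., not ceph_daemon=...; ctx,
        # cluster_name, and bootstrap_remote are assumed teuthology
        # fixtures set up as in qa/tasks/ceph2.py above.
        return CephManager(
            bootstrap_remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            cephadm=True,
        )

With cephadm=False (the default), the `if self.cephadm:` branches above are
skipped and commands fall back to the pre-existing ceph CLI path, so
non-cephadm suites are unaffected by this patch.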