]> git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa/tasks: ceph-daemon -> cephadm throughout var names and comments
author: Sage Weil <sage@redhat.com>
Wed, 11 Dec 2019 19:55:08 +0000 (13:55 -0600)
committer: Sage Weil <sage@redhat.com>
Thu, 12 Dec 2019 01:14:09 +0000 (19:14 -0600)
Signed-off-by: Sage Weil <sage@redhat.com>
qa/tasks/ceph2.py
qa/tasks/ceph_manager.py
src/cephadm/cephadm

index d1134a43109e4a5a3db643d86e3613d248ea4bb0..3ae49ab7739f44c347a91f70d9e88aa8959ecc68 100644 (file)
@@ -1,5 +1,5 @@
 """
-Ceph cluster task, deployed via ceph-daemon and ssh orchestrator
+Ceph cluster task, deployed via cephadm and ssh orchestrator
 """
 from cStringIO import StringIO
 
@@ -42,7 +42,7 @@ def _shell(ctx, cluster_name, remote, args, **kwargs):
     return remote.run(
         args=[
             'sudo',
-            ctx.ceph_daemon,
+            ctx.cephadm,
             '--image', ctx.ceph[cluster_name].image,
             'shell',
             '-c', '{}/{}.conf'.format(testdir, cluster_name),
@@ -76,7 +76,7 @@ def build_initial_config(ctx, config):
 def normalize_hostnames(ctx):
     """
     Ensure we have short hostnames throughout, for consistency between
-    remote.shortname and socket.gethostname() in ceph-daemon.
+    remote.shortname and socket.gethostname() in cephadm.
     """
     log.info('Normalizing hostnames...')
     ctx.cluster.run(args=[
@@ -91,7 +91,7 @@ def normalize_hostnames(ctx):
         pass
 
 @contextlib.contextmanager
-def download_ceph_daemon(ctx, config, ref):
+def download_cephadm(ctx, config, ref):
     cluster_name = config['cluster']
     testdir = teuthology.get_testdir(ctx)
 
@@ -108,13 +108,13 @@ def download_ceph_daemon(ctx, config, ref):
                 run.Raw('|'),
                 'tar', '-xO', 'src/cephadm/cephadm',
                 run.Raw('>'),
-                ctx.ceph_daemon,
+                ctx.cephadm,
                 run.Raw('&&'),
                 'test', '-s',
-                ctx.ceph_daemon,
+                ctx.cephadm,
                 run.Raw('&&'),
                 'chmod', '+x',
-                ctx.ceph_daemon,
+                ctx.cephadm,
             ],
         )
 
@@ -124,7 +124,7 @@ def download_ceph_daemon(ctx, config, ref):
         log.info('Removing cluster...')
         ctx.cluster.run(args=[
             'sudo',
-            ctx.ceph_daemon,
+            ctx.cephadm,
             'rm-cluster',
             '--fsid', ctx.ceph[cluster_name].fsid,
             '--force',
@@ -136,7 +136,7 @@ def download_ceph_daemon(ctx, config, ref):
                 args=[
                     'rm',
                     '-rf',
-                    ctx.ceph_daemon,
+                    ctx.cephadm,
                 ],
             )
 
@@ -263,7 +263,7 @@ def ceph_bootstrap(ctx, config):
         log.info('Bootstrapping...')
         cmd = [
             'sudo',
-            ctx.ceph_daemon,
+            ctx.cephadm,
             '--image', ctx.ceph[cluster_name].image,
             'bootstrap',
             '--fsid', fsid,
@@ -784,14 +784,14 @@ def task(ctx, config):
         config['cephadm_mode'] = 'root'
     assert config['cephadm_mode'] in ['root', 'cephadm-package']
     if config['cephadm_mode'] == 'root':
-        ctx.ceph_daemon = testdir + '/cephadm'
+        ctx.cephadm = testdir + '/cephadm'
     else:
-        ctx.ceph_daemon = 'cephadm'  # in the path
+        ctx.cephadm = 'cephadm'  # in the path
 
     if first_ceph_cluster:
         # FIXME: this is global for all clusters
         ctx.daemons = DaemonGroup(
-            use_ceph_daemon=ctx.ceph_daemon)
+            use_cephadm=ctx.cephadm)
 
     # image
     ctx.ceph[cluster_name].image = config.get('image')
@@ -833,8 +833,7 @@ def task(ctx, config):
     with contextutil.nested(
             lambda: ceph_initial(),
             lambda: normalize_hostnames(ctx=ctx),
-            lambda: download_ceph_daemon(ctx=ctx, config=config,
-                                         ref=ref),
+            lambda: download_cephadm(ctx=ctx, config=config, ref=ref),
             lambda: ceph_log(ctx=ctx, config=config),
             lambda: ceph_crash(ctx=ctx, config=config),
             lambda: ceph_bootstrap(ctx=ctx, config=config),
@@ -849,7 +848,7 @@ def task(ctx, config):
             ctx=ctx,
             logger=log.getChild('ceph_manager.' + cluster_name),
             cluster=cluster_name,
-            ceph_daemon=True,
+            cephadm=True,
         )
 
         try:
index 91463e929f46a6324e7870e730dae0eb29027060..6d8bebeb07d0fb46fdfd1eb555c59595bc22e179 100644 (file)
@@ -33,13 +33,13 @@ DEFAULT_CONF_PATH = '/etc/ceph/ceph.conf'
 
 log = logging.getLogger(__name__)
 
-# this is for ceph-daemon clusters
+# this is for cephadm clusters
 def shell(ctx, cluster_name, remote, args, **kwargs):
     testdir = teuthology.get_testdir(ctx)
     return remote.run(
         args=[
             'sudo',
-            ctx.ceph_daemon,
+            ctx.cephadm,
             '--image', ctx.ceph[cluster_name].image,
             'shell',
             '-c', '{}/{}.conf'.format(testdir, cluster_name),
@@ -1223,14 +1223,14 @@ class CephManager:
     """
 
     def __init__(self, controller, ctx=None, config=None, logger=None,
-                 cluster='ceph', ceph_daemon=False):
+                 cluster='ceph', cephadm=False):
         self.lock = threading.RLock()
         self.ctx = ctx
         self.config = config
         self.controller = controller
         self.next_pool_id = 0
         self.cluster = cluster
-        self.ceph_daemon = ceph_daemon
+        self.cephadm = cephadm
         if (logger):
             self.log = lambda x: logger.info(x)
         else:
@@ -1255,7 +1255,7 @@ class CephManager:
         """
         Start ceph on a raw cluster.  Return count
         """
-        if self.ceph_daemon:
+        if self.cephadm:
             proc = shell(self.ctx, self.cluster, self.controller,
                          args=['ceph'] + list(args),
                          stdout=StringIO())
@@ -1283,7 +1283,7 @@ class CephManager:
         """
         Start ceph on a cluster.  Return success or failure information.
         """
-        if self.ceph_daemon:
+        if self.cephadm:
             proc = shell(self.ctx, self.cluster, self.controller,
                          args=['ceph'] + list(args),
                          check_status=False)
index 2a9b0e0f4c3c5dd93825ad2921307e7d18045c38..1dcf615e1f0209575c39c798f2a3c6c88890d26b 100755 (executable)
@@ -11,7 +11,7 @@ CONTAINER_PREFERENCE = ['podman', 'docker']  # prefer podman to docker
 CUSTOM_PS1=r'[ceph: \u@\h \W]\$ '
 
 """
-You can invoke ceph-daemon in two ways:
+You can invoke cephadm in two ways:
 
 1. The normal way, at the command line.