]> git-server-git.apps.pok.os.sepia.ceph.com Git - teuthology.git/commitdiff
scripts: add cli tools for reimaging nodes without locking 1526/head
authorVasu Kulkarni <vasu@redhat.com>
Thu, 24 Jan 2019 21:47:58 +0000 (13:47 -0800)
committerKyr Shatskyy <kyrylo.shatskyy@suse.com>
Wed, 1 Jul 2020 00:21:07 +0000 (02:21 +0200)
Add teuthology-reimage cli tool to be able to provision nodes using
Fog or Pelagos without locking and unlocking.

This is useful, for example, when someone locks a node for
development or debugging purposes and does not want to release it
while resetting its image, because it can happen that no free
nodes are available.

Signed-off-by: Kyr Shatskyy <kyrylo.shatskyy@suse.com>
Signed-off-by: Vasu Kulkarni <vasu@redhat.com>
scripts/reimage.py [new file with mode: 0644]
setup.py
teuthology/reimage.py [new file with mode: 0644]

diff --git a/scripts/reimage.py b/scripts/reimage.py
new file mode 100644 (file)
index 0000000..42ec6e8
--- /dev/null
@@ -0,0 +1,25 @@
+import docopt
+import sys
+
+import teuthology.reimage
+
+doc = """
+usage: teuthology-reimage --help
+       teuthology-reimage --os-type distro --os-version version [options] <nodes>...
+
+Reimage nodes without locking using specified distro type and version.
+The nodes must be locked by the current user, otherwise an error occurs.
+Custom owner can be specified in order to provision someone else nodes.
+Reimaging unlocked nodes cannot be provided.
+
+Standard arguments:
+  -h, --help                        Show this help message and exit
+  -v, --verbose                     Be more verbose
+  --os-type <os-type>               Distro type like: rhel, ubuntu, etc.
+  --os-version <os-version>         Distro version like: 7.6, 16.04, etc.
+  --owner user@host                 Owner of the locked machines
+"""
+
+def main(argv=sys.argv[1:]):
+    args = docopt.docopt(doc, argv=argv)
+    return teuthology.reimage.main(args)
index dca032c75e29e89e2bed2c2c011d073effc41332..7b08ee831383d1a7edb495d2bd0d55754c4546ab 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -128,6 +128,7 @@ setup(
             'teuthology-queue = scripts.queue:main',
             'teuthology-prune-logs = scripts.prune_logs:main',
             'teuthology-describe = scripts.describe:main',
+            'teuthology-reimage = scripts.reimage:main'
             ],
         },
 
diff --git a/teuthology/reimage.py b/teuthology/reimage.py
new file mode 100644 (file)
index 0000000..fdc9054
--- /dev/null
@@ -0,0 +1,57 @@
+import argparse
+import logging
+
+import teuthology
+
+from teuthology.parallel import parallel
+from teuthology.provision import reimage, get_reimage_types
+from teuthology.lock import query, ops
+from teuthology.misc import get_user
+from teuthology.misc import decanonicalize_hostname as shortname
+
+log = logging.getLogger(__name__)
+
+def main(args):
+    if (args['--verbose']):
+        teuthology.log.setLevel(logging.DEBUG)
+
+    ctx = argparse.Namespace()
+    ctx.os_type = args['--os-type']
+    ctx.os_version = args['--os-version']
+
+    nodes = args['<nodes>']
+
+    reimage_types = get_reimage_types()
+    statuses = query.get_statuses(nodes)
+    owner = args['--owner'] or get_user()
+    unlocked = [shortname(_['name'])
+                            for _ in statuses if not _['locked']]
+    if unlocked:
+        log.error(
+            "Some of the nodes are not locked: %s", unlocked)
+        exit(1)
+
+    improper = [shortname(_['name']) for _ in statuses if _['locked_by'] != owner]
+    if improper:
+        log.error(
+            "Some of the nodes are not owned by '%s': %s", owner, improper)
+        exit(1)
+
+    irreimageable = [shortname(_['name']) for _ in statuses
+                                if _['machine_type'] not in reimage_types]
+    if irreimageable:
+        log.error(
+            "Following nodes cannot be reimaged because theirs machine type "
+            "is not reimageable: %s", irreimageable)
+        exit(1)
+
+    def reimage_node(ctx, machine_name, machine_type):
+        ops.update_nodes([machine_name], True)
+        reimage(ctx, machine_name, machine_type)
+        ops.update_nodes([machine_name])
+        log.debug("Node '%s' reimaging is complete", machine_name)
+
+    with parallel() as p:
+        for node in statuses:
+            log.debug("Start node '%s' reimaging", node['name'])
+            p.spawn(reimage_node, ctx, shortname(node['name']), node['machine_type'])