Fix a circular import (1817/head)
author Zack Cerza <zack@redhat.com>
Tue, 7 Mar 2023 20:54:01 +0000 (13:54 -0700)
committer Zack Cerza <zack@redhat.com>
Wed, 8 Mar 2023 20:51:37 +0000 (13:51 -0700)
This was exposed during development of the exporter: several modules bound names from
teuthology.report, teuthology.lock.ops, and teuthology.nuke at import time, which fails
while the imported module is only partially initialized. The fix aliases whole modules
instead and resolves attributes at call time; see the sketch below the file list.

Signed-off-by: Zack Cerza <zack@redhat.com>
teuthology/beanstalk.py
teuthology/dispatcher/__init__.py
teuthology/dispatcher/supervisor.py
teuthology/lock/ops.py
teuthology/nuke/__init__.py
teuthology/test/test_nuke.py

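Every hunk below applies the same pattern: an import-time name binding such as
"from teuthology import report" or "from teuthology.nuke import nuke" becomes a
module alias ("import teuthology.report as report"), and the attribute is resolved
at call time ("nuke.nuke(...)"). A minimal two-module sketch (hypothetical package,
not part of teuthology) shows why this breaks the cycle:

    # pkg/a.py
    #
    # "from pkg import b" would bind pkg.b at import time; when pkg.b is still
    # mid-import (because it imports pkg.a), the attribute does not exist yet
    # and Python raises ImportError: cannot import name 'b' from partially
    # initialized module 'pkg'.
    import pkg.b as b  # only needs the module object in sys.modules (Python >= 3.7)

    def use_b():
        # Attribute lookup happens at call time, after both modules have
        # finished executing, so the cycle is harmless.
        return b.helper()

    # pkg/b.py
    import pkg.a as a  # the other half of the cycle; safe for the same reason

    def helper():
        return "ok"
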
diff --git a/teuthology/beanstalk.py b/teuthology/beanstalk.py
index 41e2acedd17d134f97d7884ed5d4130b2735335d..abba88ea85ace1d40a05114e246e9440530e3364 100644
@@ -6,8 +6,9 @@ import pprint
 import sys
 from collections import OrderedDict
 
+import teuthology.report as report
+
 from teuthology.config import config
-from teuthology import report
 
 log = logging.getLogger(__name__)
 
diff --git a/teuthology/dispatcher/__init__.py b/teuthology/dispatcher/__init__.py
index 8ce9b6557305fb05ce74aaef182e0991a2f5594d..69b27072bff15adb2c80b2e88ed7e320b53b97d2 100644
@@ -8,17 +8,18 @@ import yaml
 from datetime import datetime
 from typing import Dict, List
 
+import teuthology.dispatcher.supervisor as supervisor
+import teuthology.lock.ops as lock_ops
+import teuthology.nuke as nuke
+import teuthology.worker as worker
+
 from teuthology import setup_log_file, install_except_hook
 from teuthology import beanstalk
 from teuthology import report
 from teuthology.config import config as teuth_config
 from teuthology.exceptions import SkipJob
 from teuthology.repo_utils import fetch_qa_suite, fetch_teuthology
-from teuthology.lock.ops import block_and_lock_machines
-from teuthology.dispatcher import supervisor
-from teuthology.worker import prep_job
 from teuthology import safepath
-from teuthology.nuke import nuke
 
 log = logging.getLogger(__name__)
 start_time = datetime.utcnow()
@@ -134,7 +135,7 @@ def main(args):
             keep_running = False
 
         try:
-            job_config, teuth_bin_path = prep_job(
+            job_config, teuth_bin_path = worker.prep_job(
                 job_config,
                 log_file_path,
                 archive_dir,
@@ -175,7 +176,7 @@ def main(args):
             error_message = "Saw error while trying to spawn supervisor."
             log.exception(error_message)
             if 'targets' in job_config:
-                nuke(supervisor.create_fake_context(job_config), True)
+                nuke.nuke(supervisor.create_fake_context(job_config), True)
             report.try_push_job_info(job_config, dict(
                 status='fail',
                 failure_reason=error_message))
@@ -194,7 +195,7 @@ def main(args):
     return max(returncodes)
 
 
-def find_dispatcher_processes() -> Dict[str, List[psutil.Process]] :
+def find_dispatcher_processes() -> Dict[str, List[psutil.Process]]:
     def match(proc):
         cmdline = proc.cmdline()
         if len(cmdline) < 3:
@@ -223,7 +224,7 @@ def find_dispatcher_processes() -> Dict[str, List[psutil.Process]] :
 def lock_machines(job_config):
     report.try_push_job_info(job_config, dict(status='running'))
     fake_ctx = supervisor.create_fake_context(job_config, block=True)
-    block_and_lock_machines(
+    lock_ops.block_and_lock_machines(
         fake_ctx,
         len(job_config['roles']),
         job_config['machine_type'],
diff --git a/teuthology/dispatcher/supervisor.py b/teuthology/dispatcher/supervisor.py
index e5ea4a3205b0595554e686a6cd6b4cac80fed33b..c38118a378f0721e9712b789ff79a1b1b182c854 100644
@@ -8,17 +8,17 @@ import requests
 from urllib.parse import urljoin
 from datetime import datetime
 
-import teuthology
+import teuthology.lock.ops as lock_ops
+import teuthology.nuke as nuke
+
 from teuthology import report
 from teuthology import safepath
 from teuthology.config import config as teuth_config
 from teuthology.exceptions import SkipJob, MaxWhileTries
 from teuthology import setup_log_file, install_except_hook
-from teuthology.lock.ops import reimage_machines
 from teuthology.misc import get_user, archive_logs, compress_logs
 from teuthology.config import FakeNamespace
 from teuthology.job_status import get_status
-from teuthology.nuke import nuke
 from teuthology.kill import kill_job
 from teuthology.task.internal import add_remotes
 from teuthology.misc import decanonicalize_hostname as shortname
@@ -165,6 +165,7 @@ def failure_is_reimage(failure_reason):
     else:
         return False
 
+
 def check_for_reimage_failures_and_mark_down(targets, count=10):
     # Grab paddles history of jobs in the machine
     # and count the number of reimaging errors
@@ -173,9 +174,8 @@ def check_for_reimage_failures_and_mark_down(targets, count=10):
     for k, _ in targets.items():
         machine = k.split('@')[-1]
         url = urljoin(
-                base_url,
-                '/nodes/{0}/jobs/?count={1}'.format(
-                machine, count)
+            base_url,
+            '/nodes/{0}/jobs/?count={1}'.format(machine, count)
         )
         resp = requests.get(url)
         jobs = resp.json()
@@ -189,15 +189,16 @@ def check_for_reimage_failures_and_mark_down(targets, count=10):
             continue
         # Mark machine down
         machine_name = shortname(k)
-        teuthology.lock.ops.update_lock(
-           machine_name,
-           description='reimage failed {0} times'.format(count),
-           status='down',
-       )
+        lock_ops.update_lock(
+            machine_name,
+            description='reimage failed {0} times'.format(count),
+            status='down',
+        )
         log.error(
             'Reimage failed {0} times ... marking machine down'.format(count)
         )
 
+
 def reimage(job_config):
     # Reimage the targets specified in job config
     # and update their keys in config after reimaging
@@ -206,12 +207,15 @@ def reimage(job_config):
     report.try_push_job_info(ctx.config, dict(status='waiting'))
     targets = job_config['targets']
     try:
-        reimaged = reimage_machines(ctx, targets, job_config['machine_type'])
+        reimaged = lock_ops.reimage_machines(ctx, targets, job_config['machine_type'])
     except Exception as e:
         log.exception('Reimaging error. Nuking machines...')
         # Reimage failures should map to the 'dead' status instead of 'fail'
-        report.try_push_job_info(ctx.config, dict(status='dead', failure_reason='Error reimaging machines: ' + str(e)))
-        nuke(ctx, True)
+        report.try_push_job_info(
+            ctx.config,
+            dict(status='dead', failure_reason='Error reimaging machines: ' + str(e))
+        )
+        nuke.nuke(ctx, True)
         # Machine that fails to reimage after 10 times will be marked down
         check_for_reimage_failures_and_mark_down(targets)
         raise
@@ -241,18 +245,20 @@ def unlock_targets(job_config):
     if not locked:
         return
     job_status = get_status(job_info)
-    if job_status == 'pass' or \
-            (job_config.get('unlock_on_failure', False) and not job_config.get('nuke-on-error', False)):
+    if job_status == 'pass' or (job_config.get('unlock_on_failure', False)
+                                and not job_config.get('nuke-on-error', False)):
         log.info('Unlocking machines...')
         fake_ctx = create_fake_context(job_config)
         for machine in locked:
-            teuthology.lock.ops.unlock_one(fake_ctx,
-                                           machine, job_info['owner'],
-                                           job_info['archive_path'])
+            lock_ops.unlock_one(
+                fake_ctx,
+                machine, job_info['owner'],
+                job_info['archive_path']
+            )
     if job_status != 'pass' and job_config.get('nuke-on-error', False):
         log.info('Nuking machines...')
         fake_ctx = create_fake_context(job_config)
-        nuke(fake_ctx, True)
+        nuke.nuke(fake_ctx, True)
 
 
 def run_with_watchdog(process, job_config):
diff --git a/teuthology/lock/ops.py b/teuthology/lock/ops.py
index 5ab995ad7c679c4927df8d722460676c2f5a1314..bc66dd1c62b34a5ae185fa1feec6a1024ddce11a 100644
@@ -10,8 +10,9 @@ import requests
 import teuthology.orchestra.remote
 import teuthology.parallel
 import teuthology.provision
+import teuthology.report as report
+
 from teuthology import misc
-from teuthology import report
 from teuthology.config import config
 from teuthology.contextutil import safe_while
 from teuthology.task import console_log
diff --git a/teuthology/nuke/__init__.py b/teuthology/nuke/__init__.py
index 540a6b836ff9bff2999ffa9e091dff76938b3472..15749702b3c3033bd06d2c8ea635671ac8bdcfc0 100644
@@ -8,8 +8,9 @@ import subprocess
 import yaml
 
 import teuthology
+import teuthology.lock.ops as lock_ops
+
 from teuthology import provision
-from teuthology.lock.ops import unlock_one
 from teuthology.lock.query import is_vm, list_locks, \
     find_stale_locks, get_status
 from teuthology.lock.util import locked_since_seconds
@@ -150,7 +151,7 @@ def stale_openstack_nodes(ctx, instances, locked_nodes):
                              created=locked_since_seconds(node),
                              delay=OPENSTACK_DELAY))
             if not ctx.dry_run:
-                unlock_one(ctx, name, node['locked_by'])
+                lock_ops.unlock_one(ctx, name, node['locked_by'])
             continue
         log.debug("stale-openstack: node " + name + " OK")
 
@@ -296,7 +297,7 @@ def nuke_one(ctx, target, should_unlock, synch_clocks,
         ret = target
     else:
         if should_unlock:
-            unlock_one(ctx, list(target.keys())[0], ctx.owner)
+            lock_ops.unlock_one(ctx, list(target.keys())[0], ctx.owner)
     return ret
 
 
diff --git a/teuthology/test/test_nuke.py b/teuthology/test/test_nuke.py
index 6c02dee40a8b3754e544f09c59a9245b97bd1a56..b061d89b4510e4ee69b9e672b8ed7d4c0812197d 100644
@@ -80,66 +80,48 @@ class TestNuke(object):
         #
         # A node is not of type openstack is left untouched
         #
-        with patch.multiple(
-                nuke,
-                unlock_one=DEFAULT,
-                ) as m:
-            nuke.stale_openstack_nodes(ctx, {
-            }, {
-                name: { 'locked_since': now,
-                        'machine_type': 'mira', },
-            })
-            m['unlock_one'].assert_not_called()
+        with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
+            nuke.stale_openstack_nodes(
+                ctx,
+                {},
+                {name: {'locked_since': now, 'machine_type': 'mira'}},
+            )
+            m_unlock_one.assert_not_called()
         #
         # A node that was just locked and does not have
         # an instance yet is left untouched
         #
-        with patch.multiple(
-                nuke,
-                unlock_one=DEFAULT,
-                ) as m:
-            nuke.stale_openstack_nodes(ctx, {
-            }, {
-                name: { 'locked_since': now,
-                        'machine_type': 'openstack', },
-            })
-            m['unlock_one'].assert_not_called()
+        with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
+            nuke.stale_openstack_nodes(
+                ctx,
+                {},
+                {name: {'locked_since': now, 'machine_type': 'openstack'}},
+            )
+            m_unlock_one.assert_not_called()
         #
         # A node that has been locked for some time and
         # has no instance is unlocked.
         #
         ancient = "2000-11-02 15:43:12.000000"
         me = 'loic@dachary.org'
-        with patch.multiple(
-                nuke,
-                unlock_one=DEFAULT,
-                ) as m:
-            nuke.stale_openstack_nodes(ctx, {
-            }, {
-                name: { 'locked_since': ancient,
-                        'locked_by': me,
-                        'machine_type': 'openstack', },
-            })
-            m['unlock_one'].assert_called_with(
-                ctx, name, me)
+        with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
+            nuke.stale_openstack_nodes(
+                ctx,
+                {},
+                {name: {'locked_since': ancient, 'locked_by': me, 'machine_type': 'openstack'}},
+            )
+            m_unlock_one.assert_called_with(ctx, name, me)
         #
         # A node that has been locked for some time and
         # has an instance is left untouched
         #
-        with patch.multiple(
-                nuke,
-                unlock_one=DEFAULT,
-                ) as m:
-            nuke.stale_openstack_nodes(ctx, {
-                uuid: {
-                    'ID': uuid,
-                    'Name': name,
-                },
-            }, {
-                name: { 'locked_since': ancient,
-                        'machine_type': 'openstack', },
-            })
-            m['unlock_one'].assert_not_called()
+        with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
+            nuke.stale_openstack_nodes(
+                ctx,
+                {uuid: {'ID': uuid, 'Name': name}},
+                {name: {'locked_since': ancient, 'machine_type': 'openstack'}},
+            )
+            m_unlock_one.assert_not_called()
 
     def test_stale_openstack_instances(self):
         if 'OS_AUTH_URL' not in os.environ:
@@ -231,7 +213,9 @@ class TestNuke(object):
             })
             m['destroy'].assert_not_called()
 
-def test_nuke_internal():
+
+@patch("teuthology.lock.ops.unlock_one")
+def test_nuke_internal(m_unlock_one):
     job_config = dict(
         owner='test_owner',
         targets={'user@host1': 'key1', 'user@host2': 'key2'},
@@ -251,43 +235,42 @@ def test_nuke_internal():
     with patch.multiple(
             nuke,
             nuke_helper=DEFAULT,
-            unlock_one=DEFAULT,
             get_status=lambda i: statuses[i],
             ) as m:
         nuke.nuke(ctx, True)
         m['nuke_helper'].assert_called_with(ANY, True, False, True)
-        m['unlock_one'].assert_called()
+        m_unlock_one.assert_called()
+    m_unlock_one.reset_mock()
 
     # don't unlock
     with patch.multiple(
             nuke,
             nuke_helper=DEFAULT,
-            unlock_one=DEFAULT,
             get_status=lambda i: statuses[i],
             ) as m:
         nuke.nuke(ctx, False)
         m['nuke_helper'].assert_called_with(ANY, False, False, True)
-        m['unlock_one'].assert_not_called()
+        m_unlock_one.assert_not_called()
+    m_unlock_one.reset_mock()
 
     # mimicing what teuthology-dispatcher --supervisor does
     with patch.multiple(
             nuke,
             nuke_helper=DEFAULT,
-            unlock_one=DEFAULT,
             get_status=lambda i: statuses[i],
             ) as m:
         nuke.nuke(ctx, False, True, False, True, False)
         m['nuke_helper'].assert_called_with(ANY, False, True, False)
-        m['unlock_one'].assert_not_called()
+        m_unlock_one.assert_not_called()
+    m_unlock_one.reset_mock()
 
     # no targets
     del ctx.config['targets']
     with patch.multiple(
             nuke,
             nuke_helper=DEFAULT,
-            unlock_one=DEFAULT,
             get_status=lambda i: statuses[i],
             ) as m:
         nuke.nuke(ctx, True)
         m['nuke_helper'].assert_not_called()
-        m['unlock_one'].assert_not_called()
+        m_unlock_one.assert_not_called()
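
The test changes follow from the same pattern: once teuthology/nuke/__init__.py no
longer holds its own reference via "from teuthology.lock.ops import unlock_one",
patch.multiple(nuke, unlock_one=DEFAULT) has nothing to replace, and the mock must
instead target the defining module. A short sketch of the resulting idiom, using
names from the diff above:

    from unittest.mock import patch

    # Every call site now goes through the lock_ops module attribute
    # (lock_ops.unlock_one(...)), so patching the defining module is
    # visible everywhere at once.
    with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
        ...  # exercise code that calls lock_ops.unlock_one
        m_unlock_one.assert_not_called()

    # Decorator form, as used for test_nuke_internal above:
    @patch("teuthology.lock.ops.unlock_one")
    def test_nuke_internal(m_unlock_one):
        ...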