From be1f60179b02cb90a85c773ee88b77e4f1126ae7 Mon Sep 17 00:00:00 2001
From: Zack Cerza
Date: Tue, 7 Mar 2023 13:54:01 -0700
Subject: [PATCH] Fix a circular import

This was exposed during development of the exporter.

Signed-off-by: Zack Cerza
---
 teuthology/beanstalk.py             |  3 +-
 teuthology/dispatcher/__init__.py   | 17 +++---
 teuthology/dispatcher/supervisor.py | 46 +++++++-------
 teuthology/lock/ops.py              |  3 +-
 teuthology/nuke/__init__.py         |  7 ++-
 teuthology/test/test_nuke.py        | 93 ++++++++++++-----------------
 6 files changed, 81 insertions(+), 88 deletions(-)

diff --git a/teuthology/beanstalk.py b/teuthology/beanstalk.py
index 41e2acedd1..abba88ea85 100644
--- a/teuthology/beanstalk.py
+++ b/teuthology/beanstalk.py
@@ -6,8 +6,9 @@ import pprint
 import sys
 from collections import OrderedDict
 
+import teuthology.report as report
+
 from teuthology.config import config
-from teuthology import report
 
 log = logging.getLogger(__name__)
 
diff --git a/teuthology/dispatcher/__init__.py b/teuthology/dispatcher/__init__.py
index 8ce9b65573..69b27072bf 100644
--- a/teuthology/dispatcher/__init__.py
+++ b/teuthology/dispatcher/__init__.py
@@ -8,17 +8,18 @@ import yaml
 from datetime import datetime
 from typing import Dict, List
 
+import teuthology.dispatcher.supervisor as supervisor
+import teuthology.lock.ops as lock_ops
+import teuthology.nuke as nuke
+import teuthology.worker as worker
+
 from teuthology import setup_log_file, install_except_hook
 from teuthology import beanstalk
 from teuthology import report
 from teuthology.config import config as teuth_config
 from teuthology.exceptions import SkipJob
 from teuthology.repo_utils import fetch_qa_suite, fetch_teuthology
-from teuthology.lock.ops import block_and_lock_machines
-from teuthology.dispatcher import supervisor
-from teuthology.worker import prep_job
 from teuthology import safepath
-from teuthology.nuke import nuke
 
 log = logging.getLogger(__name__)
 start_time = datetime.utcnow()
@@ -134,7 +135,7 @@ def main(args):
             keep_running = False
 
         try:
-            job_config, teuth_bin_path = prep_job(
+            job_config, teuth_bin_path = worker.prep_job(
                 job_config,
                 log_file_path,
                 archive_dir,
@@ -175,7 +176,7 @@ def main(args):
             error_message = "Saw error while trying to spawn supervisor."
             log.exception(error_message)
             if 'targets' in job_config:
-                nuke(supervisor.create_fake_context(job_config), True)
+                nuke.nuke(supervisor.create_fake_context(job_config), True)
             report.try_push_job_info(job_config, dict(
                 status='fail',
                 failure_reason=error_message))
@@ -194,7 +195,7 @@ def main(args):
     return max(returncodes)
 
 
-def find_dispatcher_processes() -> Dict[str, List[psutil.Process]] :
+def find_dispatcher_processes() -> Dict[str, List[psutil.Process]]:
     def match(proc):
         cmdline = proc.cmdline()
         if len(cmdline) < 3:
@@ -223,7 +224,7 @@ def find_dispatcher_processes() -> Dict[str, List[psutil.Process]] :
 def lock_machines(job_config):
     report.try_push_job_info(job_config, dict(status='running'))
     fake_ctx = supervisor.create_fake_context(job_config, block=True)
-    block_and_lock_machines(
+    lock_ops.block_and_lock_machines(
         fake_ctx,
         len(job_config['roles']),
         job_config['machine_type'],
diff --git a/teuthology/dispatcher/supervisor.py b/teuthology/dispatcher/supervisor.py
index e5ea4a3205..c38118a378 100644
--- a/teuthology/dispatcher/supervisor.py
+++ b/teuthology/dispatcher/supervisor.py
@@ -8,17 +8,17 @@ import requests
 from urllib.parse import urljoin
 from datetime import datetime
 
-import teuthology
+import teuthology.lock.ops as lock_ops
+import teuthology.nuke as nuke
+
 from teuthology import report
 from teuthology import safepath
 from teuthology.config import config as teuth_config
 from teuthology.exceptions import SkipJob, MaxWhileTries
 from teuthology import setup_log_file, install_except_hook
-from teuthology.lock.ops import reimage_machines
 from teuthology.misc import get_user, archive_logs, compress_logs
 from teuthology.config import FakeNamespace
 from teuthology.job_status import get_status
-from teuthology.nuke import nuke
 from teuthology.kill import kill_job
 from teuthology.task.internal import add_remotes
 from teuthology.misc import decanonicalize_hostname as shortname
@@ -165,6 +165,7 @@ def failure_is_reimage(failure_reason):
     else:
         return False
 
+
 def check_for_reimage_failures_and_mark_down(targets, count=10):
     # Grab paddles history of jobs in the machine
     # and count the number of reimaging errors
@@ -173,9 +174,8 @@ def check_for_reimage_failures_and_mark_down(targets, count=10):
     for k, _ in targets.items():
         machine = k.split('@')[-1]
         url = urljoin(
-                base_url,
-                '/nodes/{0}/jobs/?count={1}'.format(
-                    machine, count)
+            base_url,
+            '/nodes/{0}/jobs/?count={1}'.format(machine, count)
         )
         resp = requests.get(url)
         jobs = resp.json()
@@ -189,15 +189,16 @@ def check_for_reimage_failures_and_mark_down(targets, count=10):
             continue
         # Mark machine down
         machine_name = shortname(k)
-        teuthology.lock.ops.update_lock(
-                machine_name,
-                description='reimage failed {0} times'.format(count),
-                status='down',
-                )
+        lock_ops.update_lock(
+            machine_name,
+            description='reimage failed {0} times'.format(count),
+            status='down',
+        )
         log.error(
             'Reimage failed {0} times ... marking machine down'.format(count)
         )
 
+
 def reimage(job_config):
     # Reimage the targets specified in job config
     # and update their keys in config after reimaging
@@ -206,12 +207,15 @@ def reimage(job_config):
     report.try_push_job_info(ctx.config, dict(status='waiting'))
     targets = job_config['targets']
     try:
-        reimaged = reimage_machines(ctx, targets, job_config['machine_type'])
+        reimaged = lock_ops.reimage_machines(ctx, targets, job_config['machine_type'])
     except Exception as e:
         log.exception('Reimaging error. Nuking machines...')
         # Reimage failures should map to the 'dead' status instead of 'fail'
-        report.try_push_job_info(ctx.config, dict(status='dead', failure_reason='Error reimaging machines: ' + str(e)))
-        nuke(ctx, True)
+        report.try_push_job_info(
+            ctx.config,
+            dict(status='dead', failure_reason='Error reimaging machines: ' + str(e))
+        )
+        nuke.nuke(ctx, True)
         # Machine that fails to reimage after 10 times will be marked down
         check_for_reimage_failures_and_mark_down(targets)
         raise
@@ -241,18 +245,20 @@ def unlock_targets(job_config):
     if not locked:
         return
     job_status = get_status(job_info)
-    if job_status == 'pass' or \
-            (job_config.get('unlock_on_failure', False) and not job_config.get('nuke-on-error', False)):
+    if job_status == 'pass' or (job_config.get('unlock_on_failure', False)
+                                and not job_config.get('nuke-on-error', False)):
         log.info('Unlocking machines...')
         fake_ctx = create_fake_context(job_config)
         for machine in locked:
-            teuthology.lock.ops.unlock_one(fake_ctx,
-                                           machine, job_info['owner'],
-                                           job_info['archive_path'])
+            lock_ops.unlock_one(
+                fake_ctx,
+                machine, job_info['owner'],
+                job_info['archive_path']
+            )
     if job_status != 'pass' and job_config.get('nuke-on-error', False):
         log.info('Nuking machines...')
         fake_ctx = create_fake_context(job_config)
-        nuke(fake_ctx, True)
+        nuke.nuke(fake_ctx, True)
 
 
 def run_with_watchdog(process, job_config):
diff --git a/teuthology/lock/ops.py b/teuthology/lock/ops.py
index 5ab995ad7c..bc66dd1c62 100644
--- a/teuthology/lock/ops.py
+++ b/teuthology/lock/ops.py
@@ -10,8 +10,9 @@ import requests
 import teuthology.orchestra.remote
 import teuthology.parallel
 import teuthology.provision
+import teuthology.report as report
+
 from teuthology import misc
-from teuthology import report
 from teuthology.config import config
 from teuthology.contextutil import safe_while
 from teuthology.task import console_log
diff --git a/teuthology/nuke/__init__.py b/teuthology/nuke/__init__.py
index 540a6b836f..15749702b3 100644
--- a/teuthology/nuke/__init__.py
+++ b/teuthology/nuke/__init__.py
@@ -8,8 +8,9 @@ import subprocess
 import yaml
 
 import teuthology
+import teuthology.lock.ops as lock_ops
+
 from teuthology import provision
-from teuthology.lock.ops import unlock_one
 from teuthology.lock.query import is_vm, list_locks, \
     find_stale_locks, get_status
 from teuthology.lock.util import locked_since_seconds
@@ -150,7 +151,7 @@ def stale_openstack_nodes(ctx, instances, locked_nodes):
                 created=locked_since_seconds(node),
                 delay=OPENSTACK_DELAY))
             if not ctx.dry_run:
-                unlock_one(ctx, name, node['locked_by'])
+                lock_ops.unlock_one(ctx, name, node['locked_by'])
             continue
         log.debug("stale-openstack: node " + name + " OK")
 
@@ -296,7 +297,7 @@ def nuke_one(ctx, target, should_unlock, synch_clocks,
         ret = target
     else:
         if should_unlock:
-            unlock_one(ctx, list(target.keys())[0], ctx.owner)
+            lock_ops.unlock_one(ctx, list(target.keys())[0], ctx.owner)
     return ret
 
 
diff --git a/teuthology/test/test_nuke.py b/teuthology/test/test_nuke.py
index 6c02dee40a..b061d89b45 100644
--- a/teuthology/test/test_nuke.py
+++ b/teuthology/test/test_nuke.py
@@ -80,66 +80,48 @@ class TestNuke(object):
         #
         # A node is not of type openstack is left untouched
         #
-        with patch.multiple(
-                nuke,
-                unlock_one=DEFAULT,
-        ) as m:
-            nuke.stale_openstack_nodes(ctx, {
-            }, {
-                name: { 'locked_since': now,
-                        'machine_type': 'mira', },
-            })
-            m['unlock_one'].assert_not_called()
+        with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
+            nuke.stale_openstack_nodes(
+                ctx,
+                {},
+                {name: {'locked_since': now, 'machine_type': 'mira'}},
+            )
+            m_unlock_one.assert_not_called()
         #
         # A node that was just locked and does not have
         # an instance yet is left untouched
         #
-        with patch.multiple(
-                nuke,
-                unlock_one=DEFAULT,
-        ) as m:
-            nuke.stale_openstack_nodes(ctx, {
-            }, {
-                name: { 'locked_since': now,
-                        'machine_type': 'openstack', },
-            })
-            m['unlock_one'].assert_not_called()
+        with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
+            nuke.stale_openstack_nodes(
+                ctx,
+                {},
+                {name: {'locked_since': now, 'machine_type': 'openstack'}},
+            )
+            m_unlock_one.assert_not_called()
         #
         # A node that has been locked for some time and
         # has no instance is unlocked.
        #
         ancient = "2000-11-02 15:43:12.000000"
         me = 'loic@dachary.org'
-        with patch.multiple(
-                nuke,
-                unlock_one=DEFAULT,
-        ) as m:
-            nuke.stale_openstack_nodes(ctx, {
-            }, {
-                name: { 'locked_since': ancient,
-                        'locked_by': me,
-                        'machine_type': 'openstack', },
-            })
-            m['unlock_one'].assert_called_with(
-                ctx, name, me)
+        with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
+            nuke.stale_openstack_nodes(
+                ctx,
+                {},
+                {name: {'locked_since': ancient, 'locked_by': me, 'machine_type': 'openstack'}},
+            )
+            m_unlock_one.assert_called_with(ctx, name, me)
         #
         # A node that has been locked for some time and
         # has an instance is left untouched
         #
-        with patch.multiple(
-                nuke,
-                unlock_one=DEFAULT,
-        ) as m:
-            nuke.stale_openstack_nodes(ctx, {
-                uuid: {
-                    'ID': uuid,
-                    'Name': name,
-                },
-            }, {
-                name: { 'locked_since': ancient,
-                        'machine_type': 'openstack', },
-            })
-            m['unlock_one'].assert_not_called()
+        with patch("teuthology.lock.ops.unlock_one") as m_unlock_one:
+            nuke.stale_openstack_nodes(
+                ctx,
+                {uuid: {'ID': uuid, 'Name': name}},
+                {name: {'locked_since': ancient, 'machine_type': 'openstack'}},
+            )
+            m_unlock_one.assert_not_called()
 
     def test_stale_openstack_instances(self):
         if 'OS_AUTH_URL' not in os.environ:
@@ -231,7 +213,9 @@ class TestNuke(object):
         })
         m['destroy'].assert_not_called()
 
-def test_nuke_internal():
+
+@patch("teuthology.lock.ops.unlock_one")
+def test_nuke_internal(m_unlock_one):
     job_config = dict(
         owner='test_owner',
         targets={'user@host1': 'key1', 'user@host2': 'key2'},
@@ -251,43 +235,42 @@ def test_nuke_internal():
     with patch.multiple(
         nuke,
         nuke_helper=DEFAULT,
-        unlock_one=DEFAULT,
         get_status=lambda i: statuses[i],
     ) as m:
         nuke.nuke(ctx, True)
         m['nuke_helper'].assert_called_with(ANY, True, False, True)
-        m['unlock_one'].assert_called()
+        m_unlock_one.assert_called()
+        m_unlock_one.reset_mock()
 
     # don't unlock
     with patch.multiple(
         nuke,
         nuke_helper=DEFAULT,
-        unlock_one=DEFAULT,
         get_status=lambda i: statuses[i],
     ) as m:
         nuke.nuke(ctx, False)
         m['nuke_helper'].assert_called_with(ANY, False, False, True)
-        m['unlock_one'].assert_not_called()
+        m_unlock_one.assert_not_called()
+        m_unlock_one.reset_mock()
 
     # mimicing what teuthology-dispatcher --supervisor does
     with patch.multiple(
         nuke,
         nuke_helper=DEFAULT,
-        unlock_one=DEFAULT,
         get_status=lambda i: statuses[i],
     ) as m:
         nuke.nuke(ctx, False, True, False, True, False)
         m['nuke_helper'].assert_called_with(ANY, False, True, False)
-        m['unlock_one'].assert_not_called()
+        m_unlock_one.assert_not_called()
+        m_unlock_one.reset_mock()
 
     # no targets
     del ctx.config['targets']
     with patch.multiple(
         nuke,
         nuke_helper=DEFAULT,
-        unlock_one=DEFAULT,
         get_status=lambda i: statuses[i],
     ) as m:
         nuke.nuke(ctx, True)
         m['nuke_helper'].assert_not_called()
-        m['unlock_one'].assert_not_called()
+        m_unlock_one.assert_not_called()
-- 
2.39.5
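
A note on the pattern the patch applies, with a small illustrative sketch.
The circular imports are broken by binding whole modules under an alias
(import teuthology.report as report, import teuthology.lock.ops as lock_ops)
instead of importing individual names (from teuthology import report,
from teuthology.lock.ops import unlock_one). A "from X import name" statement
has to find name on module X at import time, which can fail with ImportError
when X is only partially initialized because it is in the middle of importing
the current module. Binding the module object and resolving attributes at call
time (lock_ops.unlock_one(...)) defers the lookup until both modules have
finished loading. The sketch below uses hypothetical modules pkg/report.py and
pkg/jobs.py, not real teuthology code:

    # pkg/report.py -- hypothetical module that needs a helper from pkg/jobs.py
    import pkg.jobs as jobs     # bind the module object; attributes are
                                # looked up later, at call time

    def push(job):
        # Attribute resolved here, once pkg.jobs is fully initialized.
        return jobs.describe(job)

    # pkg/jobs.py -- hypothetical module that also needs pkg/report.py (a cycle)
    import pkg.report as report

    def describe(job):
        return "job %r" % (job,)

    def finish(job):
        # Lazy lookup again; "from pkg.report import push" at the top of this
        # file could raise ImportError while pkg.report is partially imported.
        return report.push(job)

A side effect visible in the test changes: once callers go through
lock_ops.unlock_one at call time, teuthology.nuke no longer carries unlock_one
as its own attribute, so the tests patch "teuthology.lock.ops.unlock_one" at
its defining module rather than patching the name on the nuke module.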