git.apps.os.sepia.ceph.com Git - teuthology.git/commitdiff
teuthology: convert deprecated method name 1706/head
author: Patrick Donnelly <pdonnell@redhat.com>
Thu, 20 Jan 2022 15:11:48 +0000 (10:11 -0500)
committer: Patrick Donnelly <pdonnell@redhat.com>
Thu, 20 Jan 2022 15:11:48 +0000 (10:11 -0500)
Avoiding this warning:

    /home/runner/work/teuthology/teuthology/teuthology/task/install/__init__.py:285: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
23 files changed:
teuthology/dispatcher/supervisor.py
teuthology/kill.py
teuthology/lock/ops.py
teuthology/misc.py
teuthology/nuke/actions.py
teuthology/orchestra/daemon/cephadmunit.py
teuthology/orchestra/daemon/state.py
teuthology/orchestra/daemon/systemd.py
teuthology/provision/cloud/openstack.py
teuthology/provision/downburst.py
teuthology/provision/fog.py
teuthology/provision/openstack.py
teuthology/provision/pelagos.py
teuthology/repo_utils.py
teuthology/report.py
teuthology/results.py
teuthology/suite/__init__.py
teuthology/suite/run.py
teuthology/suite/util.py
teuthology/task/install/__init__.py
teuthology/task/kernel.py
teuthology/task/pcp.py
teuthology/worker.py

index 3003d11abb70abc54208010aa2d055b5fee58648..3036d0335c08b9f00bb34cfd36ffb25bd203122b 100644 (file)
@@ -109,7 +109,7 @@ def run_job(job_config, teuth_bin_path, archive_dir, verbose):
     if 'config' in job_config:
         inner_config = job_config.pop('config')
         if not isinstance(inner_config, dict):
-            log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
+            log.warning("run_job: job_config['config'] isn't a dict, it's a %s",
                      str(type(inner_config)))
         else:
             job_config.update(inner_config)
index 770bf8eafa0c5821fd7599da32f599f775d1f598..583d2723a350a95df024d1e522bab44711784b8d 100755 (executable)
@@ -51,8 +51,8 @@ def kill_run(run_name, archive_base=None, owner=None, machine_type=None,
                 machine_type = run_info['machine_type']
                 owner = run_info['owner']
             else:
-                log.warn("The run info does not have machine type: %s" % run_info)
-                log.warn("Run archive used: %s" % run_archive_dir)
+                log.warning("The run info does not have machine type: %s" % run_info)
+                log.warning("Run archive used: %s" % run_archive_dir)
                 log.info("Using machine type '%s' and owner '%s'" % (machine_type, owner))
         elif machine_type is None:
             raise RuntimeError("The run is still entirely enqueued; " +
index 7ad8bb17044c4a9a74855b3ffeb36b5fc4c74061..41fc9ff192e4526b1b24b6a1a955fd41e4ed3e10 100644 (file)
@@ -209,7 +209,7 @@ def unlock_one(ctx, name, user, description=None):
                     return response.ok
             # Work around https://github.com/kennethreitz/requests/issues/2364
             except requests.ConnectionError as e:
-                log.warn("Saw %s while unlocking; retrying...", str(e))
+                log.warning("Saw %s while unlocking; retrying...", str(e))
     try:
         reason = response.json().get('message')
     except ValueError:
@@ -436,5 +436,5 @@ def block_and_lock_machines(ctx, total_requested, machine_type, reimage=True):
             "{total} machines locked ({new} new); need {more} more".format(
                 total=len(all_locked), new=len(newly_locked), more=requested)
         )
-        log.warn('Could not lock enough machines, waiting...')
+        log.warning('Could not lock enough machines, waiting...')
         time.sleep(10)
index 2432d46d5f04127c1cec3d719f3ab6acd2ca7e52..72d1ca8fdd4a12655b50aa02a0798983ece1bb7b 100644 (file)
@@ -778,7 +778,7 @@ def pull_directory_tarball(remote, remotedir, localfile):
 
 
 def get_wwn_id_map(remote, devs):
-    log.warn("Entering get_wwn_id_map, a deprecated function that will be removed")
+    log.warning("Entering get_wwn_id_map, a deprecated function that will be removed")
     return dict((d, d) for d in devs)
 
 
@@ -797,7 +797,7 @@ def get_scratch_devices(remote):
     for dev in devs:
         if 'vda' in dev:
             devs.remove(dev)
-            log.warn("Removing root device: %s from device list" % dev)
+            log.warning("Removing root device: %s from device list" % dev)
 
     log.debug('devs={d}'.format(d=devs))
 
@@ -1109,7 +1109,7 @@ def ssh_keyscan(hostnames, _raise=True):
         missing = set(hostnames) - set(keys_dict.keys())
         msg = "Unable to scan these host keys: %s" % ' '.join(missing)
         if not _raise:
-            log.warn(msg)
+            log.warning(msg)
         else:
             raise RuntimeError(msg)
     return keys_dict
index ef7ca2d272da5b45a044961be5496c24e20c10a4..854ca27d4858bcc59c61d51c82abbede4da7829d 100644 (file)
@@ -150,7 +150,7 @@ def stale_kernel_mount(remote):
 def reboot(ctx, remotes):
     for remote in remotes:
         if stale_kernel_mount(remote):
-            log.warn('Stale kernel mount on %s!', remote.name)
+            log.warning('Stale kernel mount on %s!', remote.name)
             log.info('force/no-sync rebooting %s', remote.name)
             # -n is ignored in systemd versions through v229, which means this
             # only works on trusty -- on 7.3 (v219) and xenial (v229) reboot -n
index fff1319c1543e3dd2e72363158bdfb4f08f82bb1..9b579da08e830103a057c27884383157e85f0902 100644 (file)
@@ -132,7 +132,7 @@ class CephadmUnit(DaemonState):
         Start this daemon instance.
         """
         if self.running():
-            self.log.warn('Restarting a running daemon')
+            self.log.warning('Restarting a running daemon')
             self.restart()
             return
         self._start_logger()
index fd9baa54198afe128f755ae445f241b66bce1b99..c3b6ddad933378943b8907c5255389cb855027b6 100644 (file)
@@ -118,7 +118,7 @@ class DaemonState(object):
         Start this daemon instance.
         """
         if self.running():
-            self.log.warn('Restarting a running daemon')
+            self.log.warning('Restarting a running daemon')
         self.restart()
 
     def stop(self, timeout=300):
index 204f9422c698fee01eca1a72fc50ec3574f6507e..fd833b84fb822dc9a239040f72b98869c1fc054e 100644 (file)
@@ -156,7 +156,7 @@ class SystemDState(DaemonState):
 
         :param extra_args: Extra keyword arguments to be added.
         """
-        self.log.warn(
+        self.log.warning(
                 "restart_with_args() is not supported with systemd; performing"
                 "normal restart")
         self.restart()
@@ -180,7 +180,7 @@ class SystemDState(DaemonState):
 
         :param sig: signal to send
         """
-        self.log.warn("systemd may restart daemons automatically")
+        self.log.warning("systemd may restart daemons automatically")
         pid = self.pid
         self.log.info("Sending signal %s to process %s", sig, pid)
         sig = '-' + str(sig)
@@ -191,7 +191,7 @@ class SystemDState(DaemonState):
         Start this daemon instance.
         """
         if self.running():
-            self.log.warn('Restarting a running daemon')
+            self.log.warning('Restarting a running daemon')
             self.restart()
             return
         self.remote.run(args=[run.Raw(self.start_cmd)])
index 39a1d0e6bad025c5fc80daaf880c2bd1ed6c5609..d8b838b13e9c65989269ea4928eb70dbaecd62dc 100644 (file)
@@ -132,7 +132,7 @@ class OpenStackProvider(Provider):
                 else:
                     self._networks = list()
             except AttributeError:
-                log.warn("Unable to list networks for %s", self.driver)
+                log.warning("Unable to list networks for %s", self.driver)
                 self._networks = list()
         return self._networks
 
@@ -150,7 +150,7 @@ class OpenStackProvider(Provider):
                     self.driver.ex_list_security_groups
                 )
             except AttributeError:
-                log.warn("Unable to list security groups for %s", self.driver)
+                log.warning("Unable to list security groups for %s", self.driver)
                 self._security_groups = list()
         return self._security_groups
 
@@ -426,7 +426,7 @@ class OpenStackProvisioner(base.Provisioner):
         msg = "Unknown error locating %s"
         if not matches:
             msg = "No nodes found with name '%s'" % self.name
-            log.warn(msg)
+            log.warning(msg)
             return
         elif len(matches) > 1:
             msg = "More than one node found with name '%s'"
@@ -444,9 +444,9 @@ class OpenStackProvisioner(base.Provisioner):
         self._destroy_volumes()
         nodes = self._find_nodes()
         if not nodes:
-            log.warn("Didn't find any nodes named '%s' to destroy!", self.name)
+            log.warning("Didn't find any nodes named '%s' to destroy!", self.name)
             return True
         if len(nodes) > 1:
-            log.warn("Found multiple nodes named '%s' to destroy!", self.name)
+            log.warning("Found multiple nodes named '%s' to destroy!", self.name)
         log.info("Destroying nodes: %s", nodes)
         return all([node.destroy() for node in nodes])
index 90feea239c7aa71f13e888c5b22de2cbf035799b..17b6818b0724e3f120e2e85ab5715aa4ab00274f 100644 (file)
@@ -165,7 +165,7 @@ class Downburst(object):
         if proc.returncode != 0:
             not_found_msg = "no domain with matching name '%s'" % self.shortname
             if not_found_msg in err:
-                log.warn("Ignoring error during destroy: %s", err)
+                log.warning("Ignoring error during destroy: %s", err)
                 return True
             log.error("Error destroying %s: %s", self.name, err)
             return False
@@ -307,7 +307,7 @@ def get_distro_from_downburst():
     executable_cmd = downburst_executable()
     environment_dict = downburst_environment()
     if not executable_cmd:
-        log.warn("Downburst not found!")
+        log.warning("Downburst not found!")
         log.info('Using default values for supported os_type/os_version')
         return default_table
     try:
index d4ad3444b21201e3a5767a9378c2f97899e5f471..9be85b61c84bb77f3bc84c96b90974ef645f0ac8 100644 (file)
@@ -29,7 +29,7 @@ def enabled(warn=False):
     params = ['endpoint', 'api_token', 'user_token', 'machine_types']
     unset = [param for param in params if not fog_conf.get(param)]
     if unset and warn:
-        log.warn(
+        log.warning(
             "FOG disabled; set the following config options to enable: %s",
             ' '.join(unset),
         )
index 1d1812cf01b231426431d4c83e37253ec224aa2d..23066cda3883131f83d5107fe9970dae86fb1182 100644 (file)
@@ -77,7 +77,7 @@ class ProvisionOpenStack(OpenStack):
             if 'No volume with a name or ID' not in e.output:
                 raise e
         if volume_id:
-            log.warn("Volume {} already exists with ID {}; using it"
+            log.warning("Volume {} already exists with ID {}; using it"
                      .format(volume_name, volume_id))
         volume_id = self._openstack(
             "volume create %s" % config['openstack'].get('volume-create','')
@@ -106,7 +106,7 @@ class ProvisionOpenStack(OpenStack):
                         log.debug("volume %s not in '%s' status yet"
                                   % (volume_id, status))
                 except subprocess.CalledProcessError:
-                        log.warn("volume " + volume_id +
+                        log.warning("volume " + volume_id +
                                  " not information available yet")
 
     def _attach_volume(self, volume_id, name):
index 4e3d15e66a4fdda98d1ed6ae2d700d11376a7bf2..5dd04a4faec6d3e7300f812a673fc4a4a3b9b18d 100644 (file)
@@ -26,7 +26,7 @@ def enabled(warn=False):
     params = ['endpoint', 'machine_types']
     unset = [_ for _ in params if not conf.get(_)]
     if unset and warn:
-        log.warn(
+        log.warning(
             "Pelagos is disabled; set the following config options to enable: %s",
             ' '.join(unset),
         )
index 582ff7999373f20a33b6a52e4f3095465ce03234..916a34076bf27567096909034da8cab1a6c1cd9f 100644 (file)
@@ -431,7 +431,7 @@ def bootstrap_teuthology(dest_path):
         log.info("Bootstrap exited with status %s", returncode)
         if returncode != 0:
             for line in out.split():
-                log.warn(line.strip())
+                log.warning(line.strip())
             venv_path = os.path.join(dest_path, 'virtualenv')
             log.info("Removing %s", venv_path)
             shutil.rmtree(venv_path, ignore_errors=True)
index 08e115cd4d5e55a7b4d6a1af59b65d5e46d74b9c..710b778a983e6c29daec6564c2f9cc08179a7a79 100644 (file)
@@ -199,7 +199,7 @@ class ResultsReporter(object):
 
         if not self.base_uri:
             msg = "No results_server set in {yaml}; cannot report results"
-            self.log.warn(msg.format(yaml=config.yaml_path))
+            self.log.warning(msg.format(yaml=config.yaml_path))
 
     def _make_session(self, max_retries=10):
         session = requests.Session()
index 73953e02af93582c536fa62bf5bad33965657e31..5339b68e1f081adb9955959109a5546a470813e7 100644 (file)
@@ -53,7 +53,7 @@ def results(archive_dir, name, email, timeout, dry_run):
     reporter = ResultsReporter()
     while timeout > 0:
         if time.time() - starttime > timeout:
-            log.warn('test(s) did not finish before timeout of %d seconds',
+            log.warning('test(s) did not finish before timeout of %d seconds',
                      timeout)
             break
         jobs = reporter.get_jobs(name, fields=['job_id', 'status'])
index 39dcbc181cb2fed384ee91311e1ccc541a7ccacb..95b4123e9f337e44d77c7071b2999f2e77988c8b 100644 (file)
@@ -127,7 +127,7 @@ def main(args):
     if conf.rerun:
         rerun_filters = get_rerun_filters(conf.rerun, conf.rerun_statuses)
         if len(rerun_filters['descriptions']) == 0:
-            log.warn(
+            log.warning(
                 "No jobs matched the status filters: %s",
                 conf.rerun_statuses,
             )
index 24a0153744118b24caa7b9bc04b89a57852c8e3d..f8d789e19d5fd62872841a9391368d8e064cfd5f 100644 (file)
@@ -669,6 +669,6 @@ Note: If you still want to go ahead, use --job-threshold 0'''
                  (len(configs) - count),
                  len(configs))
         if missing_count:
-            log.warn('Scheduled %d/%d jobs that are missing packages!',
+            log.warning('Scheduled %d/%d jobs that are missing packages!',
                      missing_count, count)
         return count
index 5933f5689128a597fa4d21d61cb7f8c495939ec2..d272442d5c09158bed7b8786b0bb2d7b7df11546 100644 (file)
@@ -293,7 +293,7 @@ def get_arch(machine_type):
     """
     result = teuthology.lock.query.list_locks(machine_type=machine_type, count=1)
     if not result:
-        log.warn("No machines found with machine_type %s!", machine_type)
+        log.warning("No machines found with machine_type %s!", machine_type)
     else:
         return result[0]['arch']
 
index 00a46a44656b13a783ba0bad1484e4ea3f9b4a7a..bc481d8f9da7a55102afee9fa6904c817096fcdb 100644 (file)
@@ -282,7 +282,7 @@ def upgrade_remote_to_config(ctx, config):
             # take any remote in the dict
             remote = next(iter(remotes_dict))
             if remote in remotes:
-                log.warn('remote %s came up twice (role %s)', remote, role)
+                log.warning('remote %s came up twice (role %s)', remote, role)
                 continue
             remotes[remote] = config.get(role)
 
index f235a9039ea31984d2a345f14bc74ea49d3e26a7..892c382f79d20b9253f9add8dbad00627f5f305a 100644 (file)
@@ -661,7 +661,7 @@ def enable_disable_kdb(ctx, config):
                         'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc'
                         ])
             except run.CommandFailedError:
-                log.warn('Kernel does not support kdb')
+                log.warning('Kernel does not support kdb')
         else:
             log.info('Disabling kdb on {role}...'.format(role=role))
             # Add true pipe so command doesn't fail on kernel without kdb support.
@@ -675,7 +675,7 @@ def enable_disable_kdb(ctx, config):
                         'true',
                         ])
             except run.CommandFailedError:
-                log.warn('Kernel does not support kdb')
+                log.warning('Kernel does not support kdb')
 
 
 def wait_for_reboot(ctx, need_install, timeout, config, distro=False):
index c4a6b61a40c15777852375e218218135af14d397..80458a1317bbe2b063e271d483ee0e2214b547a3 100644 (file)
@@ -163,7 +163,7 @@ class GraphiteGrapher(PCPGrapher):
             )
             resp = requests.get(url, timeout=GRAPHITE_DOWNLOAD_TIMEOUT)
             if not resp.ok:
-                log.warn(
+                log.warning(
                     "Graph download failed with error %s %s: %s",
                     resp.status_code,
                     resp.reason,
index 1437083e60353c3b2d2b4b6ab7d09c55c0c94c81..be5d6c6501fe57c707881af47c063316c3b0bd4c 100644 (file)
@@ -253,7 +253,7 @@ def run_job(job_config, teuth_bin_path, archive_dir, verbose):
     if 'config' in job_config:
         inner_config = job_config.pop('config')
         if not isinstance(inner_config, dict):
-            log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
+            log.warning("run_job: job_config['config'] isn't a dict, it's a %s",
                      str(type(inner_config)))
         else:
             job_config.update(inner_config)