if 'config' in job_config:
inner_config = job_config.pop('config')
if not isinstance(inner_config, dict):
- log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
+ log.warning("run_job: job_config['config'] isn't a dict, it's a %s",
str(type(inner_config)))
else:
job_config.update(inner_config)
machine_type = run_info['machine_type']
owner = run_info['owner']
else:
- log.warn("The run info does not have machine type: %s" % run_info)
- log.warn("Run archive used: %s" % run_archive_dir)
+ log.warning("The run info does not have machine type: %s" % run_info)
+ log.warning("Run archive used: %s" % run_archive_dir)
log.info("Using machine type '%s' and owner '%s'" % (machine_type, owner))
elif machine_type is None:
raise RuntimeError("The run is still entirely enqueued; " +
return response.ok
# Work around https://github.com/kennethreitz/requests/issues/2364
except requests.ConnectionError as e:
- log.warn("Saw %s while unlocking; retrying...", str(e))
+ log.warning("Saw %s while unlocking; retrying...", str(e))
try:
reason = response.json().get('message')
except ValueError:
"{total} machines locked ({new} new); need {more} more".format(
total=len(all_locked), new=len(newly_locked), more=requested)
)
- log.warn('Could not lock enough machines, waiting...')
+ log.warning('Could not lock enough machines, waiting...')
time.sleep(10)
def get_wwn_id_map(remote, devs):
- log.warn("Entering get_wwn_id_map, a deprecated function that will be removed")
+ log.warning("Entering get_wwn_id_map, a deprecated function that will be removed")
return dict((d, d) for d in devs)
for dev in devs:
if 'vda' in dev:
devs.remove(dev)
- log.warn("Removing root device: %s from device list" % dev)
+ log.warning("Removing root device: %s from device list" % dev)
log.debug('devs={d}'.format(d=devs))
missing = set(hostnames) - set(keys_dict.keys())
msg = "Unable to scan these host keys: %s" % ' '.join(missing)
if not _raise:
- log.warn(msg)
+ log.warning(msg)
else:
raise RuntimeError(msg)
return keys_dict
def reboot(ctx, remotes):
for remote in remotes:
if stale_kernel_mount(remote):
- log.warn('Stale kernel mount on %s!', remote.name)
+ log.warning('Stale kernel mount on %s!', remote.name)
log.info('force/no-sync rebooting %s', remote.name)
# -n is ignored in systemd versions through v229, which means this
# only works on trusty -- on 7.3 (v219) and xenial (v229) reboot -n
Start this daemon instance.
"""
if self.running():
- self.log.warn('Restarting a running daemon')
+ self.log.warning('Restarting a running daemon')
self.restart()
return
self._start_logger()
Start this daemon instance.
"""
if self.running():
- self.log.warn('Restarting a running daemon')
+ self.log.warning('Restarting a running daemon')
self.restart()
def stop(self, timeout=300):
:param extra_args: Extra keyword arguments to be added.
"""
- self.log.warn(
-     "restart_with_args() is not supported with systemd; performing"
-     "normal restart")
+ self.log.warning(
+     "restart_with_args() is not supported with systemd; performing "
+     "normal restart")
self.restart()
:param sig: signal to send
"""
- self.log.warn("systemd may restart daemons automatically")
+ self.log.warning("systemd may restart daemons automatically")
pid = self.pid
self.log.info("Sending signal %s to process %s", sig, pid)
sig = '-' + str(sig)
Start this daemon instance.
"""
if self.running():
- self.log.warn('Restarting a running daemon')
+ self.log.warning('Restarting a running daemon')
self.restart()
return
self.remote.run(args=[run.Raw(self.start_cmd)])
else:
self._networks = list()
except AttributeError:
- log.warn("Unable to list networks for %s", self.driver)
+ log.warning("Unable to list networks for %s", self.driver)
self._networks = list()
return self._networks
self.driver.ex_list_security_groups
)
except AttributeError:
- log.warn("Unable to list security groups for %s", self.driver)
+ log.warning("Unable to list security groups for %s", self.driver)
self._security_groups = list()
return self._security_groups
msg = "Unknown error locating %s"
if not matches:
msg = "No nodes found with name '%s'" % self.name
- log.warn(msg)
+ log.warning(msg)
return
elif len(matches) > 1:
msg = "More than one node found with name '%s'"
self._destroy_volumes()
nodes = self._find_nodes()
if not nodes:
- log.warn("Didn't find any nodes named '%s' to destroy!", self.name)
+ log.warning("Didn't find any nodes named '%s' to destroy!", self.name)
return True
if len(nodes) > 1:
- log.warn("Found multiple nodes named '%s' to destroy!", self.name)
+ log.warning("Found multiple nodes named '%s' to destroy!", self.name)
log.info("Destroying nodes: %s", nodes)
return all([node.destroy() for node in nodes])
if proc.returncode != 0:
not_found_msg = "no domain with matching name '%s'" % self.shortname
if not_found_msg in err:
- log.warn("Ignoring error during destroy: %s", err)
+ log.warning("Ignoring error during destroy: %s", err)
return True
log.error("Error destroying %s: %s", self.name, err)
return False
executable_cmd = downburst_executable()
environment_dict = downburst_environment()
if not executable_cmd:
- log.warn("Downburst not found!")
+ log.warning("Downburst not found!")
log.info('Using default values for supported os_type/os_version')
return default_table
try:
params = ['endpoint', 'api_token', 'user_token', 'machine_types']
unset = [param for param in params if not fog_conf.get(param)]
if unset and warn:
- log.warn(
+ log.warning(
"FOG disabled; set the following config options to enable: %s",
' '.join(unset),
)
if 'No volume with a name or ID' not in e.output:
raise e
if volume_id:
- log.warn("Volume {} already exists with ID {}; using it"
+ log.warning("Volume {} already exists with ID {}; using it"
.format(volume_name, volume_id))
volume_id = self._openstack(
"volume create %s" % config['openstack'].get('volume-create','')
log.debug("volume %s not in '%s' status yet"
% (volume_id, status))
except subprocess.CalledProcessError:
- log.warn("volume " + volume_id +
-     " not information available yet")
+ log.warning("volume " + volume_id +
+     " no information available yet")
def _attach_volume(self, volume_id, name):
params = ['endpoint', 'machine_types']
unset = [_ for _ in params if not conf.get(_)]
if unset and warn:
- log.warn(
+ log.warning(
"Pelagos is disabled; set the following config options to enable: %s",
' '.join(unset),
)
log.info("Bootstrap exited with status %s", returncode)
if returncode != 0:
for line in out.split():
- log.warn(line.strip())
+ log.warning(line.strip())
venv_path = os.path.join(dest_path, 'virtualenv')
log.info("Removing %s", venv_path)
shutil.rmtree(venv_path, ignore_errors=True)
if not self.base_uri:
msg = "No results_server set in {yaml}; cannot report results"
- self.log.warn(msg.format(yaml=config.yaml_path))
+ self.log.warning(msg.format(yaml=config.yaml_path))
def _make_session(self, max_retries=10):
session = requests.Session()
reporter = ResultsReporter()
while timeout > 0:
if time.time() - starttime > timeout:
- log.warn('test(s) did not finish before timeout of %d seconds',
+ log.warning('test(s) did not finish before timeout of %d seconds',
timeout)
break
jobs = reporter.get_jobs(name, fields=['job_id', 'status'])
if conf.rerun:
rerun_filters = get_rerun_filters(conf.rerun, conf.rerun_statuses)
if len(rerun_filters['descriptions']) == 0:
- log.warn(
+ log.warning(
"No jobs matched the status filters: %s",
conf.rerun_statuses,
)
(len(configs) - count),
len(configs))
if missing_count:
- log.warn('Scheduled %d/%d jobs that are missing packages!',
+ log.warning('Scheduled %d/%d jobs that are missing packages!',
missing_count, count)
return count
"""
result = teuthology.lock.query.list_locks(machine_type=machine_type, count=1)
if not result:
- log.warn("No machines found with machine_type %s!", machine_type)
+ log.warning("No machines found with machine_type %s!", machine_type)
else:
return result[0]['arch']
# take any remote in the dict
remote = next(iter(remotes_dict))
if remote in remotes:
- log.warn('remote %s came up twice (role %s)', remote, role)
+ log.warning('remote %s came up twice (role %s)', remote, role)
continue
remotes[remote] = config.get(role)
'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc'
])
except run.CommandFailedError:
- log.warn('Kernel does not support kdb')
+ log.warning('Kernel does not support kdb')
else:
log.info('Disabling kdb on {role}...'.format(role=role))
# Add true pipe so command doesn't fail on kernel without kdb support.
'true',
])
except run.CommandFailedError:
- log.warn('Kernel does not support kdb')
+ log.warning('Kernel does not support kdb')
def wait_for_reboot(ctx, need_install, timeout, config, distro=False):
)
resp = requests.get(url, timeout=GRAPHITE_DOWNLOAD_TIMEOUT)
if not resp.ok:
- log.warn(
+ log.warning(
"Graph download failed with error %s %s: %s",
resp.status_code,
resp.reason,
if 'config' in job_config:
inner_config = job_config.pop('config')
if not isinstance(inner_config, dict):
- log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
+ log.warning("run_job: job_config['config'] isn't a dict, it's a %s",
str(type(inner_config)))
else:
job_config.update(inner_config)