from .config import config
from .config import set_config_attr
from .contextutil import safe_while
-from .lockstatus import get_status
log = logging.getLogger(__name__)
-def is_vm(name):
- return get_status(name)['is_vm']
+def get_status(name):
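+    """
+    Query the lock server for the status of a single node.
+
+    :param name: the node's hostname; it is canonicalized before the lookup
+    :returns:    the status dict decoded from the server's JSON response, or
+                 None (with a warning logged) if the request fails
+    """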
+    name = misc.canonicalize_hostname(name, user=None)
+    uri = os.path.join(config.lock_server, 'nodes', name, '')
+    response = requests.get(uri)
+    success = response.ok
+    if success:
+        return response.json()
+    log.warning(
+        "Failed to query lock server for status of {name}".format(name=name))
+    return None
+
+
+def is_vm(name=None, status=None):
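+    """
+    Report whether a node is a virtual machine.
+
+    Accepts either a hostname (which triggers a lock server lookup via
+    get_status()) or an already-fetched status dict; raises ValueError when
+    neither is provided.
+    """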
+    if status is None:
+        if name is None:
+            raise ValueError("Must provide either name or status, or both")
+        name = misc.canonicalize_hostname(name)
+        status = get_status(name)
+    return status.get('is_vm', False)
def get_distro_from_downburst():
statuses = get_statuses(machines)
owner = ctx.owner or misc.get_user()
for machine in statuses:
- if machine['is_vm'] and machine['locked'] and \
+ if is_vm(status=machine) and machine['locked'] and \
(machines or machine['locked_by'] == owner):
vmachines.append(machine['name'])
if vmachines:
# When listing, only show the vm_host's name, not every detail
for s in statuses:
- if not s.get('is_vm', False):
+ if not is_vm(status=s):
continue
# with an OpenStack API, there is no host for a VM
if s['vm_host'] is None:
if ctx.owner is None and user is None:
user = misc.get_user()
# If none of them are vpm, do them all in one shot
- if not filter(is_vm, machines):
+ if not filter(lambda x: is_vm(name=x), machines):
res = unlock_many(machines, user)
return 0 if res else 1
for machine in machines:
+++ /dev/null
-import requests
-import os
-import logging
-from .config import config
-from .misc import canonicalize_hostname
-
-log = logging.getLogger(__name__)
-
-
-def get_status(name):
- name = canonicalize_hostname(name, user=None)
- uri = os.path.join(config.lock_server, 'nodes', name, '')
- response = requests.get(uri)
- success = response.ok
- if success:
- return response.json()
- log.warning(
- "Failed to query lock server for status of {name}".format(name=name))
- return None
import datetime
stamp = datetime.datetime.now().strftime("%y%m%d%H%M")
-is_vm = lambda x: x.startswith('vpm') or x.startswith('ubuntu@vpm')
is_arm = lambda x: x.startswith('tala') or x.startswith(
'ubuntu@tala') or x.startswith('saya') or x.startswith('ubuntu@saya')
from ..config import config, FakeNamespace
from ..lock import (
- list_locks, locked_since_seconds, unlock_one, find_stale_locks
+ list_locks, locked_since_seconds, unlock_one, find_stale_locks, get_status,
+ is_vm
)
-from ..lockstatus import get_status
from ..misc import (
canonicalize_hostname, config_file, decanonicalize_hostname, merge_configs,
get_user, sh
if 'vpm' in shortname:
return
status_info = get_status(host)
- if status_info['is_vm'] and status_info['machine_type'] == 'openstack':
+ if is_vm(status=status_info) and \
+ status_info['machine_type'] == 'openstack':
return
log.debug('shortname: %s' % shortname)
log.debug('{ctx}'.format(ctx=ctx))
import sys
import time
-from teuthology import lockstatus as ls
+from teuthology import lock
from teuthology.config import config
from ..exceptions import ConsoleError
raise RuntimeError("libvirt not found")
self.shortname = remote.getShortName(name)
- status_info = ls.get_status(self.shortname)
+ status_info = lock.get_status(self.shortname)
try:
- if status_info.get('is_vm', False):
+ if lock.is_vm(status=status_info):
phys_host = status_info['vm_host']['name'].split('.')[0]
except TypeError:
return
import re
import logging
from cStringIO import StringIO
-from teuthology import lockstatus as ls
import os
import pwd
import tempfile
import console
+from teuthology import lock
+
log = logging.getLogger(__name__)
@property
def machine_type(self):
if not getattr(self, '_machine_type', None):
- remote_info = ls.get_status(self.hostname)
+ remote_info = lock.get_status(self.hostname)
if not remote_info:
return None
self._machine_type = remote_info.get("machine_type", None)
"""
if self.os.package_type != 'rpm':
return
- if misc.is_vm(self.shortname):
+ if lock.is_vm(self.shortname):
return
self.run(args="sudo chcon {con} {path}".format(
con=context, path=file_path))
self._console = getRemoteConsole(self.name)
return self._console
+    @property
+    def is_vm(self):
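+        """
+        Whether this remote is a virtual machine, as reported by the lock
+        server (cached after the first lookup).
+        """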
+        if not hasattr(self, '_is_vm'):
+            self._is_vm = lock.is_vm(self.name)
+        return self._is_vm
+
def __del__(self):
if self.ssh is not None:
self.ssh.close()
"""
Return either VirtualConsole or PhysicalConsole depending on name.
"""
- if misc.is_vm(name):
+ if lock.is_vm(name):
return console.VirtualConsole(name)
return console.PhysicalConsole(
name, ipmiuser, ipmipass, ipmidomain, logfile, timeout)
import logging
from ..misc import decanonicalize_hostname, get_distro, get_distro_version
-from ..lockstatus import get_status
+from ..lock import get_status, is_vm
from .downburst import Downburst
from .openstack import ProvisionOpenStack
machine_type = status_info['machine_type']
os_type = get_distro(ctx)
os_version = get_distro_version(ctx)
- if not status_info.get('is_vm', False):
+ if not is_vm(status=status_info):
return False
if machine_type in cloud.get_types():
status_info = _downburst.status
else:
status_info = get_status(machine_name)
- if not status_info or not status_info.get('is_vm', False):
+ if not status_info or not is_vm(status=status_info):
return True
if user is not None and user != status_info['locked_by']:
msg = "Tried to destroy {node} as {as_user} but it is locked " + \
from ..config import config
from ..contextutil import safe_while
from ..misc import decanonicalize_hostname
-from ..lockstatus import get_status
+from ..lock import get_status
log = logging.getLogger(__name__)
import logging
-from teuthology import lockstatus
+from teuthology import lock
from teuthology.config import config as teuth_config
return
log.info('Checking locks...')
for machine in ctx.config['targets'].iterkeys():
- status = lockstatus.get_status(machine)
+ status = lock.get_status(machine)
log.debug('machine status is %s', repr(status))
assert status is not None, \
'could not read lock status for {name}'.format(name=machine)
import yaml
from teuthology import lock
-from teuthology import lockstatus
from teuthology import misc
from teuthology import provision
from teuthology import report
if len(all_locked) == total_requested:
vmlist = []
for lmach in all_locked:
- if misc.is_vm(lmach):
+ if lock.is_vm(lmach):
vmlist.append(lmach)
if vmlist:
log.info('Waiting for virtual machines to come up')
log.info("Error in virtual machine keys")
newscandict = {}
for dkey in all_locked.iterkeys():
- stats = lockstatus.get_status(dkey)
+ stats = lock.get_status(dkey)
newscandict[dkey] = stats['ssh_pub_key']
ctx.config['targets'] = newscandict
else:
from cStringIO import StringIO
-from teuthology import misc
-
from teuthology.parallel import parallel
from teuthology.task import ansible
with parallel():
editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh')
for rem in ctx.cluster.remotes.iterkeys():
- if misc.is_vm(rem.shortname):
+ if rem.is_vm:
ansible_hosts.add(rem.shortname)
r = rem.run(args=['test', '-e', '/ceph-qa-ready'],
stdout=StringIO(), check_status=False)
from teuthology.misc import get_archive_dir
from teuthology.orchestra.cluster import Cluster
from teuthology.orchestra import run
-from teuthology.lockstatus import get_status
from . import Task
super(SELinux, self).filter_hosts()
new_cluster = Cluster()
for (remote, roles) in self.cluster.remotes.iteritems():
- status_info = get_status(remote.name)
- if status_info and status_info.get('is_vm', False):
+ if remote.is_vm:
msg = "Excluding {host}: VMs are not yet supported"
log.info(msg.format(host=remote.shortname))
elif remote.os.package_type == 'rpm':
self.start_patchers()
def start_patchers(self):
-        self.patchers = list()
-        self.patchers.append(patch(
+        self.patchers = dict()
+        self.patchers['makedirs'] = patch(
            'teuthology.task.console_log.os.makedirs',
-        ))
-        for patcher in self.patchers:
-            patcher.start()
+        )
+        self.patchers['is_vm'] = patch(
+            'teuthology.lock.is_vm',
+        )
+        self.patchers['get_status'] = patch(
+            'teuthology.lock.get_status',
+        )
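+        # Start every patcher and keep the resulting mocks by name so that
+        # individual tests can adjust their return values.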
+        self.mocks = dict()
+        for name, patcher in self.patchers.items():
+            self.mocks[name] = patcher.start()
+        self.mocks['is_vm'].return_value = False
def teardown(self):
- for patcher in self.patchers:
+ for patcher in self.patchers.values():
patcher.stop()
def test_enabled(self):
self.ctx = FakeNamespace()
self.ctx.config = dict()
- @patch('teuthology.task.selinux.get_status')
- def test_host_exclusion(self, mock_get_status):
- mock_get_status.return_value = None
+ def test_host_exclusion(self):
with patch.multiple(
Remote,
os=DEFAULT,
remote1 = Remote('remote1')
remote1.os = Mock()
remote1.os.package_type = 'rpm'
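+ # Seed the cached value so Remote.is_vm does not query the lock server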
+ remote1._is_vm = False
self.ctx.cluster.add(remote1, ['role1'])
remote2 = Remote('remote2')
remote2.os = Mock()
remote2.os.package_type = 'deb'
+ remote2._is_vm = False
self.ctx.cluster.add(remote2, ['role2'])
task_config = dict()
with SELinux(self.ctx, task_config) as task: