git.apps.os.sepia.ceph.com Git - teuthology.git/commitdiff
More intelligently implement is_vm()
author     Zack Cerza <zack@redhat.com>
           Thu, 5 Jan 2017 20:24:11 +0000 (13:24 -0700)
committer  Zack Cerza <zack@redhat.com>
           Fri, 24 Feb 2017 16:03:33 +0000 (09:03 -0700)
Move to one solid implementation instead of multiple (some naive)
implementations.

Signed-off-by: Zack Cerza <zack@redhat.com>
14 files changed:
teuthology/lock.py
teuthology/lockstatus.py [deleted file]
teuthology/misc.py
teuthology/nuke/__init__.py
teuthology/orchestra/console.py
teuthology/orchestra/remote.py
teuthology/provision/__init__.py
teuthology/provision/downburst.py
teuthology/task/internal/check_lock.py
teuthology/task/internal/lock_machines.py
teuthology/task/internal/vm_setup.py
teuthology/task/selinux.py
teuthology/test/task/test_console_log.py
teuthology/test/task/test_selinux.py

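For context, a minimal sketch of how callers use the consolidated helpers after
this change. The hostname is a made-up example, and the snippet assumes
config.lock_server points at a reachable lock server:

    from teuthology import lock

    # Ask the lock server for the node's record (returns None on failure).
    status = lock.get_status('smithi001.front.sepia.ceph.com')  # example host

    # is_vm() accepts either a node name or an already-fetched status dict;
    # passing the status avoids a second query to the lock server.
    if status and lock.is_vm(status=status):
        print('node is a virtual machine')

Accepting an optional status dict lets callers that already hold a node's
status (for example the loops in lock.py's main()) reuse it instead of
re-querying the server.
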
diff --git a/teuthology/lock.py b/teuthology/lock.py
index ebe7827f79e8d737a01e42a3c341b0ea99812770..bc32d75d7dc4f4cefb1cebd5baf3a747ac4257fb 100644 (file)
@@ -16,13 +16,29 @@ from . import provision
 from .config import config
 from .config import set_config_attr
 from .contextutil import safe_while
-from .lockstatus import get_status
 
 log = logging.getLogger(__name__)
 
 
-def is_vm(name):
-    return get_status(name)['is_vm']
+def get_status(name):
+    name = misc.canonicalize_hostname(name, user=None)
+    uri = os.path.join(config.lock_server, 'nodes', name, '')
+    response = requests.get(uri)
+    success = response.ok
+    if success:
+        return response.json()
+    log.warning(
+        "Failed to query lock server for status of {name}".format(name=name))
+    return None
+
+
+def is_vm(name=None, status=None):
+    if status is None:
+        if name is None:
+            raise ValueError("Must provide either name or status, or both")
+        name = misc.canonicalize_hostname(name)
+        status = get_status(name)
+    return status.get('is_vm', False)
 
 
 def get_distro_from_downburst():
@@ -241,7 +257,7 @@ def main(ctx):
         statuses = get_statuses(machines)
         owner = ctx.owner or misc.get_user()
         for machine in statuses:
-            if machine['is_vm'] and machine['locked'] and \
+            if is_vm(status=machine) and machine['locked'] and \
                (machines or machine['locked_by'] == owner):
                 vmachines.append(machine['name'])
         if vmachines:
@@ -269,7 +285,7 @@ def main(ctx):
 
             # When listing, only show the vm_host's name, not every detail
             for s in statuses:
-                if not s.get('is_vm', False):
+                if not is_vm(status=s):
                     continue
                 # with an OpenStack API, there is no host for a VM
                 if s['vm_host'] is None:
@@ -322,7 +338,7 @@ def main(ctx):
         if ctx.owner is None and user is None:
             user = misc.get_user()
         # If none of them are vpm, do them all in one shot
-        if not filter(is_vm, machines):
+        if not filter(lambda x: is_vm(status=x), machines):
             res = unlock_many(machines, user)
             return 0 if res else 1
         for machine in machines:
diff --git a/teuthology/lockstatus.py b/teuthology/lockstatus.py
deleted file mode 100644 (file)
index 109b113..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-import requests
-import os
-import logging
-from .config import config
-from .misc import canonicalize_hostname
-
-log = logging.getLogger(__name__)
-
-
-def get_status(name):
-    name = canonicalize_hostname(name, user=None)
-    uri = os.path.join(config.lock_server, 'nodes', name, '')
-    response = requests.get(uri)
-    success = response.ok
-    if success:
-        return response.json()
-    log.warning(
-        "Failed to query lock server for status of {name}".format(name=name))
-    return None
diff --git a/teuthology/misc.py b/teuthology/misc.py
index 35d9f0e00e2a0e9615e5c37a03f5a9b019fa079b..9929ca26dd250fd05376eacf894b9815b3436109 100644 (file)
@@ -33,7 +33,6 @@ log = logging.getLogger(__name__)
 
 import datetime
 stamp = datetime.datetime.now().strftime("%y%m%d%H%M")
-is_vm = lambda x: x.startswith('vpm') or x.startswith('ubuntu@vpm')
 
 is_arm = lambda x: x.startswith('tala') or x.startswith(
     'ubuntu@tala') or x.startswith('saya') or x.startswith('ubuntu@saya')
diff --git a/teuthology/nuke/__init__.py b/teuthology/nuke/__init__.py
index 733a6e6dec02cc194898e704e3273728656784cf..071acc238dbdfafddd8136c463bae7dfad3667ff 100644 (file)
@@ -10,9 +10,9 @@ import teuthology
 
 from ..config import config, FakeNamespace
 from ..lock import (
-    list_locks, locked_since_seconds, unlock_one, find_stale_locks
+    list_locks, locked_since_seconds, unlock_one, find_stale_locks, get_status,
+    is_vm
 )
-from ..lockstatus import get_status
 from ..misc import (
     canonicalize_hostname, config_file, decanonicalize_hostname, merge_configs,
     get_user, sh
@@ -298,7 +298,8 @@ def nuke_helper(ctx, should_unlock):
         if 'vpm' in shortname:
             return
         status_info = get_status(host)
-        if status_info['is_vm'] and status_info['machine_type'] == 'openstack':
+        if is_vm(status=status_info) and \
+                status_info['machine_type'] == 'openstack':
             return
     log.debug('shortname: %s' % shortname)
     log.debug('{ctx}'.format(ctx=ctx))
diff --git a/teuthology/orchestra/console.py b/teuthology/orchestra/console.py
index c5c4c0c0027339ca1b1e60bbc4d44fb9341c08b9..4799715b31fa0e8ed0f04993f66ee6d7640fe97a 100644 (file)
@@ -6,7 +6,7 @@ import subprocess
 import sys
 import time
 
-from teuthology import lockstatus as ls
+from teuthology import lock
 from teuthology.config import config
 
 from ..exceptions import ConsoleError
@@ -316,9 +316,9 @@ class VirtualConsole():
             raise RuntimeError("libvirt not found")
 
         self.shortname = remote.getShortName(name)
-        status_info = ls.get_status(self.shortname)
+        status_info = lock.get_status(self.shortname)
         try:
-            if status_info.get('is_vm', False):
+            if lock.is_vm(status=status_info):
                 phys_host = status_info['vm_host']['name'].split('.')[0]
         except TypeError:
             return
diff --git a/teuthology/orchestra/remote.py b/teuthology/orchestra/remote.py
index ed840d253385d61b0ad3f28480b6b2e4fb70069e..473d31121da8b7fd9d18ed2698395a380e7143ab 100644 (file)
@@ -9,7 +9,6 @@ import time
 import re
 import logging
 from cStringIO import StringIO
-from teuthology import lockstatus as ls
 import os
 import pwd
 import tempfile
@@ -17,6 +16,8 @@ import netaddr
 
 import console
 
+from teuthology import lock
+
 log = logging.getLogger(__name__)
 
 
@@ -139,7 +140,7 @@ class Remote(object):
     @property
     def machine_type(self):
         if not getattr(self, '_machine_type', None):
-            remote_info = ls.get_status(self.hostname)
+            remote_info = lock.get_status(self.hostname)
             if not remote_info:
                 return None
             self._machine_type = remote_info.get("machine_type", None)
@@ -243,7 +244,7 @@ class Remote(object):
         """
         if self.os.package_type != 'rpm':
             return
-        if misc.is_vm(self.shortname):
+        if lock.is_vm(self.shortname):
             return
         self.run(args="sudo chcon {con} {path}".format(
             con=context, path=file_path))
@@ -444,6 +445,12 @@ class Remote(object):
             self._console = getRemoteConsole(self.name)
         return self._console
 
+    @property
+    def is_vm(self):
+        if not hasattr(self, '_is_vm'):
+            self._is_vm = lock.is_vm(self.name)
+        return self._is_vm
+
     def __del__(self):
         if self.ssh is not None:
             self.ssh.close()
@@ -463,7 +470,7 @@ def getRemoteConsole(name, ipmiuser=None, ipmipass=None, ipmidomain=None,
     """
     Return either VirtualConsole or PhysicalConsole depending on name.
     """
-    if misc.is_vm(name):
+    if lock.is_vm(name):
         return console.VirtualConsole(name)
     return console.PhysicalConsole(
         name, ipmiuser, ipmipass, ipmidomain, logfile, timeout)
diff --git a/teuthology/provision/__init__.py b/teuthology/provision/__init__.py
index cfc4038fb2e80ffab7e23276b4ba2e269000ba75..02253a501fa61bb67bf3747bc6e64989d31d6193 100644 (file)
@@ -1,6 +1,6 @@
 import logging
 from ..misc import decanonicalize_hostname, get_distro, get_distro_version
-from ..lockstatus import get_status
+from ..lock import get_status, is_vm
 
 from .downburst import Downburst
 from .openstack import ProvisionOpenStack
@@ -25,7 +25,7 @@ def create_if_vm(ctx, machine_name, _downburst=None):
     machine_type = status_info['machine_type']
     os_type = get_distro(ctx)
     os_version = get_distro_version(ctx)
-    if not status_info.get('is_vm', False):
+    if not is_vm(status=status_info):
         return False
 
     if machine_type in cloud.get_types():
@@ -61,7 +61,7 @@ def destroy_if_vm(ctx, machine_name, user=None, description=None,
         status_info = _downburst.status
     else:
         status_info = get_status(machine_name)
-    if not status_info or not status_info.get('is_vm', False):
+    if not status_info or not is_vm(status=status_info):
         return True
     if user is not None and user != status_info['locked_by']:
         msg = "Tried to destroy {node} as {as_user} but it is locked " + \
diff --git a/teuthology/provision/downburst.py b/teuthology/provision/downburst.py
index 12fa346df8c321478a5f3f6217d1c65a43c3da23..f5a0b156807099051433cc9fb9ef0696c510a303 100644 (file)
@@ -7,7 +7,7 @@ import yaml
 from ..config import config
 from ..contextutil import safe_while
 from ..misc import decanonicalize_hostname
-from ..lockstatus import get_status
+from ..lock import get_status
 
 
 log = logging.getLogger(__name__)
diff --git a/teuthology/task/internal/check_lock.py b/teuthology/task/internal/check_lock.py
index da7ceb33288c41ad0bc3a1b5873d8892a9dce730..39a8c190dadaf483728c0d1827da608063f8be18 100644 (file)
@@ -1,6 +1,6 @@
 import logging
 
-from teuthology import lockstatus
+from teuthology import lock
 
 from teuthology.config import config as teuth_config
 
@@ -16,7 +16,7 @@ def check_lock(ctx, config, check_up=True):
         return
     log.info('Checking locks...')
     for machine in ctx.config['targets'].iterkeys():
-        status = lockstatus.get_status(machine)
+        status = lock.get_status(machine)
         log.debug('machine status is %s', repr(status))
         assert status is not None, \
             'could not read lock status for {name}'.format(name=machine)
diff --git a/teuthology/task/internal/lock_machines.py b/teuthology/task/internal/lock_machines.py
index fcf8d211721258eb363447828c49f74bbe300fc5..68210cd856e8a51ec5a7aecbe6512d39dd515f7a 100644 (file)
@@ -4,7 +4,6 @@ import time
 import yaml
 
 from teuthology import lock
-from teuthology import lockstatus
 from teuthology import misc
 from teuthology import provision
 from teuthology import report
@@ -92,7 +91,7 @@ def lock_machines(ctx, config):
         if len(all_locked) == total_requested:
             vmlist = []
             for lmach in all_locked:
-                if misc.is_vm(lmach):
+                if lock.is_vm(lmach):
                     vmlist.append(lmach)
             if vmlist:
                 log.info('Waiting for virtual machines to come up')
@@ -117,7 +116,7 @@ def lock_machines(ctx, config):
                     log.info("Error in virtual machine keys")
                 newscandict = {}
                 for dkey in all_locked.iterkeys():
-                    stats = lockstatus.get_status(dkey)
+                    stats = lock.get_status(dkey)
                     newscandict[dkey] = stats['ssh_pub_key']
                 ctx.config['targets'] = newscandict
             else:
diff --git a/teuthology/task/internal/vm_setup.py b/teuthology/task/internal/vm_setup.py
index 91eece313600673a603931b570c105bc1cc7ae45..c3e913bf84a227ed8543b8646bbb2dbd9efc0a2f 100644 (file)
@@ -4,8 +4,6 @@ import subprocess
 
 from cStringIO import StringIO
 
-from teuthology import misc
-
 from teuthology.parallel import parallel
 from teuthology.task import ansible
 
@@ -24,7 +22,7 @@ def vm_setup(ctx, config):
     with parallel():
         editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh')
         for rem in ctx.cluster.remotes.iterkeys():
-            if misc.is_vm(rem.shortname):
+            if rem.is_vm:
                 ansible_hosts.add(rem.shortname)
                 r = rem.run(args=['test', '-e', '/ceph-qa-ready'],
                             stdout=StringIO(), check_status=False)
diff --git a/teuthology/task/selinux.py b/teuthology/task/selinux.py
index f540fea14922a23c266c0ad9263048155ea3ce60..d74df244f68d5ed0ea3c7b5b2467479f8ed78f8f 100644 (file)
@@ -7,7 +7,6 @@ from teuthology.exceptions import SELinuxError
 from teuthology.misc import get_archive_dir
 from teuthology.orchestra.cluster import Cluster
 from teuthology.orchestra import run
-from teuthology.lockstatus import get_status
 
 from . import Task
 
@@ -51,8 +50,7 @@ class SELinux(Task):
         super(SELinux, self).filter_hosts()
         new_cluster = Cluster()
         for (remote, roles) in self.cluster.remotes.iteritems():
-            status_info = get_status(remote.name)
-            if status_info and status_info.get('is_vm', False):
+            if remote.is_vm:
                 msg = "Excluding {host}: VMs are not yet supported"
                 log.info(msg.format(host=remote.shortname))
             elif remote.os.package_type == 'rpm':
diff --git a/teuthology/test/task/test_console_log.py b/teuthology/test/task/test_console_log.py
index c55e9ac63db1a3f6515668e5bc5d56a73ee2b218..0357996150e177007050659bf27c3d586d7844c8 100644 (file)
@@ -29,15 +29,24 @@ class TestConsoleLog(TestTask):
         self.start_patchers()
 
     def start_patchers(self):
-        self.patchers = list()
-        self.patchers.append(patch(
+        self.patchers = dict()
+        self.patchers['makedirs'] = patch(
             'teuthology.task.console_log.os.makedirs',
-        ))
-        for patcher in self.patchers:
-            patcher.start()
+        )
+        self.patchers['is_vm'] = patch(
+            'teuthology.lock.query.is_vm',
+        )
+        self.patchers['is_vm'].return_value = False
+        self.patchers['get_status'] = patch(
+            'teuthology.lock.query.get_status',
+        )
+        self.mocks = dict()
+        for name, patcher in self.patchers.items():
+            self.mocks[name] = patcher.start()
+        self.mocks['is_vm'].return_value = False
 
     def teardown(self):
-        for patcher in self.patchers:
+        for patcher in self.patchers.values():
             patcher.stop()
 
     def test_enabled(self):
diff --git a/teuthology/test/task/test_selinux.py b/teuthology/test/task/test_selinux.py
index 9145f31fbde7f57f234aa828f630d1462584bdbe..e12e81c0a928e9833010aa9d160d72752b028991 100644 (file)
@@ -11,9 +11,7 @@ class TestSELinux(object):
         self.ctx = FakeNamespace()
         self.ctx.config = dict()
 
-    @patch('teuthology.task.selinux.get_status')
-    def test_host_exclusion(self, mock_get_status):
-        mock_get_status.return_value = None
+    def test_host_exclusion(self):
         with patch.multiple(
             Remote,
             os=DEFAULT,
@@ -23,10 +21,12 @@ class TestSELinux(object):
             remote1 = Remote('remote1')
             remote1.os = Mock()
             remote1.os.package_type = 'rpm'
+            remote1._is_vm = False
             self.ctx.cluster.add(remote1, ['role1'])
             remote2 = Remote('remote1')
             remote2.os = Mock()
             remote2.os.package_type = 'deb'
+            remote2._is_vm = False
             self.ctx.cluster.add(remote2, ['role2'])
             task_config = dict()
             with SELinux(self.ctx, task_config) as task: