machine)
unlock_one(ctx, machine, user)
ok_machs = keys.do_update_keys(ok_machs.keys())[1]
+ update_nodes(ok_machs)
return ok_machs
- elif machine_type in reimage_types:
+ else:
reimaged = dict()
console_log_conf = dict(
logfile_name='{shortname}_reimage.log',
from netaddr.strategy.ipv4 import valid_str as _is_ipv4
from netaddr.strategy.ipv6 import valid_str as _is_ipv6
from teuthology import safepath
+from teuthology.parallel import parallel
from teuthology.exceptions import (CommandCrashedError, CommandFailedError,
ConnectionLostError)
- from .orchestra import run
- from .config import config
- from .contextutil import safe_while
- from .orchestra.opsys import DEFAULT_OS_VERSION
+ from teuthology.orchestra import run
+ from teuthology.config import config
+ from teuthology.contextutil import safe_while
+ from teuthology.orchestra.opsys import DEFAULT_OS_VERSION
log = logging.getLogger(__name__)
init_tasks.extend([
{'pcp': None},
{'selinux': None},
- {'clock': None}
])
- if config.get('ceph_cm_ansible', True):
- init_tasks.append({'ansible.cephlab': None})
+ # clock_sync_task selects the clock task to run: 'clock' (default) or 'clock.check'
+ clock_sync_task = config.get('clock_sync_task', 'clock')
+ init_tasks.append({clock_sync_task: None})
+
if 'redhat' in config:
init_tasks.extend([
+ {'internal.setup_stage_cdn': None}
+ ])
+ # Don't run cm-ansible by default unless requested in the config:
+ # nodes are reimaged by FOG, and the FOG-provided images have already
+ # gone through a cm-ansible run.
+ if config.get('run-cm-ansible', False):
+ init_tasks.extend([{'ansible.cephlab': None}])
+
+ if 'redhat' in config:
+ init_tasks.extend([
+ {'internal.git_ignore_ssl': None},
{'internal.setup_cdn_repo': None},
{'internal.setup_base_repo': None},
{'internal.setup_additional_repo': None},
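Note: a rough sketch of how a job config could drive the two new options above; the key names ('clock_sync_task', 'run-cm-ansible') come from the code, but the surrounding YAML layout is an assumption, not part of this change:

    import yaml

    # Hypothetical job fragment; only the two keys read by the new code are meaningful.
    job_yaml = """
    clock_sync_task: clock.check   # default is 'clock'
    run-cm-ansible: true           # default is False; FOG images already ran cm-ansible
    """
    config = yaml.safe_load(job_yaml)
    assert config.get('clock_sync_task', 'clock') == 'clock.check'
    assert config.get('run-cm-ansible', False) is True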
from datetime import datetime
from tempfile import NamedTemporaryFile
+from math import floor
- from ..config import config, JobConfig
- from ..exceptions import (
+ from teuthology.config import config, JobConfig
+ from teuthology.exceptions import (
BranchNotFoundError, CommitNotFoundError, VersionNotFoundError
)
- from ..misc import deep_merge, get_results_url
- from ..orchestra.opsys import OS
+ from teuthology.misc import deep_merge, get_results_url
+ from teuthology.orchestra.opsys import OS
- from . import util
- from .build_matrix import combine_path, build_matrix
- from .placeholder import substitute_placeholders, dict_templ
+ from teuthology.suite import util
+ from teuthology.suite.build_matrix import combine_path, build_matrix
+ from teuthology.suite.placeholder import substitute_placeholders, dict_templ
log = logging.getLogger(__name__)
import re
import logging
import yaml
-
+import time
+import errno
+import socket
from cStringIO import StringIO
- from . import Task
+ from teuthology.task import Task
from tempfile import NamedTemporaryFile
- from ..config import config as teuth_config
- from ..misc import get_scratch_devices
+ from teuthology.config import config as teuth_config
+ from teuthology.misc import get_scratch_devices
from teuthology import contextutil
from teuthology.orchestra import run
+from teuthology.orchestra.daemon import DaemonGroup
+from teuthology.task.install import ship_utilities
from teuthology import misc
+from teuthology import misc as teuthology
+
log = logging.getLogger(__name__)
log.debug("Running %s", args)
# If there is an installer.0 node, use that for the installer.
# Otherwise, use the first mon node as installer node.
- ansible_loc = self.ctx.cluster.only('installer.0')
- (ceph_first_mon,) = self.ctx.cluster.only(
- misc.get_first_mon(self.ctx,
- self.config)).remotes.keys()
+ ansible_loc = self.each_cluster.only('installer.0')
+# self.each_cluster = self.each_cluster.only(lambda role: role.startswith(self.cluster_name))
+# self.remove_cluster_prefix()
+ (ceph_first_mon,) = self.ctx.cluster.only(misc.get_first_mon(
+ self.ctx, self.config, self.cluster_name)).remotes.keys()
+
if ansible_loc.remotes:
- (ceph_installer,) = ansible_loc.remotes.iterkeys()
+ (ceph_installer,) = ansible_loc.remotes.keys()
else:
ceph_installer = ceph_first_mon
self.ceph_first_mon = ceph_first_mon
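Note: a minimal illustration of the role layout the installer selection above assumes; if any host lists an installer.0 role, that host becomes the installer, otherwise the first mon host is used (the role names below are made up for the example):

    # Illustrative roles list, one sublist per target host.
    roles = [
        ['installer.0', 'mon.a', 'osd.0'],   # this host would be picked as installer
        ['mon.b', 'osd.1'],
        ['client.0'],
    ]
    has_installer = any('installer.0' in host_roles for host_roles in roles)
    print('installer.0 present:', has_installer)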
roles = teuthology.all_roles(ctx.cluster)
config = dict((id_, a) for id_ in roles)
- for role, ls in config.items():
- (remote,) = ctx.cluster.only(role).remotes.keys()
- log.info('Running commands on role %s host %s', role, remote.name)
- for c in ls:
- c.replace('$TESTDIR', testdir)
- remote.run(
- args=[
- 'sudo',
- 'TESTDIR={tdir}'.format(tdir=testdir),
- 'bash',
- '-c',
- c],
- )
+ for role, ls in config.items():
+ # only run the commands on mon/osd/client roles; other roles are skipped
+ if 'mon' in role or 'osd' in role \
+ or 'client' in role:
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ log.info('Running commands on role %s host %s', role, remote.name)
+ for c in ls:
+ c = c.replace('$TESTDIR', testdir)
+ if retry:
+ with safe_while(sleep=sleep_for_retry, tries=retry,
+ action="exec_with_retry") as proceed:
+ while proceed():
+ proc = remote.run(
+ args=[
+ 'sudo',
+ 'TESTDIR={tdir}'.format(tdir=testdir),
+ 'bash',
+ '-c',
+ c],
+ timeout=timeout,
+ check_status=False,
+ wait=True,
+ )
+ if proc.exitstatus == 0:
+ break
+ else:
+ remote.run(
+ args=[
+ 'sudo',
+ 'TESTDIR={tdir}'.format(tdir=testdir),
+ 'bash',
+ '-c',
+ c],
+ timeout=timeout
+ )
+
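Note: a sketch of a task config that would exercise the new retry path; retry, sleep_for_retry and timeout are the names used in the code above, but how they are pulled out of the task config is not shown in this diff, so treat the layout as an assumption:

    import yaml

    task_yaml = """
    exec:
      retry: 3             # attempts inside safe_while before giving up
      sleep_for_retry: 10  # seconds between attempts
      timeout: 600         # per-command timeout in seconds
      mon.a:
        - echo ran on mon.a
    """
    task_config = yaml.safe_load(task_yaml)['exec']
    print(task_config['retry'], task_config['sleep_for_retry'], task_config['timeout'])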
:param ctx: Context
:param config: Configuration
"""
- assert config is None
testdir = teuthology.get_testdir(ctx)
filenames = []
-
+ if config is None:
+ config = dict()
+ log.info(config)
log.info('Shipping valgrind.supp...')
- with file(
- os.path.join(os.path.dirname(__file__), 'valgrind.supp'),
- 'rb'
- ) as f:
- fn = os.path.join(testdir, 'valgrind.supp')
- filenames.append(fn)
- for rem in ctx.cluster.remotes.iterkeys():
- teuthology.sudo_write_file(
- remote=rem,
- path=fn,
- data=f,
- )
- f.seek(0)
+ assert 'suite_path' in ctx.config
+ try:
+ with open(
+ os.path.join(ctx.config['suite_path'], 'valgrind.supp'),
+ 'rb'
+ ) as f:
+ fn = os.path.join(testdir, 'valgrind.supp')
+ filenames.append(fn)
+ for rem in ctx.cluster.remotes.keys():
+ teuthology.sudo_write_file(
+ remote=rem,
+ path=fn,
+ data=f,
+ )
+ f.seek(0)
+ except IOError as e:
+ log.info('Cannot ship suppression file for valgrind: %s...', e.strerror)
- FILES = ['daemon-helper', 'adjust-ulimits']
+ FILES = ['daemon-helper', 'adjust-ulimits', 'ceph-coverage']
destdir = '/usr/bin'
for filename in FILES:
log.info('Shipping %r...', filename)
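Note: the cluster task above now imports ship_utilities from teuthology.task.install; assuming it is used as a context manager (as elsewhere in teuthology), a reuse site could look roughly like this sketch (the real call site is not part of this excerpt):

    from teuthology import contextutil
    from teuthology.task.install import ship_utilities

    def deploy(ctx, config):
        # Reuse the shared helper to push daemon-helper, adjust-ulimits and
        # ceph-coverage to every remote before the rest of the cluster setup.
        with contextutil.nested(
            lambda: ship_utilities(ctx=ctx, config=None),
        ):
            pass  # remaining setup steps would run here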
from teuthology.exceptions import VersionNotFoundError
from teuthology.job_status import get_status, set_status
from teuthology.orchestra import cluster, remote, run
+from .redhat import setup_cdn_repo, setup_base_repo, setup_additional_repo, setup_stage_cdn # noqa
+
+
log = logging.getLogger(__name__)
Setup repo based on redhat nodes
"""
with parallel():
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
if remote.os.package_type == 'rpm':
- remote.run(args=['sudo', 'subscription-manager', 'repos',
- run.Raw('--disable=*ceph*')])
+ # pre-cleanup
+ remote.run(args=['sudo', 'rm', run.Raw('/etc/yum.repos.d/rh*')],
+ check_status=False)
+ remote.run(args=['sudo', 'yum', 'clean', 'metadata'])
+ remote.run(args=['sudo', 'yum', 'update', 'metadata'])
+ # skipping subscription-manager is required for beta ISO testing
+ if config.get('skip-subscription-manager', False) is True:
+ log.info("Skipping subscription-manager command")
+ else:
+ remote.run(args=['sudo', 'subscription-manager', 'repos',
+ run.Raw('--disable=*ceph*')])
base_url = config.get('base-repo-url', '')
installer_url = config.get('installer-repo-url', '')
repos = ['MON', 'OSD', 'Tools', 'Calamari', 'Installer']
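Note: for reference, the config keys this repo setup reads, with illustrative values only (the URLs are placeholders, not real compose locations):

    # Hypothetical values for the config keys consumed above.
    config = {
        'skip-subscription-manager': True,    # skip subscription-manager for beta ISO testing
        'base-repo-url': 'http://example.com/compose/base/',
        'installer-repo-url': 'http://example.com/compose/installer/',
    }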