if 'repo' not in config:
self.config['repo'] = os.path.join(teuth_config.ceph_git_base_url,
'ceph-ansible.git')
- self.cluster_name = self.config.get('cluster', 'ceph')
+
+ if 'cluster' in config:
+ self.cluster_name = self.config.get('cluster')
+ else:
+ self.cluster_name = None
++
+ # Legacy option, set to True when running a test that was
+ # previously configured via the "ceph" task
+ self.legacy = False
+ if 'legacy' in config:
+ self.legacy = True
+
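+ # A minimal sketch (task name and YAML layout assumed, not taken from
+ # this patch) of a job config exercising the options read above:
+ #
+ #   tasks:
+ #   - ceph_ansible:
+ #       cluster: ceph
+ #       legacy: true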
# default vars to dev builds
if 'vars' not in config:
vars = dict()
config['vars'] = vars
vars = config['vars']
- self.cluster_name = vars.get('cluster', 'ceph')
+
# for downstream builds skip var setup
if 'rhbuild' in config:
return
self.extra_vars_file = self._write_hosts_file(prefix='teuth_ansible_gvar',
content=gvar)
- self.each_cluster = self.ctx.cluster.only(lambda role: role.startswith(self.cluster_name))
+ def remove_cluster_prefix(self):
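+ """
+ Strip the cluster name prefix from every role on each remote so the
+ rest of the task can work with bare role names, whether or not an
+ explicit cluster name was configured.
+ """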
+
+ stripped_role = {}
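+ # restrict to the named cluster's remotes when a cluster name is set,
+ # otherwise operate on every remote in the job's cluster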
+ self.each_cluster = self.ctx.cluster.only(lambda role: role.startswith(self.cluster_name))\
+ if self.cluster_name else self.ctx.cluster
+ log.info('current cluster {}'.format(self.each_cluster))
+ for remote, roles in self.each_cluster.remotes.iteritems():
+ stripped_role[remote] = []
+ for rol in roles:
+ stripped_role[remote].append(teuthology.ceph_role(rol))
+ self.each_cluster.remotes = stripped_role
+ log.info('updated cluster {}'.format(self.each_cluster))
+
def execute_playbook(self):
"""
Execute ansible-playbook
run.Raw(str_args)
],
timeout=4200,
- stdout=out
)
- if re.search(r'all hosts have already failed', out.getvalue()):
- log.error("Failed during ceph-ansible execution")
- raise CephAnsibleError("Failed during ceph-ansible execution")
- if self.cluster_name == 'ceph':
- self.ready_cluster = self.ctx.cluster
- else:
- self.ready_cluster = self.ctx.cluster.only(lambda role: role.startswith(self.cluster_name))
+ self.ready_cluster = self.each_cluster
log.info('Ready_cluster {}'.format(self.ready_cluster))
self._create_rbd_pool()
self._fix_roles_map()
'sudo',
'chmod',
run.Raw('o+r'),
- '/etc/ceph/%s.client.admin.keyring' % self.cluster_name
+ '/etc/ceph/ceph.client.admin.keyring'
])
-
+ # called only when the "legacy" option is set to True
+ def change_key_permission(self):
+ """
+ Change permission for admin.keyring files on all nodes
+ only if legacy is set to True
+ """
+ log.info("Changing permission for admin keyring on all nodes")
+ mons = self.ctx.cluster.only(teuthology.is_type('mon', self.cluster_name))
+ for remote, roles in mons.remotes.iteritems():
+ remote.run(args=[
+ 'sudo',
+ 'chmod',
+ run.Raw('o+r'),
+ '/etc/ceph/%s.client.admin.keyring' % self.cluster_name,
+ run.Raw('&&'),
+ 'sudo',
+ 'ls',
+ run.Raw('-l'),
+ '/etc/ceph/%s.client.admin.keyring' % self.cluster_name,
+ ])
+
+ def create_keyring(self):
+ """
+ Set up client keyrings on the remote nodes
+ """
+ log.info('Setting up client nodes...')
+ clients = self.ctx.cluster.only(teuthology.is_type('client', self.cluster_name))
+ testdir = teuthology.get_testdir(self.ctx)
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+ for remote, roles_for_host in clients.remotes.iteritems():
+ for role in teuthology.cluster_roles_of_type(roles_for_host, 'client',
+ self.cluster_name):
+ name = teuthology.ceph_role(role)
+ log.info("Creating keyring for {}".format(name))
+ client_keyring = '/etc/ceph/{0}.{1}.keyring'.format(self.cluster_name, name)
+ remote.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--create-keyring',
+ '--gen-key',
+ # TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
+ '--name={name}'.format(name=name),
+ '--cap',
+ 'osd',
+ 'allow rwx',
+ '--cap',
+ 'mon',
+ 'allow rwx',
+ client_keyring,
+ run.Raw('&&'),
+ 'sudo',
+ 'chmod',
+ '0644',
+ client_keyring,
+ run.Raw('&&'),
+ 'sudo',
+ 'ls', run.Raw('-l'),
+ client_keyring,
+ run.Raw('&&'),
+ 'sudo',
+ 'ceph',
+ 'auth',
+ 'import',
+ run.Raw('-i'),
+ client_keyring,
+ ],
+ )
+
class CephAnsibleError(Exception):
pass