From: Shilpa Jagannath Date: Wed, 2 Jan 2019 06:37:18 +0000 (+0530) Subject: Pulled from rh-new X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=d126cd69a7073f59d23df47d452dca8232a9dff2;p=teuthology.git Pulled from rh-new set_disk_info turned off single cluster fix cluster name single cluster fix cluster name single cluster fix --- d126cd69a7073f59d23df47d452dca8232a9dff2 diff --cc teuthology/task/ceph_ansible.py index 80fb7b3cb,79b46e8dd..43e940e24 --- a/teuthology/task/ceph_ansible.py +++ b/teuthology/task/ceph_ansible.py @@@ -74,13 -74,19 +74,24 @@@ class CephAnsible(Task) if 'repo' not in config: self.config['repo'] = os.path.join(teuth_config.ceph_git_base_url, 'ceph-ansible.git') + + if 'cluster' in config: - self.cluster_name = self.config.get('cluster', 'ceph') ++ self.cluster_name = self.config.get('cluster') ++ else: ++ self.cluster_name = None ++ + # Legacy option set to true in case we are running a test + # which was earlier using "ceph" task for configuration + self.legacy = False + if 'legacy' in config: + self.legacy = True + # default vars to dev builds if 'vars' not in config: vars = dict() config['vars'] = vars vars = config['vars'] - self.cluster_name = vars.get('cluster', 'ceph') ++ # for downstream builds skip var setup if 'rhbuild' in config: return @@@ -116,18 -122,6 +127,19 @@@ self.extra_vars_file = self._write_hosts_file(prefix='teuth_ansible_gvar', content=gvar) + def remove_cluster_prefix(self): + + stripped_role = {} - self.each_cluster = self.ctx.cluster.only(lambda role: role.startswith(self.cluster_name)) ++ self.each_cluster = self.ctx.cluster.only(lambda role: role.startswith(self.cluster_name))\ ++ if self.cluster_name else self.ctx.cluster + log.info('current cluster {}'.format(self.each_cluster)) + for remote, roles in self.each_cluster.remotes.iteritems(): + stripped_role[remote] = [] + for rol in roles: + stripped_role[remote].append(teuthology.ceph_role(rol)) + self.each_cluster.remotes = 
stripped_role + log.info('updated cluster {}'.format(self.each_cluster)) + def execute_playbook(self): """ Execute ansible-playbook @@@ -511,12 -531,11 +523,8 @@@ run.Raw(str_args) ], timeout=4200, - stdout=out ) - if re.search(r'all hosts have already failed', out.getvalue()): - log.error("Failed during ceph-ansible execution") - raise CephAnsibleError("Failed during ceph-ansible execution") - if self.cluster_name == 'ceph': - self.ready_cluster = self.ctx.cluster - else: - self.ready_cluster = self.ctx.cluster.only(lambda role: role.startswith(self.cluster_name)) + self.ready_cluster = self.each_cluster log.info('Ready_cluster {}'.format(self.ready_cluster)) self._create_rbd_pool() self._fix_roles_map() @@@ -834,9 -858,82 +846,81 @@@ 'sudo', 'chmod', run.Raw('o+r'), - '/etc/ceph/%s.client.admin.keyring' % self.cluster_name + '/etc/ceph/ceph.client.admin.keyring' ]) + # this will be called only if "legacy" is true + def change_key_permission(self): + """ + Change permission for admin.keyring files on all nodes + only if legacy is set to True + """ + log.info("Changing permission for admin keyring on all nodes") + mons = self.ctx.cluster.only(teuthology.is_type('mon', self.cluster_name)) + for remote, roles in mons.remotes.iteritems(): + remote.run(args=[ + 'sudo', + 'chmod', + run.Raw('o+r'), + '/etc/ceph/%s.client.admin.keyring' % self.cluster_name, + run.Raw('&&'), + 'sudo', + 'ls', + run.Raw('-l'), + '/etc/ceph/%s.client.admin.keyring' % self.cluster_name, + ]) + - + def create_keyring(self): + """ + Set up key ring on remote sites + """ + log.info('Setting up client nodes...') + clients = self.ctx.cluster.only(teuthology.is_type('client', self.cluster_name)) + testdir = teuthology.get_testdir(self.ctx) + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + for remote, roles_for_host in clients.remotes.iteritems(): + for role in teuthology.cluster_roles_of_type(roles_for_host, 'client', + self.cluster_name): + name = teuthology.ceph_role(role) + 
log.info("Creating keyring for {}".format(name)) + client_keyring = '/etc/ceph/{0}.{1}.keyring'.format(self.cluster_name, name) + remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + '--gen-key', + # TODO this --name= is not really obeyed, all unknown "types" are munged to "client" + '--name={name}'.format(name=name), + '--cap', + 'osd', + 'allow rwx', + '--cap', + 'mon', + 'allow rwx', + client_keyring, + run.Raw('&&'), + 'sudo', + 'chmod', + '0644', + client_keyring, + run.Raw('&&'), + 'sudo', + 'ls',run.Raw('-l'), + client_keyring, + run.Raw('&&'), + 'sudo', + 'ceph', + 'auth', + 'import', + run.Raw('-i'), + client_keyring, + ], + ) + class CephAnsibleError(Exception): pass