os.remove(self.inventory)
os.remove(self.playbook_file)
os.remove(self.extra_vars_file)
+ # run the purge-cluster playbook, which tears down the cluster
+ args = [
+     'ANSIBLE_STDOUT_CALLBACK=debug',
+     'ansible-playbook', '-vv',
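+     # ireallymeanit=yes answers purge-cluster's confirmation prompt non-interactively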
+     '-e', 'ireallymeanit=yes',
+     '-i', 'inven.yml', 'infrastructure-playbooks/purge-cluster.yml'
+ ]
+ log.debug("Running %s", args)
+ str_args = ' '.join(args)
+ installer_node = self.ceph_installer
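+ # rhbuild runs the system ceph-ansible directly; otherwise the
+ # task's virtualenv is activated before running the playbook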
+ if self.config.get('rhbuild'):
+     installer_node.run(
+         args=[
+             run.Raw('cd ~/ceph-ansible'),
+             run.Raw(';'),
+             run.Raw(str_args)
+         ]
+     )
+ else:
+     installer_node.run(
+         args=[
+             run.Raw('cd ~/ceph-ansible'),
+             run.Raw(';'),
+             run.Raw('source venv/bin/activate'),
+             run.Raw(';'),
+             run.Raw(str_args)
+         ]
+     )
def wait_for_ceph_health(self):
    with contextutil.safe_while(sleep=15, tries=6,
                                )
self.patcher_get_scratch_devices.start()
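+ # stub out teardown so the unit tests don't execute the real purge-cluster run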
+ self.patcher_teardown = patch(
+     'teuthology.task.ceph_ansible.CephAnsible.teardown',
+ )
+ self.patcher_teardown.start()
+
def fake_set_iface_and_cidr(self):
    self._interface = 'eth0'
    self._cidr = '172.21.0.0/20'
def stop_patchers(self):
    self.patcher_get_scratch_devices.stop()
    self.patcher_remote.stop()
+     self.patcher_teardown.stop()
def test_playbook_none(self):
    skip(SKIP_IRRELEVANT)