The --diff option can help in CI when a task fails because of a corrupted render in a previous task.
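
For context, a minimal sketch of the extra output diff mode adds when a rendered file changes (the task name, host, and file contents below are hypothetical examples, not taken from these playbooks):

    TASK [ceph-config : generate ceph.conf] *************************
    --- before: /etc/ceph/ceph.conf
    +++ after: /etc/ceph/ceph.conf
    @@ -1,3 +1,3 @@
     [global]
    -fsid =
    +fsid = 40358a87-ab6e-4bdc-83db-1d909147861c
    changed: [mon0]

Having the before/after diff in the play output makes a bad render visible directly in the CI log instead of only surfacing as a failure in a later task.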
Signed-off-by: Seena Fallah <seenafallah@gmail.com>
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/cephadm.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/cephadm.yml --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
# configure lvm
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site-container.yml.sample --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/site-container.yml.sample --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/docker-to-podman.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/docker-to-podman.yml --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
"
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
- ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/inventory {toxinidir}/tests/functional/setup.yml
# configure lvm
- ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv --diff -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml
- non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=main ceph_dev_sha1=latest" --tags "vagrant_setup"
- ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
+ non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=main ceph_dev_sha1=latest" --tags "vagrant_setup"
+ ansible-playbook -vv --diff -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
yes_i_know=true \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_dev_branch=main \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
- ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/external_clients_admin_key.yml
+ ansible-playbook -vv --diff -i {changedir}/inventory {toxinidir}/tests/functional/external_clients_admin_key.yml
- ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/inventory/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
ireallymeanit=yes \
fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf
- ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/inventory/external_clients-hosts {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
ireallymeanit=yes \
fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
# configure lvm
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# reboot all vms
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
# retest to ensure cluster came back up correctly after rebooting
py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
- non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
# configure lvm
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
ceph_rbd_mirror_configure=true \
ceph_rbd_mirror_pool=rbd \
ceph_rbd_mirror_local_user_secret=AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw== \
bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}"
bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary"
ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/lvm_setup.yml
# ensure the rule isn't already present
ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rbdmirror.yml --skip-tags=secondary --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/tests/functional/rbdmirror.yml --skip-tags=secondary --extra-vars "\
ceph_rbd_mirror_pool=rbd \
"
ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rbdmirror.yml --skip-tags=primary -e 'ceph_rbd_mirror_pool=rbd'
[shrink-osd-single]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=0 \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=1 \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=2 \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=3 \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=4 \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=5 \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=6 \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=7 \
"
[shrink-osd-multiple]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=0,1,2,3,4,5,6,7 \
"
commands=
ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
- ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
# configure lvm
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
shrink_osd_multiple: {[shrink-osd-multiple]commands}
# configure lvm
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit osds --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit osds --extra-vars "\
yes_i_know=true \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
- non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
# upgrade mons
# mon1
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit mon1 --tags=mons --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit mon1 --tags=mons --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
# mon0 and mon2
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit 'mons:!mon1' --tags=mons --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit 'mons:!mon1' --tags=mons --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
# upgrade mgrs
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mgrs --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mgrs --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
# upgrade osd1
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit=osd1 --tags=osds --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit=osd1 --tags=osds --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
# upgrade remaining osds (serially)
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit='osds:!osd1' --tags=osds --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit='osds:!osd1' --tags=osds --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
# upgrade rgws
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=rgws --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=rgws --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
# post upgrade actions
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=post_upgrade --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=post_upgrade --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
# # use the stable-7.0 branch to deploy an octopus cluster
# git clone -b stable-7.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
# pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
-# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
+# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
# # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
-# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"'
+# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"'
# configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
- non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
# pip install -r {toxinidir}/tests/requirements.txt
# ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-create.yml
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-create.yml
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-teardown.yml --extra-vars "ireallymeanit=yes"
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-teardown.yml --extra-vars "ireallymeanit=yes"
cat {toxinidir}/infrastructure-playbooks/lv-create.log
# can be redeployed to.
[purge]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon-base} \
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-main} \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
ireallymeanit=yes \
remove_packages=yes \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
"
# re-setup lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
# set up the cluster again
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
yes_i_know=true \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
[purge-dashboard]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/purge-dashboard.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/purge-dashboard.yml --extra-vars "\
ireallymeanit=yes \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon-base} \
"
# set up the cluster again
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
yes_i_know=true \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
[purge-lvm]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
ireallymeanit=yes \
remove_packages=yes \
ceph_docker_registry_auth=True \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
# set up the cluster again
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
[shrink-mon]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mon.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mon.yml --extra-vars "\
ireallymeanit=yes \
mon_to_kill={env:MON_TO_KILL:mon2} \
"
[shrink-osd]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill={env:OSD_TO_KILL:0} \
"
[shrink-mgr]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mgr.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mgr.yml --extra-vars "\
ireallymeanit=yes \
mgr_to_kill={env:MGR_TO_KILL:mgr1} \
"
[shrink-mds]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mds.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mds.yml --extra-vars "\
ireallymeanit=yes \
mds_to_kill={env:MDS_TO_KILL:mds0} \
"
[shrink-rbdmirror]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rbdmirror.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rbdmirror.yml --extra-vars "\
ireallymeanit=yes \
rbdmirror_to_kill={env:RBDMIRROR_TO_KILL:rbd-mirror0} \
"
[shrink-rgw]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rgw.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rgw.yml --extra-vars "\
ireallymeanit=yes \
rgw_to_kill={env:RGW_TO_KILL:rgw0.rgw0} \
"
[switch-to-containers]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
ireallymeanit=yes \
ceph_docker_image_tag=latest-main-devel \
ceph_docker_registry=quay.io \
py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[add-mons]
commands=
- ansible-playbook -vv -i {changedir}/hosts-2 --limit mon1 {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mon1 {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
[add-mgrs]
commands=
- ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
ireallymeanit=yes \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
[add-mdss]
commands=
- ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
ireallymeanit=yes \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
[add-rbdmirrors]
commands=
- ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
ireallymeanit=yes \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
[add-rgws]
commands=
- ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
ireallymeanit=yes \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
[storage-inventory]
commands=
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/storage-inventory.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/storage-inventory.yml --extra-vars "\
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-main} \
"
[cephadm-adopt]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/cephadm-adopt.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/cephadm-adopt.yml --extra-vars "\
ireallymeanit=yes \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_repository=dev \
"
# idempotency test
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/cephadm-adopt.yml --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/cephadm-adopt.yml --extra-vars "\
ireallymeanit=yes \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_repository=dev \
commands=
ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
- non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
# configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
- !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
- lvm_osds: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd2'
- all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+ !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
+ lvm_osds: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd2'
+ all_in_one: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
no_log_on_ceph_key_tasks=false \
yes_i_know=true \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# reboot all vms
- all_daemons,all_daemons_ipv6,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
+ all_daemons,all_daemons_ipv6,collocation: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
# retest to ensure cluster came back up correctly after rebooting
all_daemons,all_daemons_ipv6,collocation: py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# handlers/idempotency test
- all_daemons,all_daemon_ipv6,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-main} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} yes_i_know=true" --extra-vars @ceph-override.json
+ all_daemons,all_daemon_ipv6,all_in_one,collocation: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "no_log_on_ceph_key_tasks=false delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-main} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} yes_i_know=true" --extra-vars @ceph-override.json
purge: {[purge]commands}
purge_dashboard: {[purge-dashboard]commands}