output = host.check_output(cmd)
assert output.strip().startswith("cluster")
- def test_ceph_config_has_inital_members_line(self, node, File, setup):
- assert File(setup["conf_path"]).contains("^mon initial members = .*$")
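+ # testinfra has deprecated the standalone `File` fixture; the same check
+ # is available through the `host` fixture as host.file(path).contains()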
+ def test_ceph_config_has_initial_members_line(self, node, host, setup):
+ assert host.file(setup["conf_path"]).contains("^mon initial members = .*$")
- def test_initial_members_line_has_correct_value(self, node, host, File, setup): # noqa E501
+ def test_initial_members_line_has_correct_value(self, node, host, setup):
mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name'])) # noqa E501
result = True
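+ # every mon listed in the inventory must appear in that line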
for host in node["vars"]["groups"]["mons"]:
# wait 30sec for services to be ready
sleep 30
# test cluster state using ceph-ansible tests
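+ # --ssh-config points testinfra at the SSH settings Vagrant generates for
+ # the VMs (e.g. dumped beforehand with `vagrant ssh-config`)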
- py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {envdir}/tmp/ceph-ansible/tests/functional/tests
# install ceph-ansible@master requirements
pip install -r {toxinidir}/tests/requirements.txt
"
# test cluster state again using ceph-ansible tests
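+ # CEPH_STABLE_RELEASE lets the tests assert against the expected release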
- bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+ bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
# reboot all vms
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml
# retest to ensure cluster came back up correctly after rebooting
- bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+ bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
vagrant destroy --force
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
# test that the cluster can be redeployed in a healthy state
- py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+ py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[purge-lvm]
commands=
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
# test that the cluster can be redeployed in a healthy state
- py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+ py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[shrink-mon]
commands=
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
- py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers {toxinidir}/tests/functional/tests
+ py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[add-mons]
commands=
ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
- py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+ py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[add-osds]
commands=
ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
- py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+ py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[add-mgrs]
commands=
ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
- py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+ py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[add-rbdmirrors]
commands=
ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
- py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+ py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[add-rgws]
commands=
ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
- py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+ py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[rgw-multisite]
commands=
# wait 30sec for services to be ready
sleep 30
# test cluster state using ceph-ansible tests
- py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+ py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# reboot all vms
all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
# wait 30sec for services to be ready
all_daemons: sleep 30
# retest to ensure cluster came back up correctly after rebooting
- all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+ all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# handlers/idempotency test
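+ # the '-bis' image tag differs from the first run's tag, so this re-run
+ # should fire the container restart handlers as well as check idempotency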
all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} copy_admin_key={env:COPY_ADMIN_KEY:False}" --extra-vars @ceph-override.json