git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
tox: add new factor for OS
author Guillaume Abrioux <gabrioux@redhat.com>
Fri, 14 Dec 2018 09:45:04 +0000 (10:45 +0100)
committer Sébastien Han <seb@redhat.com>
Thu, 24 Jan 2019 10:46:32 +0000 (11:46 +0100)
This way we can test every scenario on all distributions; see the example invocations below.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
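
For illustration, the new OS factor composes with the existing factors, so a scenario can be run against a specific distribution. The env names below are derived from the new envlist and setenv entries in this patch, not from the commit message itself:

  # all_daemons scenario, containerized, on CentOS 7 (CEPH_ANSIBLE_VAGRANT_BOX=centos/7)
  tox -e dev-centos-container-all_daemons

  # same scenario, non-containerized, on Ubuntu Xenial (uses the hosts-ubuntu inventory)
  tox -e dev-ubuntu-non_container-all_daemons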
Vagrantfile
tox.ini

diff --git a/Vagrantfile b/Vagrantfile
index fb760d66e2037742e66a8b1ee9a06832c61a5cfd..339b577d36d5cae5b9fd2872b28cf2e636c03a07 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -19,8 +19,8 @@ NISCSI_GWS      = settings['iscsi_gw_vms']
 MGRS            = settings['mgr_vms']
 PUBLIC_SUBNET   = settings['public_subnet']
 CLUSTER_SUBNET  = settings['cluster_subnet']
-BOX             = settings['vagrant_box']
-CLIENT_BOX      = settings['client_vagrant_box'] || settings['vagrant_box']
+BOX             = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['vagrant_box']
+CLIENT_BOX      = settings['client_vagrant_box'] || BOX
 BOX_URL         = settings['vagrant_box_url']
 SYNC_DIR        = settings['vagrant_sync_dir']
 MEMORY          = settings['memory']
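
The Vagrantfile change above lets the box be overridden from the environment; a sketch of how it could be exercised outside of tox, using one of the boxes defined in the tox.ini change below (illustrative invocation, not part of the patch):

  # bring the VMs up on the Fedora 29 atomic box instead of settings['vagrant_box']
  CEPH_ANSIBLE_VAGRANT_BOX=fedora/29-atomic-host vagrant up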
diff --git a/tox.ini b/tox.ini
index c03b499d51c43d32f2fc49dde11a42280d9d8be7..44b59640ae0d7c8f9348edaba4710e303c93c1ca 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = {dev,rhcs}-{container,non_container}-{xenial_cluster,centos7,update,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_osds,rgw_multisite,purge_cluster,storage_inventory}
+envlist = {dev,rhcs}-{centos,fedora,ubuntu}-{container,non_container}-{all_daemons,update,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_osds,rgw_multisite,purge_cluster,storage_inventory}
   {dev,rhcs}-{switch_to_containers,cluster,ooo_collocation,docker_cluster_collocation,infra_lv_create,container_podman}
   infra_lv_create
 
@@ -45,7 +45,7 @@ commands=
 [purge]
 commands=
   cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
       ireallymeanit=yes \
       remove_packages=yes \
       ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
@@ -56,10 +56,10 @@ commands=
   "
 
   # re-setup lvm
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
 
   # set up the cluster again
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
       ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
       ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
@@ -69,12 +69,12 @@ commands=
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
   "
   # test that the cluster can be redeployed in a healthy state
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
 
 [purge-lvm]
 commands=
   cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
       ireallymeanit=yes \
       remove_packages=yes \
       ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
@@ -84,10 +84,10 @@ commands=
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
   "
 
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
 
   # set up the cluster again
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
       ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
       ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
@@ -97,7 +97,7 @@ commands=
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
   "
   # test that the cluster can be redeployed in a healthy state
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
 
 # extra commands for performing a rolling update
 # currently this hardcodes the release to kraken
@@ -106,7 +106,7 @@ commands=
 commands=
   cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
   ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/rolling_update.yml --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/rolling_update.yml --extra-vars "\
       ireallymeanit=yes \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
       ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
@@ -116,12 +116,12 @@ commands=
       ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
   "
 
-  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"
 
 [shrink-mon]
 commands=
   cp {toxinidir}/infrastructure-playbooks/shrink-mon.yml {toxinidir}/shrink-mon.yml
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/shrink-mon.yml --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/shrink-mon.yml --extra-vars "\
       ireallymeanit=yes \
       mon_to_kill={env:MON_TO_KILL:ceph-mon2} \
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
@@ -129,7 +129,7 @@ commands=
 [shrink-osd]
 commands=
   cp {toxinidir}/infrastructure-playbooks/shrink-osd.yml {toxinidir}/shrink-osd.yml
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/shrink-osd.yml --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/shrink-osd.yml --extra-vars "\
       ireallymeanit=yes \
       osd_to_kill=0 \
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
@@ -138,7 +138,7 @@ commands=
 [switch-to-containers]
 commands=
   cp {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml {toxinidir}/switch-from-non-containerized-to-containerized-ceph-daemons.yml
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
       ireallymeanit=yes \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
       ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
@@ -213,6 +213,11 @@ setenv=
   ANSIBLE_CALLBACK_WHITELIST = profile_tasks
   # only available for ansible >= 2.5
   ANSIBLE_STDOUT_CALLBACK = yaml
+  centos: CEPH_ANSIBLE_VAGRANT_BOX = centos/7
+  fedora: CEPH_ANSIBLE_VAGRANT_BOX = fedora/29-atomic-host
+  ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = ceph/ubuntu-xenial
+  ubuntu: _INVENTORY = hosts-ubuntu
+  INVENTORY = {env:_INVENTORY:hosts}
   container: CONTAINER_DIR = /container
   container: PLAYBOOK = site-docker.yml.sample
   container_podman: PLAYBOOK = site-docker.yml.sample
@@ -266,17 +271,15 @@ commands=
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
 
   # configure lvm
-  !lvm_batch: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  !lvm_batch: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
 
   container_podman: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/simulate_rhel8.yml
-  storage_inventory: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
-  storage_inventory_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
 
-  rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
+  rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
 
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
 
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
       delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
       ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
@@ -291,18 +294,18 @@ commands=
   # wait 30sec for services to be ready
   sleep 30
   # test cluster state using ceph-ansible tests
-  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
+  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
 
   # reboot all vms
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
 
   # wait 30sec for services to be ready
   sleep 30
   # retest to ensure cluster came back up correctly after rebooting
-  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
+  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
 
   # handlers/idempotency test
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} \
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} \
       --extra-vars "\
       delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \