--- /dev/null
+---
+
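+# Vagrant variables for this scenario: three monitor VMs and a single,
+# non-containerized OSD VM (see the counts and options below).
+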
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
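+# (subnet prefixes only; the Vagrantfile is expected to append the per-VM
+# host octet, e.g. 192.168.1.x on the public network)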
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for centos/7, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The synced folder is not needed for testing;
+# setting this to true skips mounting the vagrant directory on the remote box,
+# regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
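+# KERNEL TUNING
+# sysctl values applied to the VMs by ceph-ansible; pid_max and file-max are
+# raised to accommodate the Ceph daemons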
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
{dev,rhcs}-{centos,ubuntu}-non_container-{switch_to_containers}
dev-rhel-container-podman
infra_lv_create
+ migrate_ceph_disk_to_ceph_volume
skipsdist = True
+# a test scenario that deploys a Luminous cluster with ceph-disk OSDs,
+# then upgrades to Nautilus and migrates those OSDs to ceph-volume
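+#
+# run it locally with, e.g.:
+#   tox -e migrate_ceph_disk_to_ceph_volume
+# (append "-- --provider=libvirt" to override the default virtualbox provider)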
+[testenv:migrate_ceph_disk_to_ceph_volume]
+whitelist_externals =
+ vagrant
+ bash
+ cp
+ git
+ pip
+ sleep
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
+ ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ CEPH_STABLE_RELEASE = luminous
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+changedir={toxinidir}/tests/functional/migrate_ceph_disk_to_ceph_volume
+commands=
+ vagrant up --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ # use the stable-3.2 branch to deploy a Luminous cluster
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:stable-3.2} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+ # deploy the cluster
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+ ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
+ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+ ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+ ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ copy_admin_key={env:COPY_ADMIN_KEY:False} \
+ "
+
+ # wait 30 seconds for services to be ready
+ sleep 30
+ # test cluster state using ceph-ansible tests
+ py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+ # install ceph-ansible@master requirements
+ pip install -r {toxinidir}/tests/requirements.txt
+
+ # migrate OSDs to ceph-volume and upgrade to Nautilus
+ cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/rolling_update.yml --extra-vars "\
+ ireallymeanit=yes \
+ fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+ ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
+ ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ ceph_stable_release=nautilus \
+ osd_scenario=lvm \
+ "
+
+ # test cluster state again using ceph-ansible tests
+ bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+
+ # reboot all vms
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml
+
+ # retest to ensure cluster came back up correctly after rebooting
+ bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+
+ vagrant destroy --force
+
# a test scenario for the lv-create.yml and lv-teardown playbooks
[testenv:infra_lv_create]
whitelist_externals =