From ba7d4c4954fe2b53eddf3144973bc409b710b231 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Wed, 27 Mar 2019 14:36:51 -0500
Subject: [PATCH] tests: adds the migrate_ceph_disk_to_ceph_volume scenario

This test deploys a luminous cluster with ceph-disk created osds and then
upgrades to nautilus and migrates those osds to ceph-volume. The nodes are
then rebooted and cluster state verified.

Signed-off-by: Andrew Schoen
(cherry picked from commit 399a821439f8f0997dc9b66e97aacd61ea870ea3)
---
 .../Vagrantfile                            |  1 +
 .../group_vars/all                         | 22 ++++++
 .../migrate_ceph_disk_to_ceph_volume/hosts | 10 +++
 .../vagrant_variables.yml                  | 73 +++++++++++++++++++
 tox.ini                                    | 73 +++++++++++++++++++
 5 files changed, 179 insertions(+)
 create mode 120000 tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile
 create mode 100644 tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all
 create mode 100644 tests/functional/migrate_ceph_disk_to_ceph_volume/hosts
 create mode 100644 tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml

diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile b/tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile
new file mode 120000
index 000000000..706a5bb47
--- /dev/null
+++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile
@@ -0,0 +1 @@
+../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all b/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all
new file mode 100644
index 000000000..e6e60a4bc
--- /dev/null
+++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+devices:
+  - '/dev/sdb'
+  - '/dev/sdc'
+osd_scenario: "collocated"
+copy_admin_key: false
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/hosts b/tests/functional/migrate_ceph_disk_to_ceph_volume/hosts
new file mode 100644
index 000000000..157cccb9e
--- /dev/null
+++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/hosts
@@ -0,0 +1,10 @@
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml b/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml
new file mode 100644
index 000000000..fa34052f2
--- /dev/null
+++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+#   * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+
diff --git a/tox.ini b/tox.ini
index 60d6a3a17..c0587276a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,9 +4,82 @@ envlist = {nautilus,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons
   {nautilus,rhcs}-{centos,ubuntu}-non_container-{switch_to_containers}
   nautilus-rhel-container-podman
   infra_lv_create
+  migrate_ceph_disk_to_ceph_volume
 
 skipsdist = True
 
+# a test scenario that deploys a luminous cluster with ceph-disk osds
+# and then upgrades to nautilus and migrates those osds to ceph-volume
+[testenv:migrate_ceph_disk_to_ceph_volume]
+whitelist_externals =
+    vagrant
+    bash
+    cp
+    git
+    pip
+passenv=*
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+  ANSIBLE_CONFIG = -F {toxinidir}/ansible.cfg
+  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
+  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
+  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+  CEPH_STABLE_RELEASE = luminous
+  # only available for ansible >= 2.5
+  ANSIBLE_STDOUT_CALLBACK = yaml
+changedir={toxinidir}/tests/functional/migrate_ceph_disk_to_ceph_volume
+commands=
+  vagrant up --no-provision {posargs:--provider=virtualbox}
+  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+  # use the stable-3.2 branch to deploy a luminous cluster
+  git clone -b {env:CEPH_ANSIBLE_BRANCH:stable-3.2} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+  pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+  # deploy the cluster
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+      copy_admin_key={env:COPY_ADMIN_KEY:False} \
+  "
+
+  # wait 30sec for services to be ready
+  sleep 30
+  # test cluster state using ceph-ansible tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  # install ceph-ansible@master requirements
+  pip install -r {toxinidir}/tests/requirements.txt
+
+  # migrate osds to ceph-volume and upgrade to nautilus
+  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/rolling_update.yml --extra-vars "\
+      ireallymeanit=yes \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-master} \
+      ceph_stable_release=nautilus \
+      osd_scenario=lvm \
+  "
+
+  # test cluster state again using ceph-ansible tests
+  bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+
+  # reboot all vms
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml
+
+  # retest to ensure cluster came back up correctly after rebooting
+  bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+
+  vagrant destroy --force
+
 # a test scenario for the lv-create.yml and lv-teardown playbooks
 [testenv:infra_lv_create]
 whitelist_externals =
-- 
2.39.5
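A quick sketch of how the new scenario can be exercised locally once this patch is applied, assuming tox and Vagrant (with a VirtualBox or libvirt provider) are installed; the environment name and defaults come from the [testenv:migrate_ceph_disk_to_ceph_volume] section above:

    # run the scenario with the default provider (virtualbox, per the posargs default)
    tox -e migrate_ceph_disk_to_ceph_volume

    # anything after "--" becomes posargs and replaces the default provider flag,
    # e.g. to use libvirt instead (assumes the vagrant-libvirt plugin is installed)
    tox -e migrate_ceph_disk_to_ceph_volume -- --provider=libvirt

    # CEPH_ANSIBLE_BRANCH selects the ceph-ansible branch cloned for the initial
    # luminous deploy; it defaults to stable-3.2 as set in the commands above
    CEPH_ANSIBLE_BRANCH=stable-3.2 tox -e migrate_ceph_disk_to_ceph_volume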
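For context on the "migrates those osds to ceph-volume" step: rolling_update.yml with osd_scenario=lvm leaves the ceph-disk created OSDs under ceph-volume's control. The playbook drives this itself; the following is only a rough illustration of what such a takeover typically looks like on an OSD node (not taken verbatim from the playbook, exact flags may differ):

    # inventory the existing ceph-disk OSD partitions and record their
    # metadata as JSON files under /etc/ceph/osd/
    ceph-volume simple scan

    # enable systemd units so the scanned OSDs are started through
    # ceph-volume from now on instead of the old ceph-disk udev path
    ceph-volume simple activate --all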