From fb8a66149bc5605c0e51ab137f46c2c48580452a Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Wed, 20 Oct 2021 09:59:48 +0200
Subject: [PATCH] tests: add new scenario subset_update

Add a new scenario in order to test the subset upgrade approach using tags.

Signed-off-by: Guillaume Abrioux
---
 tests/functional/subset_update/Vagrantfile | 1 +
 .../subset_update/ceph-override.json | 14 +++
 .../subset_update/container/Vagrantfile | 1 +
 .../container/ceph-override.json | 1 +
 .../subset_update/container/group_vars/all | 36 ++++++
 .../container/group_vars/clients | 13 ++
 .../container/group_vars/iscsigws | 2 +
 .../subset_update/container/group_vars/mons | 3 +
 .../subset_update/container/group_vars/osds | 6 +
 .../subset_update/container/group_vars/rgws | 8 ++
 .../functional/subset_update/container/hosts | 17 +++
 .../container/vagrant_variables.yml | 61 ++++++++++
 tests/functional/subset_update/group_vars/all | 27 +++++
 .../subset_update/group_vars/clients | 13 ++
 .../subset_update/group_vars/iscsigws | 2 +
 .../functional/subset_update/group_vars/mons | 3 +
 .../functional/subset_update/group_vars/nfss | 10 ++
 .../functional/subset_update/group_vars/osds | 8 ++
 .../functional/subset_update/group_vars/rgws | 9 ++
 tests/functional/subset_update/hosts | 18 +++
 .../subset_update/vagrant_variables.yml | 74 ++++++++++
 tox-subset-update.ini | 111 ++++++++++++++
 22 files changed, 438 insertions(+)
 create mode 120000 tests/functional/subset_update/Vagrantfile
 create mode 100644 tests/functional/subset_update/ceph-override.json
 create mode 120000 tests/functional/subset_update/container/Vagrantfile
 create mode 120000 tests/functional/subset_update/container/ceph-override.json
 create mode 100644 tests/functional/subset_update/container/group_vars/all
 create mode 100644 tests/functional/subset_update/container/group_vars/clients
 create mode 100644 tests/functional/subset_update/container/group_vars/iscsigws
 create mode 100644 tests/functional/subset_update/container/group_vars/mons
 create mode 100644 tests/functional/subset_update/container/group_vars/osds
 create mode 100644 tests/functional/subset_update/container/group_vars/rgws
 create mode 100644 tests/functional/subset_update/container/hosts
 create mode 100644 tests/functional/subset_update/container/vagrant_variables.yml
 create mode 100644 tests/functional/subset_update/group_vars/all
 create mode 100644 tests/functional/subset_update/group_vars/clients
 create mode 100644 tests/functional/subset_update/group_vars/iscsigws
 create mode 100644 tests/functional/subset_update/group_vars/mons
 create mode 100644 tests/functional/subset_update/group_vars/nfss
 create mode 100644 tests/functional/subset_update/group_vars/osds
 create mode 100644 tests/functional/subset_update/group_vars/rgws
 create mode 100644 tests/functional/subset_update/hosts
 create mode 100644 tests/functional/subset_update/vagrant_variables.yml
 create mode 100644 tox-subset-update.ini

diff --git a/tests/functional/subset_update/Vagrantfile b/tests/functional/subset_update/Vagrantfile
new file mode 120000
index 000000000..706a5bb47
--- /dev/null
+++ b/tests/functional/subset_update/Vagrantfile
@@ -0,0 +1 @@
+../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/subset_update/ceph-override.json b/tests/functional/subset_update/ceph-override.json
new file mode 100644
index 000000000..4643d4556
--- /dev/null
+++ b/tests/functional/subset_update/ceph-override.json
@@ -0,0 +1,14 @@
+{
+  "ceph_conf_overrides": {
+    "global": {
+      "auth_allow_insecure_global_id_reclaim": false,
+      "osd_pool_default_pg_num": 12,
+      "osd_pool_default_size": 1,
+      "mon_allow_pool_size_one": true,
+      "mon_warn_on_pool_no_redundancy": false,
+      "mon_max_pg_per_osd": 300
+    }
+  },
+  "ceph_mon_docker_memory_limit": "2g",
+  "radosgw_num_instances": 2
+}
diff --git a/tests/functional/subset_update/container/Vagrantfile b/tests/functional/subset_update/container/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/tests/functional/subset_update/container/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/subset_update/container/ceph-override.json b/tests/functional/subset_update/container/ceph-override.json
new file mode 120000
index 000000000..772bdc5f5
--- /dev/null
+++ b/tests/functional/subset_update/container/ceph-override.json
@@ -0,0 +1 @@
+../ceph-override.json
\ No newline at end of file
diff --git a/tests/functional/subset_update/container/group_vars/all b/tests/functional/subset_update/container/group_vars/all
new file mode 100644
index 000000000..89a050a87
--- /dev/null
+++ b/tests/functional/subset_update/container/group_vars/all
@@ -0,0 +1,36 @@
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.5.0/24"
+cluster_network: "192.168.6.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+  global:
+    auth_allow_insecure_global_id_reclaim: false
+    mon_allow_pool_size_one: true
+    mon_warn_on_pool_no_redundancy: false
+    osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
+openstack_config: false
+docker_pull_timeout: 600s
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+mds_max_mds: 2
+# TODO: add monitoring later
+dashboard_enabled: false
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-master
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
diff --git a/tests/functional/subset_update/container/group_vars/clients b/tests/functional/subset_update/container/group_vars/clients
new file mode 100644
index 000000000..ec0bb3e09
--- /dev/null
+++ b/tests/functional/subset_update/container/group_vars/clients
@@ -0,0 +1,13 @@
+---
+user_config: True
+copy_admin_key: True
+test:
+  name: "test"
+  rule_name: "HDD"
+  size: 1
+test2:
+  name: "test2"
+  size: 1
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"
diff --git a/tests/functional/subset_update/container/group_vars/iscsigws b/tests/functional/subset_update/container/group_vars/iscsigws
new file mode 100644
index 000000000..8d0932ab4
--- /dev/null
+++ b/tests/functional/subset_update/container/group_vars/iscsigws
@@ -0,0 +1,2 @@
+---
+generate_crt: True
diff --git a/tests/functional/subset_update/container/group_vars/mons b/tests/functional/subset_update/container/group_vars/mons
new file mode 100644
index 000000000..441a4aa15
--- /dev/null
+++ b/tests/functional/subset_update/container/group_vars/mons
@@ -0,0 +1,3 @@
+---
+create_crush_tree: false
+crush_rule_config: false
diff --git a/tests/functional/subset_update/container/group_vars/osds b/tests/functional/subset_update/container/group_vars/osds
new file mode 100644
index 000000000..2a4cfbb4d
--- /dev/null
+++ b/tests/functional/subset_update/container/group_vars/osds
@@ -0,0 +1,6 @@
+---
+osd_objectstore: "bluestore"
+devices:
+  - /dev/sda
+  - /dev/sdb
+  - /dev/sdc
diff --git a/tests/functional/subset_update/container/group_vars/rgws b/tests/functional/subset_update/container/group_vars/rgws
new file mode 100644
index 000000000..639ade9ce
--- /dev/null
+++ b/tests/functional/subset_update/container/group_vars/rgws
@@ -0,0 +1,8 @@
+---
+copy_admin_key: True
+rgw_create_pools:
+  foo:
+    pg_num: 16
+    type: replicated
+  bar:
+    pg_num: 16
diff --git a/tests/functional/subset_update/container/hosts b/tests/functional/subset_update/container/hosts
new file mode 100644
index 000000000..e7d4fa615
--- /dev/null
+++ b/tests/functional/subset_update/container/hosts
@@ -0,0 +1,17 @@
+[mons]
+mon0 monitor_address=192.168.5.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.5.12
+
+[mgrs]
+mon0
+mon1
+
+[osds]
+osd0
+osd1
+osd2
+
+[rgws]
+rgw0
+rgw1
diff --git a/tests/functional/subset_update/container/vagrant_variables.yml b/tests/functional/subset_update/container/vagrant_variables.yml
new file mode 100644
index 000000000..e775d18a0
--- /dev/null
+++ b/tests/functional/subset_update/container/vagrant_variables.yml
@@ -0,0 +1,61 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 0
+rgw_vms: 2
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.5
+cluster_subnet: 192.168.6
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+#   * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/tests/functional/subset_update/group_vars/all b/tests/functional/subset_update/group_vars/all
new file mode 100644
index 000000000..283523c7a
--- /dev/null
+++ b/tests/functional/subset_update/group_vars/all
@@ -0,0 +1,27 @@
+---
+ceph_origin: repository
+ceph_repository: dev
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_conf_overrides:
+  global:
+    auth_allow_insecure_global_id_reclaim: false
+    mon_allow_pool_size_one: true
+    mon_warn_on_pool_no_redundancy: false
+    osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
+openstack_config: false
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+mds_max_mds: 2
+# TODO: add monitoring later
+dashboard_enabled: false
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_server_group_name: ceph_monitoring
diff --git a/tests/functional/subset_update/group_vars/clients b/tests/functional/subset_update/group_vars/clients
new file mode 100644
index 000000000..4c37898ff
--- /dev/null
+++ b/tests/functional/subset_update/group_vars/clients
@@ -0,0 +1,13 @@
+---
+copy_admin_key: True
+user_config: True
+test:
+  name: "test"
+  rule_name: "HDD"
+  size: 1
+test2:
+  name: "test2"
+  size: 1
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"
diff --git a/tests/functional/subset_update/group_vars/iscsigws b/tests/functional/subset_update/group_vars/iscsigws
new file mode 100644
index 000000000..8d0932ab4
--- /dev/null
+++ b/tests/functional/subset_update/group_vars/iscsigws
@@ -0,0 +1,2 @@
+---
+generate_crt: True
diff --git a/tests/functional/subset_update/group_vars/mons b/tests/functional/subset_update/group_vars/mons
new file mode 100644
index 000000000..441a4aa15
--- /dev/null
+++ b/tests/functional/subset_update/group_vars/mons
@@ -0,0 +1,3 @@
+---
+create_crush_tree: false
+crush_rule_config: false
diff --git a/tests/functional/subset_update/group_vars/nfss b/tests/functional/subset_update/group_vars/nfss
new file mode 100644
index 000000000..8fe887217
--- /dev/null
+++ b/tests/functional/subset_update/group_vars/nfss
@@ -0,0 +1,10 @@
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+  CACHEINODE {
+    Entries_HWMark = 100000;
+  }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_master"
diff --git a/tests/functional/subset_update/group_vars/osds b/tests/functional/subset_update/group_vars/osds
new file mode 100644
index 000000000..9f9e8a042
--- /dev/null
+++ b/tests/functional/subset_update/group_vars/osds
@@ -0,0 +1,8 @@
+---
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
+osd_objectstore: "bluestore"
+devices:
+  - /dev/sda
+  - /dev/sdb
+  - /dev/sdc
diff --git a/tests/functional/subset_update/group_vars/rgws b/tests/functional/subset_update/group_vars/rgws
new file mode 100644
index 000000000..d9c09f81f
--- /dev/null
+++ b/tests/functional/subset_update/group_vars/rgws
@@ -0,0 +1,9 @@
+copy_admin_key: true
+rgw_create_pools:
+  foo:
+    pg_num: 16
+    type: replicated
+  bar:
+    pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/subset_update/hosts b/tests/functional/subset_update/hosts
new file mode 100644
index 000000000..18669ec1d
--- /dev/null
+++ b/tests/functional/subset_update/hosts
@@ -0,0 +1,18 @@
+[mons]
+mon0 monitor_address=192.168.3.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.3.12
+
+[mgrs]
+mon0
+mon1
+
+[osds]
+osd0
+osd1
+osd2
+
+[rgws]
+rgw0
+rgw1
+
diff --git a/tests/functional/subset_update/vagrant_variables.yml b/tests/functional/subset_update/vagrant_variables.yml
new file mode 100644
index 000000000..00e9115a3
--- /dev/null
+++ b/tests/functional/subset_update/vagrant_variables.yml
@@ -0,0 +1,74 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 0
+rgw_vms: 2
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+#   * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name, need to match the hostname
+# label_prefix: ceph
diff --git a/tox-subset-update.ini b/tox-subset-update.ini
new file mode 100644
index 000000000..95bc3cb4a
--- /dev/null
+++ b/tox-subset-update.ini
@@ -0,0 +1,111 @@
+[tox]
+envlist = centos-{container,non_container}-subset_update
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+    vagrant
+    bash
+    git
+    pip
+passenv=*
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+  ANSIBLE_CACHE_PLUGIN = memory
+  ANSIBLE_GATHERING = implicit
+  # only available for ansible >= 2.5
+  ANSIBLE_STDOUT_CALLBACK = yaml
+# non_container: DEV_SETUP = True
+  # Set the vagrant box image to use
+  centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
+  centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
+
+  INVENTORY = {env:_INVENTORY:hosts}
+  container: CONTAINER_DIR = /container
+  container: PLAYBOOK = site-container.yml.sample
+  non_container: PLAYBOOK = site.yml.sample
+
+  UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
+  UPDATE_CEPH_DEV_BRANCH = master
+  UPDATE_CEPH_DEV_SHA1 = latest
+  ROLLING_UPDATE = True
+deps= -r{toxinidir}/tests/requirements.txt
+changedir={toxinidir}/tests/functional/subset_update{env:CONTAINER_DIR:}
+commands=
+  bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+
+  non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+      ceph_docker_registry_auth=True \
+      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+
+# upgrade mons
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mons --extra-vars "\
+      ireallymeanit=yes \
+      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+      ceph_docker_registry_auth=True \
+      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+# upgrade mgrs
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mgrs --extra-vars "\
+      ireallymeanit=yes \
+      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+      ceph_docker_registry_auth=True \
+      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+# upgrade osd1
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit=osd1 --tags=osds --extra-vars "\
+      ireallymeanit=yes \
+      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+      ceph_docker_registry_auth=True \
+      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+# upgrade remaining osds (serially)
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit='osds:!osd1' --tags=osds --extra-vars "\
+      ireallymeanit=yes \
+      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+      ceph_docker_registry_auth=True \
+      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+# upgrade rgws
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=rgws --extra-vars "\
+      ireallymeanit=yes \
+      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+      ceph_docker_registry_auth=True \
+      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+# post upgrade actions
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=post_upgrade --extra-vars "\
+      ireallymeanit=yes \
+      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+      ceph_docker_registry_auth=True \
+      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+
+
+# bash -c "CEPH_STABLE_RELEASE=quincy py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+
+# vagrant destroy --force
-- 
2.39.5
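
For reference, an environment defined in the tox file above is normally launched from the ceph-ansible repository root by pointing tox at this ini file and selecting one of the names expanded from the envlist. The invocation below is a sketch based on how the repository's other tox-*.ini scenarios are typically run; the DOCKER_HUB_* values are placeholders (they are consumed through passenv=* and fed to the ceph_docker_registry_* extra-vars above), and a local Vagrant provider such as virtualbox or libvirt is assumed.

    export DOCKER_HUB_USERNAME=<registry-user>       # placeholder credentials, read via passenv=*
    export DOCKER_HUB_PASSWORD=<registry-password>   # placeholder
    tox -c tox-subset-update.ini -e centos-container-subset_update
    # or the bare-metal variant:
    tox -c tox-subset-update.ini -e centos-non_container-subset_update

Either environment brings the Vagrant cluster up, deploys Ceph, then runs rolling_update.yml once per daemon group via --tags (with --limit splitting the OSD upgrade into osd1 and the remaining OSDs), which is the subset upgrade flow this scenario is meant to exercise.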