NRBD_MIRRORS = settings['rbd_mirror_vms']
CLIENTS = settings['client_vms']
NMGRS = settings['mgr_vms']
-PUBLIC_SUBNET = settings['public_subnet']
-CLUSTER_SUBNET = settings['cluster_subnet']
+PUBLIC_SUBNET = ENV['CEPH_PUBLIC_SUBNET'] || settings['public_subnet']
+CLUSTER_SUBNET = ENV['CEPH_CLUSTER_SUBNET'] || settings['cluster_subnet']
BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['vagrant_box']
CLIENT_BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['client_vagrant_box'] || BOX
BOX_URL = ENV['CEPH_ANSIBLE_VAGRANT_BOX_URL'] || settings['vagrant_box_url']
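The new overrides mirror the existing `CEPH_ANSIBLE_VAGRANT_BOX` handling: the environment variable wins and the settings file remains the fallback. A minimal usage sketch, assuming (as in `vagrant_variables.yml`) that the subnets are three-octet prefixes rather than CIDRs; the values are illustrative:

```sh
# Bring up a second checkout on non-conflicting subnets without editing
# vagrant_variables.yml; the prefix values here are illustrative.
CEPH_PUBLIC_SUBNET=192.168.100 CEPH_CLUSTER_SUBNET=192.168.101 vagrant up
```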
# DOCKER #
##########
#ceph_docker_image: "ceph/daemon-base"
-#ceph_docker_image_tag: latest-main
+#ceph_docker_image_tag: latest-reef
#ceph_docker_registry: quay.io
#ceph_docker_registry_auth: false
# ceph_docker_registry_username:
ansible.builtin.systemd:
name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}"
state: stopped
+ ignore_errors: true
# It needs to be done in a separate task, otherwise the stop just before doesn't work.
- name: Mask and disable the ceph-exporter service
name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}"
enabled: false
masked: true
+ ignore_errors: true
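Consolidated from the fragments above, the resulting pair of tasks reads as the sketch below (the first task's name is not in the excerpt and is assumed here). The one point the diff cannot convey is indentation: `ignore_errors` is a task-level keyword, so it sits beside the module call rather than inside its arguments.

```yaml
- name: Stop the ceph-exporter service  # name assumed; not shown in the excerpt
  ansible.builtin.systemd:
    name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}"
    state: stopped
  ignore_errors: true  # tolerate hosts where the unit was never installed

# It needs to be done in a separate task, otherwise the stop just before doesn't work.
- name: Mask and disable the ceph-exporter service
  ansible.builtin.systemd:
    name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}"
    enabled: false
    masked: true
  ignore_errors: true
```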
- name: Import ceph-defaults role
ansible.builtin.import_role:
# DOCKER #
##########
ceph_docker_image: "ceph/daemon-base"
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
ceph_docker_registry: quay.io
ceph_docker_registry_auth: false
# ceph_docker_registry_username:
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
---
ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
cluster: ceph
public_network: "192.168.73.0/24"
cluster_network: "192.168.74.0/24"
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
---
ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
cluster: ceph
public_network: "192.168.53.0/24"
cluster_network: "192.168.54.0/24"
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
db_vg: journals
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
---
ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
ip_version: ipv6
public_network: "fdec:f1fb:29cd:6940::/64"
cluster_network: "fdec:f1fb:29cd:7120::/64"
dashboard_admin_password: $sX!cD$rYU6qR^B!
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main-devel
+ceph_docker_image_tag: latest-reef-devel
containerized_deployment: true
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
generate_fsid: false
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
containerized_deployment: True
ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
handler_health_osd_check_delay: 10
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
handler_health_osd_check_delay: 10
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
- "{{ openstack_cinder_pool }}"
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
copy_admin_key: True
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
---
ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
public_network: "192.168.81.0/24"
cluster_network: "192.168.82.0/24"
monitor_interface: eth1
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
copy_admin_key: True
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
copy_admin_key: True
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
copy_admin_key: True
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
---
ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
public_network: "192.168.89.0/24"
cluster_network: "192.168.90.0/24"
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
---
ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
- non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
# mon1
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit mon1 --tags=mons --extra-vars "\
ireallymeanit=yes \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
# mon0 and mon2
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit 'mons:!mon1' --tags=mons --extra-vars "\
ireallymeanit=yes \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
# upgrade mgrs
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mgrs --extra-vars "\
ireallymeanit=yes \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
# upgrade osd1
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit=osd1 --tags=osds --extra-vars "\
ireallymeanit=yes \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
# upgrade remaining osds (serially)
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit='osds:!osd1' --tags=osds --extra-vars "\
ireallymeanit=yes \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
# upgrade rgws
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=rgws --extra-vars "\
ireallymeanit=yes \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
# post upgrade actions
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=post_upgrade --extra-vars "\
ireallymeanit=yes \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
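The invocations above differ only in their `--limit`/`--tags` pair, which is what enforces the upgrade order: one mon first, then the remaining mons, mgrs, one osd, the remaining osds, rgws, and finally the post-upgrade actions. A condensed, illustrative equivalent is sketched below; the tox file deliberately spells each call out so every stage fails independently, and `$INVENTORY` plus the omitted registry-auth extra-vars stand in for the tox substitutions:

```sh
#!/bin/sh
# Illustrative condensation of the staged rolling_update calls above.
# $INVENTORY stands in for {changedir}/{env:INVENTORY}; the registry-auth
# extra-vars are omitted for brevity.
set -e
for stage in '--limit mon1 --tags=mons' \
             '--limit mons:!mon1 --tags=mons' \
             '--tags=mgrs' \
             '--limit osd1 --tags=osds' \
             '--limit osds:!osd1 --tags=osds' \
             '--tags=rgws' \
             '--tags=post_upgrade'; do
  # $stage is intentionally left unquoted so it word-splits into flags
  ansible-playbook -vv --diff -i "$INVENTORY" \
    infrastructure-playbooks/rolling_update.yml $stage \
    --extra-vars "ireallymeanit=yes"
done
```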
pip
passenv=*
setenv=
- ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_SSH_ARGS = -F {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections
ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
ANSIBLE_CALLBACK_ENABLED = profile_tasks
deps= -r{toxinidir}/tests/requirements.txt
changedir={toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
commands=
- bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
- bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+ # use the stable-7.0 branch to deploy a quincy cluster
+ git clone -b stable-7.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
- ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
+ bash {envdir}/tmp/ceph-ansible/tests/scripts/vagrant_up.sh {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:} --no-provision {posargs:--provider=virtualbox}
+ bash {envdir}/tmp/ceph-ansible/tests/scripts/generate_ssh_config.sh {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}
+
+ ansible-galaxy collection install -r {envdir}/tmp/ceph-ansible/requirements.yml -v -p {envdir}/ansible_collections
- ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
-# # use the stable-7.0 branch to deploy an octopus cluster
-# git clone -b stable-7.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
-# pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
-# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
-# # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
-# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"'
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+ bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
+ # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
+ bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm" --limit "osds:!osd2"'
# configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
- non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
- ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
yes_i_know=true \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+ container_package_name=podman \
+ container_service_name=podman \
+ container_binary=podman \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
-# pip uninstall -y ansible
-# pip install -r {toxinidir}/tests/requirements.txt
-# ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
+ pip uninstall -y ansible
+ pip install -r {toxinidir}/tests/requirements.txt
+ ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
ireallymeanit=yes \
- ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \
- ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
commands=
ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
- non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}