- ``stable-6.0`` Supports Ceph version ``pacific``. This branch requires Ansible version ``2.10``.
-- ``main`` Supports the main branch of Ceph. This branch requires Ansible version ``2.10``.
+- ``stable-7.0`` Supports Ceph version ``quincy``. This branch requires Ansible version ``2.12``.
+
+- ``main`` Supports the main (devel) branch of Ceph. This branch requires Ansible version ``2.12``.
.. NOTE:: ``stable-3.0`` and ``stable-3.1`` branches of ceph-ansible are deprecated and no longer maintained.
when: item.1.stdout == 'ceph data'
- name: umount osd data
- mount:
+ ansible.posix.mount:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ (item.0.stdout | from_json).whoami }}"
state: unmounted
with_together:
when: item.1.stdout == 'ceph data'
- name: umount osd lockbox
- mount:
+ ansible.posix.mount:
path: "/var/lib/ceph/osd-lockbox/{{ (item.0.stdout | from_json).data.uuid }}"
state: unmounted
with_together:
"This node has been skipped because OSDs are either"
"all bluestore or there's a mix of filestore and bluestore OSDs"
when:
- - skip_this_node | bool
\ No newline at end of file
+ - skip_this_node | bool
changed_when: false
- name: tear down any existing osd filesystem
- mount:
+ ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ old_osd_filesystems.stdout_lines }}"
with_items: "{{ groups[nfs_group_name] }}"
- name: ensure nfs-ganesha mountpoint(s) are unmounted
- mount:
+ ansible.posix.mount:
path: "{{ item.split(' ')[1] }}"
state: unmounted
with_items:
- (containerized_deployment | bool or ceph_volume_present.rc == 0)
- name: umount osd data partition
- mount:
+ ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ mounted_osd.stdout_lines }}"
listen: "remove data"
- name: umount osd data partition
- mount:
+ ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ mounted_osd.stdout_lines }}"
delegate_to: "{{ item.0 }}"
- name: umount osd lockbox
- mount:
+ ansible.posix.mount:
path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
state: absent
loop: "{{ _osd_hosts }}"
- ceph_osd_data_json[item.2]['data']['uuid'] is defined
- name: umount osd data
- mount:
+ ansible.posix.mount:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
state: absent
loop: "{{ _osd_hosts }}"
# These are Python requirements needed to run ceph-ansible main
-ansible>=2.10,<2.11,!=2.9.10
+ansible-core>=2.12,<2.13
netaddr
six
version: 1.2.1
type: git
- name: ansible.utils
+ version: '>=2.5.0'
+ - name: community.general
+ - name: ansible.posix
- name: fail on unsupported ansible version
fail:
- msg: "Ansible version must be 2.10!"
- when: ansible_version.minor|int != 10
+ msg: "Ansible version must be 2.12!"
+ when: ansible_version.minor|int != 12
- name: fail on unsupported system
fail:
# mount -o remount doesn't work on RHEL 8 for now
- name: add mount options to /
- mount:
+ ansible.posix.mount:
path: '{{ rootmount.mount }}'
src: '{{ rootmount.device }}'
opts: "noatime,nodiratime{% if ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] | int < 8 %},nobarrier{% endif %}"
testinfra
pytest-xdist
pytest
-ansible>=2.10,<2.11,!=2.9.10
+ansible-core>=2.12,<2.13
Jinja2>=2.10
netaddr
mock
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+# # use the stable-7.0 branch to deploy a quincy cluster
+# git clone -b stable-7.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+# pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
+# # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
+# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"'
+
# configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
+# pip uninstall -y ansible
+# pip install -r {toxinidir}/tests/requirements.txt
+ ansible-galaxy install -r {toxinidir}/requirements.yml -v
+
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
ireallymeanit=yes \
ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \