pre_tasks:
- name: gather facts
setup:
- when:
- - not delegate_facts_host | bool
+ when: not delegate_facts_host | bool
- import_role:
name: ceph-defaults
- "{{ groups[mon_group_name] }}"
- "{{ groups[osd_group_name] }}"
run_once: True
- when:
- - delegate_facts_host | bool
+ when: delegate_facts_host | bool
tasks:
- import_role:
- name: gather facts
setup:
- when:
- - not delegate_facts_host | bool
+ when: not delegate_facts_host | bool
- import_role:
name: ceph-defaults
- "{{ groups[mon_group_name] }}"
- "{{ groups[osd_group_name] }}"
run_once: True
- when:
- - delegate_facts_host | bool
+ when: delegate_facts_host | bool
# this task is needed so we can skip the openstack_config.yml include in roles/ceph-osd
- name: set_fact add_osd
- name: creates logical volumes for the bucket index or fs journals on a single device.
become: true
- hosts:
- - osds
+ hosts: osds
vars:
logfile: |
vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}"
pvs: "{{ nvme_device }}"
- with_items:
- - "{{ nvme_device_lvs }}"
+ with_items: "{{ nvme_device_lvs }}"
- name: create lvs for fs journals for hdd devices
lvol:
lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}"
- with_items:
- - "{{ hdd_devices }}"
+ with_items: "{{ hdd_devices }}"
- name: create the lv for data portion of the bucket index on the nvme device
lvol:
vg: "{{ nvme_vg_name }}"
size: "{{ item.size }}"
pvs: "{{ nvme_device }}"
- with_items:
- - "{{ nvme_device_lvs }}"
+ with_items: "{{ nvme_device_lvs }}"
# Make sure all hdd devices have a unique volume group
- name: create vgs for all hdd devices
pesize: 4
state: present
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
- with_items:
- - "{{ hdd_devices }}"
+ with_items: "{{ hdd_devices }}"
- name: create lvs for the data portion on hdd devices
lvol:
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
size: "{{ hdd_lv_size }}"
pvs: "{{ item }}"
- with_items:
- - "{{ hdd_devices }}"
+ with_items: "{{ hdd_devices }}"
- name: "write output for osds.yml to {{ logfile_path }}"
become: false
- name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
become: true
- hosts:
- - osds
+ hosts: osds
vars_prompt:
- name: ireallymeanit
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
- with_items:
- - "{{ nvme_device_lvs }}"
+ with_items: "{{ nvme_device_lvs }}"
- name: tear down any existing hdd data lvs
lvol:
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent
force: yes
- with_items:
- - "{{ hdd_devices }}"
+ with_items: "{{ hdd_devices }}"
- name: tear down any existing lv of journal for bucket index
lvol:
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
- with_items:
- - "{{ nvme_device_lvs }}"
+ with_items: "{{ nvme_device_lvs }}"
- name: tear down any existing lvs of hdd journals
lvol:
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
- with_items:
- - "{{ hdd_devices }}"
+ with_items: "{{ hdd_devices }}"
## Volume Groups
- name: remove vg on nvme device
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent
force: yes
- with_items:
- - "{{ hdd_devices }}"
+ with_items: "{{ hdd_devices }}"
## Physical Vols
- name: tear down pv for nvme device
- name: tear down pv for each hdd device
command: "pvremove --force --yes {{ item }}"
- with_items:
- - "{{ hdd_devices }}"
+ with_items: "{{ hdd_devices }}"
vars:
mds_group_name: mdss
- hosts:
- - "{{ mds_group_name|default('mdss') }}"
+ hosts: "{{ mds_group_name|default('mdss') }}"
gather_facts: false # Already gathered previously
vars:
mgr_group_name: mgrs
- hosts:
- - "{{ mgr_group_name|default('mgrs') }}"
+ hosts: "{{ mgr_group_name|default('mgrs') }}"
gather_facts: false # Already gathered previously
vars:
rgw_group_name: rgws
- hosts:
- - "{{ rgw_group_name|default('rgws') }}"
+ hosts: "{{ rgw_group_name|default('rgws') }}"
gather_facts: false # Already gathered previously
vars:
rbdmirror_group_name: rbdmirrors
- hosts:
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
gather_facts: false # Already gathered previously
vars:
nfs_group_name: nfss
- hosts:
- - "{{ nfs_group_name|default('nfss') }}"
+ hosts: "{{ nfs_group_name|default('nfss') }}"
gather_facts: false # Already gathered previously
osd_group_name: osds
reboot_osd_node: False
- hosts:
- - "{{ osd_group_name|default('osds') }}"
+ hosts: "{{ osd_group_name|default('osds') }}"
gather_facts: false # Already gathered previously
- name: resolve parent device
command: lsblk --nodeps -no pkname "{{ item }}"
register: tmp_resolved_parent_device
- with_items:
- - "{{ combined_devices_list }}"
+ with_items: "{{ combined_devices_list }}"
- name: set_fact resolved_parent_device
set_fact:
parted -s /dev/"{{ item }}" mklabel gpt
partprobe /dev/"{{ item }}"
udevadm settle --timeout=600
- with_items:
- - "{{ resolved_parent_device }}"
+ with_items: "{{ resolved_parent_device }}"
- name: purge ceph mon cluster
vars:
mon_group_name: mons
- hosts:
- - "{{ mon_group_name|default('mons') }}"
+ hosts: "{{ mon_group_name|default('mons') }}"
gather_facts: false # already gathered previously
module: command
echo requesting data removal
become: false
- notify:
- - remove data
+ notify: remove data
- name: purge dnf cache
command: dnf clean all
file:
path: "{{ item.path }}"
state: absent
- with_items:
- - "{{ systemd_files.files }}"
+ with_items: "{{ systemd_files.files }}"
when: ansible_service_mgr == 'systemd'
- name: purge fetch directory
- hosts:
- - localhost
+ hosts: localhost
gather_facts: false
- name: confirm whether user really meant to purge the cluster
- hosts:
- - localhost
+ hosts: localhost
gather_facts: false
- name: purge ceph mds cluster
- hosts:
- - "{{ mds_group_name|default('mdss') }}"
+ hosts: "{{ mds_group_name|default('mdss') }}"
become: true
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
- tags:
- remove_img
+ tags: remove_img
ignore_errors: true
- name: purge ceph iscsigws cluster
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
- tags:
- remove_img
+ tags: remove_img
ignore_errors: true
- name: purge ceph mgr cluster
- hosts:
- - "{{ mgr_group_name|default('mgrs') }}"
+ hosts: "{{ mgr_group_name|default('mgrs') }}"
become: true
tasks:
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
- tags:
- remove_img
+ tags: remove_img
ignore_errors: true
- name: purge ceph rgw cluster
- hosts:
- - "{{ rgw_group_name|default('rgws') }}"
+ hosts: "{{ rgw_group_name|default('rgws') }}"
become: true
- name: purge ceph rbd-mirror cluster
- hosts:
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
become: true
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
- tags:
- remove_img
+ tags: remove_img
- name: purge ceph nfs cluster
- hosts:
- - "{{ nfs_group_name|default('nfss') }}"
+ hosts: "{{ nfs_group_name|default('nfss') }}"
become: true
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
- tags:
- remove_img
+ tags: remove_img
- name: purge ceph osd cluster
- hosts:
- - "{{ osd_group_name | default('osds') }}"
+ hosts: "{{ osd_group_name | default('osds') }}"
gather_facts: true
become: true
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
- tags:
- remove_img
+ tags: remove_img
ignore_errors: true
- name: include vars from group_vars/osds.yml
file:
path: "{{ item.path }}"
state: absent
- with_items:
- - "{{ osd_disk_prepare_logs.files }}"
+ with_items: "{{ osd_disk_prepare_logs.files }}"
- name: purge ceph mon cluster
- hosts:
- - "{{ mon_group_name|default('mons') }}"
+ hosts: "{{ mon_group_name|default('mons') }}"
become: true
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
- tags:
- remove_img
+ tags: remove_img
ignore_errors: true
- name: check container hosts
become: true
- tags:
- with_pkg
+ tags: with_pkg
tasks:
- name: check if it is Atomic host
- name: purge fetch directory
- hosts:
- - localhost
+ hosts: localhost
gather_facts: false
- name: gather facts
setup:
- when:
- - not delegate_facts_host | bool
+ when: not delegate_facts_host | bool
- name: gather and delegate facts
setup:
delegate_facts: True
with_items: "{{ groups['all'] }}"
run_once: true
- when:
- - delegate_facts_host | bool
+ when: delegate_facts_host | bool
- set_fact: rolling_update=true
health_mon_check_retries: 5
health_mon_check_delay: 15
upgrade_ceph_packages: True
- hosts:
- - "{{ mon_group_name|default('mons') }}"
+ hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: True
tasks:
file:
path: /etc/profile.d/ceph-aliases.sh
state: absent
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: set mon_host_count
set_fact:
- name: fail when less than three monitors
fail:
msg: "Upgrade of cluster with less than three monitors is not supported."
- when:
- - mon_host_count | int < 3
+ when: mon_host_count | int < 3
- name: select a running monitor
set_fact:
enabled: no
masked: yes
ignore_errors: True
- when:
- - not containerized_deployment
+ when: not containerized_deployment
# NOTE: we mask the service so the RPM can't restart it
# after the package gets upgraded
enabled: no
masked: yes
ignore_errors: True
- when:
- - not containerized_deployment
+ when: not containerized_deployment
# only mask the service for mgr because it must be upgraded
# after ALL monitors, even when collocated
systemd:
name: ceph-mgr@{{ ansible_hostname }}
masked: yes
- when:
- - inventory_hostname in groups[mgr_group_name] | default([])
- or groups[mgr_group_name] | default([]) | length == 0
+ when: inventory_hostname in groups[mgr_group_name] | default([])
+ or groups[mgr_group_name] | default([]) | length == 0
- name: set osd flags
command: ceph --cluster {{ cluster }} osd set {{ item }}
name: ceph-mon@{{ monitor_name }}
state: started
enabled: yes
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: start ceph mgr
systemd:
state: started
enabled: yes
ignore_errors: True # if no mgr collocated with mons
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: restart containerized ceph mon
systemd:
state: restarted
enabled: yes
daemon_reload: yes
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: non container | waiting for the monitor to join the quorum...
command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" -s --format json
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: container | waiting for the containerized monitor to join the quorum...
command: >
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: upgrade ceph mgr nodes when implicitly collocated on monitors
vars:
health_mon_check_retries: 5
health_mon_check_delay: 15
upgrade_ceph_packages: True
- hosts:
- - "{{ mon_group_name|default('mons') }}"
+ hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: True
tasks:
- name: upgrade mgrs when no mgr group explicitly defined in inventory
- when:
- - groups.get(mgr_group_name, []) | length == 0
+ when: groups.get(mgr_group_name, []) | length == 0
block:
- name: stop ceph mgr
systemd:
vars:
upgrade_ceph_packages: True
ceph_release: "{{ ceph_stable_release }}"
- hosts:
- - "{{ mgr_group_name|default('mgrs') }}"
+ hosts: "{{ mgr_group_name|default('mgrs') }}"
serial: 1
become: True
tasks:
health_osd_check_delay: 30
upgrade_ceph_packages: True
- hosts:
- - "{{ osd_group_name|default('osds') }}"
+ hosts: "{{ osd_group_name|default('osds') }}"
serial: 1
become: True
tasks:
enabled: no
masked: yes
with_items: "{{ osd_ids.stdout_lines }}"
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: set num_osds for non container
set_fact:
enabled: yes
masked: no
with_items: "{{ osd_ids.stdout_lines }}"
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: restart containerized ceph osd
systemd:
masked: no
daemon_reload: yes
with_items: "{{ osd_names.stdout_lines }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: scan ceph-disk osds with ceph-volume if deploying nautilus
command: "ceph-volume --cluster={{ cluster }} simple scan"
- name: set_fact docker_exec_cmd_osd
set_fact:
docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: get osd versions
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
delegate_to: "{{ groups[mon_group_name][0] }}"
retries: "{{ health_osd_check_retries }}"
delay: "{{ health_osd_check_delay }}"
- when:
- - (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
+ when: (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
- name: unset osd flags
- hosts:
- - "{{ mon_group_name|default('mons') }}"
+ hosts: "{{ mon_group_name|default('mons') }}"
become: True
- name: set_fact docker_exec_cmd_osd
set_fact:
docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: unset osd flags
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
- name: upgrade ceph mdss cluster
vars:
upgrade_ceph_packages: True
- hosts:
- - "{{ mds_group_name|default('mdss') }}"
+ hosts: "{{ mds_group_name|default('mdss') }}"
serial: 1
become: True
tasks:
state: stopped
enabled: no
masked: yes
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- import_role:
name: ceph-defaults
state: started
enabled: yes
masked: no
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: restart ceph mds
systemd:
enabled: yes
masked: no
daemon_reload: yes
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: upgrade ceph rgws cluster
vars:
upgrade_ceph_packages: True
- hosts:
- - "{{ rgw_group_name|default('rgws') }}"
+ hosts: "{{ rgw_group_name|default('rgws') }}"
serial: 1
become: True
tasks:
enabled: no
masked: yes
with_items: "{{ rgw_instances }}"
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- import_role:
name: ceph-handler
masked: no
daemon_reload: yes
with_items: "{{ rgw_instances }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: upgrade ceph rbd mirror node
vars:
upgrade_ceph_packages: True
- hosts:
- - "{{ rbd_mirror_group_name|default('rbdmirrors') }}"
+ hosts: "{{ rbd_mirror_group_name|default('rbdmirrors') }}"
serial: 1
become: True
tasks:
state: started
enabled: yes
masked: no
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: restart containerized ceph rbd mirror
systemd:
enabled: yes
masked: no
daemon_reload: yes
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: upgrade ceph nfs node
vars:
upgrade_ceph_packages: True
- hosts:
- - "{{ nfs_group_name|default('nfss') }}"
+ hosts: "{{ nfs_group_name|default('nfss') }}"
serial: 1
become: True
tasks:
enabled: no
masked: yes
failed_when: false
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- import_role:
name: ceph-defaults
enabled: no
masked: yes
failed_when: false
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- import_role:
name: ceph-defaults
state: started
enabled: yes
masked: no
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: upgrade ceph client node
vars:
upgrade_ceph_packages: True
- hosts:
- - "{{ client_group_name|default('clients') }}"
+ hosts: "{{ client_group_name|default('clients') }}"
serial: "{{ client_update_batch | default(20) }}"
become: True
tasks:
name: ceph-client
- name: complete upgrade
- hosts:
- - all
+ hosts: all
become: True
tasks:
- import_role:
- name: show ceph status
- hosts:
- - "{{ mon_group_name|default('mons') }}"
+ hosts: "{{ mon_group_name|default('mons') }}"
become: True
tasks:
- import_role:
- name: set_fact docker_exec_cmd_status
set_fact:
docker_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: show ceph status
command: "{{ docker_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
- name: gather facts and check the init system
- hosts:
- - "{{ mon_group_name|default('mons') }}"
+ hosts: "{{ mon_group_name|default('mons') }}"
become: true
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to remove monitor from the ceph cluster
- hosts:
- - localhost
+ hosts: localhost
become: true
vars_prompt:
- name: ireallymeanit
fail:
msg: "You are about to shrink the only monitor present in the cluster.
If you really want to do that, please use the purge-cluster playbook."
- when:
- - groups[mon_group_name] | length | int == 1
+ when: groups[mon_group_name] | length | int == 1
- name: exit playbook, if no monitor was given
fail:
Exiting shrink-cluster playbook, no monitor was removed.
On the command line when invoking the playbook, you can use the
-e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs."
- when:
- - mon_to_kill is not defined
+ when: mon_to_kill is not defined
- name: exit playbook, if the monitor is not part of the inventory
fail:
msg: "It seems that the host given is not part of your inventory, please make sure it is."
- when:
- - mon_to_kill not in groups[mon_group_name]
+ when: mon_to_kill not in groups[mon_group_name]
- name: exit playbook, if user did not mean to shrink cluster
fail:
To shrink the cluster, either say 'yes' on the prompt or
use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
- when:
- - ireallymeanit != 'yes'
+ when: ireallymeanit != 'yes'
- import_role:
name: ceph-defaults
set_fact:
mon_host: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
- when:
- - item != mon_to_kill
+ when: item != mon_to_kill
- name: "set_fact docker_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
msg: "The monitor has been successfully removed from the cluster.
Please remove the monitor entry from the rest of your ceph configuration files, cluster wide."
run_once: true
- when:
- - mon_to_kill_hostname not in result.stdout
+ when: mon_to_kill_hostname not in result.stdout
- name: fail if monitor is still part of the cluster
fail:
msg: "Monitor appears to still be part of the cluster, please check what happened."
run_once: true
- when:
- - mon_to_kill_hostname in result.stdout
+ when: mon_to_kill_hostname in result.stdout
- name: show ceph health
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
- name: confirm whether user really meant to remove osd(s) from the cluster
- hosts:
- - localhost
+ hosts: localhost
become: true
- name: gather facts and check the init system
- hosts:
- - "{{ osd_group_name|default('osds') }}"
+ hosts: "{{ osd_group_name|default('osds') }}"
become: true
- name: query each host for storage device inventory
- hosts:
- - "{{ osd_group_name|default('osds') }}"
+ hosts: "{{ osd_group_name|default('osds') }}"
become: true
- name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons
- hosts:
- - localhost
+ hosts: localhost
gather_facts: false
containerized_deployment: true
switch_to_containers: True
mon_group_name: mons
- hosts:
- - "{{ mon_group_name|default('mons') }}"
+ hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: true
pre_tasks:
- name: switching from non-containerized to containerized ceph mgr
- hosts:
- - "{{ mgr_group_name|default('mgrs') }}"
+ hosts: "{{ mgr_group_name|default('mgrs') }}"
vars:
containerized_deployment: true
containerized_deployment: true
osd_group_name: osds
- hosts:
- - "{{ osd_group_name|default('osds') }}"
+ hosts: "{{ osd_group_name|default('osds') }}"
serial: 1
become: true
shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb
changed_when: false
failed_when: false
- when:
- - ldb_files.rc == 0
+ when: ldb_files.rc == 0
- name: check if containerized osds are already running
command: >
umount /var/lib/ceph/osd/{{ item }}
changed_when: false
failed_when: false
- with_items:
- - "{{ osd_dirs.stdout_lines }}"
- when:
- - osd_running.rc != 0
+ with_items: "{{ osd_dirs.stdout_lines }}"
+ when: osd_running.rc != 0
tasks:
- import_role:
delegate_to: "{{ groups[mon_group_name][0] }}"
retries: "{{ health_osd_check_retries }}"
delay: "{{ health_osd_check_delay }}"
- when:
- - (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
+ when: (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
- name: switching from non-containerized to containerized ceph mds
- hosts:
- - "{{ mds_group_name|default('mdss') }}"
+ hosts: "{{ mds_group_name|default('mdss') }}"
vars:
containerized_deployment: true
- name: switching from non-containerized to containerized ceph rgw
- hosts:
- - "{{ rgw_group_name|default('rgws') }}"
+ hosts: "{{ rgw_group_name|default('rgws') }}"
vars:
containerized_deployment: true
- name: switching from non-containerized to containerized ceph rbd-mirror
- hosts:
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
vars:
containerized_deployment: true
- name: switching from non-containerized to containerized ceph nfs
- hosts:
- - "{{ nfs_group_name|default('nfss') }}"
+ hosts: "{{ nfs_group_name|default('nfss') }}"
vars:
containerized_deployment: true
service: >
name={{ item }}
state=stopped
- with_items:
- - radosgw
+ with_items: radosgw
when: migration_completed.stat.exists == False
- name: Wait for radosgw to be down
shell: >
{{ item }}
chdir=/var/lib/ceph/
- with_items:
- - cp etc/ceph/* /etc/ceph/
+ with_items: cp etc/ceph/* /etc/ceph/
when: migration_completed.stat.exists == False
- name: Start rados gateway
service: >
name={{ item }}
state=started
- with_items:
- - radosgw
+ with_items: radosgw
when: migration_completed.stat.exists == False
- name: Wait for radosgw to be up again
journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106
data_typecode: 4fbd7e29-9d25-41b8-afd0-062c0ceff05d
devices: []
- hosts:
- - "{{ osd_group_name }}"
+ hosts: "{{ osd_group_name }}"
tasks:
- set_fact:
owner: 167
group: 167
- when:
- - ansible_os_family == "RedHat"
+ when: ansible_os_family == "RedHat"
- set_fact:
owner: 64045
group: 64045
- when:
- - ansible_os_family == "Debian"
+ when: ansible_os_family == "Debian"
- name: change partitions ownership
file:
with_subelements:
- "{{ devices }}"
- partitions
- when:
- item.0.device_name | match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$')
-...
\ No newline at end of file
+ when: item.0.device_name | match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$')
+...
osd_group_name: osds
journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106
osds_journal_devices: []
- hosts:
- - "{{ osd_group_name }}"
+ hosts: "{{ osd_group_name }}"
serial: 1
tasks:
msg: exit playbook, osd(s) are not on this host
with_items:
osds_dir_stat.results
- when:
- - osds_dir_stat is defined and item.stat.exists == false
+ when: osds_dir_stat is defined and item.stat.exists == false
- name: install sgdisk(gdisk)
package:
--typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }}
- with_items:
- - "{{ osds.results }}"
+ with_items: "{{ osds.results }}"
- name: stop osd(s) service
service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped
- with_items:
- - "{{ osds.results }}"
+ with_items: "{{ osds.results }}"
- name: flush osd(s) journal
command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }}
- with_items:
- - "{{ osds.results }}"
+ with_items: "{{ osds.results }}"
when: osds_journal_devices is defined
- name: update osd(s) journal soft link
command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal
- with_items:
- - "{{ osds.results }}"
+ with_items: "{{ osds.results }}"
- name: update osd(s) journal uuid
command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid
- with_items:
- - "{{ osds.results }}"
+ with_items: "{{ osds.results }}"
- name: initialize osd(s) new journal
command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
- with_items:
- - "{{ osds.results }}"
+ with_items: "{{ osds.results }}"
- name: start osd(s) service
service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started
- with_items:
- - "{{ osds.results }}"
+ with_items: "{{ osds.results }}"
--typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }}
- with_items:
- - "{{ osds_uuid.results }}"
+ with_items: "{{ osds_uuid.results }}"
- name: stop osd(s) service
service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped
- with_items:
- - "{{ osds_uuid.results }}"
+ with_items: "{{ osds_uuid.results }}"
- name: reinitialize osd(s) journal in new ssd
command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
- with_items:
- - "{{ osds_uuid.results }}"
+ with_items: "{{ osds_uuid.results }}"
- name: start osd(s) service
service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started
- with_items:
- - "{{ osds_uuid.results }}"
+ with_items: "{{ osds_uuid.results }}"
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to replace osd(s)
- hosts:
- - localhost
+ hosts: localhost
become: true
vars_prompt:
- name: ireallymeanit
with_items: "{{ osd_hosts }}"
delegate_to: "{{ item }}"
failed_when: false
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: fail when admin key is not present
fail:
- "{{ osd_to_replace.split(',') }}"
register: osd_to_replace_disks
delegate_to: "{{ item.0 }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: zapping osd(s) - container
shell: >
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
delegate_to: "{{ item.0 }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: zapping osd(s) - non container
command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
delegate_to: "{{ item.0 }}"
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: destroying osd(s)
command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
- "{{ osd_hosts }}"
- "{{ osd_to_replace.split(',') }}"
delegate_to: "{{ item.0 }}"
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: replace osd(s) - prepare - non container
command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
location: "{{ hostvars[item]['osd_crush_location'] }}"
containerized: "{{ docker_exec_cmd }}"
with_items: "{{ groups[osd_group_name] }}"
- when:
- - crush_rule_config
+ when: crush_rule_config
'''
RETURN = '''# '''
raw: apt-get -y install python-simplejson
ignore_errors: yes
register: result
- when:
- - systempython.stat is undefined or not systempython.stat.exists
+ when: systempython.stat is undefined or not systempython.stat.exists
until: result is succeeded
- name: install python for fedora
raw: dnf -y install python3; ln -sf /usr/bin/python3 /usr/bin/python creates=/usr/bin/python
ignore_errors: yes
register: result
- when:
- - systempython.stat is undefined or not systempython.stat.exists
+ when: systempython.stat is undefined or not systempython.stat.exists
until: (result is succeeded) and ('Failed' not in result.stdout)
- name: install python for opensuse
raw: zypper -n install python-base creates=/usr/bin/python2.7
ignore_errors: yes
register: result
- when:
- - systempython.stat is undefined or not systempython.stat.exists
- until: result is succeeded
\ No newline at end of file
+ when: systempython.stat is undefined or not systempython.stat.exists
+ until: result is succeeded
state: present
register: result
until: result is succeeded
- tags:
- - package-install
+ tags: package-install
- name: create minion.d directory
file:
- name: set_fact keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module
set_fact:
keys_tmp: "{{ keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}"
- when:
- - item.get('mon_cap', None) # it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap
+ when: item.get('mon_cap', None) # it's enough to assume we are running an old-fashioned syntax simply by checking the presence of mon_cap since every key needs this cap
with_items: "{{ keys }}"
- name: set_fact keys - override keys_tmp with keys
set_fact:
keys: "{{ keys_tmp }}"
- when:
- - keys_tmp is defined
+ when: keys_tmp is defined
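# Illustration only (all values are hypothetical): an old-fashioned entry such as
#   { name: client.app1, key: AQD...==, mon_cap: "allow r", osd_cap: "allow rw pool=app1", mode: "0600" }
# is rewritten by the two set_fact tasks above into the new syntax:
#   { name: client.app1, key: AQD...==, caps: { mon: "allow r", osd: "allow rw pool=app1", mds: "", mgr: "" }, mode: "0600" }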
# dummy container setup is only supported on x86_64
# when running with containerized_deployment: true, this task only keeps
# x86_64 client hosts in the _filtered_clients group
name: "{{ item }}"
groups: _filtered_clients
with_items: "{{ groups[client_group_name] }}"
- when:
- - (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment)
+ when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment)
- name: set_fact delegated_node
set_fact:
- name: slurp client cephx key(s)
slurp:
src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
- with_items:
- - "{{ keys }}"
+ with_items: "{{ keys }}"
register: slurp_client_keys
delegate_to: "{{ delegated_node }}"
when:
with_items: "{{ pools | unique }}"
changed_when: false
delegate_to: "{{ delegated_node }}"
- when:
- - item.application is defined
+ when: item.application is defined
- name: get client cephx keys
copy:
mode: "{{ item.item.get('mode', '0600') }}"
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
- with_items:
- - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
- when:
- - not item.get('skipped', False)
+ with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
+ when: not item.get('skipped', False)
- name: include create_users_keys.yml
include_tasks: create_users_keys.yml
- when:
- - user_config
+ when: user_config
create: yes
line: "CLUSTER={{ cluster }}"
regexp: "^CLUSTER="
- when:
- - ansible_os_family in ["RedHat", "Suse"]
+ when: ansible_os_family in ["RedHat", "Suse"]
# NOTE(leseb): we are performing the following check
# to ensure any Jewel installation will not fail.
# - All previous versions from Canonical
# - Infernalis from ceph.com
- name: debian based systems - configure cluster name
- when:
- - ansible_os_family == "Debian"
+ when: ansible_os_family == "Debian"
block:
- name: check /etc/default/ceph exist
stat:
check_mode: no
- name: configure cluster name
- when:
- - etc_default_ceph.stat.exists
+ when: etc_default_ceph.stat.exists
block:
- name: when /etc/default/ceph is not dir
lineinfile:
create: yes
regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}"
- when:
- - not etc_default_ceph.stat.isdir
+ when: not etc_default_ceph.stat.isdir
- name: when /etc/default/ceph is dir
lineinfile:
create: yes
regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}"
- when:
- - etc_default_ceph.stat.isdir
+ when: etc_default_ceph.stat.isdir
when:
- ansible_os_family == 'Debian'
- etc_default_ceph.stat.exists
- notify:
- - restart ceph osds
+ notify: restart ceph osds
- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat
lineinfile:
create: yes
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
- when:
- - ansible_os_family == 'RedHat'
- notify:
- - restart ceph osds
+ when: ansible_os_family == 'RedHat'
+ notify: restart ceph osds
with_items:
- "{{ rbd_client_admin_socket_path }}"
- "{{ rbd_client_log_path }}"
- when:
- - rbd_client_directories
+ when: rbd_client_directories
---
- name: include debian_community_repository.yml
include_tasks: debian_community_repository.yml
- when:
- - ceph_repository == 'community'
+ when: ceph_repository == 'community'
- name: include debian_rhcs_repository.yml
include_tasks: debian_rhcs_repository.yml
- when:
- - ceph_repository == 'rhcs'
+ when: ceph_repository == 'rhcs'
- name: include debian_dev_repository.yml
include_tasks: debian_dev_repository.yml
- when:
- - ceph_repository == 'dev'
+ when: ceph_repository == 'dev'
- name: include debian_custom_repository.yml
include_tasks: debian_custom_repository.yml
- when:
- - ceph_repository == 'custom'
+ when: ceph_repository == 'custom'
- name: include debian_uca_repository.yml
include_tasks: debian_uca_repository.yml
- when:
- - ceph_repository == 'uca'
\ No newline at end of file
+ when: ceph_repository == 'uca'
file:
path: /tmp
state: directory
- when:
- - use_installer
+ when: use_installer
- name: use mktemp to create name for rundep
command: "mktemp /tmp/rundep.XXXXXXXX"
register: rundep_location
- when:
- - use_installer
+ when: use_installer
- name: copy rundep
copy:
src: "{{ ansible_dir }}/rundep"
dest: "{{ item }}"
with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
- when:
- - use_installer
+ when: use_installer
- name: install ceph dependencies
script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}"
become: true
with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
- when:
- - use_installer
+ when: use_installer
- name: ensure rsync is installed
package:
---
- name: include redhat_community_repository.yml
include_tasks: redhat_community_repository.yml
- when:
- - ceph_repository == 'community'
+ when: ceph_repository == 'community'
- name: include redhat_rhcs_repository.yml
include_tasks: redhat_rhcs_repository.yml
- when:
- - ceph_repository == 'rhcs'
+ when: ceph_repository == 'rhcs'
- name: include redhat_dev_repository.yml
include_tasks: redhat_dev_repository.yml
- when:
- - ceph_repository == 'dev'
+ when: ceph_repository == 'dev'
- name: include redhat_custom_repository.yml
include_tasks: redhat_custom_repository.yml
- when:
- - ceph_repository == 'custom'
+ when: ceph_repository == 'custom'
# Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version
- name: purge yum cache
args:
warn: no
changed_when: false
- when:
- ansible_pkg_mgr == 'yum'
+ when: ansible_pkg_mgr == 'yum'
---
- name: include suse_obs_repository.yml
include_tasks: suse_obs_repository.yml
- when:
- - ceph_repository == 'obs'
+ when: ceph_repository == 'obs'
- name: include prerequisite_rhcs_iso_install_debian.yml
include_tasks: prerequisite_rhcs_iso_install_debian.yml
- when:
- - ceph_repository_type == 'iso'
+ when: ceph_repository_type == 'iso'
- name: include prerequisite_rhcs_cdn_install_debian.yml
include_tasks: prerequisite_rhcs_cdn_install_debian.yml
- when:
- - ceph_repository_type == 'cdn'
+ when: ceph_repository_type == 'cdn'
---
- name: include configure_debian_repository_installation.yml
include_tasks: configure_debian_repository_installation.yml
- when:
- - ceph_origin == 'repository'
+ when: ceph_origin == 'repository'
- name: update apt cache if cache_valid_time has expired
apt:
---
- name: include configure_redhat_repository_installation.yml
include_tasks: configure_redhat_repository_installation.yml
- when:
- - ceph_origin == 'repository'
+ when: ceph_origin == 'repository'
- name: include configure_redhat_local_installation.yml
include_tasks: configure_redhat_local_installation.yml
- when:
- - ceph_origin == 'local'
+ when: ceph_origin == 'local'
- name: include install_redhat_packages.yml
include_tasks: install_redhat_packages.yml
- when:
- - (ceph_origin == 'repository' or ceph_origin == 'distro')
+ when: (ceph_origin == 'repository' or ceph_origin == 'distro')
- name: Check for supported installation method on suse
fail:
msg: "Unsupported installation method origin:{{ ceph_origin }} repo:{{ ceph_repository }}'"
- when:
- - ceph_origin != 'distro' or (ceph_origin == 'repository' and ceph_repository != 'obs')
+ when: ceph_origin != 'distro' or (ceph_origin == 'repository' and ceph_repository != 'obs')
- name: include configure_suse_repository_installation.yml
include_tasks: configure_suse_repository_installation.yml
- when:
- - ceph_origin == 'repository'
+ when: ceph_origin == 'repository'
- name: install dependencies
zypper:
state: present
register: result
until: result is succeeded
- when:
- - ansible_distribution == 'RedHat'
+ when: ansible_distribution == 'RedHat'
- name: install centos dependencies
yum:
state: present
register: result
until: result is succeeded
- when:
- - ansible_distribution == 'CentOS'
+ when: ansible_distribution == 'CentOS'
- name: install redhat ceph packages
package:
name: "{{ redhat_ceph_pkgs | unique }}"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
- until: result is succeeded
\ No newline at end of file
+ until: result is succeeded
- name: enable red hat storage monitor repository
rhsm_repository:
name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms"
- when:
- - (mon_group_name in group_names or mgr_group_name in group_names)
+ when: (mon_group_name in group_names or mgr_group_name in group_names)
- name: enable red hat storage osd repository
rhsm_repository:
name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms"
- when:
- - osd_group_name in group_names
+ when: osd_group_name in group_names
- name: enable red hat storage tools repository
rhsm_repository:
name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms"
- when:
- - (rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names)
\ No newline at end of file
+ when: (rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names)
path: "{{ ceph_rhcs_iso_path | dirname }}"
state: directory
recurse: yes
- when:
- - ceph_rhcs_iso_path | dirname != '/'
+ when: ceph_rhcs_iso_path | dirname != '/'
- name: fetch the red hat storage iso from the ansible server for redhat systems
copy:
path: "{{ ceph_rhcs_iso_path | dirname }}"
state: directory
recurse: yes
- when:
- - ceph_rhcs_iso_path | dirname != '/'
+ when: ceph_rhcs_iso_path | dirname != '/'
- name: fetch the red hat storage iso from the ansible server for debian systems
copy:
name: yum-plugin-priorities
register: result
until: result is succeeded
- tags:
- - with_pkg
+ tags: with_pkg
- name: configure red hat ceph community repository stable key
rpm_key:
file: ceph_stable
priority: 2
register: result
- until: result is succeeded
\ No newline at end of file
+ until: result is succeeded
---
- name: include prerequisite_rhcs_iso_install.yml
include_tasks: prerequisite_rhcs_iso_install.yml
- when:
- - ceph_repository_type == 'iso'
+ when: ceph_repository_type == 'iso'
- name: include prerequisite_rhcs_cdn_install.yml
include_tasks: prerequisite_rhcs_cdn_install.yml
- when:
- - ceph_repository_type == 'cdn'
+ when: ceph_repository_type == 'cdn'
- name: include_tasks installs/install_on_redhat.yml
include_tasks: installs/install_on_redhat.yml
when: ansible_os_family == 'RedHat'
- tags:
- - package-install
+ tags: package-install
- name: include_tasks installs/install_on_suse.yml
include_tasks: installs/install_on_suse.yml
when: ansible_os_family == 'Suse'
- tags:
- - package-install
+ tags: package-install
- name: include installs/install_on_debian.yml
include_tasks: installs/install_on_debian.yml
- tags:
- - package-install
- when:
- - ansible_os_family == 'Debian'
+ tags: package-install
+ when: ansible_os_family == 'Debian'
- name: include_tasks installs/install_on_clear.yml
include_tasks: installs/install_on_clear.yml
when: ansible_os_family == 'ClearLinux'
- tags:
- - package-install
+ tags: package-install
- name: get ceph version
command: ceph --version
# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
- name: include release-rhcs.yml
include_tasks: release-rhcs.yml
- when:
- - ceph_repository in ['rhcs', 'dev']
+ when: ceph_repository in ['rhcs', 'dev']
or
ceph_origin == 'distro'
- tags:
- - always
+ tags: always
- name: set_fact ceph_release - override ceph_release with ceph_stable_release
set_fact:
when:
- ceph_origin == 'repository'
- ceph_repository not in ['dev', 'rhcs']
- tags:
- - always
+ tags: always
- name: include create_rbd_client_dir.yml
include_tasks: create_rbd_client_dir.yml
- name: set_fact ceph_release jewel
set_fact:
ceph_release: jewel
- when:
- - ceph_version.split('.')[0] is version_compare('10', '==')
+ when: ceph_version.split('.')[0] is version_compare('10', '==')
- name: set_fact ceph_release kraken
set_fact:
ceph_release: kraken
- when:
- - ceph_version.split('.')[0] is version_compare('11', '==')
+ when: ceph_version.split('.')[0] is version_compare('11', '==')
- name: set_fact ceph_release luminous
set_fact:
ceph_release: luminous
- when:
- - ceph_version.split('.')[0] is version_compare('12', '==')
+ when: ceph_version.split('.')[0] is version_compare('12', '==')
- name: set_fact ceph_release mimic
set_fact:
ceph_release: mimic
- when:
- - ceph_version.split('.')[0] is version_compare('13', '==')
+ when: ceph_version.split('.')[0] is version_compare('13', '==')
- name: set_fact ceph_release nautilus
set_fact:
ceph_release: nautilus
- when:
- - ceph_version.split('.')[0] is version_compare('14', '==')
+ when: ceph_version.split('.')[0] is version_compare('14', '==')
---
- name: include create_ceph_initial_dirs.yml
include_tasks: create_ceph_initial_dirs.yml
- when:
- - containerized_deployment|bool
+ when: containerized_deployment|bool
- name: config file operations related to OSDs
when:
- name: count number of osds for lvm scenario
set_fact:
num_osds: "{{ lvm_volumes | length | int }}"
- when:
- - lvm_volumes | default([]) | length > 0
+ when: lvm_volumes | default([]) | length > 0
- name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
ceph_volume:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
PYTHONIOENCODING: utf-8
- when:
- - devices | default([]) | length > 0
+ when: devices | default([]) | length > 0
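# The report produced above is parsed by the next task to derive num_osds when
# the osd layout is driven by 'devices' rather than 'lvm_volumes'.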
- name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
set_fact:
# ceph-common
- name: config file operation for non-containerized scenarios
- when:
- - not containerized_deployment|bool
+ when: not containerized_deployment|bool
block:
- name: create ceph conf directory
file:
state: directory
mode: "0755"
delegate_to: localhost
- when:
- - ceph_conf_local
+ when: ceph_conf_local
- name: "generate {{ cluster }}.conf configuration file locally"
config_template:
- ceph_conf_local
- name: config file operations for containerized scenarios
- when:
- - containerized_deployment|bool
+ when: containerized_deployment|bool
block:
- name: create a local fetch directory if it does not exist
file:
until: docker_image.rc == 0
retries: "{{ docker_pull_retry }}"
delay: 10
- when:
- - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
- name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
command: "{{ container_binary }} inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- name: set_fact image_repodigest_after_pulling
set_fact:
image_repodigest_after_pulling: "{{ (image_inspect_after_pull.stdout | from_json)[0].RepoDigests[0].split('@')[1] }}"
- when:
- - image_inspect_after_pull.rc == 0
+ when: image_inspect_after_pull.rc == 0
- name: set_fact ceph_mon_image_updated
set_fact:
ceph_mon_image_updated: "{{ ceph_mon_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
- notify:
- - restart ceph mons
+ notify: restart ceph mons
when:
- mon_group_name in group_names
- ceph_mon_container_inspect_before_pull.get('rc') == 0
set_fact:
ceph_osd_image_updated: "{{ ceph_osd_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
- notify:
- - restart ceph osds
+ notify: restart ceph osds
when:
- osd_group_name in group_names
- ceph_osd_container_inspect_before_pull.get('rc') == 0
set_fact:
ceph_mds_image_updated: "{{ ceph_mds_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
- notify:
- - restart ceph mdss
+ notify: restart ceph mdss
when:
- mds_group_name in group_names
- ceph_mds_container_inspect_before_pull.get('rc') == 0
set_fact:
ceph_rgw_image_updated: "{{ ceph_rgw_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
- notify:
- - restart ceph rgws
+ notify: restart ceph rgws
when:
- rgw_group_name in group_names
- ceph_rgw_container_inspect_before_pull.get('rc') == 0
set_fact:
ceph_mgr_image_updated: "{{ ceph_mgr_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
- notify:
- - restart ceph mgrs
+ notify: restart ceph mgrs
when:
- mgr_group_name in group_names
- ceph_mgr_container_inspect_before_pull.get('rc') == 0
set_fact:
ceph_rbd_mirror_image_updated: "{{ ceph_rbd_mirror_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
- notify:
- - restart ceph rbdmirrors
+ notify: restart ceph rbdmirrors
when:
- rbdmirror_group_name in group_names
- ceph_rbd_mirror_container_inspect_before_pull.get('rc') == 0
set_fact:
ceph_nfs_image_updated: "{{ ceph_nfs_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
- notify:
- - restart ceph nfss
+ notify: restart ceph nfss
when:
- nfs_group_name in group_names
- ceph_nfs_container_inspect_before_pull.get('rc') == 0
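# Pattern for every daemon type above: compare the image digest recorded before
# the pull with the one obtained after it, store the result in the
# <daemon>_image_updated fact and notify the matching "restart ceph ..." handler
# for hosts belonging to that daemon's group.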
{{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
"{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
delegate_to: localhost
- when:
- - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
run_once: true
- name: copy ceph dev image file
copy:
src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when:
- - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
- name: load ceph dev image
command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when:
- - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
- name: remove tmp ceph dev image file
file:
name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
state: absent
- when:
- - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+
---
- name: include pre_requisites/prerequisites.yml
include_tasks: pre_requisites/prerequisites.yml
- when:
- - not is_atomic
+ when: not is_atomic
- name: get docker version
command: docker --version
changed_when: false
check_mode: no
register: ceph_docker_version
- when:
- - container_binary == 'docker'
+ when: container_binary == 'docker'
- name: set_fact ceph_docker_version ceph_docker_version.stdout.split
set_fact:
ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"
- when:
- - container_binary == 'docker'
+ when: container_binary == 'docker'
- name: include fetch_image.yml
include_tasks: fetch_image.yml
- tags:
- - fetch_container_image
+ tags: fetch_container_image
- name: get ceph version
command: >
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
- name: include release.yml
- include_tasks: release.yml
\ No newline at end of file
+ include_tasks: release.yml
when:
- ansible_os_family == 'Debian'
- container_package_name == 'docker-ce'
- tags:
- with_pkg
+ tags: with_pkg
# ensure extras enabled for docker
- name: enable extras on centos
package:
name: ['{{ container_package_name }}', '{{ container_binding_name }}']
update_cache: true
- tags:
- with_pkg
+ tags: with_pkg
- name: start container service
service:
- name: set_fact ceph_release jewel
set_fact:
ceph_release: jewel
- when:
- - ceph_version.split('.')[0] is version_compare('10', '==')
+ when: ceph_version.split('.')[0] is version_compare('10', '==')
- name: set_fact ceph_release kraken
set_fact:
ceph_release: kraken
- when:
- - ceph_version.split('.')[0] is version_compare('11', '==')
+ when: ceph_version.split('.')[0] is version_compare('11', '==')
- name: set_fact ceph_release luminous
set_fact:
ceph_release: luminous
- when:
- - ceph_version.split('.')[0] is version_compare('12', '==')
+ when: ceph_version.split('.')[0] is version_compare('12', '==')
- name: set_fact ceph_release mimic
set_fact:
ceph_release: mimic
- when:
- - ceph_version.split('.')[0] is version_compare('13', '==')
+ when: ceph_version.split('.')[0] is version_compare('13', '==')
- name: set_fact ceph_release nautilus
set_fact:
ceph_release: nautilus
- when:
- - ceph_version.split('.')[0] is version_compare('14', '==')
+ when: ceph_version.split('.')[0] is version_compare('14', '==')
+
- name: set_fact monitor_name ansible_hostname
set_fact:
monitor_name: "{{ ansible_hostname }}"
- when:
- - not mon_use_fqdn
+ when: not mon_use_fqdn
- name: set_fact monitor_name ansible_fqdn
set_fact:
monitor_name: "{{ ansible_fqdn }}"
- when:
- - mon_use_fqdn
+ when: mon_use_fqdn
- name: set_fact docker_exec_cmd
set_fact:
set_fact:
ceph_current_status:
rc: 1
- when:
- - rolling_update or groups.get(mon_group_name, []) | length == 0
+ when: rolling_update or groups.get(mon_group_name, []) | length == 0
- name: create a local fetch directory if it does not exist
file:
delegate_to: localhost
changed_when: false
become: false
- when:
- - (cephx or generate_fsid)
+ when: cephx or generate_fsid
- name: get current fsid
command: "{{ timeout_command }} {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
register: rolling_update_fsid
delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
- when:
- - rolling_update
+ when: rolling_update
- name: set_fact fsid
set_fact:
fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
- when:
- - rolling_update
+ when: rolling_update
- name: set_fact ceph_current_status (convert to json)
set_fact:
- name: set_fact fsid from ceph_current_status
set_fact:
fsid: "{{ ceph_current_status.fsid }}"
- when:
- - ceph_current_status.fsid is defined
+ when: ceph_current_status.fsid is defined
- name: fsid related tasks
when:
- name: set_fact mds_name ansible_hostname
set_fact:
mds_name: "{{ ansible_hostname }}"
- when:
- - not mds_use_fqdn
+ when: not mds_use_fqdn
- name: set_fact mds_name ansible_fqdn
set_fact:
mds_name: "{{ ansible_fqdn }}"
- when:
- - mds_use_fqdn
+ when: mds_use_fqdn
- name: set_fact rbd_client_directory_owner ceph
set_fact:
rbd_client_directory_owner: ceph
- when:
- - rbd_client_directory_owner is not defined
+ when: rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
- name: set_fact rbd_client_directory_group rbd_client_directory_group
set_fact:
rbd_client_directory_group: ceph
- when:
- - rbd_client_directory_group is not defined
+ when: rbd_client_directory_group is not defined
or not rbd_client_directory_group
- name: set_fact rbd_client_directory_mode 0770
set_fact:
rbd_client_directory_mode: "0770"
- when:
- - rbd_client_directory_mode is not defined
+ when: rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
- name: resolve device link(s)
- name: import_tasks set_radosgw_address.yml
import_tasks: set_radosgw_address.yml
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
+ when: inventory_hostname in groups.get(rgw_group_name, [])
- name: set_fact rgw_instances
set_fact:
rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': _radosgw_address, 'radosgw_frontend_port': radosgw_frontend_port|int + item|int}]) }}"
with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
+ when: inventory_hostname in groups.get(rgw_group_name, [])
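# The set_fact above builds one rgw_instances entry per radosgw instance on the
# host: instance names rgw0..rgwN-1, all bound to _radosgw_address, with the
# frontend port offset by the instance index (radosgw_frontend_port + N).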
- name: set ntp service name depending on OS family
block:
- name: set_fact _monitor_address to monitor_address_block ipv4
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv4_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first }] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address_block'] is defined
- name: set_fact _monitor_address to monitor_address_block ipv6
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv6_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | last | ipwrap }] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address_block'] is defined
- name: set_fact _monitor_address to monitor_address
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address'] is defined
- name: set_fact _monitor_address to monitor_interface - ipv4
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- ip_version == 'ipv4'
- name: set_fact _monitor_address to monitor_interface - ipv6
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- ip_version == 'ipv6'
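# Taken together, the five set_fact tasks above populate _monitor_addresses for
# every monitor host, trying monitor_address_block first, then monitor_address,
# then the address resolved from monitor_interface (IPv4 or IPv6); the
# "item not in _monitor_addresses" guard makes the first matching source win.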
- name: set_fact _radosgw_address to radosgw_interface - ipv4
set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
- when:
- - ip_version == 'ipv4'
+ when: ip_version == 'ipv4'
- name: set_fact _radosgw_address to radosgw_interface - ipv6
set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
- when:
- - ip_version == 'ipv6'
+ when: ip_version == 'ipv6'
- name: update apt cache
apt:
update-cache: yes
- when:
- - ansible_os_family == 'Debian'
+ when: ansible_os_family == 'Debian'
register: result
until: result is succeeded
group: root
mode: 0750
listen: "restart ceph mdss"
- when:
- - mds_group_name in group_names
+ when: mds_group_name in group_names
- name: restart ceph mds daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
group: root
mode: 0750
listen: "restart ceph rgws"
- when:
- - rgw_group_name in group_names
+ when: rgw_group_name in group_names
- name: restart ceph rgw daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
group: root
mode: 0750
listen: "restart ceph nfss"
- when:
- - nfs_group_name in group_names
+ when: nfs_group_name in group_names
- name: restart ceph nfs daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
group: root
mode: 0750
listen: "restart ceph rbdmirrors"
- when:
- - rbdmirror_group_name in group_names
+ when: rbdmirror_group_name in group_names
- name: restart ceph rbd mirror daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
group: root
mode: 0750
listen: "restart ceph mgrs"
- when:
- - mgr_group_name in group_names
+ when: mgr_group_name in group_names
- name: restart ceph mgr daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
group: root
mode: 0750
listen: "restart ceph tcmu-runner"
- when:
- - iscsi_gw_group_name in group_names
+ when: iscsi_gw_group_name in group_names
- name: restart tcmu-runner
command: /usr/bin/env bash /tmp/restart_tcmu_runner.sh
group: root
mode: 0750
listen: "restart ceph rbd-target-gw"
- when:
- - iscsi_gw_group_name in group_names
+ when: iscsi_gw_group_name in group_names
- name: restart rbd-target-gw
command: /usr/bin/env bash /tmp/restart_rbd_target_gw.sh
group: root
mode: 0750
listen: "restart ceph rbd-target-api"
- when:
- - iscsi_gw_group_name in group_names
+ when: iscsi_gw_group_name in group_names
- name: restart rbd-target-api
command: /usr/bin/env bash /tmp/restart_rbd_target_api.sh
---
- name: include check_running_containers.yml
include_tasks: check_running_containers.yml
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: include check_socket_non_container.yml
include_tasks: check_socket_non_container.yml
- when:
- - not containerized_deployment
+ when: not containerized_deployment
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(mon_group_name, [])
+ when: inventory_hostname in groups.get(mon_group_name, [])
- name: check for an osd container
command: "{{ container_binary }} ps -q --filter='name=ceph-osd'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(osd_group_name, [])
+ when: inventory_hostname in groups.get(osd_group_name, [])
- name: check for a mds container
command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(mds_group_name, [])
+ when: inventory_hostname in groups.get(mds_group_name, [])
- name: check for a rgw container
command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
+ when: inventory_hostname in groups.get(rgw_group_name, [])
- name: check for a mgr container
command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(mgr_group_name, [])
+ when: inventory_hostname in groups.get(mgr_group_name, [])
- name: check for a rbd mirror container
command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(rbdmirror_group_name, [])
+ when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: check for a nfs container
command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(nfs_group_name, [])
+ when: inventory_hostname in groups.get(nfs_group_name, [])
- name: check for a tcmu-runner container
command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-api container
command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-gw container
command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
failed_when: false
check_mode: no
register: mon_socket_stat
- when:
- - inventory_hostname in groups.get(mon_group_name, [])
+ when: inventory_hostname in groups.get(mon_group_name, [])
- name: check if the ceph mon socket is in-use
command: fuser --silent {{ mon_socket_stat.stdout }}
failed_when: false
check_mode: no
register: osd_socket_stat
- when:
- - inventory_hostname in groups.get(osd_group_name, [])
+ when: inventory_hostname in groups.get(osd_group_name, [])
- name: check if the ceph osd socket is in-use
command: fuser --silent {{ osd_socket_stat.stdout }}
failed_when: false
check_mode: no
register: mds_socket_stat
- when:
- - inventory_hostname in groups.get(mds_group_name, [])
+ when: inventory_hostname in groups.get(mds_group_name, [])
- name: check if the ceph mds socket is in-use
command: fuser --silent {{ mds_socket_stat.stdout }}
failed_when: false
check_mode: no
register: rgw_socket_stat
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
+ when: inventory_hostname in groups.get(rgw_group_name, [])
- name: check if the ceph rgw socket is in-use
command: fuser --silent {{ rgw_socket_stat.stdout }}
failed_when: false
check_mode: no
register: mgr_socket_stat
- when:
- - inventory_hostname in groups.get(mgr_group_name, [])
+ when: inventory_hostname in groups.get(mgr_group_name, [])
- name: check if the ceph mgr socket is in-use
command: fuser --silent {{ mgr_socket_stat.stdout }}
failed_when: false
check_mode: no
register: rbd_mirror_socket_stat
- when:
- - inventory_hostname in groups.get(rbdmirror_group_name, [])
+ when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: check if the ceph rbd mirror socket is in-use
command: fuser --silent {{ rbd_mirror_socket_stat.stdout }}
failed_when: false
check_mode: no
register: nfs_socket_stat
- when:
- - inventory_hostname in groups.get(nfs_group_name, [])
+ when: inventory_hostname in groups.get(nfs_group_name, [])
- name: check if the ceph nfs ganesha socket is in-use
command: fuser --silent {{ nfs_socket_stat.stdout }}
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-api
command: "pgrep rbd-target-api"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-gw
command: "pgrep name=rbd-target-gw"
changed_when: false
failed_when: false
check_mode: no
- when:
- - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
ignore_errors: true
check_mode: no
changed_when: false
- tags:
- - firewall
- when:
- - not containerized_deployment
+ tags: firewall
+ when: not containerized_deployment
-- when:
- - (firewalld_pkg_query.get('rc', 1) == 0
+- when: (firewalld_pkg_query.get('rc', 1) == 0
or is_atomic)
block:
- name: start firewalld
when:
- mon_group_name is defined
- mon_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open manager ports
firewalld:
when:
- mgr_group_name is defined
- mgr_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open osd ports
firewalld:
when:
- osd_group_name is defined
- osd_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open rgw ports
firewalld:
when:
- rgw_group_name is defined
- rgw_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open mds ports
firewalld:
when:
- mds_group_name is defined
- mds_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open nfs ports
firewalld:
when:
- nfs_group_name is defined
- nfs_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open nfs ports (portmapper)
firewalld:
when:
- nfs_group_name is defined
- nfs_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open rbdmirror ports
firewalld:
when:
- rbdmirror_group_name is defined
- rbdmirror_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open iscsi target ports
firewalld:
when:
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- name: open iscsi api ports
firewalld:
when:
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- tags:
- - firewall
+ tags: firewall
- meta: flush_handlers
state: present
register: result
until: result is succeeded
- when:
- - ntp_daemon_type == "ntpd"
+ when: ntp_daemon_type == "ntpd"
- name: install chrony
package:
state: present
register: result
until: result is succeeded
- when:
- - ntp_daemon_type == "chronyd"
+ when: ntp_daemon_type == "chronyd"
- name: enable the ntp daemon and disable the rest
block:
notify:
- disable ntpd
- disable chronyd
- when:
- - ntp_daemon_type == "timesyncd"
+ when: ntp_daemon_type == "timesyncd"
- name: disable time sync using timesyncd if we are not using it
command: timedatectl set-ntp no
- when:
- - ntp_daemon_type != "timesyncd"
+ when: ntp_daemon_type != "timesyncd"
- name: enable ntpd
service:
notify:
- disable chronyd
- disable timesyncd
- when:
- - ntp_daemon_type == "ntpd"
+ when: ntp_daemon_type == "ntpd"
- name: enable chronyd
service:
notify:
- disable ntpd
- disable timesyncd
- when:
- - ntp_daemon_type == "chronyd"
+ when: ntp_daemon_type == "chronyd"
- name: make sure gateway_ip_list is configured
fail:
msg: "you must set a list of IPs (comma separated) for gateway_ip_list"
- when:
- - gateway_ip_list == "0.0.0.0"
+ when: gateway_ip_list == "0.0.0.0"
- name: copy admin key
copy:
owner: "root"
group: "root"
mode: "{{ ceph_keyring_permissions }}"
- when:
- - cephx
+ when: cephx
- name: deploy gateway settings, used by the ceph_iscsi_config modules
template:
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default(osd_pool_default_size) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
- when:
- - rbd_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+ when: rbd_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- tcmu-runner
- rbd-target-gw
- rbd-target-api
- notify:
- - restart ceph {{ item }}
+ notify: restart ceph {{ item }}
- name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
systemd:
become: False
run_once: True
with_items: "{{ crt_files_exist.results }}"
- when:
- - not item.stat.exists
+ when: not item.stat.exists
- name: create pem
shell: >
run_once: True
register: pem
with_items: "{{ crt_files_exist.results }}"
- when:
- - not item.stat.exists
+ when: not item.stat.exists
- name: create public key from pem
shell: >
delegate_to: localhost
become: False
run_once: True
- when:
- - pem.changed
- tags:
- - skip_ansible_lint
+ when: pem.changed
+ tags: skip_ansible_lint
- name: copy crt file(s) to gateway nodes
copy:
- name: include non-container/prerequisites.yml
include_tasks: non-container/prerequisites.yml
- when:
- - not containerized_deployment
+ when: not containerized_deployment
# deploy_ssl_keys uses the ansible controller to create self-signed crt/key/pub files
# and transfers them to the /etc/ceph directory on each gateway node. The SSL certs are
# used by the API for https support (an illustrative sketch follows the include below).
- name: include deploy_ssl_keys.yml
include_tasks: deploy_ssl_keys.yml
- when:
- - generate_crt|bool
+ when: generate_crt|bool
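# Illustrative only, not the contents of deploy_ssl_keys.yml: a self-signed
# crt/key pair could be produced on the controller with a task along these
# lines (the CN, paths and file names are assumptions for the example):
#   - name: example - generate a self-signed certificate on the ansible controller
#     command: >
#       openssl req -newkey rsa:2048 -nodes -x509 -days 365
#       -subj "/CN=iscsi-gateway"
#       -keyout /tmp/iscsi-gateway.key -out /tmp/iscsi-gateway.crt
#     delegate_to: localhost
#     run_once: true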
- name: include non-container/configure_iscsi.yml
include_tasks: non-container/configure_iscsi.yml
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: include containerized.yml
include_tasks: container/containerized.yml
- when:
- - containerized_deployment
+ when: containerized_deployment
---
- name: red hat based systems tasks
- when:
- - ansible_os_family == 'RedHat'
+ when: ansible_os_family == 'RedHat'
block:
- name: when ceph_iscsi_config_dev is true
when:
set_fact:
admin_keyring:
- "/etc/ceph/{{ cluster }}.client.admin.keyring"
- when:
- - copy_admin_key
+ when: copy_admin_key
- name: set_fact ceph_config_keys
set_fact:
- name: merge ceph_config_keys and admin_keyring
set_fact:
ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
- when:
- - copy_admin_key
+ when: copy_admin_key
- name: stat for ceph config and keys
stat:
owner: "root"
group: "root"
mode: "0644"
- notify:
- - restart ceph mdss
+ notify: restart ceph mdss
- name: systemd start mds container
systemd:
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
- when:
- - item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+ when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- name: customize pool min_size
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
- when:
- - (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
+ when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- name: check if ceph filesystem already exists
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - check_existing_cephfs.rc != 0
+ when: check_existing_cephfs.rc != 0
- name: assign application to cephfs pools
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
- "{{ cephfs_metadata }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - check_existing_cephfs.rc != 0
+ when: check_existing_cephfs.rc != 0
- name: set max_mds
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - mds_max_mds > 1
+ when: mds_max_mds > 1
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: include common.yml
include_tasks: common.yml
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- when:
- - groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)"
+ when: groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)"
- name: copy ceph keyring(s) if needed
copy:
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
- when:
- - cephx
\ No newline at end of file
+ when: cephx
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: include common.yml
include_tasks: common.yml
when:
- ceph_mgr_modules | length > 0
- ((groups[mgr_group_name] | default([]) | length == 0 and inventory_hostname == groups[mon_group_name] | last) or
- (groups[mgr_group_name] | default([]) | length > 0 and inventory_hostname == groups[mgr_group_name] | last))
\ No newline at end of file
+ (groups[mgr_group_name] | default([]) | length > 0 and inventory_hostname == groups[mgr_group_name] | last))
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
with_items: "{{ _ceph_mgr_modules.get('enabled_modules', []) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - item not in ceph_mgr_modules
+ when: item not in ceph_mgr_modules
- name: add modules to ceph-mgr
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
with_items: "{{ ceph_mgr_modules }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
\ No newline at end of file
+ when: (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when:
- - ansible_os_family in ['RedHat', 'Suse']
+ when: ansible_os_family in ['RedHat', 'Suse']
- name: install ceph-mgr packages for debian
apt:
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else '' }}"
register: result
until: result is succeeded
- when:
- - ansible_os_family == 'Debian'
+ when: ansible_os_family == 'Debian'
owner: "root"
group: "root"
mode: "0644"
- when:
- - containerized_deployment
- notify:
- - restart ceph mgrs
+ when: containerized_deployment
+ notify: restart ceph mgrs
- name: systemd start mgr
systemd:
changed_when: false
- name: tasks for MONs when cephx is enabled
- when:
- - cephx
+ when: cephx
block:
- name: fetch ceph initial keys
ceph_key:
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- with_items:
- - "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
+ with_items: "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
run_once: True
delegate_to: "{{ groups[mon_group_name][0] }}"
src: "{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
dest: "{{ fetch_directory }}/{{ fsid }}/{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
flat: yes
- with_items:
- - "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
+ with_items: "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: copy keys to the ansible server
- /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring
- /etc/ceph/{{ cluster }}.client.admin.keyring
- when:
- - inventory_hostname == groups[mon_group_name] | last
+ when: inventory_hostname == groups[mon_group_name] | last
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush rule create-simple {{ item.name }} {{ item.root }} {{ item.type }}"
with_items: "{{ crush_rules | unique }}"
changed_when: false
- when:
- - inventory_hostname == groups.get(mon_group_name) | last
+ when: inventory_hostname == groups.get(mon_group_name) | last
- name: get id for new default crush rule
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"
--keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
- when:
- - cephx
+ when: cephx
- name: ceph monitor mkfs without keyring
command: >
--fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
- when:
- - not cephx
+ when: not cephx
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: include deploy_monitors.yml
include_tasks: deploy_monitors.yml
- name: include_tasks ceph_keys.yml
include_tasks: ceph_keys.yml
- when:
- - not switch_to_containers | default(False)
+ when: not switch_to_containers | default(False)
- name: include secure_cluster.yml
include_tasks: secure_cluster.yml
- name: crush_rules.yml
include_tasks: crush_rules.yml
- when:
- - crush_rule_config
+ when: crush_rule_config
+
owner: "root"
group: "root"
mode: "0644"
- notify:
- - restart ceph mons
+ notify: restart ceph mons
when: containerized_deployment
- name: start the monitor service
- name: set_fact docker_exec_cmd_nfs
set_fact:
docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: check if "{{ ceph_nfs_rgw_user }}" exists
command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
changed_when: false
failed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - nfs_obj_gw
+ when: nfs_obj_gw
- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
state: present
register: result
until: result is succeeded
- when:
- - selinuxstatus.stdout != 'Disabled'
+ when: selinuxstatus.stdout != 'Disabled'
- name: test if ganesha_t is already permissive
shell: |
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: include common.yml
include_tasks: common.yml
- name: include pre_requisite_non_container.yml
include_tasks: pre_requisite_non_container.yml
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: include pre_requisite_container.yml
include_tasks: pre_requisite_container.yml
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: include create_rgw_nfs_user.yml
import_tasks: create_rgw_nfs_user.yml
set_fact:
admin_keyring:
- "/etc/ceph/{{ cluster }}.client.admin.keyring"
- when:
- - copy_admin_key
+ when: copy_admin_key
- name: set_fact ceph_config_keys
set_fact:
- name: merge ceph_config_keys and admin_keyring
set_fact:
ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
- when:
- - copy_admin_key
+ when: copy_admin_key
- name: stat for config and keys
stat:
with_together:
- "{{ ceph_config_keys }}"
- "{{ statconfig.results }}"
- when:
- - item.1.stat.exists
+ when: item.1.stat.exists
- name: create dbus service file
become: true
owner: "root"
group: "root"
mode: "0644"
- when:
- - ceph_nfs_dynamic_exports
+ when: ceph_nfs_dynamic_exports
- name: reload dbus configuration
command: "killall -SIGHUP dbus-daemon"
- when:
- - ceph_nfs_dynamic_exports
+ when: ceph_nfs_dynamic_exports
---
- name: include red hat based system related tasks
include_tasks: pre_requisite_non_container_red_hat.yml
- when:
- - ansible_os_family == 'RedHat'
+ when: ansible_os_family == 'RedHat'
- name: include debian based system related tasks
include_tasks: pre_requisite_non_container_debian.yml
- when:
- - ansible_os_family == 'Debian'
+ when: ansible_os_family == 'Debian'
- name: install nfs rgw/cephfs gateway - suse
zypper:
- { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
- { name: "/var/log/ceph", create: true }
- { name: "/var/run/ceph", create: true }
- when:
- - item.create|bool
+ when: item.create|bool
- name: cephx related tasks
- when:
- - cephx
+ when: cephx
block:
- name: copy bootstrap cephx keys
copy:
owner: "ceph"
group: "ceph"
mode: "0600"
- with_items:
- - { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
- when:
- - item.copy_key|bool
+ with_items: { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
+ when: item.copy_key|bool
- name: nfs object gateway related tasks
- when:
- - nfs_obj_gw
+ when: nfs_obj_gw
block:
- name: create rados gateway keyring
command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
---
- name: debian based systems - repo handling
- when:
- - ceph_origin == 'repository'
+ when: ceph_origin == 'repository'
block:
- name: stable repos specific tasks
- when:
- - nfs_ganesha_stable
+ when: nfs_ganesha_stable
- ceph_repository == 'community'
block:
- name: add nfs-ganesha stable repository
retries: 5
delay: 2
until: update_ganesha_apt_cache | success
- when:
- - add_ganesha_apt_repo | changed
+ when: add_ganesha_apt_repo is changed
- name: debian based systems - dev repos specific tasks
when:
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when:
- - nfs_file_gw
+ when: nfs_file_gw
- name: install red hat storage nfs obj gateway
apt:
name: nfs-ganesha-rgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when:
- - nfs_obj_gw
+ when: nfs_obj_gw
---
- name: red hat based systems - repo handling
- when:
- - ceph_origin == 'repository'
+ when: ceph_origin == 'repository'
block:
- name: add nfs-ganesha stable repository
yum_repository:
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when:
- - nfs_file_gw
+ when: nfs_file_gw
- name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
package:
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when:
- - nfs_obj_gw
+ when: nfs_obj_gw
- name: set_fact docker_exec_cmd_nfs
set_fact:
docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: check if rados index object exists
shell: "{{ docker_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
failed_when: false
register: rados_index_exists
check_mode: no
- when:
- - ceph_nfs_rados_backend
+ when: ceph_nfs_rados_backend
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
group: "root"
mode: "0644"
config_type: ini
- notify:
- - restart ceph nfss
+ notify: restart ceph nfss
- name: create exports directory
file:
owner: "root"
group: "root"
mode: "0755"
- when:
- - ceph_nfs_dynamic_exports
+ when: ceph_nfs_dynamic_exports
- name: create exports dir index file
copy:
owner: "root"
group: "root"
mode: "0644"
- when:
- - ceph_nfs_dynamic_exports
+ when: ceph_nfs_dynamic_exports
- name: generate systemd unit file
become: true
owner: "root"
group: "root"
mode: "0644"
- when:
- - containerized_deployment
- notify:
- - restart ceph nfss
+ when: containerized_deployment
+ notify: restart ceph nfss
- name: systemd start nfs container
systemd:
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
- when:
- - cephx
+ when: cephx
with_items:
- /var/lib/ceph/bootstrap-osd/
- /var/lib/ceph/osd/
when:
- containerized_deployment
- ceph_osd_numactl_opts != ""
- tags:
- - with_pkg
+ tags: with_pkg
- name: install lvm2
package:
name: lvm2
register: result
until: result is succeeded
- when:
- - not is_atomic
- tags:
- - with_pkg
+ when: not is_atomic
+ tags: with_pkg
- name: include_tasks common.yml
include_tasks: common.yml
- "{{ created_pools.results }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - item.1.get('rc', 0) != 0
+ when: item.1.get('rc', 0) != 0
- name: customize pool size
command: >
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
- when:
- - item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+ when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- name: customize pool min_size
command: >
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
- when:
- - (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
+ when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- name: assign application to pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ openstack_pools | unique }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - item.application is defined
+ when: item.application is defined
- name: create openstack cephx key(s)
ceph_key:
when:
- cephx
- openstack_config
- - item.0 != groups[mon_group_name]
\ No newline at end of file
+ - item.0 != groups[mon_group_name]
---
- name: container specific tasks
- when:
- - containerized_deployment
+ when: containerized_deployment
block:
- name: umount ceph disk (if on openstack)
mount:
src: /dev/vdb
fstype: ext3
state: unmounted
- when:
- - ceph_docker_on_openstack
+ when: ceph_docker_on_openstack
- name: generate ceph osd docker run script
become: true
group: "root"
mode: "0744"
setype: "bin_t"
- notify:
- - restart ceph osds
+ notify: restart ceph osds
# this is for ceph-disk created OSDs: the ceph-disk command is gone, so we have to list /var/lib/ceph instead (see the sketch below)
- name: get osd ids
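# Illustrative only: listing the OSD data directories is enough to recover the
# ids, e.g. /var/lib/ceph/osd/ceph-0 and /var/lib/ceph/osd/ceph-3 yield "0" and
# "3". A minimal sketch of such a task (names assumed, not part of this change):
#   - name: example - derive osd ids from /var/lib/ceph
#     shell: ls /var/lib/ceph/osd/ | sed 's/.*-//'
#     register: osd_ids_example
#     changed_when: false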
owner: "root"
group: "root"
mode: "0644"
- notify:
- - restart ceph osds
- when:
- - containerized_deployment
+ notify: restart ceph osds
+ when: containerized_deployment
- name: systemd start osd
systemd:
group: "root"
mode: "0755"
register: "tmpfiles_d"
- when:
- - disable_transparent_hugepage
+ when: disable_transparent_hugepage
- name: disable transparent hugepage
template:
mode: "0644"
force: "yes"
validate: "systemd-tmpfiles --create %s"
- when:
- - disable_transparent_hugepage
+ when: disable_transparent_hugepage
- name: get default vm.min_free_kbytes
command: sysctl -b vm.min_free_kbytes
-o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
args:
creates: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: set rbd-mirror key permissions
file:
owner: "ceph"
group: "ceph"
mode: "{{ ceph_keyring_permissions }}"
- when:
- - not containerized_deployment
\ No newline at end of file
+ when: not containerized_deployment
owner: "root"
group: "root"
mode: "0644"
- notify:
- - restart ceph rbdmirrors
+ notify: restart ceph rbdmirrors
- name: systemd start rbd mirror container
systemd:
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
- when:
- - containerized_deployment
+ when: containerized_deployment
- name: include pre_requisite.yml
include_tasks: pre_requisite.yml
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: include common.yml
include_tasks: common.yml
- when:
- - cephx
+ when: cephx
- name: include start_rbd_mirror.yml
include_tasks: start_rbd_mirror.yml
- when:
- - not containerized_deployment
+ when: not containerized_deployment
- name: include configure_mirroring.yml
include_tasks: configure_mirroring.yml
- name: include docker/main.yml
include_tasks: docker/main.yml
- when:
- - containerized_deployment
+ when: containerized_deployment
state: present
register: result
until: result is succeeded
- tags:
- - package-install
\ No newline at end of file
+ tags: package-install
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
- with_items:
- - "{{ rbd_client_admin_socket_path }}"
+ with_items: "{{ rbd_client_admin_socket_path }}"
- name: create rados gateway instance directories
file:
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
with_items: "{{ rgw_instances }}"
- when:
- - rgw_instances is defined
+ when: rgw_instances is defined
- name: copy ceph keyring(s) if needed
copy:
owner: "root"
group: "root"
mode: "0644"
- notify:
- - restart ceph rgws
+ notify: restart ceph rgws
- name: systemd start rgw container
systemd:
when: rgw_multisite
- name: rgw pool related tasks
- when:
- - rgw_create_pools is defined
+ when: rgw_create_pools is defined
block:
- name: create rgw pools if rgw_create_pools is defined
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
run_once: true
register: result
until: result is succeeded
- when:
- - item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+ when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
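# Illustrative only: rgw_create_pools is expected to be a dict keyed by pool
# name, which is why the tasks above use item.key / item.value, e.g. (pool
# names and values below are made-up examples):
#   rgw_create_pools:
#     defaults.rgw.buckets.data:
#       pg_num: 64
#       size: 3
#     defaults.rgw.buckets.index:
#       pg_num: 16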
failed_when: false
register: rgw_remove_zone_from_zonegroup
changed_when: rgw_remove_zone_from_zonegroup.rc == 0
- notify:
- - update period
+ notify: update period
- name: delete the zone
command: radosgw-admin zone delete --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }}
when:
- rgw_zone is defined
- rgw_zonegroup is defined
- notify:
- - restart rgw
+ notify: restart rgw
section: "client.rgw.{{ ansible_hostname }}"
option: "rgw_zone"
value: "{{ rgw_zone }}"
- notify:
- - restart rgw
+ notify: restart rgw
command: "{{ docker_exec_cmd }} radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - "'No such file or directory' in realmcheck.stderr"
+ when: "'No such file or directory' in realmcheck.stderr"
- name: create the zonegroup
command: "{{ docker_exec_cmd }} radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --master --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - "'No such file or directory' in zonegroupcheck.stderr"
+ when: "'No such file or directory' in zonegroupcheck.stderr"
- name: create the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - "'No such file or directory' in zonecheck.stderr"
+ when: "'No such file or directory' in zonecheck.stderr"
- name: create the zone user
command: "{{ docker_exec_cmd }} radosgw-admin user create --uid={{ rgw_zone_user }} --display-name=\"Zone User\" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - "'could not fetch user info: no user info saved' in usercheck.stderr"
- notify:
- - update period
+ when: "'could not fetch user info: no user info saved' in usercheck.stderr"
+ notify: update period
- name: add other endpoints to the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - rgw_multisite_endpoints_list is defined
- notify:
- - update period
+ when: rgw_multisite_endpoints_list is defined
+ notify: update period
command: "{{ docker_exec_cmd }} radosgw-admin realm pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - "'No such file or directory' in realmcheck.stderr"
+ when: "'No such file or directory' in realmcheck.stderr"
- name: fetch the period
command: "{{ docker_exec_cmd }} radosgw-admin period pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - "'No such file or directory' in realmcheck.stderr"
+ when: "'No such file or directory' in realmcheck.stderr"
- name: set default realm
command: "{{ docker_exec_cmd }} radosgw-admin realm default --rgw-realm={{ rgw_realm }}"
command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - "'No such file or directory' in zonecheck.stderr"
- notify:
- - update period
+ when: "'No such file or directory' in zonecheck.stderr"
+ notify: update period
- name: add other endpoints to the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- when:
- - rgw_multisite_endpoints_list is defined
- notify:
- - update period
+ when: rgw_multisite_endpoints_list is defined
+ notify: update period
state: present
register: result
until: result is succeeded
- when:
- - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
+ when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
- name: install libnss3-tools on debian
package:
state: present
register: result
until: result is succeeded
- when:
- - ansible_pkg_mgr == 'apt'
+ when: ansible_pkg_mgr == 'apt'
- name: create nss directory for keystone certificates
file:
with_items:
- "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | certutil -d {{ radosgw_nss_db_path }} -A -n ca -t 'TCu,Cu,Tuw'"
- "openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | certutil -A -d {{ radosgw_nss_db_path }} -n signing_cert -t 'P,P,P'"
- tags:
- - skip_ansible_lint
+ tags: skip_ansible_lint
creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring
changed_when: false
with_items: "{{ rgw_instances }}"
- when:
- - cephx
+ when: cephx
- name: set rados gateway instance key permissions
file:
group: "ceph"
mode: "0600"
with_items: "{{ rgw_instances }}"
- when:
- - cephx
+ when: cephx
file:
state: directory
path: "/etc/systemd/system/ceph-radosgw@.service.d/"
- when:
- - ceph_rgw_systemd_overrides is defined
+ when: ceph_rgw_systemd_overrides is defined
- name: add ceph-rgw systemd service overrides
config_template:
dest: "/etc/systemd/system/ceph-radosgw@.service.d/ceph-radosgw-systemd-overrides.conf"
config_overrides: "{{ ceph_rgw_systemd_overrides | default({}) }}"
config_type: "ini"
- when:
- - ceph_rgw_systemd_overrides is defined
+ when: ceph_rgw_systemd_overrides is defined
- name: start rgw instance
service:
---
- name: devices validation
- when:
- - devices is defined
+ when: devices is defined
block:
- name: validate devices is actually a device
parted:
- name: fail if one of the devices is not a device
fail:
msg: "{{ item }} is not a block special file!"
- when:
- - item.failed
+ when: item.failed
with_items: "{{ devices_parted.results }}"
- name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}"
fail:
msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}"
- when:
- - monitor_interface not in ansible_interfaces
+ when: monitor_interface not in ansible_interfaces
- name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}"
fail:
msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}"
- when:
- - not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active']
+ when: not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active']
- name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
fail:
- name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
fail:
msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
- when:
- - radosgw_interface not in ansible_interfaces
+ when: radosgw_interface not in ansible_interfaces
- name: "fail if {{ radosgw_interface }} is not active on {{ inventory_hostname }}"
fail:
msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}"
- when:
- - hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active'] == "false"
+ when: not hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active']
- name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
fail:
- name: "fail if {{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
fail:
msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
- when:
- - hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[inventory_hostname]['monitor_address_block']) | length == 0
+ when: hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[inventory_hostname]['monitor_address_block']) | length == 0
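# Illustrative only: the ipaddr filter above keeps only the addresses that fall
# inside monitor_address_block. For example (made-up values), with
# monitor_address_block: 192.168.0.0/24 and ansible_all_ipv4_addresses
# ['10.0.0.5', '192.168.0.11'], the filter returns ['192.168.0.11'], so the
# check passes; an empty result triggers the failure.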
- name: fail on unsupported system
fail:
msg: "System not supported {{ ansible_system }}"
- when:
- - ansible_system not in ['Linux']
+ when: ansible_system not in ['Linux']
- name: fail on unsupported architecture
fail:
msg: "Architecture not supported {{ ansible_architecture }}"
- when:
- - ansible_architecture not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
+ when: ansible_architecture not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
- name: fail on unsupported distribution
fail:
msg: "Distribution not supported {{ ansible_os_family }}"
- when:
- - ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
+ when: ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
- name: red hat based systems tasks
when:
- name: fail on unsupported distribution for red hat ceph storage
fail:
msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL >= 7.3"
- when:
- - ansible_distribution_version | version_compare('7.3', '<')
+ when: ansible_distribution_version | version_compare('7.3', '<')
- name: subscription manager related tasks
- when:
- - ceph_repository_type == 'cdn'
+ when: ceph_repository_type == 'cdn'
block:
- name: determine if node is registered with subscription-manager
command: subscription-manager identity
- name: fail on unregistered red hat rhcs linux
fail:
msg: "You must register your machine with subscription-manager"
- when:
- - subscription.rc != '0'
+ when: subscription.rc != 0
- name: fail on unsupported distribution for ubuntu cloud archive
fail:
- name: fail on unsupported ansible version (1.X)
fail:
msg: "Ansible version must be >= 2.7.x, please update!"
- when:
- - ansible_version.major|int < 2
+ when: ansible_version.major|int < 2
- name: fail on unsupported ansible version
fail:
- name: fail if systemd is not present
fail:
msg: "Systemd must be present"
- when:
- - ansible_service_mgr != 'systemd'
+ when: ansible_service_mgr != 'systemd'
- name: check that iscsi gateways target a supported distro and version
block:
- name: fail on unsupported distribution for iscsi gateways
fail:
msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
- when:
- - ansible_distribution not in ['RedHat', 'CentOS', 'Fedora']
+ when: ansible_distribution not in ['RedHat', 'CentOS', 'Fedora']
- name: fail on unsupported distribution version for iscsi gateways
fail:
when:
- ansible_distribution_version < '7.4'
- ansible_distribution in ['RedHat', 'CentOS']
- when:
- - iscsi_gw_group_name in group_names
+ when: iscsi_gw_group_name in group_names
- not use_fqdn_yes_i_am_sure
- name: debian based systems tasks
- when:
- - ansible_os_family == 'Debian'
+ when: ansible_os_family == 'Debian'
block:
- name: fail if local scenario is enabled on debian
fail:
msg: "'local' installation scenario not supported on Debian systems"
- when:
- - ceph_origin == 'local'
+ when: ceph_origin == 'local'
- name: verify that ceph_rhcs_cdn_debian_repo url is valid for red hat storage
fail:
msg: "ceph_rhcs_cdn_debian_repo url is invalid, please set your customername:customerpasswd"
- name: include check_iscsi.yml
include_tasks: check_iscsi.yml
- when:
- - iscsi_gw_group_name in group_names
+ when: iscsi_gw_group_name in group_names
- name: warn about radosgw_civetweb_num_threads option deprecation
debug:
# pre-tasks for the following import
- name: gather facts
setup:
- when:
- - not delegate_facts_host | bool
+ when: not delegate_facts_host | bool
- name: gather and delegate facts
setup:
delegate_facts: True
with_items: "{{ groups['all'] }}"
run_once: true
- when:
- - delegate_facts_host | bool
+ when: delegate_facts_host | bool
- name: check if it is atomic host
stat:
path: /run/ostree-booted
register: stat_ostree
- tags:
- - always
+ tags: always
- name: set_fact is_atomic
set_fact:
is_atomic: '{{ stat_ostree.stat.exists }}'
- tags:
- - always
+ tags: always
- name: check if podman binary is present
stat:
name: ceph-handler
- import_role:
name: ceph-container-common
- when:
- - inventory_hostname == groups.get('clients', ['']) | first
+ when: inventory_hostname == groups.get('clients', ['']) | first
- import_role:
name: ceph-config
tags: ['ceph_update_config']
any_errors_fatal: true
become: true
- tags:
- - always
+ tags: always
vars:
delegate_facts_host: True
- name: gather facts
setup:
- when:
- - not delegate_facts_host | bool
+ when: not delegate_facts_host | bool
- name: gather and delegate facts
setup:
delegate_facts: True
with_items: "{{ groups['all'] }}"
run_once: true
- when:
- - delegate_facts_host | bool
+ when: delegate_facts_host | bool
- name: install required packages for fedora > 23
raw: sudo dnf -y install python2-dnf libselinux-python ntp
stat:
path: /run/ostree-booted
register: stat_ostree
- tags:
- - always
+ tags: always
- name: set_fact is_atomic
set_fact:
is_atomic: '{{ stat_ostree.stat.exists }}'
- tags:
- - always
+ tags: always
tasks:
- import_role:
- hosts: localhost
gather_facts: false
become: no
- tags:
- - vagrant_setup
+ tags: vagrant_setup
tasks:
- name: print contents of {{ group_vars_path }}
command: "cat {{ group_vars_path }}"
- when: dev_setup
\ No newline at end of file
+ when: dev_setup
stat:
path: /run/ostree-booted
register: stat_ostree
- tags:
- - always
+ tags: always
- name: set_fact is_atomic
set_fact:
is_atomic: '{{ stat_ostree.stat.exists }}'
- tags:
- - always
+ tags: always
# Some images may not have lvm2 installed
- name: install lvm2
package:
state: present
register: result
until: result is succeeded
- when:
- - not is_atomic
+ when: not is_atomic
- name: create physical volume
command: pvcreate /dev/sdb
failed_when: false
state: present
register: result
until: result is succeeded
- when:
- - not is_atomic
+ when: not is_atomic
- name: generate and upload a random 10Mb file - containerized deployment
command: >
- hosts: localhost
gather_facts: false
become: yes
- tags:
- - vagrant_setup
+ tags: vagrant_setup
tasks:
- name: change centos/7 vagrant box name to rhel7
baseurl: "{{ repo_url }}/MON/x86_64/os/"
gpgcheck: no
enabled: yes
- when:
- - not is_atomic
+ when: not is_atomic
- hosts: osds
gather_facts: false
baseurl: "{{ repo_url }}/OSD/x86_64/os/"
gpgcheck: no
enabled: yes
- when:
- - not is_atomic
+ when: not is_atomic
- name: set MTU on eth2
command: "ifconfig eth2 mtu 1400 up"
baseurl: "{{ repo_url }}/Tools/x86_64/os/"
gpgcheck: no
enabled: yes
- when:
- - not is_atomic
+ when: not is_atomic
state: present
register: result
until: result is succeeded
- when:
- - not is_atomic
+ when: not is_atomic
- name: centos based systems - configure repos
block: