The `always_run` key is deprecated and will be removed in Ansible 2.4.
Using it causes a deprecation warning to be displayed:

    [DEPRECATION WARNING]: always_run is deprecated.

This patch changes all instances of `always_run` to the `always` tag,
which ensures the task runs no matter which tags are passed on the
command line (unless it is skipped explicitly with `--skip-tags always`).
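
A minimal sketch of the conversion pattern, using an illustrative task
(not one of the tasks changed below):

    # before: deprecated key, warns on every run
    - name: check for an existing ceph config
      stat:
        path: /etc/ceph/ceph.conf
      register: ceph_conf_stat
      always_run: true

    # after: the task still runs when the play is limited to other tags
    - name: check for an existing ceph config
      stat:
        path: /etc/ceph/ceph.conf
      register: ceph_conf_stat
      tags:
        - always

With the tag in place, a tag-filtered run such as
`ansible-playbook site.yml --tags package-install` (tag name illustrative)
still executes these tasks. One caveat: `always_run` also forced a task to
execute for real under `--check`, which the `always` tag does not do; the
replacement Ansible suggests for that aspect is `check_mode: no`.
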
raw: stat $HOME/.python
register: need_python
ignore_errors: true
- always_run: true
+ tags:
+ - always
- include: install_pypy.yml
when: need_python | failed
raw: stat $HOME/.pip
register: need_pip
ignore_errors: true
- always_run: true
+ tags:
+ - always
- include: install_pip.yml
when: need_pip | failed and need_python | failed
failed_when: false
register: nmapexist
run_once: true
- always_run: true
+ tags:
+ - always
- name: inform that nmap is not present
debug:
changed_when: false
failed_when: false
register: monportstate
- always_run: true
when:
- mon_group_name in group_names
- nmapexist.rc == 0
+ tags:
+ - always
- name: fail if monitor port is filtered
fail:
changed_when: false
failed_when: false
register: osdrangestate
- always_run: true
when:
- osd_group_name in group_names
- nmapexist.rc == 0
+ tags:
+ - always
- name: fail if osd and mds range is filtered (osd hosts)
fail:
changed_when: false
failed_when: false
register: mdsrangestate
- always_run: true
when:
- mds_group_name in group_names
- nmapexist.rc == 0
+ tags:
+ - always
- name: fail if osd and mds range is filtered (mds hosts)
fail:
changed_when: false
failed_when: false
register: rgwportstate
- always_run: true
when:
- rgw_group_name in group_names
- nmapexist.rc == 0
+ tags:
+ - always
- name: fail if rados gateway port is filtered
fail:
changed_when: false
failed_when: false
register: nfsportstate
- always_run: true
when:
- nfs_group_name in group_names
- nmapexist.rc == 0
+ tags:
+ - always
- name: fail if NFS ports are filtered
fail:
command: rpm -q chrony
register: ntp_pkg_query
ignore_errors: true
- always_run: true
changed_when: false
+ tags:
+ - always
command: dpkg -s ntp
register: ntp_pkg_query
ignore_errors: true
- always_run: true
changed_when: false
when:
- ansible_os_family == 'Debian'
+ tags:
+ - always
- name: install ntp on debian
package:
command: rpm -q ntp
register: ntp_pkg_query
ignore_errors: true
- always_run: true
changed_when: false
when:
- ansible_os_family == 'RedHat'
+ tags:
+ - always
- name: install ntp on redhat
package:
register: subscription
changed_when: false
failed_when: false
- always_run: true
when:
- ansible_distribution == 'Red Hat Enterprise Linux'
- ceph_repository == 'rhcs'
- ceph_repository_type == 'cdn'
+ tags:
+ - always
- name: fail on unregistered red hat rhcs linux
fail:
stat:
path: /etc/default/ceph
register: etc_default_ceph
- always_run: true
when:
- ansible_os_family == "Debian"
+ tags:
+ - always
- name: configure cluster name
lineinfile:
changed_when: false
failed_when: false
register: rhcs_mon_repo
- always_run: true
when:
- mon_group_name in group_names
+ tags:
+ - always
- name: enable red hat storage monitor repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms
changed_when: false
failed_when: false
register: rhcs_osd_repo
- always_run: true
when:
- osd_group_name in group_names
+ tags:
+ - always
- name: enable red hat storage osd repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms
changed_when: false
failed_when: false
register: rhcs_rgw_mds_nfs_repo
- always_run: true
when:
- (rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names)
+ tags:
+ - always
- name: enable red hat storage rados gateway / mds repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms
- name: get ceph version
command: ceph --version
changed_when: false
- always_run: yes
register: ceph_version
+ tags:
+ - always
- name: set_fact ceph_version
set_fact:
command: sysctl -b vm.min_free_kbytes
changed_when: false
failed_when: false
- always_run: yes
register: default_vm_min_free_kbytes
+ tags:
+ - always
- name: set_fact vm_min_free_kbytes
set_fact:
removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
changed_when: false
register: cluster_uuid
- always_run: true
become: false
when:
- generate_fsid
+ tags:
+ - always
- name: ensure /etc/ceph exists
file:
register: ceph_mon_container_stat
changed_when: false
failed_when: false
- always_run: true
when:
- inventory_hostname in groups.get(mon_group_name, [])
+ tags:
+ - always
- name: check for an osd container
command: "docker ps -q --filter='name=ceph-osd-{{ ansible_hostname }}'"
register: ceph_osd_container_stat
changed_when: false
failed_when: false
- always_run: true
when:
- inventory_hostname in groups.get(osd_group_name, [])
+ tags:
+ - always
- name: check for a mds container
command: "docker ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
register: ceph_mds_container_stat
changed_when: false
failed_when: false
- always_run: true
when:
- inventory_hostname in groups.get(mds_group_name, [])
+ tags:
+ - always
- name: check for a rgw container
command: "docker ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
register: ceph_rgw_container_stat
changed_when: false
failed_when: false
- always_run: true
when:
- inventory_hostname in groups.get(rgw_group_name, [])
+ tags:
+ - always
- name: check for a mgr container
command: "docker ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
register: ceph_mgr_container_stat
changed_when: false
failed_when: false
- always_run: true
when:
- inventory_hostname in groups.get(mgr_group_name, [])
+ tags:
+ - always
- name: check for a rbd mirror container
command: "docker ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
register: ceph_rbd_mirror_container_stat
changed_when: false
failed_when: false
- always_run: true
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
+ tags:
+ - always
- name: check for a nfs container
command: "docker ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
register: ceph_nfs_container_stat
changed_when: false
failed_when: false
- always_run: true
when:
- inventory_hostname in groups.get(nfs_group_name, [])
+ tags:
+ - always
shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok
changed_when: false
failed_when: false
- always_run: true
register: mon_socket_stat
when:
- inventory_hostname in groups.get(mon_group_name, [])
+ tags:
+ - always
- name: check if the ceph mon socket is in-use
command: fuser --silent {{ mon_socket_stat.stdout }}
changed_when: false
failed_when: false
- always_run: true
register: mon_socket
when:
- inventory_hostname in groups.get(mon_group_name, [])
- mon_socket_stat.rc == 0
+ tags:
+ - always
- name: remove ceph mon socket if exists and not used by a process
file:
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-osd*.asok
changed_when: false
failed_when: false
- always_run: true
register: osd_socket_stat
when:
- inventory_hostname in groups.get(osd_group_name, [])
+ tags:
+ - always
- name: check if the ceph osd socket is in-use
command: fuser --silent {{ osd_socket_stat.stdout }}
changed_when: false
failed_when: false
- always_run: true
register: osd_socket
when:
- inventory_hostname in groups.get(osd_group_name, [])
- osd_socket_stat.rc == 0
+ tags:
+ - always
- name: remove ceph osd socket if exists and not used by a process
file:
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mds*.asok
changed_when: false
failed_when: false
- always_run: true
register: mds_socket_stat
when:
- inventory_hostname in groups.get(mds_group_name, [])
+ tags:
+ - always
- name: check if the ceph mds socket is in-use
command: fuser --silent {{ mds_socket_stat.stdout }}
changed_when: false
failed_when: false
- always_run: true
register: mds_socket
when:
- inventory_hostname in groups.get(mds_group_name, [])
- mds_socket_stat.rc == 0
+ tags:
+ - always
- name: remove ceph mds socket if exists and not used by a process
file:
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rgw*.asok
changed_when: false
failed_when: false
- always_run: true
register: rgw_socket_stat
when:
- inventory_hostname in groups.get(rgw_group_name, [])
+ tags:
+ - always
- name: check if the ceph rgw socket is in-use
command: fuser --silent {{ rgw_socket_stat.stdout }}
changed_when: false
failed_when: false
- always_run: true
register: rgw_socket
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_socket_stat.rc == 0
+ tags:
+ - always
- name: remove ceph rgw socket if exists and not used by a process
file:
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mgr*.asok
changed_when: false
failed_when: false
- always_run: true
register: mgr_socket_stat
when:
- inventory_hostname in groups.get(mgr_group_name, [])
+ tags:
+ - always
- name: check if the ceph mgr socket is in-use
command: fuser --silent {{ mgr_socket_stat.stdout }}
changed_when: false
failed_when: false
- always_run: true
register: mgr_socket
when:
- inventory_hostname in groups.get(mgr_group_name, [])
- mgr_socket_stat.rc == 0
+ tags:
+ - always
- name: remove ceph mgr socket if exists and not used by a process
file:
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rbd-mirror*.asok
changed_when: false
failed_when: false
- always_run: true
register: rbd_mirror_socket_stat
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
+ tags:
+ - always
- name: check if the ceph rbd mirror socket is in-use
command: fuser --silent {{ rbd_mirror_socket_stat.stdout }}
changed_when: false
failed_when: false
- always_run: true
register: rbd_mirror_socket
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
- rbd_mirror_socket_stat.rc == 0
+ tags:
+ - always
- name: remove ceph rbd mirror socket if exists and not used by a process
file:
command: stat --printf=%n /var/run/ganesha.pid
changed_when: false
failed_when: false
- always_run: true
register: nfs_socket_stat
when:
- inventory_hostname in groups.get(nfs_group_name, [])
+ tags:
+ - always
- name: check if the ceph nfs ganesha socket is in-use
command: fuser --silent {{ nfs_socket_stat.stdout }}
changed_when: false
failed_when: false
- always_run: true
register: nfs_socket
when:
- inventory_hostname in groups.get(nfs_group_name, [])
- nfs_socket_stat.rc == 0
+ tags:
+ - always
- name: remove ceph nfs ganesha socket if exists and not used by a process
file:
command: "{{ docker_exec_cmd }} ceph --connect-timeout 3 --cluster {{ cluster }} fsid"
changed_when: false
failed_when: false
- always_run: yes
register: ceph_current_fsid
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- not rolling_update
- groups.get(mon_group_name, []) | length > 0
+ tags:
+ - always
# We want this check to be run only on the first node
- name: check if {{ fetch_directory }} directory exists
changed_when: false
register: cluster_uuid
become: false
- always_run: true
when:
- generate_fsid
+ tags:
+ - always
- name: set_fact fsid
set_fact:
command: rpm -q chrony
register: ntp_pkg_query
ignore_errors: true
- always_run: true
changed_when: false
+ tags:
+ - always
command: dpkg -s ntp
register: ntp_pkg_query
ignore_errors: true
- always_run: true
changed_when: false
when:
- ansible_os_family == 'Debian'
+ tags:
+ - always
- name: install ntp on debian
package:
command: rpm -q ntp
register: ntp_pkg_query
ignore_errors: true
- always_run: true
changed_when: false
when:
- ansible_os_family == 'RedHat'
+ tags:
+ - always
- name: install ntp on redhat
package:
stat:
path: /run/ostree-booted
register: stat_ostree
- always_run: true
+ tags:
+ - always
- name: set_fact is_atomic
set_fact:
- name: get docker version
command: docker --version
changed_when: false
- always_run: yes
register: docker_version
+ tags:
+ - always
- name: set_fact docker_version docker_version.stdout.split
set_fact:
register: ceph_health
changed_when: false
failed_when: false
- always_run: true
+ tags:
+ - always
- name: include checks.yml
include: checks.yml
- name: get ceph version
command: docker run --entrypoint /usr/bin/ceph {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --version
changed_when: false
- always_run: yes
register: ceph_version
+ tags:
+ - always
- name: set_fact ceph_version ceph_version.stdout.split
set_fact:
command: getenforce
register: sestatus
changed_when: false
- always_run: true
+ tags:
+ - always
become: false
failed_when: false
register: statconfig
- always_run: true
+ tags:
+ - always
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys
- always_run: true
+ tags:
+ - always
- name: set keys permissions
file:
with_items: "{{ crt_files }}"
changed_when: false
failed_when: false
- always_run: true
register: crt_files_exist
+ tags:
+ - always
- name: try to fetch crt file(s)
copy:
changed_when: false
become: false
failed_when: false
- always_run: true
register: statconfig
+ tags:
+ - always
- name: try to fetch ceph config and keys
copy:
changed_when: false
become: false
failed_when: false
- always_run: true
register: statconfig
+ tags:
+ - always
- name: try to fetch ceph config and keys
copy:
- name: "copy mgr key to /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring"
command: cp /etc/ceph/{{ cluster }}.mgr.{{ ansible_hostname }}.keyring /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring
changed_when: false
- always_run: true
with_items: "{{ statconfig.results }}"
when: item.stat.exists == true
+ tags:
+ - always
- name: set ceph mgr key permission
file:
args:
creates: /etc/ceph/{{ cluster }}.client.admin.keyring
changed_when: false
- always_run: true
when:
- cephx
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+ tags:
+ - always
- name: collect admin and bootstrap keys
command: ceph-create-keys --cluster {{ cluster }} -i {{ monitor_name }}
changed_when: false
- always_run: true
when:
- cephx
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+ tags:
+ - always
# NOTE (leseb): wait for mon discovery and quorum resolution
# the admin key is not instantaneously created so we have to wait a bit
command: ceph --cluster {{ cluster }} config-key get initial_mon_keyring
changed_when: false
ignore_errors: true
- always_run: true
run_once: true
failed_when: false
register: is_initial_mon_keyring_in_kv
+ tags:
+ - always
- name: put initial mon keyring in mon kv store
command: ceph --cluster {{ cluster }} config-key put initial_mon_keyring {{ monitor_keyring.stdout }}
changed_when: false
- always_run: true
run_once: true
when:
- is_initial_mon_keyring_in_kv.rc != 0
- cephx
+ tags:
+ - always
- name: create ceph rest api keyring when mon is not containerized
command: ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys
- always_run: true
when:
- cephx
+ tags:
+ - always
- name: set keys permissions
file:
changed_when: false
register: monitor_keyring
become: false
- always_run: true
when: cephx
+ tags:
+ - always
- name: create monitor initial keyring
command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *'
become: false
failed_when: false
register: statconfig
- always_run: true
+ tags:
+ - always
- name: try to copy ceph config and keys
copy:
become: false
failed_when: false
register: stat_mgr_keys
- always_run: true
when:
- "{{ groups.get(mgr_group_name, []) | length > 0 }}"
+ tags:
+ - always
- name: fetch ceph mgr key(s)
fetch:
- name: collect all the pools
command: rados --cluster {{ cluster }} lspools
register: ceph_pools
- always_run: true
+ tags:
+ - always
- name: secure the cluster
command: ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
register: selinuxstatus
changed_when: false
failed_when: false
- always_run: true
+ tags:
+ - always
- name: install policycoreutils-python to get semanage
package:
changed_when: false
become: false
failed_when: false
- always_run: true
register: statconfig
+ tags:
+ - always
- name: try to fetch config and keys
copy:
- "{{ dedicated_devices|unique }}"
changed_when: false
failed_when: false
- always_run: true
register: journal_partition_status
when:
- osd_scenario == 'non-collocated'
+ tags:
+ - always
- name: fix partitions gpt header or labels of the journal device(s)
shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
- "{{ devices }}"
changed_when: false
failed_when: false
- always_run: true
register: osd_partition_status_results
+ tags:
+ - always
# NOTE: The following calls to sgdisk are retried because sgdisk is known to
# not fully wipe a device the first time around. There is no need to halt execution
changed_when: false
become: false
failed_when: false
- always_run: true
register: statconfig
+ tags:
+ - always
- name: try to copy ceph config and keys
copy:
- "{{ devices }}"
changed_when: false
failed_when: false
- always_run: true
register: parted_results
+ tags:
+ - always
- name: include copy_configs.yml
include: copy_configs.yml
with_items: "{{ devices }}"
changed_when: false
failed_when: false
- always_run: true
register: osd_path
+ tags:
+ - always
- name: get osd id
command: cat {{ item.stdout }}/whoami
with_items: "{{ osd_path.results }}"
changed_when: false
failed_when: false
- always_run: true
register: osd_id_non_dir_scenario
+ tags:
+ - always
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
ls /var/lib/ceph/osd/ | sed 's/.*-//'
changed_when: false
failed_when: false
- always_run: true
register: osd_id
until: osd_id.stdout_lines|length == devices|unique|length
retries: 10
+ tags:
+ - always
- name: ensure systemd service override directory exists
file:
changed_when: false
become: false
failed_when: false
- always_run: true
register: statconfig
when: "item | length > 0"
+ tags:
+ - always
- name: try to fetch ceph config and keys
copy:
become: false
ignore_errors: true
register: statconfig
- always_run: true
+ tags:
+ - always
- name: try to fetch ceph config and keys
copy:
shell: "pgrep -f ceph-rest-api"
changed_when: false
failed_when: false
- always_run: true
register: restapi_status
+ tags:
+ - always
- name: start ceph rest api
shell: "nohup ceph-rest-api --conf /etc/ceph/{{ cluster }}.conf &"
changed_when: false
become: false
ignore_errors: true
- always_run: true
register: statconfig
+ tags:
+ - always
- name: try to fetch ceph config and keys
copy:
register: realmcheck
failed_when: False
changed_when: False
- always_run: True
+ tags:
+ - always
- name: check if the zonegroup already exists
command: radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}
register: zonegroupcheck
failed_when: False
changed_when: False
- always_run: True
+ tags:
+ - always
- name: check if the zone already exists
command: radosgw-admin zone get --rgw-zone={{ rgw_zone }}
register: zonecheck
failed_when: False
changed_when: False
- always_run: True
+ tags:
+ - always
- name: check if the system user already exists
command: radosgw-admin user info --uid=zone.user
register: usercheck
failed_when: False
changed_when: False
- always_run: True
+ tags:
+ - always
stat:
path: /run/ostree-booted
register: stat_ostree
- always_run: true
+ tags:
+ - always
- name: set fact for using Atomic host
set_fact:
- name: set MTU on eth2
command: "ifconfig eth2 mtu 1400 up"
-- hosts: mdss:rgws:clients
+- hosts: mdss:rgws:clients
gather_facts: false
become: yes
tasks:
- name: check if it is Atomic host
stat: path=/run/ostree-booted
register: stat_ostree
- always_run: true
+ tags:
+ - always
- name: set fact for using Atomic host
set_fact: