Use a nicer syntax for `local_action` tasks.
We used to have a one-liner like this:
```
local_action: wait_for port=22 host={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} state=started delay=10 timeout=500
```
The usual syntax:
```
local_action:
module: wait_for
port: 22
host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
state: started
delay: 10
timeout: 500
```
is nicer and helps to keep consistency across the whole
playbook.
This also fixes a potential issue caused by a missing quotation:
```
Traceback (most recent call last):
File "/tmp/ansible_wQtWsi/ansible_module_command.py", line 213, in <module>
main()
File "/tmp/ansible_wQtWsi/ansible_module_command.py", line 185, in main
rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin)
File "/tmp/ansible_wQtWsi/ansible_modlib.zip/ansible/module_utils/basic.py", line 2710, in run_command
File "/usr/lib64/python2.7/shlex.py", line 279, in split
return list(lex) File "/usr/lib64/python2.7/shlex.py", line 269, in next
token = self.get_token()
File "/usr/lib64/python2.7/shlex.py", line 96, in get_token
raw = self.read_token()
File "/usr/lib64/python2.7/shlex.py", line 172, in read_token
raise ValueError, "No closing quotation"
ValueError: No closing quotation
```
writing `local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf`
can cause trouble because it fails, complaining about missing quotes; this change solves that issue.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1510555
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
- name: wait for server to boot
become: false
- local_action: wait_for port=22 host={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} state=started delay=10 timeout=500
+ local_action:
+ module: wait_for
+ port: 22
+ host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+ state: started
+ delay: 10
+ timeout: 500
- name: remove data
file:
command: poweroff
- name: Wait for the server to go down
- local_action: >
- wait_for host=<your_host>
- port=22
- state=stopped
+ local_action:
+ module: wait_for
+ host: <your_host>
+ port: 22
+ state: stopped
- name: Wait for the server to come up
- local_action: >
- wait_for host=<your_host>
- port=22
- delay=10
- timeout=3600
+ local_action:
+ module: wait_for
+ host: <your_host>
+ port: 22
+ delay: 10
+ timeout: 3600
- name: Unset the noout flag
command: ceph osd unset noout
when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
- name: Wait for the monitor to be up again
- local_action: >
- wait_for
- host={{ ansible_ssh_host | default(inventory_hostname) }}
- port=6789
- timeout=10
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ port: 6789
+ timeout: 10
when: migration_completed.stat.exists == False
- name: Stop the monitor (Upstart)
when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
- name: Wait for the monitor to be down
- local_action: >
- wait_for
- host={{ ansible_ssh_host | default(inventory_hostname) }}
- port=6789
- timeout=10
- state=stopped
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ port: 6789
+ timeout: 10
+ state: stopped
when: migration_completed.stat.exists == False
- name: Create a backup directory
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
- local_action: >
- wait_for
- port=22
- delay=10
- timeout=3600
+ local_action:
+ module: wait_for
+ port: 22
+ delay: 10
+ timeout: 3600
when: migration_completed.stat.exists == False
- name: Wait a bit more to be sure that the server is ready
when: migration_completed.stat.exists == False
- name: Wait for the Monitor to be up again
- local_action: >
- wait_for
- host={{ ansible_ssh_host | default(inventory_hostname) }}
- port=6789
- timeout=10
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ port: 6789
+ timeout: 10
when: migration_completed.stat.exists == False
- name: Waiting for the monitor to join the quorum...
when: osdsysvinit.rc == 0 and migration_completed.stat.exists == False
- name: Wait for the OSDs to be down
- local_action: >
- wait_for
- host={{ ansible_ssh_host | default(inventory_hostname) }}
- port={{ item }}
- timeout=10
- state=stopped
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ port: "{{ item }}"
+ timeout: 10
+ state: stopped
with_items: "{{ osd_ports.stdout_lines }}"
when: migration_completed.stat.exists == False
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
- local_action: >
- wait_for
- port=22
- delay=10
- timeout=3600
+ local_action:
+ module: wait_for
+ port: 22
+ delay: 10
+ timeout: 3600
when: migration_completed.stat.exists == False
- name: Wait a bit to be sure that the server is ready for scp
when: migration_completed.stat.exists == False
- name: Wait for radosgw to be down
- local_action: >
- wait_for
- host={{ ansible_ssh_host | default(inventory_hostname) }}
- path=/tmp/radosgw.sock
- state=absent
- timeout=30
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ path: /tmp/radosgw.sock
+ state: absent
+ timeout: 30
when: migration_completed.stat.exists == False
- name: Reboot the server
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
- local_action: >
- wait_for
- port=22
- delay=10
- timeout=3600
+ local_action:
+ module: wait_for
+ port: 22
+ delay: 10
+ timeout: 3600
when: migration_completed.stat.exists == False
- name: Wait a bit to be sure that the server is ready for scp
when: migration_completed.stat.exists == False
- name: Wait for radosgw to be up again
- local_action: >
- wait_for
- host={{ ansible_ssh_host | default(inventory_hostname) }}
- path=/tmp/radosgw.sock
- state=present
- timeout=30
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ path: /tmp/radosgw.sock
+ state: present
+ timeout: 30
when: migration_completed.stat.exists == False
- name: Done moving to the next rados gateway
raw: "{{pypy_binary_directory}}/python $HOME/get-pip.py --proxy='{{ lookup('env', 'https_proxy') }}'"
- name: create local temp directory
- local_action: raw mkdir -p {{local_temp_directory}}
+ local_action:
+ module: raw
+ mkdir -p {{local_temp_directory}}
become: no
- name: prepare install_pip.sh
- local_action: template src=install_pip.sh.j2 dest={{local_temp_directory}}/install_pip.sh
+ local_action:
+ module: template
+ src: install_pip.sh.j2
+ dest: "{{local_temp_directory}}/install_pip.sh"
become: no
- name: run pip.sh
raw: touch $HOME/.pip
- name: remove pip.sh
- local_action: file path="{{local_temp_directory}}/pip.sh" state=absent
+ local_action:
+ module: file
+ path: "{{local_temp_directory}}/pip.sh"
+ state: absent
become: no
raw: mv $HOME/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}} {{pypy_directory}}
- name: create local temp directory
- local_action: raw mkdir -p {{local_temp_directory}}
+ local_action:
+ module: raw
+ mkdir -p {{local_temp_directory}}
become: no
- name: prepare python executable
- local_action: template src=install_python.sh.j2 dest={{local_temp_directory}}/install_python.sh
+ local_action:
+ module: template
+ src: install_python.sh.j2
+ dest: "{{local_temp_directory}}/install_python.sh"
become: no
- name: fix library
raw: touch $HOME/.python
- name: remove install_python.sh
- local_action: file path="{{local_temp_directory}}/install_python.sh" state=absent
+ local_action:
+ module: file
+ path: "{{local_temp_directory}}/install_python.sh"
+ state: absent
become: no
- nmapexist.rc != 0
- name: check if monitor port is not filtered
- local_action: shell set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
+ local_action:
+ module: shell
+ set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
changed_when: false
failed_when: false
register: monportstate
- monportstate.rc == 0
- name: check if osd and mds range is not filtered (osd hosts)
- local_action: shell set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+ local_action:
+ module: shell
+ set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
changed_when: false
failed_when: false
register: osdrangestate
- osdrangestate.rc == 0
- name: check if osd and mds range is not filtered (mds hosts)
- local_action: shell set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+ local_action:
+ module: shell
+ set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
changed_when: false
failed_when: false
register: mdsrangestate
- mdsrangestate.rc == 0
- name: check if rados gateway port is not filtered
- local_action: shell set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+ local_action:
+ module: shell
+ set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
changed_when: false
failed_when: false
register: rgwportstate
- rgwportstate.rc == 0
- name: check if NFS ports are not filtered
- local_action: shell set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+ local_action:
+ module: shell
+ set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
changed_when: false
failed_when: false
register: nfsportstate
- test_initial_monitor_keyring.rc != 0
- name: write initial mon keyring in {{ fetch_directory }}/monitor_keyring.conf if it doesn't exist
- local_action: shell echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
+ local_action:
+ module: shell
+ echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
become: false
when:
- test_initial_monitor_keyring.rc == 0
state: absent
- name: remove tmp template file for ceph_conf_overrides (localhost)
- local_action: file path="{{ fetch_directory }}/{{ fsid }}/ceph_conf_overrides_temp_{{ ansible_hostname }}" state=absent
+ local_action:
+ module: file
+ path: "{{ fetch_directory }}/{{ fsid }}/ceph_conf_overrides_temp_{{ ansible_hostname }}"
+ state: absent
become: false
- name: "generate ceph configuration file: {{ cluster }}.conf"
# we are not population kv_store with default ceph.conf AND there at least 1 nfs in nfs group AND host is the first nfs
- block:
- name: create a local fetch directory if it does not exist
- local_action: file path={{ fetch_directory }} state=directory
+ local_action:
+ module: file
+ path: "{{ fetch_directory }}"
+ state: directory
changed_when: false
become: false
run_once: true
and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
- name: generate cluster uuid
- local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ local_action:
+ module: shell
+ python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
register: cluster_uuid
become: false
when:
- generate_fsid
- name: read cluster uuid if it already exists
- local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
- removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ local_action:
+ module: command
+ cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+ removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
changed_when: false
register: cluster_uuid
check_mode: no
# We want this check to be run only on the first node
- name: check if {{ fetch_directory }} directory exists
- local_action: stat path="{{ fetch_directory }}/monitor_keyring.conf"
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/monitor_keyring.conf"
become: false
register: monitor_keyring_conf
run_once: true
- rolling_update or groups.get(mon_group_name, []) | length == 0
- name: create a local fetch directory if it does not exist
- local_action: file path={{ fetch_directory }} state=directory
+ local_action:
+ module: file
+ path: "{{ fetch_directory }}"
+ state: directory
changed_when: false
become: false
run_once: true
ceph_release: "{{ ceph_stable_release }}"
- name: generate cluster fsid
- local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ local_action:
+ module: shell
+ python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
register: cluster_uuid
become: false
when:
- ceph_current_fsid.rc != 0
- name: reuse cluster fsid when cluster is already running
- local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ local_action:
+ module: shell
+ echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
become: false
when:
- ceph_current_fsid.rc == 0
- name: read cluster fsid if it already exists
- local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
- removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ local_action:
+ module: command
+ cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+ removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
changed_when: false
register: cluster_uuid
become: false
- repodigest_before_pulling == repodigest_after_pulling
- name: export local ceph dev image
- local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
+ local_action:
+ module: command
+ docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
when:
- (ceph_docker_dev_image is defined and ceph_docker_dev_image)
run_once: true
when: groups.get(mgr_group_name, []) | length > 0
- name: stat for ceph config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
- "/etc/ceph/iscsi-gateway-pub.key"
- name: stat for crt file(s)
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ crt_files }}"
changed_when: false
failed_when: false
- /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
- name: stat for ceph config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
- /etc/ceph/{{ cluster }}.client.admin.keyring
- name: stat for ceph config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
---
- name: generate monitor initial keyring
- local_action: shell python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
- creates={{ fetch_directory }}/monitor_keyring.conf
+ local_action:
+ module: shell
+ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
+ creates: "{{ fetch_directory }}/monitor_keyring.conf"
register: monitor_keyring
become: false
when: cephx
- name: read monitor initial keyring if it already exists
- local_action: command cat {{ fetch_directory }}/monitor_keyring.conf
- removes={{ fetch_directory }}/monitor_keyring.conf
+ local_action:
+ module: command
+ cat {{ fetch_directory }}/monitor_keyring.conf
+ removes: "{{ fetch_directory }}/monitor_keyring.conf"
changed_when: false
register: monitor_keyring
become: false
- groups.get(mgr_group_name, []) | length > 0
- name: stat for ceph config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
- /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- name: stat for config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
- /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- name: wait for ceph.conf and keys
- local_action: wait_for path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: wait_for
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
become: false
with_items: "{{ ceph_config_keys }}"
- name: stat for ceph config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
- "{{ bootstrap_rbd_keyring | default('') }}"
- name: stat for ceph config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
- /etc/ceph/{{ cluster }}.client.admin.keyring
- name: stat for ceph config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
- /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- name: stat for ceph config and keys
- local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
with_items: "{{ ceph_config_keys }}"
changed_when: false
become: false
poll: 0
- name: waiting 3 minutes for the machines to come back
- local_action: wait_for host={{ ansible_default_ipv4.address }} port=22 state=started delay=30 timeout=180
+ local_action:
+ module: wait_for
+ host: "{{ ansible_default_ipv4.address }}"
+ port: 22
+ state: started
+ delay: 30
+ timeout: 180
- name: uptime
command: uptime