git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
syntax: change local_action syntax
author Guillaume Abrioux <gabrioux@redhat.com>
Wed, 31 Jan 2018 08:23:28 +0000 (09:23 +0100)
committer Sébastien Han <seb@redhat.com>
Wed, 31 Jan 2018 09:45:34 +0000 (10:45 +0100)
Use a nicer syntax for `local_action` tasks.
We used to have one-liners like this:
```
local_action: wait_for port=22 host={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} state=started delay=10 timeout=500
```

The usual syntax:
```
    local_action:
      module: wait_for
      port: 22
      host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
      state: started
      delay: 10
      timeout: 500
```
is nicer and helps keep the syntax consistent across the whole
playbook.

This also fixes a potential issue with missing quotation marks:

```
Traceback (most recent call last):
  File "/tmp/ansible_wQtWsi/ansible_module_command.py", line 213, in <module>
    main()
  File "/tmp/ansible_wQtWsi/ansible_module_command.py", line 185, in main
    rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin)
  File "/tmp/ansible_wQtWsi/ansible_modlib.zip/ansible/module_utils/basic.py", line 2710, in run_command
  File "/usr/lib64/python2.7/shlex.py", line 279, in split
    return list(lex)
  File "/usr/lib64/python2.7/shlex.py", line 269, in next
    token = self.get_token()
  File "/usr/lib64/python2.7/shlex.py", line 96, in get_token
    raw = self.read_token()
  File "/usr/lib64/python2.7/shlex.py", line 172, in read_token
    raise ValueError, "No closing quotation"
ValueError: No closing quotation
```

Writing `local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf`
as a one-liner can trigger this error, since Ansible complains about a missing closing quotation; this change avoids the issue.
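For comparison, here is the dict-style version of that task as it appears later in this diff, with `creates` expressed as a regular quoted YAML value:

```
- name: reuse cluster fsid when cluster is already running
  local_action:
    module: shell
      echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
  become: false
```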

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1510555
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
22 files changed:
infrastructure-playbooks/purge-cluster.yml
infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml
infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml
roles/ceph-common-coreos/tasks/install_pip.yml
roles/ceph-common-coreos/tasks/install_pypy.yml
roles/ceph-common/tasks/checks/check_firewall.yml
roles/ceph-common/tasks/facts_mon_fsid.yml
roles/ceph-config/tasks/main.yml
roles/ceph-defaults/tasks/facts.yml
roles/ceph-docker-common/tasks/fetch_image.yml
roles/ceph-docker-common/tasks/stat_ceph_files.yml
roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
roles/ceph-mds/tasks/containerized.yml
roles/ceph-mgr/tasks/docker/copy_configs.yml
roles/ceph-mon/tasks/deploy_monitors.yml
roles/ceph-mon/tasks/docker/copy_configs.yml
roles/ceph-nfs/tasks/pre_requisite_container.yml
roles/ceph-osd/tasks/copy_configs.yml
roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml
roles/ceph-restapi/tasks/docker/copy_configs.yml
roles/ceph-rgw/tasks/docker/copy_configs.yml
tests/functional/reboot.yml

index c04c408a5a95d9b9a3b3f7f6a3efa1246cde0448..c142525e59c5fb763593e38bc57138b158cece6a 100644 (file)
 
   - name: wait for server to boot
     become: false
-    local_action: wait_for port=22 host={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} state=started delay=10 timeout=500
+    local_action:
+      module: wait_for
+      port: 22
+      host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+      state: started
+      delay: 10
+      timeout: 500
 
   - name: remove data
     file:
index c559ed62f53e0d695ca4cd0c2de4ccf554821162..3d8b8afe714b4d37cb3cb8d9eba0f142e418bfb5 100644 (file)
     command: poweroff
 
   - name: Wait for the server to go down
-    local_action: >
-      wait_for host=<your_host>
-      port=22
-      state=stopped
+    local_action:
+      module: wait_for
+      host: <your_host>
+      port: 22
+      state: stopped
 
   - name: Wait for the server to come up
-    local_action: >
-      wait_for host=<your_host>
-      port=22
-      delay=10
-      timeout=3600
+    local_action:
+      module: wait_for
+      host: <your_host>
+      port: 22
+      delay: 10
+      timeout: 3600
 
   - name: Unset the noout flag
     command: ceph osd unset noout
index 843056f644b444961ec2ed0678c7e2ca4c6bd5db..e09d85bfc118a49226264f9c613147eacd9573f5 100644 (file)
       when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Wait for the monitor to be up again
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        port=6789
-        timeout=10
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: 6789
+        timeout: 10
       when: migration_completed.stat.exists == False
 
     - name: Stop the monitor (Upstart)
       when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Wait for the monitor to be down
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        port=6789
-        timeout=10
-        state=stopped
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: 6789
+        timeout: 10
+        state: stopped
       when: migration_completed.stat.exists == False
 
     - name: Create a backup directory
       when: migration_completed.stat.exists == False
 
     - name: Wait for the server to come up
-      local_action: >
-        wait_for
-        port=22
-        delay=10
-        timeout=3600
+      local_action:
+        module: wait_for
+        port: 22
+        delay: 10
+        timeout: 3600
       when: migration_completed.stat.exists == False
 
     - name: Wait a bit more to be sure that the server is ready
       when: migration_completed.stat.exists == False
 
     - name: Wait for the Monitor to be up again
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        port=6789
-        timeout=10
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: 6789
+        timeout: 10
       when: migration_completed.stat.exists == False
 
     - name: Waiting for the monitor to join the quorum...
       when: osdsysvinit.rc == 0 and migration_completed.stat.exists == False
 
     - name: Wait for the OSDs to be down
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        port={{ item }}
-        timeout=10
-        state=stopped
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: "{{ item }}"
+        timeout: 10
+        state: stopped
       with_items: "{{ osd_ports.stdout_lines }}"
       when: migration_completed.stat.exists == False
 
       when: migration_completed.stat.exists == False
 
     - name: Wait for the server to come up
-      local_action: >
-        wait_for
-        port=22
-        delay=10
-        timeout=3600
+      local_action:
+        module: wait_for
+        port: 22
+        delay: 10
+        timeout: 3600
       when: migration_completed.stat.exists == False
 
     - name: Wait a bit to be sure that the server is ready for scp
       when: migration_completed.stat.exists == False
 
     - name: Wait for radosgw to be down
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        path=/tmp/radosgw.sock
-        state=absent
-        timeout=30
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        path: /tmp/radosgw.sock
+        state: absent
+        timeout: 30
       when: migration_completed.stat.exists == False
 
     - name: Reboot the server
       when: migration_completed.stat.exists == False
 
     - name: Wait for the server to come up
-      local_action: >
-        wait_for
-        port=22
-        delay=10
-        timeout=3600
+      local_action:
+        module: wait_for
+        port: 22
+        delay: 10
+        timeout: 3600
       when: migration_completed.stat.exists == False
 
     - name: Wait a bit to be sure that the server is ready for scp
       when: migration_completed.stat.exists == False
 
     - name: Wait for radosgw to be up again
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        path=/tmp/radosgw.sock
-        state=present
-        timeout=30
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        path: /tmp/radosgw.sock
+        state: present
+        timeout: 30
       when: migration_completed.stat.exists == False
 
     - name: Done moving to the next rados gateway
index a7b759d42d9c4b51866a57e50d28ec402f6cee3d..c2c85bb28b768b1c4571194ed131009052442c46 100644 (file)
@@ -6,11 +6,16 @@
   raw: "{{pypy_binary_directory}}/python $HOME/get-pip.py --proxy='{{ lookup('env', 'https_proxy') }}'"
 
 - name: create local temp directory
-  local_action: raw mkdir -p {{local_temp_directory}}
+  local_action:
+    module: raw
+      mkdir -p {{local_temp_directory}}
   become: no
 
 - name: prepare install_pip.sh
-  local_action: template src=install_pip.sh.j2 dest={{local_temp_directory}}/install_pip.sh
+  local_action:
+    module: template
+    src: install_pip.sh.j2
+    dest: "{{local_temp_directory}}/install_pip.sh"
   become: no
 
 - name: run pip.sh
@@ -29,5 +34,8 @@
   raw: touch $HOME/.pip
 
 - name: remove pip.sh
-  local_action: file path="{{local_temp_directory}}/pip.sh" state=absent
+  local_action:
+    module: file
+    path: "{{local_temp_directory}}/pip.sh"
+    state: absent
   become: no
index c49c80d101db59a042897a64ad5375df65bb6ee3..41b6e6fd1af15dac4bf6cb0508f3b4ce40e87c78 100644 (file)
@@ -9,11 +9,16 @@
   raw:  mv $HOME/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}} {{pypy_directory}}
 
 - name: create local temp directory
-  local_action: raw mkdir -p {{local_temp_directory}}
+  local_action:
+    module: raw
+      mkdir -p {{local_temp_directory}}
   become: no
 
 - name: prepare python executable
-  local_action: template src=install_python.sh.j2 dest={{local_temp_directory}}/install_python.sh
+  local_action:
+    module: template
+    src: install_python.sh.j2
+    dest: "{{local_temp_directory}}/install_python.sh"
   become: no
 
 - name: fix library
@@ -32,5 +37,8 @@
   raw: touch $HOME/.python
 
 - name: remove install_python.sh
-  local_action: file path="{{local_temp_directory}}/install_python.sh" state=absent
+  local_action:
+    module: file
+    path: "{{local_temp_directory}}/install_python.sh"
+    state: absent
   become: no
index f52d612f4c39a9e8bcccd9751f7e0225f5868afe..f5ed176385e965ae8f3cece8f68e792278d20053 100644 (file)
@@ -15,7 +15,9 @@
     - nmapexist.rc != 0
 
 - name: check if monitor port is not filtered
-  local_action: shell set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: monportstate
@@ -33,7 +35,9 @@
     - monportstate.rc == 0
 
 - name: check if osd and mds range is not filtered (osd hosts)
-  local_action: shell set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: osdrangestate
@@ -51,7 +55,9 @@
     - osdrangestate.rc == 0
 
 - name: check if osd and mds range is not filtered (mds hosts)
-  local_action: shell set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: mdsrangestate
@@ -69,7 +75,9 @@
     - mdsrangestate.rc == 0
 
 - name: check if rados gateway port is not filtered
-  local_action: shell set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: rgwportstate
@@ -87,7 +95,9 @@
     - rgwportstate.rc == 0
 
 - name: check if NFS ports are not filtered
-  local_action: shell set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: nfsportstate
index 683ff0cf6fe9ca9adee3b980953f2cdae3134df2..f1038426e915e9692738de596ea0bac86a30117a 100644 (file)
@@ -31,7 +31,9 @@
     - test_initial_monitor_keyring.rc != 0
 
 - name: write initial mon keyring in {{ fetch_directory }}/monitor_keyring.conf if it doesn't exist
-  local_action: shell echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: shell
+      echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
   become: false
   when:
     - test_initial_monitor_keyring.rc == 0
index 284a9ef40a636173936ead11d4c8f522fcec7120..570db49b03339ee9c4f6720808fe910820f6b374 100644 (file)
       state: absent
 
   - name: remove tmp template file for ceph_conf_overrides (localhost)
-    local_action: file path="{{ fetch_directory }}/{{ fsid }}/ceph_conf_overrides_temp_{{ ansible_hostname }}" state=absent
+    local_action:
+      module: file
+      path: "{{ fetch_directory }}/{{ fsid }}/ceph_conf_overrides_temp_{{ ansible_hostname }}"
+      state: absent
     become: false
 
   - name: "generate ceph configuration file: {{ cluster }}.conf"
 # we are not populating kv_store with default ceph.conf AND there is at least 1 nfs in nfs group AND host is the first nfs
 - block:
   - name: create a local fetch directory if it does not exist
-    local_action: file path={{ fetch_directory }} state=directory
+    local_action:
+      module: file
+      path: "{{ fetch_directory }}"
+      state: directory
     changed_when: false
     become: false
     run_once: true
             and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
 
   - name: generate cluster uuid
-    local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-      creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+    local_action:
+      module: shell
+        python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+      creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
     register: cluster_uuid
     become: false
     when:
       - generate_fsid
 
   - name: read cluster uuid if it already exists
-    local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-      removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+    local_action:
+      module: command
+        cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+      removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
     changed_when: false
     register: cluster_uuid
     check_mode: no
index 040c88c7aaa9814d244ce364717fae95da9356d3..120e6b4866f6aaa5029d8d157287d84e1bbdbf41 100644 (file)
@@ -34,7 +34,9 @@
 
 # We want this check to be run only on the first node
 - name: check if {{ fetch_directory }} directory exists
-  local_action: stat path="{{ fetch_directory }}/monitor_keyring.conf"
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/monitor_keyring.conf"
   become: false
   register: monitor_keyring_conf
   run_once: true
     - rolling_update or groups.get(mon_group_name, []) | length == 0
 
 - name: create a local fetch directory if it does not exist
-  local_action: file path={{ fetch_directory }} state=directory
+  local_action:
+    module: file
+    path: "{{ fetch_directory }}"
+    state: directory
   changed_when: false
   become: false
   run_once: true
     ceph_release: "{{ ceph_stable_release }}"
 
 - name: generate cluster fsid
-  local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: shell
+      python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   register: cluster_uuid
   become: false
   when:
     - ceph_current_fsid.rc != 0
 
 - name: reuse cluster fsid when cluster is already running
-  local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: shell
+      echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   become: false
   when:
     - ceph_current_fsid.rc == 0
 
 - name: read cluster fsid if it already exists
-  local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-    removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: command
+      cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   changed_when: false
   register: cluster_uuid
   become: false
index 77b3d90086f4bb4aeec877bc6b40d00958918c65..d357ab83f22fccdb077267d0a98eaa4ee591b232 100644 (file)
@@ -49,7 +49,9 @@
     - repodigest_before_pulling == repodigest_after_pulling
 
 - name: export local ceph dev image
-  local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
+  local_action:
+    module: command
+      docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
   when:
     - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
   run_once: true
index add07fcc4df12427e70926ff27f1acc252a7fa21..c9231559291e2186617d21363ab728ceebc312be 100644 (file)
@@ -28,7 +28,9 @@
   when: groups.get(mgr_group_name, []) | length > 0
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 1b3a05d5d6bfc1d3fa948d89881400da37aa7880..fe5b4f8df9913729cceaa96a37df689b9877104f 100644 (file)
@@ -8,7 +8,9 @@
       - "/etc/ceph/iscsi-gateway-pub.key"
 
 - name: stat for crt file(s)
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ crt_files }}"
   changed_when: false
   failed_when: false
index e928bd78645b4e131e1a0bec7f51148fb38b9c81..7be1b7dfd7813e761e56b5bdbcd78b1b714a645d 100644 (file)
@@ -9,7 +9,9 @@
       - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 024acca188745a86a651f711758f2ea937eb09a0..db446b7fb8ebbb026b8e932fecc13bee8ea93277 100644 (file)
@@ -6,7 +6,9 @@
       - /etc/ceph/{{ cluster }}.client.admin.keyring
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index a35150b864b6b75d20df7a578c087c5ff2c89e24..f9e34a69b17e4837ea0b1cdd808a53b00f310f14 100644 (file)
@@ -1,14 +1,18 @@
 ---
 - name: generate monitor initial keyring
-  local_action: shell python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
-    creates={{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: shell
+      python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
+    creates: "{{ fetch_directory }}/monitor_keyring.conf"
   register: monitor_keyring
   become: false
   when: cephx
 
 - name: read monitor initial keyring if it already exists
-  local_action: command cat {{ fetch_directory }}/monitor_keyring.conf
-    removes={{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: command
+      cat {{ fetch_directory }}/monitor_keyring.conf
+    removes: "{{ fetch_directory }}/monitor_keyring.conf"
   changed_when: false
   register: monitor_keyring
   become: false
index 51dbe643a79e9a29d1707a3ded75cf32e7cd18e9..c8485f2fec98f5e4f3e0da851d76ea16fc6b674a 100644 (file)
@@ -41,7 +41,9 @@
     - groups.get(mgr_group_name, []) | length > 0
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 50e5f264ce42139e5346c7fad51bb80d3a951866..b7deb56cc9b4f450a668bb9b05540da9edc91cca 100644 (file)
@@ -5,7 +5,9 @@
       - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
 
 - name: stat for config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index da98e7729361a0b438c93edd6aa6b93e86cb19a9..71af1306557edad5976967c29b273c8751e517db 100644 (file)
@@ -5,12 +5,16 @@
       - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
 
 - name: wait for ceph.conf and keys
-  local_action: wait_for path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: wait_for
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   become: false
   with_items: "{{ ceph_config_keys }}"
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 42740b7628b6b11100f3561c2671f4b49c1af042..0741895e259df53e37c4ba3bcad23f8e89c448c7 100644 (file)
@@ -12,7 +12,9 @@
       - "{{ bootstrap_rbd_keyring | default('') }}"
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index cec96182cf43a375d571800461b391f467a71555..f3da4fce37085b5c9c491d4698aac9bcc3c4abcc 100644 (file)
@@ -5,7 +5,9 @@
       - /etc/ceph/{{ cluster }}.client.admin.keyring
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index a5500f52aab0fd943357f464187eecb51c17d523..c22e24d49205c70f2b8e952f5de6ce01b46819f1 100644 (file)
@@ -5,7 +5,9 @@
       - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index dbefdef4c3b7cf837b9c3f6a355e2dd2f0f18a37..50b6dc8cac9b896ac80af80286714c85138b45c9 100644 (file)
       poll: 0
 
     - name: waiting 3 minutes for the machines to come back
-      local_action: wait_for host={{ ansible_default_ipv4.address }} port=22 state=started delay=30 timeout=180
+      local_action:
+        module: wait_for
+        host: "{{ ansible_default_ipv4.address }}"
+        port: 22
+        state: started
+        delay: 30
+        timeout: 180
 
     - name: uptime
       command: uptime