git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
syntax: change local_action syntax v3.0.22
authorGuillaume Abrioux <gabrioux@redhat.com>
Wed, 31 Jan 2018 08:23:28 +0000 (09:23 +0100)
committerGuillaume Abrioux <gabrioux@redhat.com>
Wed, 31 Jan 2018 11:32:26 +0000 (12:32 +0100)
Use a nicer syntax for `local_action` tasks.
We used to have oneliner like this:
```
local_action: wait_for port=22 host={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} state=started delay=10 timeout=500
```

The usual syntax:
```
    local_action:
      module: wait_for
      port: 22
      host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
      state: started
      delay: 10
      timeout: 500
```
is nicer and helps keep consistency across the whole
playbook.

This also fixes a potential issue with a missing quotation:

```
Traceback (most recent call last):
  File "/tmp/ansible_wQtWsi/ansible_module_command.py", line 213, in <module>
    main()
  File "/tmp/ansible_wQtWsi/ansible_module_command.py", line 185, in main
    rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin)
  File "/tmp/ansible_wQtWsi/ansible_modlib.zip/ansible/module_utils/basic.py", line 2710, in run_command
  File "/usr/lib64/python2.7/shlex.py", line 279, in split
    return list(lex)
  File "/usr/lib64/python2.7/shlex.py", line 269, in next
    token = self.get_token()
  File "/usr/lib64/python2.7/shlex.py", line 96, in get_token
    raw = self.read_token()
  File "/usr/lib64/python2.7/shlex.py", line 172, in read_token
    raise ValueError, "No closing quotation"
ValueError: No closing quotation
```

writing `local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf`
can cause trouble because it complains about missing quotes; this fix solves the issue.

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1510555
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit deaf273b25601991fc16712cc03820207125554f)
Signed-off-by: Sébastien Han <seb@redhat.com>
22 files changed:
infrastructure-playbooks/purge-cluster.yml
infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml
infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml
roles/ceph-common-coreos/tasks/install_pip.yml
roles/ceph-common-coreos/tasks/install_pypy.yml
roles/ceph-common/tasks/checks/check_firewall.yml
roles/ceph-common/tasks/facts_mon_fsid.yml
roles/ceph-config/tasks/main.yml
roles/ceph-defaults/tasks/facts.yml
roles/ceph-docker-common/tasks/fetch_image.yml
roles/ceph-docker-common/tasks/stat_ceph_files.yml
roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
roles/ceph-mds/tasks/containerized.yml
roles/ceph-mgr/tasks/docker/copy_configs.yml
roles/ceph-mon/tasks/deploy_monitors.yml
roles/ceph-mon/tasks/docker/copy_configs.yml
roles/ceph-nfs/tasks/pre_requisite_container.yml
roles/ceph-osd/tasks/copy_configs.yml
roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml
roles/ceph-restapi/tasks/docker/copy_configs.yml
roles/ceph-rgw/tasks/docker/copy_configs.yml
tests/functional/reboot.yml [new file with mode: 0644]

index e2b1478c234757066dcd8259413b7a0f451fc501..b8945f7b3a3b8f2f1529d6523fc3402c1c419b8e 100644 (file)
 
   - name: wait for server to boot
     become: false
-    local_action: wait_for port=22 host={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} state=started delay=10 timeout=500
+    local_action:
+      module: wait_for
+      port: 22
+      host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+      state: started
+      delay: 10
+      timeout: 500
 
   - name: remove data
     file:
index c559ed62f53e0d695ca4cd0c2de4ccf554821162..3d8b8afe714b4d37cb3cb8d9eba0f142e418bfb5 100644 (file)
     command: poweroff
 
   - name: Wait for the server to go down
-    local_action: >
-      wait_for host=<your_host>
-      port=22
-      state=stopped
+    local_action:
+      module: wait_for
+      host: <your_host>
+      port: 22
+      state: stopped
 
   - name: Wait for the server to come up
-    local_action: >
-      wait_for host=<your_host>
-      port=22
-      delay=10
-      timeout=3600
+    local_action:
+      module: wait_for
+      host: <your_host>
+      port: 22
+      delay: 10
+      timeout: 3600
 
   - name: Unset the noout flag
     command: ceph osd unset noout
index 843056f644b444961ec2ed0678c7e2ca4c6bd5db..e09d85bfc118a49226264f9c613147eacd9573f5 100644 (file)
       when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Wait for the monitor to be up again
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        port=6789
-        timeout=10
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: 6789
+        timeout: 10
       when: migration_completed.stat.exists == False
 
     - name: Stop the monitor (Upstart)
       when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Wait for the monitor to be down
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        port=6789
-        timeout=10
-        state=stopped
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: 6789
+        timeout: 10
+        state: stopped
       when: migration_completed.stat.exists == False
 
     - name: Create a backup directory
       when: migration_completed.stat.exists == False
 
     - name: Wait for the server to come up
-      local_action: >
-        wait_for
-        port=22
-        delay=10
-        timeout=3600
+      local_action:
+        module: wait_for
+        port: 22
+        delay: 10
+        timeout: 3600
       when: migration_completed.stat.exists == False
 
     - name: Wait a bit more to be sure that the server is ready
       when: migration_completed.stat.exists == False
 
     - name: Wait for the Monitor to be up again
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        port=6789
-        timeout=10
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: 6789
+        timeout: 10
       when: migration_completed.stat.exists == False
 
     - name: Waiting for the monitor to join the quorum...
       when: osdsysvinit.rc == 0 and migration_completed.stat.exists == False
 
     - name: Wait for the OSDs to be down
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        port={{ item }}
-        timeout=10
-        state=stopped
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: "{{ item }}"
+        timeout: 10
+        state: stopped
       with_items: "{{ osd_ports.stdout_lines }}"
       when: migration_completed.stat.exists == False
 
       when: migration_completed.stat.exists == False
 
     - name: Wait for the server to come up
-      local_action: >
-        wait_for
-        port=22
-        delay=10
-        timeout=3600
+      local_action:
+        module: wait_for
+        port: 22
+        delay: 10
+        timeout: 3600
       when: migration_completed.stat.exists == False
 
     - name: Wait a bit to be sure that the server is ready for scp
       when: migration_completed.stat.exists == False
 
     - name: Wait for radosgw to be down
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        path=/tmp/radosgw.sock
-        state=absent
-        timeout=30
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        path: /tmp/radosgw.sock
+        state: absent
+        timeout: 30
       when: migration_completed.stat.exists == False
 
     - name: Reboot the server
       when: migration_completed.stat.exists == False
 
     - name: Wait for the server to come up
-      local_action: >
-        wait_for
-        port=22
-        delay=10
-        timeout=3600
+      local_action:
+        module: wait_for
+        port: 22
+        delay: 10
+        timeout: 3600
       when: migration_completed.stat.exists == False
 
     - name: Wait a bit to be sure that the server is ready for scp
       when: migration_completed.stat.exists == False
 
     - name: Wait for radosgw to be up again
-      local_action: >
-        wait_for
-        host={{ ansible_ssh_host | default(inventory_hostname) }}
-        path=/tmp/radosgw.sock
-        state=present
-        timeout=30
+      local_action:
+        module: wait_for
+        host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        path: /tmp/radosgw.sock
+        state: present
+        timeout: 30
       when: migration_completed.stat.exists == False
 
     - name: Done moving to the next rados gateway
index a7b759d42d9c4b51866a57e50d28ec402f6cee3d..c2c85bb28b768b1c4571194ed131009052442c46 100644 (file)
@@ -6,11 +6,16 @@
   raw: "{{pypy_binary_directory}}/python $HOME/get-pip.py --proxy='{{ lookup('env', 'https_proxy') }}'"
 
 - name: create local temp directory
-  local_action: raw mkdir -p {{local_temp_directory}}
+  local_action:
+    module: raw
+      mkdir -p {{local_temp_directory}}
   become: no
 
 - name: prepare install_pip.sh
-  local_action: template src=install_pip.sh.j2 dest={{local_temp_directory}}/install_pip.sh
+  local_action:
+    module: template
+    src: install_pip.sh.j2
+    dest: "{{local_temp_directory}}/install_pip.sh"
   become: no
 
 - name: run pip.sh
@@ -29,5 +34,8 @@
   raw: touch $HOME/.pip
 
 - name: remove pip.sh
-  local_action: file path="{{local_temp_directory}}/pip.sh" state=absent
+  local_action:
+    module: file
+    path: "{{local_temp_directory}}/pip.sh"
+    state: absent
   become: no
index c49c80d101db59a042897a64ad5375df65bb6ee3..41b6e6fd1af15dac4bf6cb0508f3b4ce40e87c78 100644 (file)
@@ -9,11 +9,16 @@
   raw:  mv $HOME/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}} {{pypy_directory}}
 
 - name: create local temp directory
-  local_action: raw mkdir -p {{local_temp_directory}}
+  local_action:
+    module: raw
+      mkdir -p {{local_temp_directory}}
   become: no
 
 - name: prepare python executable
-  local_action: template src=install_python.sh.j2 dest={{local_temp_directory}}/install_python.sh
+  local_action:
+    module: template
+    src: install_python.sh.j2
+    dest: "{{local_temp_directory}}/install_python.sh"
   become: no
 
 - name: fix library
@@ -32,5 +37,8 @@
   raw: touch $HOME/.python
 
 - name: remove install_python.sh
-  local_action: file path="{{local_temp_directory}}/install_python.sh" state=absent
+  local_action:
+    module: file
+    path: "{{local_temp_directory}}/install_python.sh"
+    state: absent
   become: no
index 15c0623d2240cc24a1eeb1083a3441ababbdd5b5..e5cce11d9d0e7529ae1f6495017e70685ccc7c6b 100644 (file)
@@ -17,7 +17,9 @@
     - nmapexist.rc != 0
 
 - name: check if monitor port is not filtered
-  local_action: shell set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: monportstate
@@ -35,7 +37,9 @@
     - monportstate.rc == 0
 
 - name: check if osd and mds range is not filtered (osd hosts)
-  local_action: shell set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: osdrangestate
@@ -53,7 +57,9 @@
     - osdrangestate.rc == 0
 
 - name: check if osd and mds range is not filtered (mds hosts)
-  local_action: shell set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: mdsrangestate
@@ -71,7 +77,9 @@
     - mdsrangestate.rc == 0
 
 - name: check if rados gateway port is not filtered
-  local_action: shell set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: rgwportstate
@@ -89,7 +97,9 @@
     - rgwportstate.rc == 0
 
 - name: check if NFS ports are not filtered
-  local_action: shell set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: nfsportstate
index 683ff0cf6fe9ca9adee3b980953f2cdae3134df2..f1038426e915e9692738de596ea0bac86a30117a 100644 (file)
@@ -31,7 +31,9 @@
     - test_initial_monitor_keyring.rc != 0
 
 - name: write initial mon keyring in {{ fetch_directory }}/monitor_keyring.conf if it doesn't exist
-  local_action: shell echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: shell
+      echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
   become: false
   when:
     - test_initial_monitor_keyring.rc == 0
index 61c8b73022e407bf0ff14c61c7ccc61b4c8ba4fc..2705ed832b87b0f3bf20de91c5918a4c0ca34b0a 100644 (file)
       state: absent
 
   - name: remove tmp template file for ceph_conf_overrides (localhost)
-    local_action: file path="{{ fetch_directory }}/{{ fsid }}/ceph_conf_overrides_temp_{{ ansible_hostname }}" state=absent
+    local_action:
+      module: file
+      path: "{{ fetch_directory }}/{{ fsid }}/ceph_conf_overrides_temp_{{ ansible_hostname }}"
+      state: absent
     become: false
 
   - name: "generate ceph configuration file: {{ cluster }}.conf"
 # we are not population kv_store with default ceph.conf AND there at least 1 nfs in nfs group AND host is the first nfs
 - block:
   - name: create a local fetch directory if it does not exist
-    local_action: file path={{ fetch_directory }} state=directory
+    local_action:
+      module: file
+      path: "{{ fetch_directory }}"
+      state: directory
     changed_when: false
     become: false
     run_once: true
             and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
 
   - name: generate cluster uuid
-    local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-      creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+    local_action:
+      module: shell
+        python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+      creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
     register: cluster_uuid
     become: false
     when:
       - generate_fsid
 
   - name: read cluster uuid if it already exists
-    local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-      removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+    local_action:
+      module: command
+        cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+      removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
     changed_when: false
     register: cluster_uuid
     always_run: true
index 4799e90a5b35938263acde436db3e09778b27454..40c9bcc661397f73c4785ceb8332884d387db9f3 100644 (file)
@@ -34,7 +34,9 @@
 
 # We want this check to be run only on the first node
 - name: check if {{ fetch_directory }} directory exists
-  local_action: stat path="{{ fetch_directory }}/monitor_keyring.conf"
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/monitor_keyring.conf"
   become: false
   register: monitor_keyring_conf
   run_once: true
     - rolling_update or groups.get(mon_group_name, []) | length == 0
 
 - name: create a local fetch directory if it does not exist
-  local_action: file path={{ fetch_directory }} state=directory
+  local_action:
+    module: file
+    path: "{{ fetch_directory }}"
+    state: directory
   changed_when: false
   become: false
   run_once: true
     ceph_release: "{{ ceph_stable_release }}"
 
 - name: generate cluster fsid
-  local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: shell
+      python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   register: cluster_uuid
   become: false
   when:
     - ceph_current_fsid.rc != 0
 
 - name: reuse cluster fsid when cluster is already running
-  local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: shell
+      echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   become: false
   when:
     - ceph_current_fsid.rc == 0
 
 - name: read cluster fsid if it already exists
-  local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-    removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: command
+      cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   changed_when: false
   register: cluster_uuid
   become: false
index 77b3d90086f4bb4aeec877bc6b40d00958918c65..d357ab83f22fccdb077267d0a98eaa4ee591b232 100644 (file)
@@ -49,7 +49,9 @@
     - repodigest_before_pulling == repodigest_after_pulling
 
 - name: export local ceph dev image
-  local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
+  local_action:
+    module: command
+      docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
   when:
     - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
   run_once: true
index ffd50b57ff19a6995ea529d50b41a9d83fa47341..214661b3fcd83cd34362273fe5a05c60d7ad4131 100644 (file)
@@ -28,7 +28,9 @@
   when: groups.get(mgr_group_name, []) | length > 0
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index c4df5830f79de95d2d02f3a66f836236b43a0f5b..96de286f20d081ba6a62f6fcd00b4250ce91b54e 100644 (file)
@@ -8,7 +8,9 @@
       - "/etc/ceph/iscsi-gateway-pub.key"
 
 - name: stat for crt file(s)
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ crt_files }}"
   changed_when: false
   failed_when: false
index f516e7982d77d1ec18a39a7cd340fcadf22b7c42..afa52c2b516060d6bb41a9b88254be4db4917c63 100644 (file)
@@ -9,7 +9,9 @@
       - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 5540d83a1da30b1e9d59ec222ac3557d2445ad89..117ad09a3d3d3f916d1afecba4563496979499ca 100644 (file)
@@ -6,7 +6,9 @@
       - /etc/ceph/{{ cluster }}.client.admin.keyring
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index c4706e37e5a262892bb5f6369e576f7d743f318e..87096e56af4b588861cfdfe180b6b73f5ba5cd32 100644 (file)
@@ -1,14 +1,18 @@
 ---
 - name: generate monitor initial keyring
-  local_action: shell python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
-    creates={{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: shell
+      python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
+    creates: "{{ fetch_directory }}/monitor_keyring.conf"
   register: monitor_keyring
   become: false
   when: cephx
 
 - name: read monitor initial keyring if it already exists
-  local_action: command cat {{ fetch_directory }}/monitor_keyring.conf
-    removes={{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: command
+      cat {{ fetch_directory }}/monitor_keyring.conf
+    removes: "{{ fetch_directory }}/monitor_keyring.conf"
   changed_when: false
   register: monitor_keyring
   become: false
index e8d8606a4692a28738603ec3186180c207cf1e44..74f28d467799ff2beb86a8027333b074f2b31246 100644 (file)
@@ -41,7 +41,9 @@
     - groups.get(mgr_group_name, []) | length > 0
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 142b9a38b941e5bfd527eeeab3e20e39a31f8868..acd026fe279c3106aecfe99b4420142f8be1d117 100644 (file)
@@ -5,7 +5,9 @@
       - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
 
 - name: stat for config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 616e6d28c34194b6e427a096c7fdd49422e2285c..f3f438a04c6815d5a5243b1890e05c6df81481e4 100644 (file)
@@ -5,12 +5,16 @@
       - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
 
 - name: wait for ceph.conf and keys
-  local_action: wait_for path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: wait_for
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   become: false
   with_items: "{{ ceph_config_keys }}"
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 581087db3a3ceff979b6cc662789bb329dc87eba..f17bfd71988f1d639b0081cc76d911627010d0dc 100644 (file)
@@ -12,7 +12,9 @@
       - "{{ bootstrap_rbd_keyring | default('') }}"
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index cc8ea7f8d552f43277198e7916b78c550deb9aa9..ae3627d7381ed5bad33d280d16d88a65d667987d 100644 (file)
@@ -5,7 +5,9 @@
       - /etc/ceph/{{ cluster }}.client.admin.keyring
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
index 46469f2863625bde79aa9250d7b527e26bc7e048..fb80e01bebcd8ad74f199ffa037507df5c35446d 100644 (file)
@@ -5,7 +5,9 @@
       - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
 
 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
diff --git a/tests/functional/reboot.yml b/tests/functional/reboot.yml
new file mode 100644 (file)
index 0000000..50b6dc8
--- /dev/null
@@ -0,0 +1,22 @@
+---
+- hosts: all
+  gather_facts: true
+  tasks:
+    # why sleep 2? see here: https://github.com/ansible/ansible/issues/14413
+    - name: reboot the machines
+      shell: sleep 2 && shutdown -r now
+      become: yes
+      async: 1
+      poll: 0
+
+    - name: waiting 3 minutes for the machines to come back
+      local_action:
+        module: wait_for
+        host: "{{ ansible_default_ipv4.address }}"
+        port: 22
+        state: started
+        delay: 30
+        timeout: 180
+
+    - name: uptime
+      command: uptime