From c5c354a61a8fa7e3bcb1dde4418426dee6ff1eb4 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Wed, 10 Apr 2019 09:23:17 +0200
Subject: [PATCH] remove all NBSP chars in stable-3.2 branch

these can cause issues, let's replace all of them with real spaces.

Signed-off-by: Guillaume Abrioux
---
 RELEASE-NOTE.md                                |  2 +-
 infrastructure-playbooks/purge-cluster.yml     |  4 ++--
 infrastructure-playbooks/rolling_update.yml    |  4 ++--
 roles/ceph-client/tasks/create_users_keys.yml  |  6 +++---
 roles/ceph-common/tasks/facts_mon_fsid.yml     | 12 ++++++------
 roles/ceph-nfs/tasks/create_rgw_nfs_user.yml   |  4 ++--
 roles/ceph-osd/tasks/openstack_config.yml      |  4 ++--
 roles/ceph-rgw/handlers/main.yml               |  2 +-
 roles/ceph-validate/tasks/check_ipaddr_mon.yml |  2 +-
 9 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/RELEASE-NOTE.md b/RELEASE-NOTE.md
index e7bce1102..f32b5b3e4 100644
--- a/RELEASE-NOTE.md
+++ b/RELEASE-NOTE.md
@@ -556,7 +556,7 @@ Notable changes from stable-3.1
 * [98cb6ed8](https://github.com/ceph/ceph-ansible/commit/98cb6ed8) - tests: avoid yum failures (Guillaume Abrioux)
 * [144b2fce](https://github.com/ceph/ceph-ansible/commit/144b2fce) - python-netaddr is required to generate ceph.conf (Ha Phan)
 * [e91648a7](https://github.com/ceph/ceph-ansible/commit/e91648a7) - rolling_update: add role ceph-iscsi-gw (Sébastien Han)
-* [2890b57c](https://github.com/ceph/ceph-ansible/commit/2890b57c) - Add privilege escalation to iscsi purge tasks (Paul Cuzner)
+* [2890b57c](https://github.com/ceph/ceph-ansible/commit/2890b57c) - Add privilege escalation to iscsi purge tasks (Paul Cuzner)
 * [608ea947](https://github.com/ceph/ceph-ansible/commit/608ea947) - mds: move mds fs pools creation (Guillaume Abrioux)
 * [1c084efb](https://github.com/ceph/ceph-ansible/commit/1c084efb) - rgw: container add option to configure multi-site zone (Sébastien Han)
 * [82884801](https://github.com/ceph/ceph-ansible/commit/82884801) - playbook: follow up on #2553 (Guillaume Abrioux)
diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index 7dd2401bb..0f63cc340 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -41,7 +41,7 @@
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
     - "{{ nfs_group_name|default('nfss') }}"
     - "{{ client_group_name|default('clients') }}"
-    - "{{ mgr_group_name|default('mgrs') }}"
+    - "{{ mgr_group_name|default('mgrs') }}"

   become: true
@@ -385,7 +385,7 @@
   - name: wipe partitions
     shell: |
       wipefs --all "{{ item }}"
-      dd if=/dev/zero of="{{ item }}" bs=1 count=4096
+      dd if=/dev/zero of="{{ item }}" bs=1 count=4096
     with_items: "{{ combined_devices_list }}"

   - name: zap ceph journal/block db/block wal partitions
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index 7872c7201..63311681a 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -226,14 +226,14 @@
   pre_tasks:
     - name: non container - get current fsid
-      command: "ceph --cluster {{ cluster }} fsid"
+      command: "ceph --cluster {{ cluster }} fsid"
       register: cluster_uuid_non_container
       delegate_to: "{{ groups[mon_group_name][0] }}"
       when:
         - not containerized_deployment

     - name: container - get current fsid
-      command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} fsid"
+      command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} fsid"
       register: cluster_uuid_container
       delegate_to: "{{ groups[mon_group_name][0] }}"
       when:
diff --git a/roles/ceph-client/tasks/create_users_keys.yml b/roles/ceph-client/tasks/create_users_keys.yml
index 9151c695d..d8165d0b1 100644
--- a/roles/ceph-client/tasks/create_users_keys.yml
+++ b/roles/ceph-client/tasks/create_users_keys.yml
@@ -70,7 +70,7 @@
   when:
     - cephx
     - keys | length > 0
-    - inventory_hostname == groups.get('_filtered_clients') | first
+    - inventory_hostname == groups.get('_filtered_clients') | first

 - name: slurp client cephx key(s)
   slurp:
@@ -82,7 +82,7 @@
   when:
     - cephx
     - keys | length > 0
-    - inventory_hostname == groups.get('_filtered_clients') | first
+    - inventory_hostname == groups.get('_filtered_clients') | first

 - name: pool related tasks
   when:
@@ -132,7 +132,7 @@
 - name: get client cephx keys
   copy:
-    dest: "{{ item.source }}"
+    dest: "{{ item.source }}"
     content: "{{ item.content | b64decode }}"
     mode: "{{ item.item.get('mode', '0600') }}"
     owner: "{{ ceph_uid }}"
diff --git a/roles/ceph-common/tasks/facts_mon_fsid.yml b/roles/ceph-common/tasks/facts_mon_fsid.yml
index 8b6bc2a4e..5bdbb6d19 100644
--- a/roles/ceph-common/tasks/facts_mon_fsid.yml
+++ b/roles/ceph-common/tasks/facts_mon_fsid.yml
@@ -1,24 +1,24 @@
 ---
-- name: check if /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring already exists
+- name: check if /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring already exists
   stat:
-    path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
+    path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
   register: initial_mon_keyring

-- name: fail if /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring doesn't exist
+- name: fail if /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring doesn't exist
   fail:
-    msg: "/var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring not found"
+    msg: "/var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring not found"
   when:
     - not initial_mon_keyring.stat.exists

 - name: get existing initial mon keyring if it already exists but not monitor_keyring.conf in {{ fetch_directory }}
   shell: |
-    grep key /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring | sed 's/^.*= //'
+    grep key /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring | sed 's/^.*= //'
   register: monitor_keyring
   when:
     - not monitor_keyring_conf.stat.exists

 - name: test existing initial mon keyring
-  command: ceph --connect-timeout 3 --cluster {{ cluster }} --keyring /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring -n mon. fsid
+  command: ceph --connect-timeout 3 --cluster {{ cluster }} --keyring /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring -n mon. fsid
   register: test_initial_monitor_keyring
   ignore_errors: true
diff --git a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
index 73f6e3690..2df847908 100644
--- a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
+++ b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
@@ -6,7 +6,7 @@
     - containerized_deployment

 - name: check if "{{ ceph_nfs_rgw_user }}" exists
-  command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
+  command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
   run_once: true
   register: rgwuser_exists
   changed_when: false
@@ -16,7 +16,7 @@
     - nfs_obj_gw

 - name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
-  command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
+  command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
   run_once: true
   register: rgwuser
   changed_when: false
diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml
index a72d5ab8f..19d00b0a0 100644
--- a/roles/ceph-osd/tasks/openstack_config.yml
+++ b/roles/ceph-osd/tasks/openstack_config.yml
@@ -3,7 +3,7 @@
   shell: >
     test "$({{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" -gt 0 &&
     test "$({{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" =
-    "$({{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_up_osds"])')"
+    "$({{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_up_osds"])')"
   register: wait_for_all_osds_up
   retries: "{{ nb_retry_wait_osd_up }}"
   delay: "{{ delay_wait_osd_up }}"
@@ -83,7 +83,7 @@

 - name: copy to other mons the openstack cephx key(s)
   copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
+    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
     dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
     owner: "{{ ceph_uid }}"
     group: "{{ ceph_uid }}"
diff --git a/roles/ceph-rgw/handlers/main.yml b/roles/ceph-rgw/handlers/main.yml
index f4b6318ca..129078767 100644
--- a/roles/ceph-rgw/handlers/main.yml
+++ b/roles/ceph-rgw/handlers/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: update period
-  command: "{{ docker_exec_cmd }} radosgw-admin --cluster {{ cluster }} period update --commit"
+  command: "{{ docker_exec_cmd }} radosgw-admin --cluster {{ cluster }} period update --commit"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
diff --git a/roles/ceph-validate/tasks/check_ipaddr_mon.yml b/roles/ceph-validate/tasks/check_ipaddr_mon.yml
index e5cb1074f..4e2174a4c 100644
--- a/roles/ceph-validate/tasks/check_ipaddr_mon.yml
+++ b/roles/ceph-validate/tasks/check_ipaddr_mon.yml
@@ -3,4 +3,4 @@
   fail:
     msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
   when:
-    - hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[inventory_hostname]['monitor_address_block']) | length == 0
+    - hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[inventory_hostname]['monitor_address_block']) | length == 0
-- 
2.39.5
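Note, not part of the patch itself: for anyone auditing another branch for the same
problem, here is a minimal sketch of how the offending character can be located and
replaced, assuming GNU grep built with PCRE support (-P) and GNU sed, run from the
repository root:

    # list files containing a UTF-8 NO-BREAK SPACE (bytes 0xC2 0xA0), skipping .git
    LC_ALL=C grep -rlP '\xC2\xA0' --exclude-dir=.git .

    # replace every NBSP with a plain ASCII space, editing the files in place
    LC_ALL=C grep -rlPZ '\xC2\xA0' --exclude-dir=.git . | xargs -0 sed -i 's/\xC2\xA0/ /g'

Re-running the first command afterwards should print nothing, and git diff will then
show only whitespace-level changes like the ones above.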