From: Sébastien Han Date: Tue, 21 Aug 2018 18:50:31 +0000 (+0200) Subject: defaults: fix rgw_hostname X-Git-Tag: v3.1.0rc21 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=8f9d97d3a1d1a20d460aace18a2fd4fb4d4cbc7f;p=ceph-ansible.git defaults: fix rgw_hostname A couple of things were wrong in the initial commit: * ceph_release_num[ceph_release] >= ceph_release_num['luminous'] will never work since the ceph_release fact is set in the roles after. So either ceph-common or ceph-docker-common set it * we can easily re-use the initial command to check if a cluster is running, it's more elegant than running it twice. * set the fact rgw_hostname on rgw nodes only Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1618678 Signed-off-by: Sébastien Han (cherry picked from commit 6d7fa99ff74b3ec25d1a6010b1ddb25e00c123be) --- diff --git a/roles/ceph-common/tasks/facts_mon_fsid.yml b/roles/ceph-common/tasks/facts_mon_fsid.yml index f1038426e..8b6bc2a4e 100644 --- a/roles/ceph-common/tasks/facts_mon_fsid.yml +++ b/roles/ceph-common/tasks/facts_mon_fsid.yml @@ -9,7 +9,6 @@ msg: "/var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring not found" when: - not initial_mon_keyring.stat.exists - - ceph_current_fsid.rc == 0 - name: get existing initial mon keyring if it already exists but not monitor_keyring.conf in {{ fetch_directory }} shell: | register: monitor_keyring when: - not monitor_keyring_conf.stat.exists - - ceph_current_fsid.rc == 0 - name: test existing initial mon keyring command: ceph --connect-timeout 3 --cluster {{ cluster }} --keyring /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring -n mon. 
fsid diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index 6eea5b546..f49b1903a 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -95,9 +95,9 @@ run_once: true when: - cephx - - not monitor_keyring_conf.stat.exists - - ceph_current_fsid.rc == 0 - mon_group_name in group_names + - not monitor_keyring_conf.stat.exists + - ceph_current_status.fsid is defined - name: include create_rbd_client_dir.yml include: create_rbd_client_dir.yml diff --git a/roles/ceph-defaults/handlers/main.yml b/roles/ceph-defaults/handlers/main.yml index 11e1a16de..691a9693e 100644 --- a/roles/ceph-defaults/handlers/main.yml +++ b/roles/ceph-defaults/handlers/main.yml @@ -89,7 +89,7 @@ # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`) # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified - osd_socket_stat.rc == 0 - - ceph_current_fsid.rc == 0 + - ceph_current_status.fsid is defined - handler_health_osd_check - hostvars[item]['_osd_handler_called'] | default(False) with_items: "{{ groups[osd_group_name] }}" diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml index d4f347677..7c8dda971 100644 --- a/roles/ceph-defaults/tasks/facts.yml +++ b/roles/ceph-defaults/tasks/facts.yml @@ -32,11 +32,11 @@ # because it blindly picks a mon, which may be down because # of the rolling update - name: is ceph running already? 
- command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} fsid" + command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json" changed_when: false failed_when: false check_mode: no - register: ceph_current_fsid + register: ceph_current_status run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" when: @@ -54,9 +54,9 @@ # set this as a default when performing a rolling_update # so the rest of the tasks here will succeed -- name: set_fact ceph_current_fsid rc 1 +- name: set_fact ceph_current_status rc 1 set_fact: - ceph_current_fsid: + ceph_current_status: rc: 1 when: - rolling_update or groups.get(mon_group_name, []) | length == 0 @@ -71,11 +71,18 @@ when: - (cephx or generate_fsid) -- name: set_fact fsid ceph_current_fsid.stdout +- name: set_fact ceph_current_status (convert to json) set_fact: - fsid: "{{ ceph_current_fsid.stdout }}" + ceph_current_status: "{{ ceph_current_status.stdout | from_json }}" when: - - ceph_current_fsid.get('rc', 1) == 0 + - not rolling_update + - ceph_current_status.rc == 0 + +- name: set_fact fsid from ceph_current_status + set_fact: + fsid: "{{ ceph_current_status.fsid }}" + when: + - ceph_current_status.fsid is defined # Set ceph_release to ceph_stable by default - name: set_fact ceph_release ceph_stable_release @@ -91,7 +98,7 @@ become: false when: - generate_fsid - - ceph_current_fsid.rc != 0 + - ceph_current_status.fsid is undefined - name: reuse cluster fsid when cluster is already running local_action: @@ -100,7 +107,7 @@ creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf" become: false when: - - ceph_current_fsid.get('rc', 1) == 0 + - ceph_current_status.fsid is defined - name: read cluster fsid if it already exists local_action: @@ -215,36 +222,19 @@ - containerized_deployment - ceph_docker_image | search("rhceph") -- block: - - name: get current cluster status (if already running) - command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json" - changed_when: false - 
failed_when: false - check_mode: no - run_once: true - delegate_to: "{{ groups[mon_group_name][0] }}" - when: - - not rolling_update - - groups.get(mon_group_name, []) | length > 0 - register: ceph_current_status - - - name: set_fact ceph_current_status (convert to json) - set_fact: - ceph_current_status: "{{ ceph_current_status.stdout | from_json }}" - - - name: set_fact rgw_hostname - set_fact: - rgw_hostname: "{% for key in ceph_current_status['servicemap']['services']['rgw']['daemons'].keys() %}{% if key == ansible_fqdn %}{{ key }}{% endif %}{% endfor %}" - when: ceph_current_status['servicemap']['services']['rgw'] is defined - when: - - ceph_current_fsid.get('rc', 1) == 0 - - inventory_hostname in groups.get(rgw_group_name, []) - # no servicemap before luminous - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] +- name: set_fact rgw_hostname - fqdn + set_fact: + rgw_hostname: "{% for key in ceph_current_status['servicemap']['services']['rgw']['daemons'].keys() %}{% if key == ansible_fqdn %}{{ key }}{% endif %}{% endfor %}" + when: + - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, []) + - ceph_current_status['servicemap'] is defined + - ceph_current_status['servicemap']['services'] is defined + - ceph_current_status['servicemap']['services']['rgw'] is defined # that's the way to cover ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - ansible_hostname != ansible_fqdn -- name: set_fact rgw_hostname +- name: set_fact rgw_hostname - no fqdn set_fact: rgw_hostname: "{{ ansible_hostname }}" when: + - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, []) - rgw_hostname is undefined \ No newline at end of file