From: Sébastien Han
Date: Wed, 21 Feb 2018 14:56:32 +0000 (+0100)
Subject: osd: remove old crush_location implementation
X-Git-Tag: v3.1.0beta4~20
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=3261ab23b8b2a31a18144bfd254449e9cb17c610;p=ceph-ansible.git

osd: remove old crush_location implementation

This was causing a lot of pain with the handlers. Also the
implementation was not ideal since we were assembling files.
Everything can now be done with the ceph_crush module so let's
remove that.

Signed-off-by: Sébastien Han
---

diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index eb06253bc..15d2c899a 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -28,36 +28,6 @@ dummy:
 
 #copy_admin_key: false
 
-####################
-# OSD CRUSH LOCATION
-####################
-
-# /!\
-#
-# BE EXTREMELY CAREFUL WITH THIS OPTION
-# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
-#
-# /!\
-#
-# It is probably best to keep this option to 'false' as the default
-# suggests it. This option should only be used while doing some complex
-# CRUSH map. It allows you to force a specific location for a set of OSDs.
-#
-# The following options will build a ceph.conf with OSD sections
-# Example:
-# [osd.X]
-# osd crush location = "root=location"
-#
-# This works with your inventory file
-# To match the following 'osd_crush_location' option the inventory must look like:
-#
-# [osds]
-# osd0 ceph_crush_root=foo ceph_crush_rack=bar
-
-#crush_location: false
-#osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
-
-
 ##############
 # CEPH OPTIONS
 ##############
diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml
index 570db49b0..b9dd05970 100644
--- a/roles/ceph-config/tasks/main.yml
+++ b/roles/ceph-config/tasks/main.yml
@@ -1,16 +1,13 @@
 ---
 # ceph-common
 - block:
-  - name: create ceph conf directory and assemble directory
+  - name: create ceph conf directory
     file:
-      path: "{{ item }}"
+      path: "/etc/ceph"
       state: directory
       owner: "ceph"
       group: "ceph"
       mode: "0755"
-    with_items:
-      - /etc/ceph/
-      - /etc/ceph/ceph.d/
 
   - name: template ceph_conf_overrides
     copy:
@@ -43,21 +40,12 @@
     action: config_template
     args:
       src: ceph.conf.j2
-      dest: /etc/ceph/ceph.d/{{ cluster }}.conf
+      dest: /etc/ceph/{{ cluster }}.conf
       owner: "ceph"
       group: "ceph"
       mode: "0644"
       config_overrides: "{{ ceph_conf_overrides_rendered }}"
       config_type: ini
-
-  - name: assemble {{ cluster }}.conf and fragments
-    assemble:
-      src: /etc/ceph/ceph.d/
-      dest: /etc/ceph/{{ cluster }}.conf
-      regexp: "^(({{cluster}})|(osd)).conf$"
-      owner: "ceph"
-      group: "ceph"
-      mode: "0644"
     notify:
       - restart ceph mons
       - restart ceph osds
diff --git a/roles/ceph-defaults/handlers/main.yml b/roles/ceph-defaults/handlers/main.yml
index 50eb92462..65b227a37 100644
--- a/roles/ceph-defaults/handlers/main.yml
+++ b/roles/ceph-defaults/handlers/main.yml
@@ -83,7 +83,7 @@
       - not containerized_deployment
       # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
       # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
-      - ((crush_location is defined and crush_location) or osd_socket_stat.rc == 0)
+      - osd_socket_stat.rc == 0
       - ceph_current_fsid.rc == 0
       - handler_health_osd_check
       - hostvars[item]['_osd_handler_called'] | default(False)
@@ -100,7 +100,8 @@
       - osd_group_name in group_names
       - containerized_deployment
       - ceph_osd_container_stat.get('rc') == 0
-      - ((crush_location is defined and crush_location) or ceph_osd_container_stat.get('stdout_lines', [])|length != 0)
+      - inventory_hostname == groups.get(osd_group_name) | last
+      - ceph_osd_container_stat.get('stdout_lines', [])|length != 0
       - handler_health_osd_check
       - hostvars[item]['_osd_handler_called'] | default(False)
     with_items: "{{ groups[osd_group_name] }}"
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 411d491cc..a196994bd 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -20,36 +20,6 @@ dmcrypt_dedicated_journal: False # backward compatibility with stable-2.2, will
 
 copy_admin_key: false
 
-####################
-# OSD CRUSH LOCATION
-####################
-
-# /!\
-#
-# BE EXTREMELY CAREFUL WITH THIS OPTION
-# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
-#
-# /!\
-#
-# It is probably best to keep this option to 'false' as the default
-# suggests it. This option should only be used while doing some complex
-# CRUSH map. It allows you to force a specific location for a set of OSDs.
-#
-# The following options will build a ceph.conf with OSD sections
-# Example:
-# [osd.X]
-# osd crush location = "root=location"
-#
-# This works with your inventory file
-# To match the following 'osd_crush_location' option the inventory must look like:
-#
-# [osds]
-# osd0 ceph_crush_root=foo ceph_crush_rack=bar
-
-crush_location: false
-osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
-
-
 ##############
 # CEPH OPTIONS
 ##############
diff --git a/roles/ceph-osd/tasks/docker/main.yml b/roles/ceph-osd/tasks/docker/main.yml
index 02942e64e..fde59c2d8 100644
--- a/roles/ceph-osd/tasks/docker/main.yml
+++ b/roles/ceph-osd/tasks/docker/main.yml
@@ -1,8 +1,3 @@
 ---
-- name: include osd_fragment.yml
-  include: ../osd_fragment.yml
-  when:
-    - crush_location
-
 - name: include start_docker_osd.yml
   include: start_docker_osd.yml
diff --git a/roles/ceph-osd/tasks/osd_fragment.yml b/roles/ceph-osd/tasks/osd_fragment.yml
deleted file mode 100644
index ba781dbc1..000000000
--- a/roles/ceph-osd/tasks/osd_fragment.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- name: get osd path
-  shell: "df | grep {{ item }} | awk '{print $6}'"
-  with_items: "{{ devices }}"
-  changed_when: false
-  failed_when: false
-  check_mode: no
-  register: osd_path
-
-- name: get osd id
-  command: cat {{ item.stdout }}/whoami
-  with_items: "{{ osd_path.results }}"
-  changed_when: false
-  failed_when: false
-  check_mode: no
-  register: osd_id_non_dir_scenario
-
-# NOTE (leseb): we must do this because of
-# https://github.com/ansible/ansible/issues/4297
-- name: set_fact combined_osd_id
-  set_fact:
-    combined_osd_id: "{{ osd_id_non_dir_scenario }}"
-
-- name: create a ceph fragment and assemble directory
-  file:
-    path: "{{ item }}"
-    state: directory
-    owner: "ceph"
-    group: "ceph"
-    mode: "0755"
-  with_items:
-    - /etc/ceph/ceph.d/
-    - /etc/ceph/ceph.d/osd_fragments
-
-- name: create the osd fragment
-  template:
-    src: osd.conf.j2
-    dest: /etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
-    owner: "ceph"
-    group: "ceph"
-    mode: "0644"
-  with_items: "{{ combined_osd_id.results }}"
-
-- name: copy {{ cluster }}.conf for assembling
-  command: cp /etc/ceph/{{ cluster }}.conf /etc/ceph/ceph.d/
-  changed_when: false
-
-- name: assemble osd sections
-  assemble:
-    src: /etc/ceph/ceph.d/osd_fragments/
-    dest: /etc/ceph/ceph.d/osd.conf
-    owner: "ceph"
-    group: "ceph"
-    mode: "0644"
-
-- name: assemble {{ cluster }}.conf and osd fragments
-  assemble:
-    src: /etc/ceph/ceph.d/
-    dest: /etc/ceph/{{ cluster }}.conf
-    regexp: "^(({{cluster}})|(osd)).conf$"
-    owner: "ceph"
-    group: "ceph"
-    mode: "0644"
diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml
index b4d729953..feabd6048 100644
--- a/roles/ceph-osd/tasks/start_osds.yml
+++ b/roles/ceph-osd/tasks/start_osds.yml
@@ -1,8 +1,4 @@
 ---
-- name: include osd_fragment.yml
-  include: osd_fragment.yml
-  when: crush_location
-
 - name: get osd id
   shell: |
     ls /var/lib/ceph/osd/ | sed 's/.*-//'