#copy_admin_key: false
-####################
-# OSD CRUSH LOCATION
-####################
-
-# /!\
-#
-# BE EXTREMELY CAREFUL WITH THIS OPTION
-# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
-#
-# /!\
-#
-# It is probably best to leave this option set to 'false', as the default
-# suggests. This option should only be used when building a complex
-# CRUSH map. It allows you to force a specific location for a set of OSDs.
-#
-# The following options will build a ceph.conf with OSD sections
-# Example:
-# [osd.X]
-# osd crush location = "root=location"
-#
-# This works together with your inventory file.
-# To match the 'osd_crush_location' option below, the inventory must look like:
-#
-# [osds]
-# osd0 ceph_crush_root=foo ceph_crush_rack=bar
-
-#crush_location: false
-#osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
-
-
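# For illustration only (not part of this change): with the removed options
# enabled and the example inventory above, and assuming osd0 hosts the OSD
# with id 0, the generated configuration would have gained a per-OSD section
# along these lines:
#
#   [osd.0]
#   osd crush location = "root=foo rack=bar host=osd0"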
##############
# CEPH OPTIONS
##############
---
# ceph-common
- block:
- - name: create ceph conf directory and assemble directory
+ - name: create ceph conf directory
file:
- path: "{{ item }}"
+ path: "/etc/ceph"
state: directory
owner: "ceph"
group: "ceph"
mode: "0755"
- with_items:
- - /etc/ceph/
- - /etc/ceph/ceph.d/
- name: template ceph_conf_overrides
action: config_template
args:
src: ceph.conf.j2
- dest: /etc/ceph/ceph.d/{{ cluster }}.conf
+ dest: /etc/ceph/{{ cluster }}.conf
owner: "ceph"
group: "ceph"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides_rendered }}"
config_type: ini
-
- - name: assemble {{ cluster }}.conf and fragments
- assemble:
- src: /etc/ceph/ceph.d/
- dest: /etc/ceph/{{ cluster }}.conf
- regexp: "^(({{cluster}})|(osd)).conf$"
- owner: "ceph"
- group: "ceph"
- mode: "0644"
notify:
- restart ceph mons
- restart ceph osds
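# Illustration only, not part of this change: with config_type "ini", whatever
# is placed under ceph_conf_overrides is merged into the rendered
# {{ cluster }}.conf as extra INI keys. A hypothetical override such as
#
#   ceph_conf_overrides:
#     global:
#       osd pool default size: 3
#     osd:
#       osd max backfills: 1
#
# would surface as additional keys under the [global] and [osd] sections of
# /etc/ceph/{{ cluster }}.conf.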
- not containerized_deployment
# We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
-# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
- - ((crush_location is defined and crush_location) or osd_socket_stat.rc == 0)
+ - osd_socket_stat.rc == 0
- ceph_current_fsid.rc == 0
- handler_health_osd_check
- hostvars[item]['_osd_handler_called'] | default(False)
- osd_group_name in group_names
- containerized_deployment
- ceph_osd_container_stat.get('rc') == 0
- - ((crush_location is defined and crush_location) or ceph_osd_container_stat.get('stdout_lines', [])|length != 0)
+ - inventory_hostname == groups.get(osd_group_name) | last
+ - ceph_osd_container_stat.get('stdout_lines', [])|length != 0
- handler_health_osd_check
- hostvars[item]['_osd_handler_called'] | default(False)
with_items: "{{ groups[osd_group_name] }}"
copy_admin_key: false
-####################
-# OSD CRUSH LOCATION
-####################
-
-# /!\
-#
-# BE EXTREMELY CAREFUL WITH THIS OPTION
-# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
-#
-# /!\
-#
-# It is probably best to leave this option set to 'false', as the default
-# suggests. This option should only be used when building a complex
-# CRUSH map. It allows you to force a specific location for a set of OSDs.
-#
-# The following options will build a ceph.conf with OSD sections
-# Example:
-# [osd.X]
-# osd crush location = "root=location"
-#
-# This works together with your inventory file.
-# To match the 'osd_crush_location' option below, the inventory must look like:
-#
-# [osds]
-# osd0 ceph_crush_root=foo ceph_crush_rack=bar
-
-crush_location: false
-osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
-
-
##############
# CEPH OPTIONS
##############
---
-- name: include osd_fragment.yml
- include: ../osd_fragment.yml
- when:
- - crush_location
-
- name: include start_docker_osd.yml
include: start_docker_osd.yml
+++ /dev/null
----
-- name: get osd path
- shell: "df | grep {{ item }} | awk '{print $6}'"
- with_items: "{{ devices }}"
- changed_when: false
- failed_when: false
- check_mode: no
- register: osd_path
-
-- name: get osd id
- command: cat {{ item.stdout }}/whoami
- with_items: "{{ osd_path.results }}"
- changed_when: false
- failed_when: false
- check_mode: no
- register: osd_id_non_dir_scenario
-
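# For context (illustration only, with a hypothetical /dev/sdb1 device and the
# default cluster name): the two tasks above resolve each device to its mount
# point and then read the OSD id from the whoami file, e.g.
#
#   $ df | grep /dev/sdb1 | awk '{print $6}'
#   /var/lib/ceph/osd/ceph-0
#   $ cat /var/lib/ceph/osd/ceph-0/whoami
#   0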
-# NOTE (leseb): we must do this because of
-# https://github.com/ansible/ansible/issues/4297
-- name: set_fact combined_osd_id
- set_fact:
- combined_osd_id: "{{ osd_id_non_dir_scenario }}"
-
-- name: create a ceph fragment and assemble directory
- file:
- path: "{{ item }}"
- state: directory
- owner: "ceph"
- group: "ceph"
- mode: "0755"
- with_items:
- - /etc/ceph/ceph.d/
- - /etc/ceph/ceph.d/osd_fragments
-
-- name: create the osd fragment
- template:
- src: osd.conf.j2
- dest: /etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
- owner: "ceph"
- group: "ceph"
- mode: "0644"
- with_items: "{{ combined_osd_id.results }}"
-
-- name: copy {{ cluster }}.conf for assembling
- command: cp /etc/ceph/{{ cluster }}.conf /etc/ceph/ceph.d/
- changed_when: false
-
-- name: assemble osd sections
- assemble:
- src: /etc/ceph/ceph.d/osd_fragments/
- dest: /etc/ceph/ceph.d/osd.conf
- owner: "ceph"
- group: "ceph"
- mode: "0644"
-
-- name: assemble {{ cluster }}.conf and osd fragments
- assemble:
- src: /etc/ceph/ceph.d/
- dest: /etc/ceph/{{ cluster }}.conf
- regexp: "^(({{cluster}})|(osd)).conf$"
- owner: "ceph"
- group: "ceph"
- mode: "0644"
---
-- name: include osd_fragment.yml
- include: osd_fragment.yml
- when: crush_location
-
- name: get osd id
shell: |
ls /var/lib/ceph/osd/ | sed 's/.*-//'
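    # e.g. directories named {{ cluster }}-0 and {{ cluster }}-3 yield "0" and "3";
    # the sed expression strips everything up to and including the last "-" so only the OSD id remains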