Since the introduction of `ceph-volume`, there is no need to split these tasks.
Let's refactor this part of the code so it's clearer.
Incidentally, this was breaking rolling_update.yml when running with
`openstack_config: true`, because nothing in the ceph-osd role ensured the OSDs
were started (`openstack_config.yml` contains a check ensuring all OSDs are up,
of the kind sketched below, which was obviously failing), and it resulted in
the OSDs on the last OSD node not being started anyway.
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit f7fcc012e9a5b5d37bcffd39f3062adbc2886006)
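For reference, the check in `openstack_config.yml` that trips over unstarted
OSDs is roughly a task like the one below. This is only a hedged sketch, not
the exact contents of the role: the task name, the `cluster` variable default
and the retry/delay values are assumptions; `ceph osd stat -f json` does expose
`num_osds` and `num_up_osds` fields that such a check can compare.

- name: wait for all osd daemons to be up            # hypothetical task name
  command: "ceph --cluster {{ cluster | default('ceph') }} osd stat -f json"
  register: osd_stat
  changed_when: false
  retries: 30                                        # retry/delay values are illustrative
  delay: 10
  until: >
    (osd_stat.stdout | from_json).num_osds | int > 0 and
    (osd_stat.stdout | from_json).num_osds == (osd_stat.stdout | from_json).num_up_osds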
+++ /dev/null
----
-- name: include start_docker_osd.yml
- include_tasks: start_docker_osd.yml
+++ /dev/null
----
-# For openstack VMs modify the mount point below depending on if the Openstack
-# VM deploy tool defaults to mounting ephemeral disks
-- name: umount ceph disk (if on openstack)
- mount:
- name: /mnt
- src: /dev/vdb
- fstype: ext3
- state: unmounted
- when:
- - ceph_docker_on_openstack
-
-- name: test if the container image has the disk_list function
- command: docker run --rm --entrypoint=stat {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list.sh
- changed_when: false
- failed_when: false
- register: disk_list
- when:
- - osd_scenario != 'lvm'
-
-- name: generate ceph osd docker run script
- become: true
- template:
- src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
- dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
- owner: "root"
- group: "root"
- mode: "0744"
- notify:
- - restart ceph osds
-
-- name: generate systemd unit file
- become: true
- template:
- src: "{{ role_path }}/templates/ceph-osd.service.j2"
- dest: /etc/systemd/system/ceph-osd@.service
- owner: "root"
- group: "root"
- mode: "0644"
- notify:
- - restart ceph osds
-
-- name: collect osd ids
- shell: >
- docker run --rm
- --privileged=true
- -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket
- -v /etc/ceph:/etc/ceph:z
- -v /dev:/dev
- --entrypoint=ceph-volume
- {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
- lvm list --format json | python -c 'import sys, json; print("\n".join(json.load(sys.stdin).keys()))'
- changed_when: false
- failed_when: false
- register: ceph_osd_ids
- when:
- - containerized_deployment
- - osd_scenario == 'lvm'
-
-- name: systemd start osd container
- systemd:
- name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' else item }}
- state: started
- enabled: yes
- daemon_reload: yes
- with_items: "{{ devices if osd_scenario != 'lvm' else ceph_osd_ids.stdout_lines }}"
\ No newline at end of file
- name: include_tasks start_osds.yml
include_tasks: start_osds.yml
- when:
- - not containerized_deployment
- - osd_scenario != 'lvm'
-
-- name: include_tasks docker/main.yml
- include_tasks: docker/main.yml
- when:
- - containerized_deployment
- name: set_fact openstack_keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module
set_fact:
---
-- name: get osd id
- shell: |
- ls /var/lib/ceph/osd/ | sed 's/.*-//'
+- block:
+ # For openstack VMs modify the mount point below depending on if the Openstack
+ # VM deploy tool defaults to mounting ephemeral disks
+ - name: umount ceph disk (if on openstack)
+ mount:
+ name: /mnt
+ src: /dev/vdb
+ fstype: ext3
+ state: unmounted
+ when:
+ - ceph_docker_on_openstack
+
+ - name: generate ceph osd docker run script
+ become: true
+ template:
+ src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
+ dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
+ owner: "root"
+ group: "root"
+ mode: "0744"
+ notify:
+ - restart ceph osds
+ when:
+ - containerized_deployment
+
+- name: set_fact docker_exec_start_osd
+ set_fact:
+ docker_exec_start_osd: "{{ 'docker run --rm --privileged=true -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z -v /dev:/dev --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph-volume' }}"
+
+- name: collect osd ids
+ shell: >
+ {{ docker_exec_start_osd }} lvm list --format json
changed_when: false
failed_when: false
- check_mode: no
- register: osd_id
- until: osd_id.stdout_lines|length == devices|unique|length
- retries: 10
+ register: ceph_osd_ids
+
+- name: generate systemd unit file
+ become: true
+ template:
+ src: "{{ role_path }}/templates/ceph-osd.service.j2"
+ dest: /etc/systemd/system/ceph-osd@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify:
+ - restart ceph osds
when:
- - osd_scenario != 'lvm'
+ - containerized_deployment
+
+- name: systemd start osd
+ systemd:
+ name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' else item }}
+ state: started
+ enabled: yes
+ daemon_reload: yes
+ with_items: "{{ devices if osd_scenario != 'lvm' else (ceph_osd_ids.stdout | from_json).keys() }}"
- name: ensure systemd service override directory exists
file:
config_type: "ini"
when:
- ceph_osd_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
-
-- name: ensure osd daemons are started
- service:
- name: ceph-osd@{{ item }}
- state: started
- enabled: true
- with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
- changed_when: false
+ - ansible_service_mgr == 'systemd'
\ No newline at end of file