From d86a159a7914f3214f626cce258af7d7e2d04061 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 17 Nov 2020 10:45:14 +0100 Subject: [PATCH] osd: ensure /var/lib/ceph/osd/{cluster}-{id} is present This commit ensures that the `/var/lib/ceph/osd/{{ cluster }}-{{ osd_id }}` is present before starting OSDs. This is needed specifically when redeploying an OSD in case of OS upgrade failure. Since ceph data are still present on its devices then the node can be redeployed, however those directories aren't present since they are initially created by ceph-volume. We could recreate them manually but for better user experience we can ask ceph-ansible to recreate them. NOTE: this only works for OSDs that were deployed with ceph-volume. ceph-disk deployed OSDs would have to get those directories recreated manually. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1898486 Signed-off-by: Guillaume Abrioux (cherry picked from commit 873fc8ec0ff12fa1d1b45c5400050f15d0417480) --- roles/ceph-osd/tasks/start_osds.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml index b073ac1ba..5397d5841 100644 --- a/roles/ceph-osd/tasks/start_osds.yml +++ b/roles/ceph-osd/tasks/start_osds.yml @@ -51,6 +51,15 @@ - ceph_osd_systemd_overrides is defined - ansible_service_mgr == 'systemd' +- name: ensure "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" is present + file: + state: directory + path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" + mode: "{{ ceph_directories_mode }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + with_items: "{{ ((ceph_osd_ids.stdout | default('{}') | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}" + - name: systemd start osd systemd: name: ceph-osd@{{ item }} -- 2.39.5