git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Add playbook for converting cluster to cephadm (5475/head)
author    Dimitri Savineau <dsavinea@redhat.com>
          Thu, 9 Apr 2020 21:50:54 +0000 (17:50 -0400)
committer Dimitri Savineau <savineau.dimitri@gmail.com>
          Mon, 29 Jun 2020 13:21:38 +0000 (09:21 -0400)
The commit adds a new playbook for converting an existing ceph cluster
deployed by ceph-ansible to the cephadm orchestrator.
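
The playbook can be run like any other infrastructure playbook, for example
(the inventory path shown here is illustrative, not part of this change):

    ansible-playbook -i hosts infrastructure-playbooks/cephadm-adopt.yml \
        -e ireallymeanit=yes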

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
infrastructure-playbooks/cephadm-adopt.yml [new file with mode: 0644]

diff --git a/infrastructure-playbooks/cephadm-adopt.yml b/infrastructure-playbooks/cephadm-adopt.yml
new file mode 100644 (file)
index 0000000..4e05254
--- /dev/null
@@ -0,0 +1,847 @@
+---
+#
+# This playbook does a cephadm adopt for all the Ceph services
+#
+
+- name: confirm whether user really meant to adopt the cluster with cephadm
+  hosts: localhost
+  connection: local
+  become: false
+  gather_facts: false
+  vars_prompt:
+    - name: ireallymeanit
+      prompt: Are you sure you want to adopt the cluster with cephadm?
+      default: 'no'
+      private: false
+  tasks:
+    - name: exit playbook, if user did not mean to adopt the cluster with cephadm
+      fail:
+        msg: >
+          Exiting cephadm-adopt playbook, cluster was NOT adopted.
+          To adopt the cluster, either say 'yes' at the prompt or
+          use `-e ireallymeanit=yes` on the command line when
+          invoking the playbook.
+      when: ireallymeanit != 'yes'
+
+- name: gather facts and prepare system for cephadm
+  hosts:
+    - "{{ mon_group_name|default('mons') }}"
+    - "{{ osd_group_name|default('osds') }}"
+    - "{{ mds_group_name|default('mdss') }}"
+    - "{{ rgw_group_name|default('rgws') }}"
+    - "{{ mgr_group_name|default('mgrs') }}"
+    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
+    - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+    - "{{ grafana_server_group_name|default('grafana-server') }}"
+  become: true
+  gather_facts: false
+  vars:
+    delegate_facts_host: true
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: gather facts
+      setup:
+      when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
+
+    - name: gather and delegate facts
+      setup:
+      delegate_to: "{{ item }}"
+      delegate_facts: true
+      with_items: "{{ groups['all'] | difference(groups.get(client_group_name, [])) }}"
+      run_once: true
+      when: delegate_facts_host | bool
+
+    - name: fail if any osd node is using filestore
+      fail:
+        msg: >
+          filestore OSDs are not supported with cephadm.
+          Please convert them with the filestore-to-bluestore.yml playbook first.
+      when:
+        - osd_group_name in group_names
+        - osd_objectstore == 'filestore'
+
+    - import_role:
+        name: ceph-facts
+        tasks_from: container_binary.yml
+
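+    # Query the version through the container image when containerized,
+    # otherwise use the ceph binary installed on the host.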
+    - name: get the ceph version
+      command: "{{ container_binary + ' run --rm --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --version"
+      changed_when: false
+      register: ceph_version_out
+
+    - name: set_fact ceph_version
+      set_fact:
+        ceph_version: "{{ ceph_version_out.stdout.split(' ')[2] }}"
+
+    - name: fail on pre-octopus ceph releases
+      fail:
+        msg: >
+          Your Ceph version {{ ceph_version }} is not supported for this operation.
+          Please upgrade your cluster with the rolling_update.yml playbook first.
+      when: ceph_version is version('15.2', '<')
+
+    - name: check if it is an atomic host
+      stat:
+        path: /run/ostree-booted
+      register: stat_ostree
+
+    - name: set_fact is_atomic
+      set_fact:
+        is_atomic: "{{ stat_ostree.stat.exists }}"
+
+    - import_role:
+        name: ceph-container-engine
+      when: not containerized_deployment | bool
+
+    - import_role:
+        name: ceph-container-common
+        tasks_from: registry.yml
+      when:
+        - not containerized_deployment | bool
+        - ceph_docker_registry_auth | bool
+
+    - name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
+      command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+      changed_when: false
+      register: docker_image
+      until: docker_image.rc == 0
+      retries: "{{ docker_pull_retry }}"
+      delay: 10
+      when:
+        - not containerized_deployment | bool
+        - inventory_hostname in groups.get(mon_group_name, []) or
+          inventory_hostname in groups.get(osd_group_name, []) or
+          inventory_hostname in groups.get(mds_group_name, []) or
+          inventory_hostname in groups.get(rgw_group_name, []) or
+          inventory_hostname in groups.get(mgr_group_name, []) or
+          inventory_hostname in groups.get(rbdmirror_group_name, []) or
+          inventory_hostname in groups.get(iscsi_gw_group_name, []) or
+          inventory_hostname in groups.get(nfs_group_name, [])
+
+    - name: install cephadm requirements
+      package:
+        name: ['python3', 'lvm2']
+      register: result
+      until: result is succeeded
+
+    - name: install cephadm
+      package:
+        name: cephadm
+      register: result
+      until: result is succeeded
+      when: not containerized_deployment | bool
+
+    - name: install cephadm mgr module
+      package:
+        name: ceph-mgr-cephadm
+      register: result
+      until: result is succeeded
+      when:
+        - not containerized_deployment | bool
+        - mgr_group_name in group_names
+
+    - name: get cephadm from the container image
+      when: containerized_deployment | bool
+      block:
+        - name: create a cephadm container
+          command: "{{ container_binary }} create --name cephadm {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+          changed_when: false
+
+        - name: copy the cephadm cli file
+          command: "{{ container_binary }} cp cephadm:/usr/sbin/cephadm /usr/sbin/cephadm"
+          args:
+            creates: /usr/sbin/cephadm
+
+        - name: remove the cephadm container
+          command: "{{ container_binary }} rm cephadm"
+          changed_when: false
+
+    - name: set_fact container_exec_cmd
+      set_fact:
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+      when: containerized_deployment | bool
+
+    - name: get current fsid
+      command: "{{ container_exec_cmd | default('') }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}.asok config get fsid --format json"
+      register: current_fsid
+      run_once: true
+      changed_when: false
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+
+    - name: set_fact fsid
+      set_fact:
+        fsid: "{{ (current_fsid.stdout | from_json).fsid }}"
+
+    - name: enable cephadm mgr module
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr module enable cephadm"
+      changed_when: false
+      run_once: true
+      delegate_to: '{{ groups[mon_group_name][0] }}'
+
+    - name: set cephadm as orchestrator backend
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} orch set backend cephadm"
+      changed_when: false
+      run_once: true
+      delegate_to: '{{ groups[mon_group_name][0] }}'
+
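+    # cephadm manages the hosts over ssh as root: generate the cluster ssh
+    # key pair and authorize its public key on every host in the play.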
+    - name: generate cephadm ssh key
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} cephadm generate-key"
+      changed_when: false
+      run_once: true
+      delegate_to: '{{ groups[mon_group_name][0] }}'
+
+    - name: get the cephadm ssh pub key
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} cephadm get-pub-key"
+      changed_when: false
+      run_once: true
+      register: cephadm_pubpkey
+      delegate_to: '{{ groups[mon_group_name][0] }}'
+
+    - name: allow cephadm key for root account
+      authorized_key:
+        user: root
+        key: '{{ cephadm_pubpkey.stdout }}'
+
+    - name: run cephadm prepare-host
+      command: cephadm prepare-host
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: set default container image in ceph configuration
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+      changed_when: false
+      run_once: true
+      delegate_to: '{{ groups[mon_group_name][0] }}'
+
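+    # Register each host with the orchestrator; the host's ansible group
+    # names are passed as host labels and reused later for placement specs.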
+    - name: manage nodes with cephadm
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} orch host add {{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_default_ipv4']['address'] }} {{ hostvars[item]['group_names'] | join(' ') }}"
+      changed_when: false
+      run_once: true
+      loop: '{{ ansible_play_hosts_all }}'
+      delegate_to: '{{ groups[mon_group_name][0] }}'
+
+    - name: add ceph label for core components
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} orch host label add {{ hostvars[item]['ansible_hostname'] }} ceph"
+      changed_when: false
+      run_once: true
+      loop: '{{ ansible_play_hosts_all }}'
+      delegate_to: '{{ groups[mon_group_name][0] }}'
+      when: item in groups.get(mon_group_name, []) or
+            item in groups.get(osd_group_name, []) or
+            item in groups.get(mds_group_name, []) or
+            item in groups.get(rgw_group_name, []) or
+            item in groups.get(mgr_group_name, []) or
+            item in groups.get(rbdmirror_group_name, [])
+
+    - name: set_fact ceph_cmd
+      set_fact:
+        ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
+
+    - name: assimilate ceph configuration
+      command: "{{ ceph_cmd }} --cluster {{ cluster }} config assimilate-conf -i /etc/ceph/{{ cluster }}.conf"
+      changed_when: false
+      when: inventory_hostname in groups.get(mon_group_name, []) or
+            inventory_hostname in groups.get(osd_group_name, []) or
+            inventory_hostname in groups.get(mds_group_name, []) or
+            inventory_hostname in groups.get(rgw_group_name, []) or
+            inventory_hostname in groups.get(mgr_group_name, []) or
+            inventory_hostname in groups.get(rbdmirror_group_name, [])
+
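+# Monitors are adopted one node at a time (serial: 1) so that the cluster
+# keeps quorum for the whole conversion.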
+- name: adopt ceph mon daemons
+  hosts: "{{ mon_group_name|default('mons') }}"
+  serial: 1
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: adopt mon daemon
+      command: "cephadm adopt --cluster {{ cluster }} --skip-pull --style legacy --name mon.{{ ansible_hostname }} {{ '--skip-firewalld' if not configure_firewall | bool else '' }} {{ '--docker' if container_binary == 'docker' else '' }}"
+      args:
+        creates: '/var/lib/ceph/{{ fsid }}/mon.{{ ansible_hostname }}/unit.run'
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: reset failed ceph-mon systemd unit
+      command: 'systemctl reset-failed ceph-mon@{{ ansible_hostname }}'
+      changed_when: false
+      when: containerized_deployment | bool
+
+    - name: remove ceph-mon systemd unit file
+      file:
+        path: /etc/systemd/system/ceph-mon@.service
+        state: absent
+      when: containerized_deployment | bool
+
+    - name: remove ceph-mon systemd override directory
+      file:
+        path: /etc/systemd/system/ceph-mon@.service.d
+        state: absent
+      when: not containerized_deployment | bool
+
+- name: adopt ceph mgr daemons
+  hosts: "{{ mgr_group_name|default('mgrs') }}"
+  serial: 1
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: adopt mgr daemon
+      command: "cephadm adopt --cluster {{ cluster }} --skip-pull --style legacy --name mgr.{{ ansible_hostname }} {{ '--skip-firewalld' if not configure_firewall | bool else '' }} {{ '--docker' if container_binary == 'docker' else '' }}"
+      args:
+        creates: '/var/lib/ceph/{{ fsid }}/mgr.{{ ansible_hostname }}/unit.run'
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: reset failed ceph-mgr systemd unit
+      command: 'systemctl reset-failed ceph-mgr@{{ ansible_hostname }}'
+      changed_when: false
+      when: containerized_deployment | bool
+
+    - name: remove ceph-mgr systemd unit file
+      file:
+        path: /etc/systemd/system/ceph-mgr@.service
+        state: absent
+      when: containerized_deployment | bool
+
+    - name: remove ceph-mgr systemd override directory
+      file:
+        path: /etc/systemd/system/ceph-mgr@.service.d
+        state: absent
+      when: not containerized_deployment | bool
+
+- name: adopt ceph osd daemons
+  hosts: "{{ osd_group_name|default('osd') }}"
+  serial: 1
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - import_role:
+        name: ceph-facts
+        tasks_from: container_binary.yml
+      when: containerized_deployment | bool
+
+    - name: set_fact ceph_volume
+      set_fact:
+        ceph_volume: "{{ container_binary + ' run --rm --privileged=true --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run:/var/run --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph-volume' }}"
+
+    - name: get osd list
+      command: "{{ ceph_volume }} --cluster {{ cluster }} lvm list --format json"
+      changed_when: false
+      register: osd_list
+
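+    # (workaround) in legacy containerized deployments the osd fsid and type
+    # files live only inside the container, while `cephadm adopt` expects
+    # them under /var/lib/ceph/osd/ on the host, so recreate them first.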
+    - name: set osd fsid for containerized deployment
+      lineinfile:
+        path: '/var/lib/ceph/osd/{{ cluster }}-{{ item.key }}/fsid'
+        line: "{{ (item.value | selectattr('type', 'equalto', 'block') | map(attribute='tags') | first)['ceph.osd_fsid'] }}"
+        owner: '{{ ceph_uid }}'
+        group: '{{ ceph_uid }}'
+        create: true
+      with_dict: '{{ osd_list.stdout | from_json }}'
+      when: containerized_deployment | bool
+
+    - name: set osd type for containerized deployment
+      lineinfile:
+        path: '/var/lib/ceph/osd/{{ cluster }}-{{ item }}/type'
+        line: 'bluestore'
+        owner: '{{ ceph_uid }}'
+        group: '{{ ceph_uid }}'
+        create: true
+      loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
+      when: containerized_deployment | bool
+
+    - name: adopt osd daemon
+      command: "cephadm adopt --cluster {{ cluster }} --skip-pull --style legacy --name osd.{{ item }} {{ '--skip-firewalld' if not configure_firewall | bool else '' }} {{ '--docker' if container_binary == 'docker' else '' }}"
+      loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
+      args:
+        creates: '/var/lib/ceph/{{ fsid }}/osd.{{ item }}/unit.run'
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: remove ceph-osd systemd unit and ceph-osd-run.sh files
+      file:
+        path: '{{ item }}'
+        state: absent
+      loop:
+        - /etc/systemd/system/ceph-osd@.service
+        - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"
+      when: containerized_deployment | bool
+
+    - name: remove ceph-osd systemd override directory
+      file:
+        path: /etc/systemd/system/ceph-osd@.service.d
+        state: absent
+      when: not containerized_deployment | bool
+
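+# mds daemons are redeployed rather than adopted in place: the orchestrator
+# schedules new daemons first, then the legacy ones are stopped and removed.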
+- name: redeploy mds daemons
+  hosts: "{{ mds_group_name|default('mdss') }}"
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: update the placement of metadata hosts
+      command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply mds {{ cephfs }} '{{ groups.get(mds_group_name, []) | length }} label:{{ mds_group_name }}'"
+      run_once: true
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: stop and remove legacy ceph mds daemons
+  hosts: "{{ mds_group_name|default('mdss') }}"
+  serial: 1
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: stop and disable ceph-mds systemd service
+      service:
+        name: 'ceph-mds@{{ ansible_hostname }}'
+        state: stopped
+        enabled: false
+
+    - name: stop and disable ceph-mds systemd target
+      service:
+        name: ceph-mds.target
+        state: stopped
+        enabled: false
+      when: not containerized_deployment | bool
+
+    - name: reset failed ceph-mds systemd unit
+      command: 'systemctl reset-failed ceph-mds@{{ ansible_hostname }}'
+      changed_when: false
+      when: containerized_deployment | bool
+
+    - name: remove ceph-mds systemd unit file
+      file:
+        path: /etc/systemd/system/ceph-mds@.service
+        state: absent
+      when: containerized_deployment | bool
+
+    - name: remove ceph-mds systemd override directory
+      file:
+        path: /etc/systemd/system/ceph-mds@.service.d
+        state: absent
+      when: not containerized_deployment | bool
+
+    - name: remove legacy ceph mds data
+      file:
+        path: '/var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}'
+        state: absent
+
+- name: rgw realm/zonegroup/zone requirements
+  hosts: "{{ rgw_group_name|default('rgws') }}"
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: for non-multisite setup
+      when: not rgw_multisite | bool
+      run_once: true
+      block:
+        - name: create a default realm
+          command: "cephadm shell --fsid {{ fsid }} -- radosgw-admin --cluster {{ cluster }} realm create --rgw-realm=default --default"
+          run_once: true
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+        - name: modify the default zonegroup
+          command: "cephadm shell --fsid {{ fsid }} -- radosgw-admin --cluster {{ cluster }} zonegroup modify --rgw-realm=default --rgw-zonegroup=default"
+          run_once: true
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+        - name: modify the default zone
+          command: "cephadm shell --fsid {{ fsid }} -- radosgw-admin --cluster {{ cluster }} zone modify --rgw-realm=default --rgw-zonegroup=default --rgw-zone=default"
+          run_once: true
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+        - name: commit the period
+          command: "cephadm shell --fsid {{ fsid }} -- radosgw-admin --cluster {{ cluster }} period update --commit"
+          run_once: true
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: redeploy rgw daemons
+  hosts: "{{ rgw_group_name|default('rgws') }}"
+  serial: 1
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - import_role:
+        name: ceph-facts
+        tasks_from: set_radosgw_address.yml
+
+    - name: update the placement of radosgw hosts
+      command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply rgw {{ item.rgw_realm | default('default') }} {{ item.rgw_zone | default('default') }} 1 {{ ansible_hostname }} --port {{ item.radosgw_frontend_port }} {{ '--ssl' if radosgw_frontend_ssl_certificate else '' }}"
+      changed_when: false
+      loop: '{{ rgw_instances }}'
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: stop and disable ceph-radosgw systemd service
+      service:
+        name: 'ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
+        state: stopped
+        enabled: false
+      loop: '{{ rgw_instances }}'
+
+    - name: stop and disable ceph-radosgw systemd target
+      service:
+        name: ceph-rgw.target
+        state: stopped
+        enabled: false
+      when: not containerized_deployment | bool
+
+    - name: reset failed ceph-radosgw systemd unit
+      command: 'systemctl reset-failed ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
+      changed_when: false
+      loop: '{{ rgw_instances }}'
+      when: containerized_deployment | bool
+
+    - name: remove ceph-radosgw systemd unit file
+      file:
+        path: /etc/systemd/system/ceph-radosgw@.service
+        state: absent
+      when: containerized_deployment | bool
+
+    - name: remove ceph-radosgw systemd override directory
+      file:
+        path: /etc/systemd/system/ceph-radosgw@.service.d
+        state: absent
+      when: not containerized_deployment | bool
+
+    - name: remove legacy ceph radosgw data
+      file:
+        path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
+        state: absent
+      loop: '{{ rgw_instances }}'
+
+    - name: remove legacy ceph radosgw directory
+      file:
+        path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}'
+        state: absent
+
+- name: redeploy rbd-mirror daemons
+  hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: update the placement of rbd-mirror hosts
+      command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply rbd-mirror '{{ groups.get(rbdmirror_group_name, []) | length }} label:{{ rbdmirror_group_name }}'"
+      run_once: true
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: stop and remove legacy rbd-mirror daemons
+  hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+  serial: 1
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: stop and disable rbd-mirror systemd service
+      service:
+        name: 'ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}'
+        state: stopped
+        enabled: false
+
+    - name: stop and disable rbd-mirror systemd target
+      service:
+        name: ceph-rbd-mirror.target
+        state: stopped
+        enabled: false
+      when: not containerized_deployment | bool
+
+    - name: reset failed rbd-mirror systemd unit
+      command: 'systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}'
+      changed_when: false
+      when: containerized_deployment | bool
+
+    - name: remove rbd-mirror systemd unit file
+      file:
+        path: /etc/systemd/system/ceph-rbd-mirror@.service
+        state: absent
+      when: containerized_deployment | bool
+
+    - name: remove rbd-mirror systemd override directory
+      file:
+        path: /etc/systemd/system/ceph-rbd-mirror@.service.d
+        state: absent
+      when: not containerized_deployment | bool
+
+- name: redeploy ceph-crash daemons
+  hosts:
+    - "{{ mon_group_name|default('mons') }}"
+    - "{{ osd_group_name|default('osds') }}"
+    - "{{ mds_group_name|default('mdss') }}"
+    - "{{ rgw_group_name|default('rgws') }}"
+    - "{{ mgr_group_name|default('mgrs') }}"
+    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: stop and disable ceph-crash systemd service
+      service:
+        name: ceph-crash
+        state: stopped
+        enabled: false
+      failed_when: false
+      when: not containerized_deployment | bool
+
+    - name: update the placement of ceph-crash hosts
+      command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply crash 'label:ceph'"
+      run_once: true
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: redeploy alertmanager/grafana/prometheus daemons
+  hosts: "{{ grafana_server_group_name|default('grafana-server') }}"
+  serial: 1
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: with dashboard enabled
+      when: dashboard_enabled | bool
+      block:
+        # (workaround) cephadm adopt alertmanager only stops the prometheus-alertmanager systemd service
+        - name: stop and disable alertmanager systemd unit
+          service:
+            name: alertmanager
+            state: stopped
+            enabled: false
+
+        # (workaround) cephadm adopt alertmanager only uses /etc/prometheus/alertmanager.yml
+        - name: create alertmanager config symlink
+          file:
+            path: /etc/prometheus/alertmanager.yml
+            src: '{{ alertmanager_conf_dir }}/alertmanager.yml'
+            state: link
+
+        # (workaround) cephadm adopt alertmanager only uses /var/lib/prometheus/alertmanager/
+        - name: create alertmanager data symlink
+          file:
+            path: '{{ prometheus_data_dir }}/alertmanager'
+            src: '{{ alertmanager_data_dir }}'
+            state: link
+
+        - name: adopt alertmanager daemon
+          command: "cephadm adopt --cluster {{ cluster }} --skip-pull --style legacy --name alertmanager.{{ ansible_hostname }} {{ '--skip-firewalld' if not configure_firewall | bool else '' }} {{ '--docker' if container_binary == 'docker' else '' }}"
+          args:
+            creates: '/var/lib/ceph/{{ fsid }}/alertmanager.{{ ansible_hostname }}/unit.run'
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+        - name: remove alertmanager systemd unit file
+          file:
+            path: /etc/systemd/system/alertmanager.service
+            state: absent
+
+        - name: remove the legacy alertmanager data
+          file:
+            path: '{{ alertmanager_data_dir }}'
+            state: absent
+
+        - name: stop and disable prometheus systemd unit
+          service:
+            name: prometheus
+            state: stopped
+            enabled: false
+
+        - name: remove alertmanager data symlink
+          file:
+            path: '{{ prometheus_data_dir }}/alertmanager'
+            state: absent
+
+        # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/
+        - name: make a temporary copy of the prometheus data
+          copy:
+            src: '{{ prometheus_data_dir }}/'
+            dest: /var/lib/prom_metrics
+            owner: 65534
+            group: 65534
+            remote_src: true
+
+        # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/
+        - name: restore the prometheus data
+          copy:
+            src: /var/lib/prom_metrics/
+            dest: /var/lib/prometheus/metrics
+            owner: 65534
+            group: 65534
+            remote_src: true
+
+        - name: remove the temporary prometheus data copy
+          file:
+            path: /var/lib/prom_metrics
+            state: absent
+
+        # (workaround) https://tracker.ceph.com/issues/45120
+        - name: create missing prometheus target directory
+          file:
+            path: '/var/lib/ceph/{{ fsid }}/prometheus.{{ ansible_hostname }}/etc/prometheus'
+            state: directory
+            owner: 65534
+            group: 65534
+            recurse: true
+
+        - name: adopt prometheus daemon
+          command: "cephadm adopt --cluster {{ cluster }} --skip-pull --style legacy --name prometheus.{{ ansible_hostname }} {{ '--skip-firewalld' if not configure_firewall | bool else '' }} {{ '--docker' if container_binary == 'docker' else '' }}"
+          args:
+            creates: '/var/lib/ceph/{{ fsid }}/prometheus.{{ ansible_hostname }}/unit.run'
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+        - name: remove prometheus systemd unit file
+          file:
+            path: /etc/systemd/system/prometheus.service
+            state: absent
+
+        - name: remove the legacy prometheus data
+          file:
+            path: '{{ prometheus_data_dir }}'
+            state: absent
+
+        # (workaround) cephadm adopt grafana only stops grafana systemd service
+        - name: stop and disable grafana systemd unit
+          service:
+            name: grafana-server
+            state: stopped
+            enabled: false
+
+        - name: adopt grafana daemon
+          command: "cephadm adopt --cluster {{ cluster }} --skip-pull --style legacy --name grafana.{{ ansible_hostname }} {{ '--skip-firewalld' if not configure_firewall | bool else '' }} {{ '--docker' if container_binary == 'docker' else '' }}"
+          args:
+            creates: '/var/lib/ceph/{{ fsid }}/grafana.{{ ansible_hostname }}/unit.run'
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+        - name: remove grafana systemd unit file
+          file:
+            path: /etc/systemd/system/grafana-server.service
+            state: absent
+
+        - name: remove the legacy grafana data
+          file:
+            path: /var/lib/grafana
+            state: absent
+
+- name: redeploy node-exporter daemons
+  hosts:
+    - "{{ mon_group_name|default('mons') }}"
+    - "{{ osd_group_name|default('osds') }}"
+    - "{{ mds_group_name|default('mdss') }}"
+    - "{{ rgw_group_name|default('rgws') }}"
+    - "{{ mgr_group_name|default('mgrs') }}"
+    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
+    - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+    - "{{ grafana_server_group_name|default('grafana-server') }}"
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: with dashboard enabled
+      when: dashboard_enabled | bool
+      block:
+        - name: stop and disable node-exporter systemd service
+          service:
+            name: node_exporter
+            state: stopped
+            enabled: false
+
+        - name: remove node_exporter systemd unit file
+          file:
+            path: /etc/systemd/system/node_exporter.service
+            state: absent
+
+        - name: update the placement of node-exporter hosts
+          command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply node-exporter '*'"
+          run_once: true
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
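+# Pin each service to its current number of hosts, constrained to the label
+# matching its ansible group, so daemons stay where ceph-ansible placed them.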
+- name: adjust placement daemons
+  hosts: "{{ mon_group_name|default('mons') }}"
+  become: true
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: update the placement of monitor hosts
+      command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply mon '{{ groups.get(mon_group_name, []) | length }} label:{{ mon_group_name }}'"
+      run_once: true
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: update the placement of manager hosts
+      command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply mgr '{{ groups.get(mgr_group_name, []) | length }} label:{{ mgr_group_name }}'"
+      run_once: true
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: with dashboard enabled
+      when: dashboard_enabled | bool
+      block:
+        - name: update the placement of alertmanager hosts
+          command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply alertmanager '{{ groups.get(grafana_server_group_name, []) | length }} label:{{ grafana_server_group_name }}'"
+          run_once: true
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+        - name: update the placement of grafana hosts
+          command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply grafana '{{ groups.get(grafana_server_group_name, []) | length }} label:{{ grafana_server_group_name }}'"
+          run_once: true
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+        - name: update the placement of prometheus hosts
+          command: "cephadm shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply prometheus '{{ groups.get(grafana_server_group_name, []) | length }} label:{{ grafana_server_group_name }}'"
+          run_once: true
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'