      when:
        - not containerized_deployment
+    - name: stop ceph mgr
+      systemd:
+        name: ceph-mgr@{{ ansible_hostname }}
+        state: stopped
+        enabled: yes
+      ignore_errors: True # if no mgr collocated with mons
+      when:
+        - not containerized_deployment
+
    - import_role:
        name: ceph-defaults
        private: false
    - import_role:
        name: ceph-mon
        private: false
+    - import_role:
+        name: ceph-mgr
+        private: false
+      when: groups.get(mgr_group_name, []) | length == 0
    - name: start ceph mon
      systemd:
      when:
        - not containerized_deployment
+    - name: start ceph mgr
+      systemd:
+        name: ceph-mgr@{{ ansible_hostname }}
+        state: started
+        enabled: yes
+      ignore_errors: True # if no mgr collocated with mons
+      when:
+        - not containerized_deployment
+
    - name: restart containerized ceph mon
      systemd:
        name: ceph-mon@{{ monitor_name }}
      when:
        - containerized_deployment
+    - name: restart containerized ceph mgr
+      systemd:
+        name: ceph-mgr@{{ monitor_name }}
+        state: restarted
+        enabled: yes
+        daemon_reload: yes
+      ignore_errors: True # if no mgr collocated with mons
+      when:
+        - containerized_deployment
+
    - name: set mon_host_count
      set_fact: mon_host_count={{ groups[mon_group_name] | length }}
    secret: "{{ (mgr_secret != 'mgr_secret') | ternary(mgr_secret, omit) }}"
  when:
    - cephx
-    - groups.get(mgr_group_name, []) | length > 0
    - inventory_hostname == groups[mon_group_name]|last
-  with_items: "{{ groups.get(mgr_group_name, []) }}"
+  with_items:
+    - "{{ groups.get(mgr_group_name, []) }}" # this honors the condition where mgrs run on separate machines
+    - "{{ groups.get(mon_group_name, []) }}" # this honors the new rule where mgrs are always collocated with mons
# once this gets backported github.com/ceph/ceph/pull/20983
# we will be able to remove these 2 tasks below
  when:
    - cephx
    - containerized_deployment
-    - groups.get(mgr_group_name, []) | length > 0
-  with_items: "{{ groups.get(mgr_group_name, []) }}"
+  with_items:
+    - "{{ groups.get(mgr_group_name, []) }}" # this honors the condition where mgrs run on separate machines
+    - "{{ groups.get(mon_group_name, []) }}" # this honors the new rule where mgrs are always collocated with mons
- name: fetch ceph mgr key(s)
  fetch:
    - import_role:
        name: ceph-mon
        private: false
+    - import_role:
+        name: ceph-mgr
+        private: false
+
  serial: 1 # MUST be '1' WHEN DEPLOYING MONITORS ON DOCKER CONTAINERS
- hosts: mons
    - import_role:
        name: ceph-mon
        private: false
+    - import_role:
+        name: ceph-mgr
+        private: false
    # post-tasks for preceding imports -
    - name: set ceph monitor install 'Complete'
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
osd1
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
osd1
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
ceph-mon1 monitor_interface=eth1
ceph-mon2 monitor_address=192.168.1.12
-[mgrs]
-ceph-mgr0
-
[osds]
ceph-osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd0' }"
ceph-osd1 osd_crush_location="{ 'root': 'default', 'host': 'ceph-osd1' }"
rbd_mirror_vms: 1
client_vms: 2
iscsi_gw_vms: 1
-mgr_vms: 1
+mgr_vms: 0
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
rgw0
mds0
-[mgrs]
-mon0
-osd0
-
[rbdmirrors]
rgw0
mds0
mon1
mon2
-[mgrs]
-mgr0
-
[osds]
osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
rbd_mirror_vms: 1
client_vms: 2
iscsi_gw_vms: 1
-mgr_vms: 1
+mgr_vms: 0
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.17
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
[mons]
ceph-mon0 monitor_address=192.168.71.10
-[mgrs]
-ceph-mon0
-
[osds]
ceph-osd0
ceph-osd1
[mons]
mon0
-[mgrs]
-mon0
-
[osds]
osd0
osd1
mon1
mon2
-[mgrs]
-mgr0
-
[osds]
osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"