## OSD options
#
+#is_hci: false # set to true on hyperconverged (HCI) nodes where OSDs share memory with other services
+#hci_safety_factor: 0.2 # max fraction of host memory OSDs may consume on HCI nodes
+#non_hci_safety_factor: 0.7 # max fraction of host memory OSDs may consume on dedicated OSD nodes
+#osd_memory_target: 4000000000 # bluestore per-OSD memory target in bytes
#journal_size: 5120 # OSD journal size in MB
#public_network: 0.0.0.0/0
#cluster_network: "{{ public_network | regex_replace(' ', '') }}"
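With these defaults, the memory target is only raised above the 4 GB floor when the host can afford it: on a 128 GiB hyperconverged node (is_hci: true) running 4 OSDs the cap works out to roughly 6.9 GB per OSD, so the computed value wins, while a dedicated OSD node uses the 0.7 factor instead; see the runnable sketch after the ceph.conf template hunk below.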
group: "ceph"
mode: "0755"
+ - block:
+ - name: count number of osds for non-lvm scenario
+ set_fact:
+ num_osds: "{{ devices | length | int }}"
+ when:
+ - devices | length > 0
+ - (osd_scenario == 'collocated' or osd_scenario == 'non-collocated')
+
+ - name: count number of osds for lvm scenario
+ set_fact:
+ num_osds: "{{ lvm_volumes | length | int }}"
+ when:
+ - lvm_volumes | length > 0
+ - osd_scenario == 'lvm'
+
+ - name: get number of osds for lvm-batch scenario
+    command: "ceph-volume lvm batch --report --format=json --osds-per-device {{ osds_per_device }} {{ devices | join(' ') }}"
+ register: lvm_batch_devices
+ when:
+ - devices | length > 0
+ - osd_scenario == 'lvm'
+
+  - name: set num_osds from the lvm-batch report
+ set_fact:
+ num_osds: "{{ (lvm_batch_devices.stdout | from_json).osds | length | int }}"
+ when:
+ - devices | length > 0
+ - osd_scenario == 'lvm'
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+
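For the lvm-batch path, the playbook shells out to ceph-volume and counts the entries of the "osds" list in the JSON report, which is the only key the tasks rely on. A minimal Python sketch of that derivation, assuming a host with ceph-volume installed and a report that carries a top-level "osds" list:

    import json
    import subprocess

    def count_batch_osds(devices, osds_per_device=1):
        # Ask ceph-volume for a dry-run report of the OSDs it would create.
        report = subprocess.run(
            ["ceph-volume", "lvm", "batch", "--report", "--format=json",
             "--osds-per-device", str(osds_per_device)] + devices,
            capture_output=True, text=True, check=True,
        )
        # The task above counts the reported OSDs the same way.
        return len(json.loads(report.stdout)["osds"])

    # e.g. count_batch_osds(["/dev/sdb", "/dev/sdc"], osds_per_device=2)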
- name: "generate ceph configuration file: {{ cluster }}.conf"
action: config_template
args:
{# else, default is false #}
{% endif %}
{% endif %}
+{% if osd_objectstore == 'bluestore' %}
+{% set _num_osds = num_osds | default(0) | int %}
+[osd]
+{% if is_hci and _num_osds > 0 %}
+{# hci_safety_factor caps the share of host memory OSDs may use on HCI nodes #}
+{% if ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
+{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds) | int %}
+{% endif %}
+{% elif _num_osds > 0 %}
+{# non_hci_safety_factor caps the share of host memory OSDs may use on dedicated OSD nodes #}
+{% if ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
+{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds) | int %}
+{% endif %}
+{% endif %}
+osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
+{% endif %}
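The template above keeps the larger of the default osd_memory_target and the safety-factored share of host RAM; ansible_memtotal_mb is reported in MiB, hence the 1048576 conversion to bytes. A minimal Python sketch of the same arithmetic, where the host values are assumptions for illustration:

    def effective_osd_memory_target(memtotal_mb, num_osds, is_hci,
                                    osd_memory_target=4000000000,
                                    hci_safety_factor=0.2,
                                    non_hci_safety_factor=0.7):
        if num_osds <= 0:
            return osd_memory_target  # no OSDs counted: keep the default
        factor = hci_safety_factor if is_hci else non_hci_safety_factor
        per_osd = memtotal_mb * 1048576 * factor / num_osds  # MiB -> bytes
        # Like the template, only raise the target above the default floor.
        return int(per_osd) if per_osd > osd_memory_target else osd_memory_target

    # 128 GiB HCI node with 4 OSDs -> 6871947673 bytes (~6.9 GB per OSD)
    print(effective_osd_memory_target(131072, 4, is_hci=True))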
{% endif %}
{% if inventory_hostname in groups.get(rgw_group_name, []) %}
## OSD options
#
+is_hci: false # set to true on hyperconverged (HCI) nodes
+hci_safety_factor: 0.2 # max fraction of host memory OSDs may consume on HCI nodes
+non_hci_safety_factor: 0.7 # max fraction of host memory OSDs may consume on dedicated OSD nodes
+osd_memory_target: 4000000000 # bluestore per-OSD memory target in bytes
journal_size: 5120 # OSD journal size in MB
public_network: 0.0.0.0/0
cluster_network: "{{ public_network | regex_replace(' ', '') }}"
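Unlike the commented samples above, these are the live role defaults; deployments that colocate OSDs with compute workloads should override is_hci to true so the more conservative 0.2 factor bounds OSD memory.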
- inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
- ceph_current_status['servicemap'] is defined
- ceph_current_status['servicemap']['services'] is defined
- - ceph_current_status['servicemap']['services']['rgw'] is defined # that's the way to cover ceph_release_num[ceph_release] >= ceph_release_num['luminous']
\ No newline at end of file
+ - ceph_current_status['servicemap']['services']['rgw'] is defined # the servicemap only exists when ceph_release_num[ceph_release] >= ceph_release_num['luminous']