set_fact:
delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}"
-- name: set_fact condition_copy_admin_key
+- name: set_fact admin_key_presence
set_fact:
- condition_copy_admin_key: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"
+ admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"
- name: create cephx key(s)
ceph_key:
secret: "{{ item.key | default('') }}"
cluster: "{{ cluster }}"
dest: "{{ ceph_conf_key_directory }}"
- import_key: "{{ condition_copy_admin_key }}"
+ import_key: "{{ admin_key_presence }}"
mode: "{{ item.mode|default(omit) }}"
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
- name: pool related tasks
when:
- - condition_copy_admin_key | bool
+ - admin_key_presence | bool
- inventory_hostname == groups.get('_filtered_clients', []) | first
block:
+ - import_role:
+ name: ceph-facts
+ tasks_from: get_def_crush_rule_name.yml
+
- name: list existing pool(s)
command: >
- {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+ {{ ceph_admin_command | default('') }} --cluster {{ cluster }}
osd pool get {{ item.name }} size
with_items: "{{ pools }}"
register: created_pools
- name: create ceph pool(s)
command: >
- {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+ {{ ceph_admin_command | default('') }} --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
replicated
- {{ item.0.rule_name | default(osd_pool_default_crush_rule) }}
+ {{ item.0.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
{{ item.0.expected_num_objects | default(0) }}
{%- else %}
erasure
- item.1.rc != 0
- name: set the target ratio on pool(s)
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
+ command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
when: item.pg_autoscale_mode | default(False) | bool
- name: set pg_autoscale_mode value on pool(s)
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
+ command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
delegate_to: "{{ delegated_node }}"
with_items: "{{ pools | unique }}"
- name: customize pool size
command: >
- {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+ {{ ceph_admin_command | default('') }} --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.size | default(osd_pool_default_size) | int == 1 else '' }}
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
- name: customize pool min_size
command: >
- {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+ {{ ceph_admin_command | default('') }} --cluster {{ cluster }}
osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
- item.type | default('replicated') != 'erasure'
- name: assign application to pool(s)
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+ command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ pools | unique }}"
changed_when: false
delegate_to: "{{ delegated_node }}"
- name: set_fact osd_pool_default_crush_rule
set_fact:
- osd_pool_default_crush_rule: "{% if crush_rule_variable.rc == 0 %}{{ crush_rule_variable.stdout.split(' = ')[1] }}{% else %}{{ ceph_osd_pool_default_crush_rule }}{% endif %}"
- when: ceph_conf.stat.exists
+ osd_pool_default_crush_rule: "{{ crush_rule_variable.stdout.split(' = ')[1] if crush_rule_variable.get('rc', 1) | int == 0 else ceph_osd_pool_default_crush_rule }}"
- name: import_tasks set_monitor_address.yml
import_tasks: set_monitor_address.yml
set_fact:
use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}"
when: iscsi_gw_group_name in group_names
+
+- name: set_fact ceph_admin_command
+ set_fact:
+ ceph_admin_command: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph' }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring"
\ No newline at end of file