include: create_users_keys.yml
when:
- user_config
- - global_in_ceph_conf_overrides
- - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
+ - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
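+ # .get('global', {}) returns an empty dict when ceph_conf_overrides has no 'global'
+ # section, so the condition no longer needs the separate global_in_ceph_conf_overrides fact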
when:
- cephx
- copy_admin_key
-
-- name: set_fact global_in_ceph_conf_overrides
- set_fact:
- global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"
- ceph_release_num.{{ ceph_release }} > ceph_release_num.jewel
with_items: "{{ groups.get(mgr_group_name, []) }}"
-- name: include set_osd_pool_default_pg_num.yml
- include: set_osd_pool_default_pg_num.yml
-
- name: crush_rules.yml
include: crush_rules.yml
when:
- crush_rule_config
-# Create the pools listed in openstack_pools
-- name: include openstack_config.yml
- include: openstack_config.yml
- when:
- - openstack_config
- - inventory_hostname == groups[mon_group_name] | last
-
-# CEPH creates the rbd pool during the ceph cluster initialization in
-# releases prior to luminous. If the rbd_pool.yml playbook is called too
-# early, the rbd pool does not exist yet.
-- name: include rbd_pool.yml
- include: rbd_pool.yml
- when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
- not containerized_deployment_with_kv
-- name: include ceph-mon/tasks/set_osd_pool_default_pg_num.yml
- include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/set_osd_pool_default_pg_num.yml"
-
-# create openstack pools only when all mons are up.
-- name: include ceph-mon/tasks/set_osd_pool_default_pg_num.yml
- include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/openstack_config.yml"
- when:
- - openstack_config
- - "{{ inventory_hostname == groups[mon_group_name] | last }}"
-
- block:
- name: create ceph mgr keyring(s) when mon is containerized
command: docker exec ceph-mon-{{ ansible_hostname }} ceph --cluster {{ cluster }} auth get-or-create mgr.{{ hostvars[item]['ansible_hostname'] }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring
- name: include docker/main.yml
include: docker/main.yml
when: containerized_deployment
+- name: include set_osd_pool_default_pg_num.yml
+ include: set_osd_pool_default_pg_num.yml
+
+# Create the pools listed in openstack_pools
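+# (run only on the last monitor in the group, i.e. once all mons are up)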
+- name: include openstack_config.yml
+ include: openstack_config.yml
+ when:
+ - openstack_config
+ - inventory_hostname == groups[mon_group_name] | last
+
+# CEPH creates the rbd pool during the ceph cluster initialization in
+# releases prior to luminous. If the rbd_pool.yml playbook is called too
+# early, the rbd pool does not exist yet.
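+# ceph_release_num maps release names to ordered version numbers, so the comparison
+# below only matches pre-luminous releases.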
+- name: include rbd_pool.yml
+ include: rbd_pool.yml
+ when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+
- name: include create_mds_filesystems.yml
include: create_mds_filesystems.yml
when:
---
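# Tasks below fix up the automatically created 'rbd' pool when the
# osd_pool_default_pg_num / osd_pool_default_size overrides were not honoured.
# {{ docker_exec_cmd }} is expected to expand to a 'docker exec ceph-mon-<hostname>'
# prefix on containerized deployments and to an empty string otherwise.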
- name: test if rbd exists
shell: |
- ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd
+ "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd"
changed_when: false
failed_when: false
run_once: true
register: rbd_pool_exist
- name: include rbd_pool_pgs.yml
include: rbd_pool_pgs.yml
when:
- rbd_pool_exist.rc == 0
- - global_in_ceph_conf_overrides
- - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
+ - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
- name: include rbd_pool_size.yml
include: rbd_pool_size.yml
when:
- rbd_pool_exist.rc == 0
- - global_in_ceph_conf_overrides
- - ceph_conf_overrides.global.osd_pool_default_size is defined
-
-# In luminous release, ceph does not create the rbd pool by default.
+ - ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', False) != False
- name: check rbd pool usage
shell: |
- ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'
+ "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'"
changed_when: false
failed_when: false
check_mode: true
- name: check pg num for rbd pool
shell: |
- ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'
+ "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'"
changed_when: false
failed_when: false
check_mode: true
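# The rbd pool was created with the monitor's built-in defaults before the overrides
# could apply, so honouring a different osd_pool_default_pg_num requires deleting and
# recreating the pool.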
- name: destroy and recreate rbd pool if osd_pool_default_pg_num is not honoured
shell: |
- ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it
- ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
+ "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it"
+ "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
changed_when: false
failed_when: false
run_once: true
- name: check size for rbd pool
shell: |
- ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'
+ "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'"
changed_when: false
failed_when: false
check_mode: true
register: rbd_pool_size
- name: change rbd pool size if osd_pool_default_size is not honoured
- command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}
+ command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}"
changed_when: false
failed_when: false
run_once: true
# 'pool_default_pg_num' is kept for backward compatibility,
# so people that had 'pool_default_pg_num' declared will get
# the same behaviour
#
-- name: check if does global key exist in ceph_conf_overrides
- set_fact:
- global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"
-
-- name: check if ceph_conf_overrides.global.osd_pool_default_pg_num is set
- set_fact:
- osd_pool_default_pg_num_in_overrides: "{{ 'osd_pool_default_pg_num' in ceph_conf_overrides.global }}"
- when: global_in_ceph_conf_overrides
-
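# The admin socket 'config get' call below returns JSON (e.g. {"osd_pool_default_pg_num": "128"}),
# which is parsed with the from_json filter further down instead of grep.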
- name: get default value for osd_pool_default_pg_num
shell: |
- {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num | grep -Po '(?<="osd_pool_default_pg_num": ")[^"]*'
+ {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num
failed_when: false
changed_when: false
run_once: true
register: default_pool_default_pg_num
- when: pool_default_pg_num is not defined or not global_in_ceph_conf_overrides
+ when:
+ - pool_default_pg_num is not defined
+ - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) == False
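+# The set_fact tasks below run in order, so a value from ceph_conf_overrides.global takes
+# precedence over the legacy pool_default_pg_num variable, which in turn takes precedence
+# over the monitor's built-in default retrieved above.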
-- name: set_fact osd_pool_default_pg_num pool_default_pg_num
+- name: set_fact osd_pool_default_pg_num with pool_default_pg_num (backward compatibility)
set_fact:
osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
when: pool_default_pg_num is defined
-- name: set_fact osd_pool_default_pg_num default_pool_default_pg_num.stdout
+- name: set_fact osd_pool_default_pg_num with default_pool_default_pg_num.stdout
set_fact:
- osd_pool_default_pg_num: "{{ default_pool_default_pg_num.stdout }}"
+ osd_pool_default_pg_num: "{{ (default_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
when:
- - pool_default_pg_num is not defined
- - default_pool_default_pg_num.rc == 0
- - (osd_pool_default_pg_num_in_overrides is not defined or not osd_pool_default_pg_num_in_overrides)
+ - default_pool_default_pg_num.get('rc') == 0
- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
set_fact:
osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
when:
- - global_in_ceph_conf_overrides
- - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
+ - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
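# With 'osd_pool_default_pg_num' dropped from the 'global' overrides in the test scenarios
# below, the .get() condition evaluates to False and the override-driven tasks are skipped.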
openstack_config: True
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
openstack_config: True
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
nfs_ganesha_stable: true
nfs_ganesha_dev: false
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
user_config: True
keys:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
- osd_pool_default_pg_num: 8
osd_pool_default_size: 1
debian_ceph_packages:
- ceph