# $ ceph-authtool --gen-print-key
# or
# $ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)"
+#
+# To use a particular secret, you have to add 'key' to the dict below, so something like:
+# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
+#
#keys:
-# - { name: client.test, key: "ADD-KEYRING-HERE==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test", mode: "0600", acls: [] }
-# - { name: client.test2, key: "ADD-KEYRING-HERE==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2", mode: "0600", acls: [] }
+# - { name: client.test, caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test" }, mode: "0600", acls: [] }
+# - { name: client.test2, caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test2" }, mode: "0600", acls: [] }
# To have ansible setfacl the generated key, set the acls var like so:
# acls: ["u:nova:r--", "u:cinder:r--", "u:glance:r--", "u:gnocchi:r--"]
#openstack_keys:
-# - { name: client.glance, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
-# - { name: client.cinder, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
-# - { name: client.cinder-backup, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
-# - { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}", mode: "0600", acls: [] }
-# - { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
+# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600", acls: [] }
+# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600", acls: [] }
+# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600", acls: [] }
+# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", acls: [] }
+# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600", acls: [] }
##########
# $ ceph-authtool --gen-print-key
# or
# $ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)"
+#
+# To use a particular secret, you have to add 'key' to the dict below, so something like:
+# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
+#
keys:
- - { name: client.test, key: "ADD-KEYRING-HERE==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test", mode: "0600", acls: [] }
- - { name: client.test2, key: "ADD-KEYRING-HERE==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2", mode: "0600", acls: [] }
+ - { name: client.test, caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test" }, mode: "0600", acls: [] }
+ - { name: client.test2, caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test2" }, mode: "0600", acls: [] }
---
-- name: set docker_exec_client_cmd_binary to ceph-authtool
- set_fact:
- docker_exec_client_cmd_binary: ceph-authtool
+- name: run a dummy container (sleep 300) from where we can create pool(s)/key(s)
+ command: >
+ docker run \
+ -d \
+ -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }} \
+ --name ceph-create-keys \
+ --entrypoint=sleep \
+ {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ 300
+ changed_when: false
+ run_once: true
when: containerized_deployment
- name: set docker_exec_client_cmd for containers
set_fact:
- docker_exec_client_cmd: docker run --rm -v /etc/ceph:/etc/ceph --entrypoint /usr/bin/{{ docker_exec_client_cmd_binary }} {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ docker_exec_client_cmd: "docker exec ceph-create-keys"
+ run_once: true
when: containerized_deployment
-- name: set docker_exec_client_cmd for non-containers
- set_fact:
- docker_exec_client_cmd: ceph-authtool
- when: not containerized_deployment
-
-- name: create key(s)
- shell: "{{ docker_exec_client_cmd }} -C /etc/ceph/{{ cluster }}.{{ item.name }}.keyring --name {{ item.name }} --add-key {{ item.key }} --cap mon \"{{ item.mon_cap|default('') }}\" --cap osd \"{{ item.osd_cap|default('') }}\" --cap mds \"{{ item.mds_cap|default('') }}\""
- args:
- creates: /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
+- name: create cephx key(s)
+ ceph_key:
+ state: present
+ name: "{{ item.name }}"
+ caps: "{{ item.caps }}"
+ secret: "{{ item.key | default('') }}"
+ containerized: "{{ docker_exec_client_cmd | default('') }}"
+ cluster: "{{ cluster }}"
+ dest: "{{ ceph_conf_key_directory }}"
with_items: "{{ keys }}"
- changed_when: false
run_once: true
when:
- cephx
- keys | length > 0
- inventory_hostname in groups.get(client_group_name) | first
-- name: set docker_exec_client_cmd_binary to ceph
- set_fact:
- docker_exec_client_cmd_binary: ceph
- when: containerized_deployment
-
-- name: replace docker_exec_client_cmd by ceph
- set_fact:
- docker_exec_client_cmd: ceph
- when:
- - not containerized_deployment
- - docker_exec_client_cmd == 'ceph-authtool'
-
-- name: slurp client key(s)
+- name: slurp client cephx key(s)
slurp:
src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
with_items:
- keys | length > 0
- inventory_hostname in groups.get(client_group_name) | first
-- name: check if key(s) already exist(s)
- command: "{{ docker_exec_client_cmd }} --cluster {{ cluster }} auth get {{ item.name }}"
- changed_when: false
- failed_when: false
- with_items: "{{ keys }}"
- register: keys_exist
- run_once: true
- when:
- - copy_admin_key
- - inventory_hostname in groups.get(client_group_name) | first
-
-- name: create pool(s)
+- name: create ceph pool(s)
command: >
- {{ docker_exec_client_cmd }} --cluster {{ cluster }}
+ {{ docker_exec_client_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.name }}
{{ item.get('pg_num', hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num']) }}
{{ item.pgp_num | default(item.pg_num) }}
- copy_admin_key
- inventory_hostname in groups.get(client_group_name) | first
-- name: add key(s) to ceph
- command: "{{ docker_exec_client_cmd }} --cluster {{ cluster }} auth import -i /etc/ceph/{{ cluster }}.{{ item.0.name }}.keyring"
+- name: kill a dummy container that created pool(s)/key(s)
+ command: docker rm -f ceph-create-keys
changed_when: false
run_once: true
- with_together:
- - "{{ keys }}"
- - "{{ keys_exist.results | default([]) }}"
- when:
- - not item.1.get("skipped")
- - copy_admin_key
- - item.1.rc != 0
- - inventory_hostname in groups.get(client_group_name) | first
-
-- name: put docker_exec_client_cmd back to normal with a none value
- set_fact:
- docker_exec_client_cmd:
- when: docker_exec_client_cmd == 'ceph'
+ when: containerized_deployment
-- name: get client keys
+- name: get client cephx keys
copy:
dest: "{{ item.source }}"
content: "{{ item.content | b64decode }}"
- not item.get('skipped', False)
- not inventory_hostname == groups.get(client_group_name, []) | first
-- name: chmod key(s)
+- name: chmod cephx key(s)
file:
- path: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
+ path: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
mode: "{{ item.mode|default(omit) }}" # if mode not in list, uses mode from ps umask
with_items: "{{ keys }}"
when:
- cephx
- keys | length > 0
-- name: setfacl for key(s)
+- name: setfacl for cephx key(s)
acl:
- path: "/etc/ceph/{{ cluster }}.{{ item.0.name }}.keyring"
+ path: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.0.name }}.keyring"
entry: "{{ item.1 }}"
state: present
with_subelements:
---
-- name: check keys has been filled in users.key variables
- fail:
- msg: "you must generate and set keys properly in users.key variables"
- with_items: "{{ keys }}"
- when:
- - user_config
- - item.key == 'ADD-KEYRING-HERE=='
-
- name: set selinux permissions
shell: |
chcon -Rt svirt_sandbox_file_t /etc/ceph
# To have ansible setfacl the generated key, set the acls var like so:
# acls: ["u:nova:r--", "u:cinder:r--", "u:glance:r--", "u:gnocchi:r--"]
openstack_keys:
- - { name: client.glance, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
- - { name: client.cinder, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
- - { name: client.cinder-backup, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
- - { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}", mode: "0600", acls: [] }
- - { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
+ - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600", acls: [] }
+ - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600", acls: [] }
+ - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600", acls: [] }
+ - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", acls: [] }
+ - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600", acls: [] }
##########
- cephx
- name: create ceph rest api keyring when mon is not containerized
- command: ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
- args:
- creates: /etc/ceph/{{ cluster }}.client.restapi.keyring
- changed_when: false
+ ceph_key:
+ name: client.restapi
+ state: present
+ caps:
+ mon: allow *
+ osd: allow *
+ cluster: "{{ cluster }}"
when:
- cephx
- groups.get(restapi_group_name, []) | length > 0
- inventory_hostname == groups[mon_group_name]|last
- name: create ceph mgr keyring(s) when mon is not containerized
- command: ceph --cluster {{ cluster }} auth get-or-create mgr.{{ hostvars[item]['ansible_hostname'] }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring
- args:
- creates: /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring
- changed_when: false
+ ceph_key:
+ name: "mgr.{{ hostvars[item]['ansible_hostname'] }}"
+ state: present
+ caps:
+ mon: allow profile mgr
+ osd: allow *
+ mds: allow *
+ cluster: "{{ cluster }}"
when:
- cephx
- groups.get(mgr_group_name, []) | length > 0
- ceph_release_num[ceph_release] > ceph_release_num.jewel
with_items: "{{ groups.get(mgr_group_name, []) }}"
+# once this gets backported github.com/ceph/ceph/pull/20983
+# we will be able to remove these 2 tasks below
- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
when: cephx
- name: create monitor initial keyring
- command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *'
- args:
- creates: /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
+ ceph_key:
+ name: mon.
+ state: present
+ dest: "/var/lib/ceph/tmp/"
+ secret: "{{ monitor_secret }}"
+ cluster: "{{ cluster }}"
+ caps:
+ mon: allow *
+ import_key: False
when: cephx
- name: set initial monitor key permissions
file:
- path: /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
+ path: "/var/lib/ceph/tmp/{{ cluster }}.mon..keyring"
owner: "ceph"
group: "ceph"
mode: "0600"
mode: "0755"
recurse: true
-- name: set_fact ceph_authtool_cap >= ceph_release_num.luminous
+- name: set_fact client_admin_ceph_authtool_cap >= ceph_release_num.luminous
set_fact:
- ceph_authtool_cap: "--cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' --cap mgr 'allow *'"
+ client_admin_ceph_authtool_cap:
+ mon: allow *
+ osd: allow *
+ mds: allow
+ mgr: allow *
when:
- ceph_release_num[ceph_release] >= ceph_release_num.luminous
- cephx
- admin_secret != 'admin_secret'
-- name: set_fact ceph_authtool_cap < ceph_release_num.luminous
+- name: set_fact client_admin_ceph_authtool_cap < ceph_release_num.luminous
set_fact:
- ceph_authtool_cap: "--cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'"
+ client_admin_ceph_authtool_cap:
+ mon: allow *
+ osd: allow *
+ mds: allow
when:
- ceph_release_num[ceph_release] < ceph_release_num.luminous
- cephx
- admin_secret != 'admin_secret'
- name: create custom admin keyring
- command: "ceph-authtool /etc/ceph/{{ cluster }}.client.admin.keyring --create-keyring --name=client.admin --add-key={{ admin_secret }} --set-uid=0 {{ ceph_authtool_cap }}"
- args:
- creates: /etc/ceph/{{ cluster }}.client.admin.keyring
+ ceph_key:
+ name: client.admin
+ state: present
+ secret: "{{ admin_secret }}"
+ auid: 0
+ caps: "{{ client_admin_ceph_authtool_cap }}"
+ import_key: False
+ cluster: "{{ cluster }}"
register: create_custom_admin_secret
when:
- cephx
- admin_secret != 'admin_secret'
- name: import admin keyring into mon keyring
- command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }} --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
+ command: ceph-authtool /var/lib/ceph/tmp/{{ cluster }}.mon..keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
when:
- not create_custom_admin_secret.get('skipped')
- cephx
- admin_secret != 'admin_secret'
- name: ceph monitor mkfs with keyring
- command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
+ command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
when:
- ceph_release_num[ceph_release] >= ceph_release_num['luminous']
-# A future version could use "--caps CAPSFILE"
-# which will set all of capabilities associated with a given key, for all subsystems
-- name: create openstack key(s)
- shell: "{{ docker_exec_cmd }} bash -c 'ceph-authtool -C /etc/ceph/{{ cluster }}.{{ item.name }}.keyring --name {{ item.name }} --add-key {{ item.key }} --cap mon \"{{ item.mon_cap|default('') }}\" --cap osd \"{{ item.osd_cap|default('') }}\" --cap mds \"{{ item.mds_cap|default('') }}\"'"
- args:
- creates: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
+- name: create openstack cephx key(s)
+ ceph_key:
+ state: present
+ name: "{{ item.name }}"
+ caps: "{{ item.caps }}"
+ secret: "{{ item.key | default('') }}"
+ containerized: "{{ docker_exec_cmd | default(False) }}"
+ cluster: "{{ cluster }}"
with_items: "{{ openstack_keys }}"
- changed_when: false
when: cephx
-- name: check if openstack key(s) already exist(s)
- command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
- changed_when: false
- failed_when: false
- with_items: "{{ openstack_keys }}"
- register: openstack_key_exist
-
-- name: add openstack key(s) to ceph
- command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth import -i /etc/ceph/{{ cluster }}.{{ item.0.name }}.keyring"
- changed_when: false
- with_together:
- - "{{ openstack_keys }}"
- - "{{ openstack_key_exist.results }}"
- when: item.1.rc != 0
-
-- name: fetch openstack key(s)
+- name: fetch openstack cephx key(s)
fetch:
src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
flat: yes
with_items: "{{ openstack_keys }}"
-- name: copy to other mons the openstack key(s)
+- name: copy to other mons the openstack cephx key(s)
copy:
src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
- openstack_config
- item.0 != groups[mon_group_name] | last
-- name: chmod openstack key(s) on the other mons and this mon
+- name: chmod openstack cephx key(s) on the other mons and this mon
file:
path: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
mode: "{{ item.1.mode|default(omit) }}" # if mode not in list, uses mode from ps umask
- openstack_config
- cephx
-- name: setfacl for openstack key(s) on the other mons and this mon
+- name: setfacl for openstack cephx key(s) on the other mons and this mon
command: "setfacl -m {{ item.1.acls | join(',') }} /etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
with_nested:
- "{{ groups[mon_group_name] }}"