# In a production deployment, these should be secret
if DOCKER then
ansible.extra_vars = ansible.extra_vars.merge({
- mon_containerized_deployment: 'true',
- osd_containerized_deployment: 'true',
- mds_containerized_deployment: 'true',
- rgw_containerized_deployment: 'true',
- nfs_containerized_deployment: 'true',
- restapi_containerized_deployment: 'true',
- rbd_mirror_containerized_deployment: 'true',
- mgr_containerized_deployment: 'true',
+ containerized_deployment: 'true',
ceph_mon_docker_interface: ETH,
ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
ceph_osd_docker_devices: settings['disks'],
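With the per-daemon flags collapsed into a single variable, a non-Vagrant deployment only needs to set containerized_deployment once instead of one flag per daemon. A minimal sketch of such a group_vars override, assuming the defaults introduced by this change (the file name, interface, and device path are illustrative):

# group_vars/all.yml (illustrative override)
containerized_deployment: true
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest
ceph_mon_docker_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_osd_docker_devices:
  - /dev/sdb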
#ceph_docker_image: "ceph/daemon"
#ceph_docker_image_tag: latest
-# Do not comment the following variables mon_containerized_deployment_* here. These variables are being used
+# Do not comment the following variables containerized_deployment_* here. These variables are being used
# by ceph.conf.j2 template. so it should always be defined
-#mon_containerized_deployment_with_kv: false
-#mon_containerized_deployment: false
+#containerized_deployment_with_kv: false
+#containerized_deployment: false
#mon_containerized_default_ceph_conf_with_kv: false
# Configure the type of NFS gateway access. At least one must be enabled for an
# do not ever change this here
#rolling_update: false
+#fsid: "{{ cluster_uuid.stdout }}"
+#generate_fsid: true
#ceph_docker_registry: docker.io
#ceph_docker_enable_centos_extra_repo: false
# These values have to be set according to the base OS used by the container image, NOT the host.
#bootstrap_dirs_owner: "64045"
#bootstrap_dirs_group: "64045"
+#
+############
+# KV store #
+############
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#kv_port: 4001
+#containerized_deployment_with_kv: false
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
-#cephx: true
-
-
##########
# DOCKER #
##########
-#mds_containerized_deployment: false
-#mds_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest
#ceph_mds_docker_extra_env: -e CLUSTER={{ cluster }} -e MDS_NAME={{ ansible_hostname }}
-#ceph_docker_on_openstack: false
#ceph_config_keys: [] # DON'T TOUCH ME
# DOCKER #
##########
-#mgr_containerized_deployment: false
-#mgr_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest
#ceph_mgr_docker_extra_env: -e CLUSTER={{ cluster }} -e MGR_NAME={{ ansible_hostname }}
-#ceph_docker_on_openstack: false
#ceph_config_keys: [] # DON'T TOUCH ME
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#monitor_secret: "{{ monitor_keyring.stdout }}"
-#cephx: true
# CephFS
#cephfs_data: cephfs_data
# DOCKER #
##########
#docker_exec_cmd:
-#mon_containerized_deployment: false
-#mon_containerized_deployment_with_kv: false
-# This is currently in ceph-common defaults because it is shared with ceph-nfs
-#mon_containerized_default_ceph_conf_with_kv: false
#ceph_mon_docker_interface: "{{ monitor_interface }}"
#ceph_mon_docker_subnet: "{{ public_network }}" # subnet of the ceph_mon_docker_interface
-#ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest
#ceph_mon_docker_extra_env: -e CLUSTER={{ cluster }} -e FSID={{ fsid }} -e MON_NAME={{ monitor_name }}
-#ceph_docker_on_openstack: false
#mon_docker_privileged: false
#mon_docker_net_host: true
#ceph_config_keys: [] # DON'T TOUCH ME
#fetch_directory: fetch/
-## Ceph options
-#
-#cephx: true
-
-
#######################
# Access type options #
#######################
# DOCKER #
##########
-#nfs_containerized_deployment: false
-#nfs_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
#ceph_docker_image: "ceph/ganesha"
#ceph_docker_image_tag: latest
#ceph_nfs_docker_extra_env: -e GANESHA_EPOCH={{ ganesha_epoch }}
-#ceph_docker_on_openstack: false
#ceph_config_keys: [] # DON'T TOUCH ME
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
-#cephx: true
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# DOCKER #
##########
-#osd_containerized_deployment: false
-#osd_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#kv_port: 4001
-#ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest
#ceph_config_keys: [] # DON'T TOUCH ME
-#ceph_docker_on_openstack: false
# PREPARE DEVICE
# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
# DOCKER #
##########
-#rbd_mirror_containerized_deployment: false
-#rbd_mirror_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest
-#ceph_docker_on_openstack: false
#ceph_config_keys: [] # DON'T TOUCH ME
# DOCKER #
##########
-#restapi_containerized_deployment: false
#ceph_restapi_docker_interface: eth0
#ceph_restapi_port: 5000
-#ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest
#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
-#ceph_docker_on_openstack: false
#ceph_config_keys: [] # DON'T TOUCH ME
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
-## Ceph options
-#
-#cephx: true
-
# Multi-site remote pull URL variables
#rgw_pull_port: "{{ radosgw_civetweb_port }}"
#rgw_pull_proto: "http"
# DOCKER #
##########
-#rgw_containerized_deployment: false
-#rgw_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
#ceph_rgw_civetweb_port: "{{ radosgw_civetweb_port }}"
-#ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest
#ceph_rgw_docker_extra_env: -e CLUSTER={{ cluster }} -e RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}
-#ceph_docker_on_openstack: false
#ceph_config_keys: [] # DON'T TOUCH ME
#rgw_config_keys: "/" # DON'T TOUCH ME
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - not mon_containerized_deployment
+ - not containerized_deployment
roles:
- ceph-mon
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - not mon_containerized_deployment
+ - not containerized_deployment
- name: restart containerized ceph mons with systemd
service:
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - mon_containerized_deployment
+ - containerized_deployment
- name: set mon_host_count
set_fact: mon_host_count={{ groups[mon_group_name] | length }}
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
delegate_to: "{{ mon_host }}"
- when: not mon_containerized_deployment
+ when: not containerized_deployment
- name: waiting for the containerized monitor to join the quorum...
shell: |
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
delegate_to: "{{ mon_host }}"
- when: mon_containerized_deployment
+ when: containerized_deployment
- name: upgrade ceph osds cluster
- noscrub
- nodeep-scrub
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: not mon_containerized_deployment
+ when: not containerized_deployment
- name: set containerized osd flags
command: |
- noscrub
- nodeep-scrub
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: mon_containerized_deployment
+ when: containerized_deployment
- name: get osd numbers
shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
register: osd_ids
changed_when: false
- when: not osd_containerized_deployment
+ when: not containerized_deployment
- name: stop ceph osds with upstart
service:
with_items: "{{ osd_ids.stdout_lines }}"
when:
- ansible_service_mgr == 'systemd'
- - not osd_containerized_deployment
+ - not containerized_deployment
roles:
- ceph-osd
shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
register: osd_ids
changed_when: false
- when: not osd_containerized_deployment
+ when: not containerized_deployment
- name: start ceph osds with upstart
service:
with_items: "{{ osd_ids.stdout_lines }}"
when:
- ansible_service_mgr == 'systemd'
- - not osd_containerized_deployment
+ - not containerized_deployment
- name: restart containerized ceph osds with systemd
service:
with_items: "{{ ceph_osd_docker_devices }}"
when:
- ansible_service_mgr == 'systemd'
- - osd_containerized_deployment
+ - containerized_deployment
- name: waiting for clean pgs...
shell: |
retries: "{{ health_osd_check_retries }}"
delay: "{{ health_osd_check_delay }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: not osd_containerized_deployment
+ when: not containerized_deployment
- name: container - waiting for clean pgs...
shell: |
retries: "{{ health_osd_check_retries }}"
delay: "{{ health_osd_check_delay }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: osd_containerized_deployment
+ when: containerized_deployment
- name: unset osd flags
command: ceph osd unset {{ item }} --cluster {{ cluster }}
- noscrub
- nodeep-scrub
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: not osd_containerized_deployment
+ when: not containerized_deployment
- name: unset containerized osd flags
command: |
- noscrub
- nodeep-scrub
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: osd_containerized_deployment
+ when: containerized_deployment
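For context, a sketch of what the containerized variant of such a flag task could look like, reusing the ceph-mon-<hostname> container naming convention that the systemd templates and the docker_exec_cmd fact in this change rely on; the exact command wording is illustrative:

- name: set containerized osd flags
  command: docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph osd set {{ item }} --cluster {{ cluster }}
  with_items:
    - noscrub
    - nodeep-scrub
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when: containerized_deployment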
- name: upgrade ceph mdss cluster
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - not mds_containerized_deployment
+ - not containerized_deployment
roles:
- ceph-mds
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - not mds_containerized_deployment
+ - not containerized_deployment
- name: restart ceph mdss
service:
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - mds_containerized_deployment
+ - containerized_deployment
- name: upgrade ceph rgws cluster
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - not rgw_containerized_deployment
+ - not containerized_deployment
roles:
- ceph-rgw
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - not rgw_containerized_deployment
+ - not containerized_deployment
- name: restart containerized ceph rgws with systemd
service:
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- - rgw_containerized_deployment
+ - containerized_deployment
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest
-# Do not comment the following variables mon_containerized_deployment_* here. These variables are being used
+# Do not comment the following variables containerized_deployment_* here. These variables are being used
# by ceph.conf.j2 template. so it should always be defined
-mon_containerized_deployment_with_kv: false
-mon_containerized_deployment: false
+containerized_deployment_with_kv: false
+containerized_deployment: false
mon_containerized_default_ceph_conf_with_kv: false
# Configure the type of NFS gateway access. At least one must be enabled for an
+++ /dev/null
----
-# Normal case - pull image from registry
-- name: "pull {{ ceph_docker_image }} image"
- command: "docker pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- changed_when: false
- when: ceph_docker_dev_image is undefined or not ceph_docker_dev_image
-
-# Dev case - export local dev image and send it across
-- name: export local ceph dev image
- local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
- when: ceph_docker_dev_image is defined and ceph_docker_dev_image
- run_once: true
-
-- name: copy ceph dev image file
- copy:
- src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: ceph_docker_dev_image is defined and ceph_docker_dev_image
-
-- name: load ceph dev image
- command: "docker load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: ceph_docker_dev_image is defined and ceph_docker_dev_image
-
-- name: remove tmp ceph dev image file
- command: "rm /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: ceph_docker_dev_image is defined and ceph_docker_dev_image
when:
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
-
become: false
always_run: true
when: generate_fsid
+
+- name: set fsid fact when generate_fsid = true
+ set_fact:
+ fsid: "{{ cluster_uuid.stdout }}"
+ when: generate_fsid
{% if ip_version == 'ipv6' %}
ms bind ipv6 = true
{% endif %}
-{% if not mon_containerized_deployment_with_kv and not mon_containerized_deployment %}
+{% if not containerized_deployment_with_kv and not containerized_deployment %}
fsid = {{ fsid }}
{% endif %}
max open files = {{ max_open_files }}
{% endfor %}
{% endif %}
-{% if not mon_containerized_deployment and not mon_containerized_deployment_with_kv %}
+{% if not containerized_deployment and not containerized_deployment_with_kv %}
{% if monitor_address_block %}
mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_all_ipv4_addresses'] | ipaddr(monitor_address_block) | first }}{% if not loop.last %},{% endif %}{% endfor %}
{% elif groups[mon_group_name] is defined %}
{% endfor %}
{% endif %}
{% endif %}
-{% if mon_containerized_deployment %}
+{% if containerized_deployment %}
fsid = {{ fsid }}
{% if groups[mon_group_name] is defined %}
mon host = {% for host in groups[mon_group_name] %}
{% set interface = ["ansible_",ceph_mon_docker_interface]|join %}
- {% if mon_containerized_deployment -%}
+ {% if containerized_deployment -%}
{{ hostvars[host][interface]['ipv4']['address'] }}
{%- elif hostvars[host]['monitor_address'] is defined -%}
{{ hostvars[host]['monitor_address'] }}
---
+fsid: "{{ cluster_uuid.stdout }}"
+generate_fsid: true
ceph_docker_registry: docker.io
ceph_docker_enable_centos_extra_repo: false
# These values have to be set according to the base OS used by the container image, NOT the host.
bootstrap_dirs_owner: "64045"
bootstrap_dirs_group: "64045"
+#
+############
+# KV store #
+############
+kv_type: etcd
+kv_endpoint: 127.0.0.1
+kv_port: 4001
+containerized_deployment_with_kv: false
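These new ceph-docker-common defaults are meant to be overridden per deployment. As a rough sketch, a group_vars entry for a KV-backed setup could look like the following; the etcd address and the bootstrap UID/GID are illustrative, and the UID/GID must match the ceph user inside the chosen container image rather than the host:

containerized_deployment_with_kv: true
kv_type: etcd
kv_endpoint: 192.168.42.10   # illustrative etcd host
kv_port: 4001
bootstrap_dirs_owner: "167"  # illustrative; depends on the container image's base OS
bootstrap_dirs_group: "167"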
--- /dev/null
+---
+- name: set config and keys paths
+ set_fact:
+ ceph_config_keys:
+ - /etc/ceph/{{ cluster }}.client.admin.keyring
+ - /etc/ceph/{{ cluster }}.conf
+ - /etc/ceph/monmap-{{ cluster }}
+ - /etc/ceph/{{ cluster }}.mon.keyring
+ - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
+ - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
+ - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
+
+- name: stat for ceph config and keys
+ stat:
+ path: "{{ item }}"
+ with_items: "{{ ceph_config_keys }}"
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: statleftover
+
+- name: fail if we find existing cluster files
+ fail:
+ msg: "looks like no cluster is running but ceph files are present, please remove them"
+ with_together:
+ - "{{ ceph_config_keys }}"
+ - "{{ statleftover.results }}"
+ when: item.1.stat.exists == true
--- /dev/null
+---
+- name: check ntp installation on atomic
+ command: rpm -q chrony
+ register: ntp_pkg_query
+ ignore_errors: true
+ always_run: true
+ changed_when: false
--- /dev/null
+---
+- name: check ntp installation on debian
+ command: dpkg -s ntp
+ register: ntp_pkg_query
+ ignore_errors: true
+ always_run: true
+ changed_when: false
+ when: ansible_os_family == 'Debian'
+
+- name: install ntp on debian
+ package:
+ name: ntp
+ state: present
--- /dev/null
+---
+- name: check ntp installation on redhat
+ command: rpm -q ntp
+ register: ntp_pkg_query
+ ignore_errors: true
+ always_run: true
+ changed_when: false
+ when: ansible_os_family == 'RedHat'
+
+- name: install ntp on redhat
+ package:
+ name: ntp
+ state: present
--- /dev/null
+---
+- name: create a local fetch directory if it does not exist
+ local_action: file path={{ fetch_directory }} state=directory
+ changed_when: false
+ become: false
+ run_once: true
+ when: cephx or generate_fsid
+
+- name: generate cluster uuid
+ local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ register: cluster_uuid
+ become: false
+ when: generate_fsid
+
+- name: read cluster uuid if it already exists
+ local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+ removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ changed_when: false
+ register: cluster_uuid
+ always_run: true
+ become: false
+ when: generate_fsid
+
+- name: "generate {{ cluster }}.conf configuration file"
+ action: config_template
+ args:
+ src: "{{ playbook_dir }}/roles/ceph-common/templates/ceph.conf.j2"
+ dest: "/etc/ceph/{{ cluster }}.conf"
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ config_overrides: "{{ ceph_conf_overrides }}"
+ config_type: ini
+
+- name: set fsid fact when generate_fsid = true
+ set_fact:
+ fsid: "{{ cluster_uuid.stdout }}"
+ when: generate_fsid
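Because the rendered {{ cluster }}.conf goes through config_template with config_overrides set to ceph_conf_overrides, cluster-specific settings can be layered on without editing ceph.conf.j2. A small, purely illustrative override (the option names are examples of standard ceph.conf keys, not values required by this change):

ceph_conf_overrides:
  global:
    osd pool default size: 2
    osd pool default pg num: 64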
--- /dev/null
+---
+- name: create bootstrap directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ bootstrap_dirs_owner }}"
+ group: "{{ bootstrap_dirs_group }}"
+ mode: "0755"
+ with_items:
+ - /etc/ceph/
+ - /var/lib/ceph/bootstrap-osd
+ - /var/lib/ceph/bootstrap-mds
+ - /var/lib/ceph/bootstrap-rgw
--- /dev/null
+---
+- name: set config and keys paths
+ set_fact:
+ ceph_config_keys:
+ - /etc/ceph/{{ cluster }}.conf
+ - /etc/ceph/{{ cluster }}.client.admin.keyring
+ - /etc/ceph/monmap-{{ cluster }}
+ - /etc/ceph/{{ cluster }}.mon.keyring
+ - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
+ - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
+ - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
+
+- name: add mgr keys to config and keys paths
+ set_fact:
+ tmp_ceph_mgr_keys: /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring
+ with_items: "{{ groups.get(mgr_group_name, []) }}"
+ register: tmp_ceph_mgr_keys_result
+ when: "{{ groups.get(mgr_group_name, []) | length > 0 }}"
+
+- name: convert mgr keys to an array
+ set_fact:
+ ceph_mgr_keys: "{{ tmp_ceph_mgr_keys_result.results | map(attribute='ansible_facts.tmp_ceph_mgr_keys') | list }}"
+ when: "{{ groups.get(mgr_group_name, []) | length > 0 }}"
+
+- name: merge mgr keys to config and keys paths
+ set_fact:
+ ceph_config_keys: "{{ ceph_config_keys + ceph_mgr_keys }}"
+ when: "{{ groups.get(mgr_group_name, []) | length > 0 }}"
+
+- name: stat for ceph config and keys
+ local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
+ with_items: "{{ ceph_config_keys }}"
+ changed_when: false
+ become: false
+ failed_when: false
+ register: statconfig
+ always_run: true
+
+- name: try to fetch ceph config and keys
+ copy:
+ src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
+ dest: "{{ item.0 }}"
+ owner: root
+ group: root
+ mode: 0644
+ changed_when: false
+ with_together:
+ - "{{ ceph_config_keys }}"
+ - "{{ statconfig.results }}"
+ when: item.1.stat.exists == true
--- /dev/null
+---
+# Normal case - pull image from registry
+- name: "pull {{ ceph_docker_image }} image"
+ command: "docker pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ when: ceph_docker_dev_image is undefined or not ceph_docker_dev_image
+
+# Dev case - export local dev image and send it across
+- name: export local ceph dev image
+ local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
+ when: ceph_docker_dev_image is defined and ceph_docker_dev_image
+ run_once: true
+
+- name: copy ceph dev image file
+ copy:
+ src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ when: ceph_docker_dev_image is defined and ceph_docker_dev_image
+
+- name: load ceph dev image
+ command: "docker load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ when: ceph_docker_dev_image is defined and ceph_docker_dev_image
+
+- name: remove tmp ceph dev image file
+ command: "rm /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ when: ceph_docker_dev_image is defined and ceph_docker_dev_image
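The dev-image path above is controlled entirely by variables; a sketch of what a developer might set to exercise it (the namespace, image name, and tag are placeholders, not defaults shipped by this change):

ceph_docker_dev_image: true
ceph_docker_username: jdoe          # placeholder registry namespace
ceph_docker_imagename: daemon       # placeholder image name
ceph_docker_image_tag: wip-feature  # placeholder tag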
- set_fact:
monitor_name: "{{ ansible_fqdn }}"
when: mon_use_fqdn
+
+- name: check if a cluster is already running
+ command: "docker ps -q -a --filter='ancestor={{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'"
+ register: ceph_health
+ changed_when: false
+ failed_when: false
+ always_run: true
+
+# Only include 'checks.yml' when :
+# we are deploying containers without kv AND host is either a mon OR a nfs OR an osd,
+# AND
+# a cluster is not already running,
+# AND
+# we are not playing rolling-update.yml playbook.
+- include: checks.yml
+ when:
+ - (not containerized_deployment_with_kv and
+ ((inventory_hostname in groups.get(mon_group_name, [])) or
+ (inventory_hostname in groups.get(nfs_group_name, [])) or
+ (inventory_hostname in groups.get(osd_group_name, []))))
+ - ceph_health.rc != 0
+ - not "{{ rolling_update | default(false) }}"
+
+- include: "./misc/ntp_atomic.yml"
+ when:
+ - is_atomic
+ - ansible_os_family == 'RedHat'
+ - ntp_service_enabled
+
+- include: "./misc/ntp_redhat.yml"
+ when:
+ - not is_atomic
+ - ansible_os_family == 'RedHat'
+ - ntp_service_enabled
+
+- include: "./misc/ntp_debian.yml"
+ when:
+ - ansible_os_family == 'Debian'
+ - ntp_service_enabled
+
+- include: "./fetch_image.yml"
+
+# NOTE (jimcurtis): dirs_permissions.yml must precede fetch_configs.yml
+# because it creates the directories needed by the latter.
+- include: ./dirs_permissions.yml
+
+# let the first mon create configs and keyrings
+# Only include 'create_configs.yml' when:
+# we are not populating kv_store with default ceph.conf AND host is a mon
+# OR
+# we are not populating kv_store with default ceph.conf AND there is at least 1 nfs in the nfs group AND host is the first nfs
+- include: create_configs.yml
+ when:
+ - (not mon_containerized_default_ceph_conf_with_kv and
+ (inventory_hostname in groups.get(mon_group_name, []))) or
+ (not mon_containerized_default_ceph_conf_with_kv and
+ ((groups.get(nfs_group_name, []) | length > 0)
+ and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
+
+# Only include 'fetch_configs.yml' when:
+# - we are deploying containers without kv AND host is either a mon OR a nfs OR an osd
+# OR
+# - host is either a mdss OR mgrs OR rgws
+- include: fetch_configs.yml
+ when:
+ - (not containerized_deployment_with_kv and
+ ((inventory_hostname in groups.get(mon_group_name, [])) or
+ (inventory_hostname in groups.get(nfs_group_name, [])) or
+ (inventory_hostname in groups.get(osd_group_name, [])))) or
+ (inventory_hostname in groups.get('mdss', [])) or
+ (inventory_hostname in groups.get('mgrs', [])) or
+ (inventory_hostname in groups.get('rgws', []))
+
+- include: selinux.yml
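The include conditions above all key off inventory group membership. A minimal sketch of a YAML inventory that exercises the mon/osd/nfs checks, assuming the default group names used by these roles (host names are placeholders):

all:
  children:
    mons:
      hosts:
        mon0:
    osds:
      hosts:
        osd0:
    nfss:
      hosts:
        nfs0: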
--- /dev/null
+---
+- include: ../checks/check_ntp_atomic.yml
+ when: is_atomic
+
+- name: start the ntp service
+ service:
+ name: chronyd
+ enabled: yes
+ state: started
+ when:
+ - ntp_pkg_query.rc == 0
--- /dev/null
+---
+- include: ../checks/check_ntp_debian.yml
+ when: ansible_os_family == 'Debian'
+
+- name: start the ntp service
+ service:
+ name: ntp
+ enabled: yes
+ state: started
+ when:
+ - ntp_pkg_query.rc == 0
--- /dev/null
+---
+- include: ../checks/check_ntp_redhat.yml
+ when: ansible_os_family == 'RedHat'
+
+- name: start the ntp service
+ service:
+ name: ntpd
+ enabled: yes
+ state: started
+ when:
+ - ntp_pkg_query.rc == 0
--- /dev/null
+---
+- name: check if selinux is enabled
+ command: getenforce
+ register: sestatus
+ changed_when: false
+ always_run: true
+
+- name: set selinux permissions
+ shell: chcon -Rt svirt_sandbox_file_t {{ item }}
+ with_items:
+ - /etc/ceph
+ - /var/lib/ceph
+ changed_when: false
+ when: sestatus.stdout != 'Disabled'
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false
-cephx: true
-
-
##########
# DOCKER #
##########
-mds_containerized_deployment: false
-mds_containerized_deployment_with_kv: false
-kv_type: etcd
-kv_endpoint: 127.0.0.1
-ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest
ceph_mds_docker_extra_env: -e CLUSTER={{ cluster }} -e MDS_NAME={{ ansible_hostname }}
-ceph_docker_on_openstack: false
ceph_config_keys: [] # DON'T TOUCH ME
categories:
- system
dependencies:
- - { role: ceph.ceph-common, when: not mds_containerized_deployment }
- - { role: ceph.ceph-docker-common, when: mds_containerized_deployment }
+ - { role: ceph.ceph-common, when: not containerized_deployment }
+ - { role: ceph.ceph-docker-common, when: containerized_deployment }
+++ /dev/null
----
-- name: set config and keys paths
- set_fact:
- ceph_config_keys:
- - /etc/ceph/{{ cluster }}.client.admin.keyring
- - /etc/ceph/{{ cluster }}.conf
- - /etc/ceph/monmap-{{ cluster }}
- - /etc/ceph/{{ cluster }}.mon.keyring
- - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: stat for ceph config and keys
- stat:
- path: "{{ item }}"
- with_items: "{{ ceph_config_keys }}"
- changed_when: false
- failed_when: false
- always_run: true
- register: statleftover
-
-- name: fail if we find existing cluster files
- fail:
- msg: "looks like no cluster is running but ceph files are present, please remove them"
- with_together:
- - "{{ ceph_config_keys }}"
- - "{{ statleftover.results }}"
- when: item.1.stat.exists == true
---
-- name: check if a cluster is already running
- command: "docker ps -q -a --filter='ancestor={{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'"
- register: ceph_health
- changed_when: false
- failed_when: false
- always_run: true
-
-- include: checks.yml
- when:
- - ceph_health.rc != 0
- - not "{{ rolling_update | default(false) }}"
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
- when:
- - is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
- when:
- - not is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
- when:
- - ansible_os_family == 'Debian'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
-- include: dirs_permissions.yml
-- include: fetch_configs.yml
-
-- include: selinux.yml
- when: ansible_os_family == 'RedHat'
-
- include: start_docker_mds.yml
---
- include: pre_requisite.yml
- when: not mds_containerized_deployment
+ when: not containerized_deployment
- include: ./docker/main.yml
- when: mds_containerized_deployment
+ when: containerized_deployment
ExecStartPre=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-mds-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
- {% if not mds_containerized_deployment_with_kv -%}
+ {% if not containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
{% else -%}
# DOCKER #
##########
-mgr_containerized_deployment: false
-mgr_containerized_deployment_with_kv: false
-kv_type: etcd
-kv_endpoint: 127.0.0.1
-ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest
ceph_mgr_docker_extra_env: -e CLUSTER={{ cluster }} -e MGR_NAME={{ ansible_hostname }}
-ceph_docker_on_openstack: false
ceph_config_keys: [] # DON'T TOUCH ME
categories:
- system
dependencies:
- - { role: ceph.ceph-common, when: not mgr_containerized_deployment }
- - { role: ceph.ceph-docker-common, when: mgr_containerized_deployment }
+ - { role: ceph.ceph-common, when: not containerized_deployment }
+ - { role: ceph.ceph-docker-common, when: containerized_deployment }
+++ /dev/null
----
-- name: set config and keys paths
- set_fact:
- ceph_config_keys:
- - /etc/ceph/{{ cluster }}.client.admin.keyring
- - /etc/ceph/{{ cluster }}.conf
- - /etc/ceph/monmap-{{ cluster }}
- - /etc/ceph/{{ cluster }}.mon.keyring
- - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: stat for ceph config and keys
- stat:
- path: "{{ item }}"
- with_items: "{{ ceph_config_keys }}"
- changed_when: false
- failed_when: false
- always_run: true
- register: statleftover
-
-- name: fail if we find existing cluster files
- fail:
- msg: "looks like no cluster is running but ceph files are present, please remove them"
- with_together:
- - "{{ ceph_config_keys }}"
- - "{{ statleftover.results }}"
- when: item.1.stat.exists == true
---
-- name: check if a cluster is already running
- command: "docker ps -q -a --filter='ancestor={{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'"
- register: ceph_health
- changed_when: false
- failed_when: false
- always_run: true
-
-- include: checks.yml
- when: ceph_health.rc != 0
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
- when:
- - is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
- when:
- - not is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
- when:
- - ansible_os_family == 'Debian'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
-- include: dirs_permissions.yml
-- include: fetch_configs.yml
-
-- include: selinux.yml
- when: ansible_os_family == 'RedHat'
-
- include: start_docker_mgr.yml
---
- include: pre_requisite.yml
- when: not mgr_containerized_deployment
+ when: not containerized_deployment
- include: ./docker/main.yml
- when: mgr_containerized_deployment
+ when: containerized_deployment
ExecStartPre=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-mgr-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
- {% if not mgr_containerized_deployment_with_kv -%}
+ {% if not containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
{% else -%}
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
monitor_secret: "{{ monitor_keyring.stdout }}"
-cephx: true
# CephFS
cephfs_data: cephfs_data
# DOCKER #
##########
docker_exec_cmd:
-mon_containerized_deployment: false
-mon_containerized_deployment_with_kv: false
-# This is currently in ceph-common defaults because it is shared with ceph-nfs
-#mon_containerized_default_ceph_conf_with_kv: false
ceph_mon_docker_interface: "{{ monitor_interface }}"
ceph_mon_docker_subnet: "{{ public_network }}" # subnet of the ceph_mon_docker_interface
-ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest
ceph_mon_docker_extra_env: -e CLUSTER={{ cluster }} -e FSID={{ fsid }} -e MON_NAME={{ monitor_name }}
-ceph_docker_on_openstack: false
mon_docker_privileged: false
mon_docker_net_host: true
ceph_config_keys: [] # DON'T TOUCH ME
categories:
- system
dependencies:
- - { role: ceph.ceph-common, when: not mon_containerized_deployment }
- - { role: ceph.ceph-docker-common, when: mon_containerized_deployment }
+ - { role: ceph.ceph-common, when: not containerized_deployment }
+ - { role: ceph.ceph-docker-common, when: containerized_deployment }
- /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
when:
- cephx
- - inventory_hostname == groups[mon_group_name]|last
+ - "{{ inventory_hostname == groups[mon_group_name] | last }}"
- name: drop in a motd script to report status when logging in
copy:
+++ /dev/null
----
-- name: set config and keys paths
- set_fact:
- ceph_config_keys:
- - /etc/ceph/{{ cluster }}.client.admin.keyring
- - /etc/ceph/{{ cluster }}.conf
- - /etc/ceph/monmap-{{ cluster }}
- - /etc/ceph/{{ cluster }}.mon.keyring
- - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: stat for ceph config and keys
- stat:
- path: "{{ item }}"
- with_items: "{{ ceph_config_keys }}"
- changed_when: false
- failed_when: false
- register: statleftover
- always_run: true
-
-- name: fail if we find existing cluster files
- fail:
- msg: "looks like no cluster is running but ceph files are present, please remove them"
- with_together:
- - "{{ ceph_config_keys }}"
- - "{{ statleftover.results }}"
- when: item.1.stat.exists == true
+++ /dev/null
----
-- name: create a local fetch directory if it does not exist
- local_action: file path={{ fetch_directory }} state=directory
- changed_when: false
- become: false
- run_once: true
- when: cephx or generate_fsid
-
-- name: generate cluster uuid
- local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
- register: cluster_uuid
- become: false
- when: generate_fsid
-
-- name: read cluster uuid if it already exists
- local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
- removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
- changed_when: false
- register: cluster_uuid
- always_run: true
- become: false
- when: generate_fsid
-
-- name: "generate {{ cluster }}.conf configuration file"
- action: config_template
- args:
- src: "{{ playbook_dir }}/roles/ceph-common/templates/ceph.conf.j2"
- dest: "/etc/ceph/{{ cluster }}.conf"
- owner: "root"
- group: "root"
- mode: "0644"
- config_overrides: "{{ ceph_conf_overrides }}"
- config_type: ini
---
-- name: check if a cluster is already running
- command: "docker ps -q -a --filter='ancestor={{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'"
- register: ceph_health
- changed_when: false
- failed_when: false
- always_run: true
-
-- include: checks.yml
- when:
- - ceph_health.rc != 0
- - not mon_containerized_deployment_with_kv
- - not "{{ rolling_update | default(false) }}"
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
- when:
- - is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
- when:
- - not is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
- when:
- - ansible_os_family == 'Debian'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
-
-- include: dirs_permissions.yml
-
-# let the first mon create configs and keyrings
-- include: create_configs.yml
- when:
- - not mon_containerized_default_ceph_conf_with_kv
-
-- include: fetch_configs.yml
- when: not mon_containerized_deployment_with_kv
-
-- include: selinux.yml
- when: ansible_os_family == 'RedHat'
-
- name: set docker_exec_cmd fact
set_fact:
docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
failed_when: false
when:
- "{{ inventory_hostname == groups[mon_group_name][0] }}"
- - not mon_containerized_deployment_with_kv
+ - not containerized_deployment_with_kv
- include: copy_configs.yml
- when: not mon_containerized_deployment_with_kv
+ when: not containerized_deployment_with_kv
- name: create ceph rest api keyring when mon is containerized
command: docker exec ceph-mon-{{ ansible_hostname }} ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
changed_when: false
when:
- cephx
- - mon_containerized_deployment
+ - containerized_deployment
- groups[restapi_group_name] is defined
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
- - not mon_containerized_deployment_with_kv
+ - not containerized_deployment_with_kv
- include: "{{ playbook_dir }}/roles/ceph-mon/tasks/set_osd_pool_default_pg_num.yml"
changed_when: false
when:
- cephx
- - mon_containerized_deployment
+ - containerized_deployment
- "{{ groups.get(mgr_group_name, []) | length > 0 }}"
- - not mon_containerized_deployment_with_kv
+ - not containerized_deployment_with_kv
with_items: "{{ groups.get(mgr_group_name, []) }}"
- name: stat for ceph mgr key(s)
run_once: true
when:
- "{{ inventory_hostname == groups[mon_group_name][0] }}"
- - mon_containerized_deployment_with_kv
+ - containerized_deployment_with_kv
- mon_containerized_default_ceph_conf_with_kv
- name: populate kv_store with custom ceph.conf
run_once: true
when:
- "{{ inventory_hostname == groups[mon_group_name][0] }}"
- - mon_containerized_deployment_with_kv
+ - containerized_deployment_with_kv
- not mon_containerized_default_ceph_conf_with_kv
- name: delete populate-kv-store docker
name: populate-kv-store
state: absent
image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- when: mon_containerized_deployment_with_kv
+ when: containerized_deployment_with_kv
- name: generate systemd unit file
become: true
---
- include: deploy_monitors.yml
- when: not mon_containerized_deployment
+ when: not containerized_deployment
- include: start_monitor.yml
- when: not mon_containerized_deployment
+ when: not containerized_deployment
- include: ceph_keys.yml
- when: not mon_containerized_deployment
+ when: not containerized_deployment
# this avoids the bug mentioned here: https://github.com/ansible/ansible/issues/18206
static: no
- include: create_mds_filesystems.yml
when:
- - not mon_containerized_deployment
+ - not containerized_deployment
- groups[mds_group_name] is defined
- "{{ groups[mds_group_name]|length > 0 }}"
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
- include: secure_cluster.yml
when:
- secure_cluster
- - not mon_containerized_deployment
+ - not containerized_deployment
- include: ./docker/main.yml
- when: mon_containerized_deployment
+ when: containerized_deployment
- include: calamari.yml
when: calamari
ExecStartPre=-/usr/bin/docker rm ceph-mon-%i
ExecStartPre=$(command -v mkdir) -p /etc/ceph /var/lib/ceph/mon
ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i --net=host \
- {% if not mon_containerized_deployment_with_kv -%}
+ {% if not containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
{% else -%}
fetch_directory: fetch/
-## Ceph options
-#
-cephx: true
-
-
#######################
# Access type options #
#######################
# DOCKER #
##########
-nfs_containerized_deployment: false
-nfs_containerized_deployment_with_kv: false
-kv_type: etcd
-kv_endpoint: 127.0.0.1
ceph_docker_image: "ceph/ganesha"
ceph_docker_image_tag: latest
#ceph_nfs_docker_extra_env: -e GANESHA_EPOCH={{ ganesha_epoch }}
-ceph_docker_on_openstack: false
ceph_config_keys: [] # DON'T TOUCH ME
categories:
- system
dependencies:
- - { role: ceph.ceph-common, when: not nfs_containerized_deployment }
- - { role: ceph.ceph-docker-common, when: nfs_containerized_deployment }
+ - { role: ceph.ceph-common, when: not containerized_deployment }
+ - { role: ceph.ceph-docker-common, when: containerized_deployment }
+++ /dev/null
----
-- name: set config and keys paths
- set_fact:
- ceph_config_keys:
- - /etc/ceph/{{ cluster }}.client.admin.keyring
- - /etc/ceph/{{ cluster }}.conf
- - /etc/ceph/monmap-{{ cluster }}
- - /etc/ceph/{{ cluster }}.mon.keyring
- - /etc/ganesha/ganesha.conf
-
-- name: stat for ceph config and keys
- stat:
- path: "{{ item }}"
- with_items: "{{ ceph_config_keys }}"
- changed_when: false
- failed_when: false
- always_run: true
- register: statleftover
-
-- name: fail if we find existing cluster files
- fail:
- msg: "looks like no cluster is running but ceph files are present, please remove them"
- with_together:
- - "{{ ceph_config_keys }}"
- - "{{ statleftover.results }}"
- when: item.1.stat.exists == true
+++ /dev/null
----
-- name: create ganesha conf directory
- file:
- path: /etc/ganesha
- state: directory
- owner: root
- group: root
- mode: 0644
-
-- name: create the nfs rgw user
- docker:
- image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- name: ceph-rgw-user-{{ ansible_hostname }}
- hostname: "{{ ansible_hostname }}"
- expose: "{{ ceph_rgw_civetweb_port }}"
- ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}"
- state: running
- env: "CEPH_DAEMON=RGW_USER,RGW_USERNAME={{ ceph_nfs_rgw_user }},RGW_USER_ACCESS_KEY={{ ceph_nfs_rgw_access_key }},RGW_USER_SECRET_KEY={{ ceph_nfs_rgw_secret_key }}"
- volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
- when: nfs_obj_gw
-
-- name: get user create output
- command: docker logs ceph-rgw-user-{{ ansible_hostname }}
- always_run: true
- register: rgwuser
-
-- name: generate ganesha configuration file
- action: config_template
- args:
- src: "{{ playbook_dir }}/roles/ceph-common/templates/ganesha.conf.j2"
- dest: /etc/ganesha/ganesha.conf
- owner: "root"
- group: "root"
- mode: "0644"
- config_overrides: "{{ ganesha_conf_overrides }}"
- config_type: ini
---
-- name: check if a cluster is already running
- command: "docker ps -q -a --filter='ancestor={{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'"
- register: ceph_health
- changed_when: false
- failed_when: false
- always_run: true
-
-- include: checks.yml
- when:
- ceph_health.rc != 0 and
- not mon_containerized_deployment_with_kv
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
- when:
- - is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
- when:
- - not is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
- when:
- - ansible_os_family == 'Debian'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
-
-- include: dirs_permissions.yml
-
# Copy Ceph configs to host
- include: copy_configs.yml
-- include: selinux.yml
- when: ansible_os_family == 'RedHat'
-
-# let the first ganesha create configs and users
-- include: create_configs.yml
- when:
- inventory_hostname == groups[nfs_group_name][0] and
- not mon_containerized_default_ceph_conf_with_kv
-
-# Copy Ganesha configs to host
-- include: fetch_configs.yml
- when: not mon_containerized_deployment_with_kv
-
- include: selinux.yml
when: ansible_os_family == 'RedHat'
---
- include: pre_requisite.yml
- when: not nfs_containerized_deployment
+ when: not containerized_deployment
- include: ./docker/main.yml
- when: nfs_containerized_deployment
+ when: containerized_deployment
ExecStartPre=-/usr/bin/docker rm ceph-nfs-%i
ExecStartPre=/usr/bin/mkdir -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha
ExecStart=/usr/bin/docker run --rm --net=host \
- {% if not mon_containerized_deployment_with_kv -%}
+ {% if not containerized_deployment_with_kv -%}
-v /etc/ceph:/etc/ceph \
-v /etc/ganesha:/etc/ganesha \
{% else -%}
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
-cephx: true
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# DOCKER #
##########
-osd_containerized_deployment: false
-osd_containerized_deployment_with_kv: false
-kv_type: etcd
-kv_endpoint: 127.0.0.1
-kv_port: 4001
-ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest
ceph_config_keys: [] # DON'T TOUCH ME
-ceph_docker_on_openstack: false
# PREPARE DEVICE
# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
categories:
- system
dependencies:
- - { role: ceph.ceph-common, when: not osd_containerized_deployment }
- - { role: ceph.ceph-docker-common, when: osd_containerized_deployment }
+ - { role: ceph.ceph-common, when: not containerized_deployment }
+ - { role: ceph.ceph-docker-common, when: containerized_deployment }
msg: "cluster_network must be configured. Ceph replication network"
when:
- cluster_network == '0.0.0.0/0'
- - not osd_containerized_deployment
+ - not containerized_deployment
- name: make sure journal_size configured
debug:
when:
- osd_group_name is defined
- osd_group_name in group_names
- - not osd_containerized_deployment
+ - not containerized_deployment
- not journal_collocation
- not raw_multi_journal
- not bluestore
when:
- osd_group_name is defined
- osd_group_name in group_names
- - not osd_containerized_deployment
+ - not containerized_deployment
- (journal_collocation and raw_multi_journal)
or (journal_collocation and bluestore)
or (raw_multi_journal and bluestore)
when:
- osd_group_name is defined
- osd_group_name in group_names
- - (journal_collocation or osd_containerized_deployment)
+ - (journal_collocation or containerized_deployment)
- not osd_auto_discovery
- devices|length == 0
when:
- osd_group_name is defined
- osd_group_name in group_names
- - not osd_containerized_deployment
+ - not containerized_deployment
- raw_multi_journal
- raw_journal_devices|length == 0
or devices|length == 0
+++ /dev/null
----
-- name: set config and keys paths
- set_fact:
- ceph_config_keys:
- - /etc/ceph/{{ cluster }}.client.admin.keyring
- - /etc/ceph/{{ cluster }}.conf
- - /etc/ceph/monmap-{{ cluster }}
- - /etc/ceph/{{ cluster }}.mon.keyring
- - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: stat for ceph config and keys
- stat:
- path: "{{ item }}"
- with_items: "{{ ceph_config_keys }}"
- changed_when: false
- failed_when: false
- always_run: true
- register: statleftover
-
-- name: fail if we find existing cluster files
- fail:
- msg: "looks like no cluster is running but ceph files are present, please remove them"
- with_together:
- - "{{ ceph_config_keys }}"
- - "{{ statleftover.results }}"
- when: item.1.stat.exists == true
---
-- name: check if a cluster is already running
- command: "docker ps -q -a --filter='ancestor={{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'"
- register: ceph_health
- changed_when: false
- failed_when: false
- always_run: true
-
-- include: checks.yml
- when:
- - ceph_health.rc != 0
- - not osd_containerized_deployment_with_kv
- - not "{{ rolling_update | default(false) }}"
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
- when:
- - is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
- when:
- - not is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
- when:
- - ansible_os_family == 'Debian'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
-
-# NOTE (jimcurtis): dirs_permissions.yml must precede fetch_configs.yml
-# because it creates the directories needed by the latter.
-- include: dirs_permissions.yml
-
- include: fetch_configs.yml
- when: not osd_containerized_deployment_with_kv
-
-- include: selinux.yml
- when: ansible_os_family == 'RedHat'
+ when: not containerized_deployment_with_kv
- include: start_docker_osd.yml
when:
- item.1.get("rc", 0) != 0
- ceph_osd_docker_prepare_env is defined
- - not osd_containerized_deployment_with_kv
+ - not containerized_deployment_with_kv
- name: prepare ceph osd disk with kv_store
shell: |
when:
- item.1.get("rc", 0) != 0
- ceph_osd_docker_prepare_env is defined
- - osd_containerized_deployment_with_kv
+ - containerized_deployment_with_kv
- name: generate ceph osd docker run script
become: true
- include: check_mandatory_vars.yml
- include: pre_requisite.yml
- when: not osd_containerized_deployment
+ when: not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/journal_collocation.yml
when:
- journal_collocation
- - not osd_containerized_deployment
+ - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/raw_multi_journal.yml
when:
- raw_multi_journal
- - not osd_containerized_deployment
+ - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/bluestore.yml
when:
- osd_objectstore == 'bluestore'
- - not osd_containerized_deployment
+ - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/dmcrypt-journal-collocation.yml
when:
- dmcrypt_journal_collocation
- - not osd_containerized_deployment
+ - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/dmcrypt-dedicated-journal.yml
when:
- dmcrypt_dedicated_journal
- - not osd_containerized_deployment
+ - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./docker/main.yml
- when: osd_containerized_deployment
+ when: containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
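In the containerized OSD case the disks to consume come from ceph_osd_docker_devices (the Vagrantfile passes settings['disks'] for this), and an optional ceph_osd_docker_prepare_env can inject extra environment into the prepare step. A sketch of such an override, with illustrative device paths and environment:

containerized_deployment: true
ceph_osd_docker_devices:
  - /dev/sdb
  - /dev/sdc
ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1  # illustrative extra environment for osd prepare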
--security-opt apparmor:unconfined \
{% endif -%}
--pid=host \
- {% if not osd_containerized_deployment_with_kv -%}
+ {% if not containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
{% else -%}
# DOCKER #
##########
-rbd_mirror_containerized_deployment: false
-rbd_mirror_containerized_deployment_with_kv: false
-kv_type: etcd
-kv_endpoint: 127.0.0.1
-ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest
-ceph_docker_on_openstack: false
ceph_config_keys: [] # DON'T TOUCH ME
categories:
- system
dependencies:
- - { role: ceph.ceph-common, when: not rbd_mirror_containerized_deployment }
- - { role: ceph.ceph-docker-common, when: rbd_mirror_containerized_deployment }
+ - { role: ceph.ceph-common, when: not containerized_deployment }
+ - { role: ceph.ceph-docker-common, when: containerized_deployment }
+++ /dev/null
----
-- name: set config and keys paths
- set_fact:
- ceph_config_keys:
- - /etc/ceph/{{ cluster }}.client.admin.keyring
- - /etc/ceph/{{ cluster }}.conf
- - /etc/ceph/monmap-{{ cluster }}
- - /etc/ceph/{{ cluster }}.mon.keyring
- - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: stat for ceph config and keys
- stat:
- path: "{{ item }}"
- with_items: "{{ ceph_config_keys }}"
- changed_when: false
- failed_when: false
- always_run: true
- register: statleftover
-
-- name: fail if we find existing cluster files
- fail:
- msg: "looks like no cluster is running but ceph files are present, please remove them"
- with_together:
- - "{{ ceph_config_keys }}"
- - "{{ statleftover.results }}"
- when: item.1.stat.exists == true
---
-- name: check if a cluster is already running
- command: "docker ps -q -a --filter='ancestor={{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'"
- register: ceph_health
- changed_when: false
- failed_when: false
- always_run: true
-
-- include: checks.yml
- when: ceph_health.rc != 0
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
- when:
- - is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
- when:
- - not is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
- when:
- - ansible_os_family == 'Debian'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
-- include: dirs_permissions.yml
- include: fetch_configs.yml
-- include: selinux.yml
- when: ansible_os_family == 'RedHat'
-
- include: start_docker_rbd_mirror.yml
---
- include: pre_requisite.yml
- when: not rbd_mirror_containerized_deployment
+ when: not containerized_deployment
- include: start_rbd_mirror.yml
- when: not rbd_mirror_containerized_deployment
+ when: not containerized_deployment
- include: configure_mirroring.yml
when:
- ceph_rbd_mirror_configure
- - not rbd_mirror_containerized_deployment
+ - not containerized_deployment
- include: ./docker/main.yml
- when: rbd_mirror_containerized_deployment
+ when: containerized_deployment
ExecStartPre=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-rbd-mirror-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
- {% if not rbd_mirror_containerized_deployment_with_kv -%}
+ {% if not containerized_deployment_with_kv -%}
-v /etc/ceph:/etc/ceph \
{% else -%}
-e KV_TYPE={{kv_type}} \
# DOCKER #
##########
-restapi_containerized_deployment: false
ceph_restapi_docker_interface: eth0
ceph_restapi_port: 5000
-ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest
ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
-ceph_docker_on_openstack: false
ceph_config_keys: [] # DON'T TOUCH ME
categories:
- system
dependencies:
- - { role: ceph.ceph-common, when: not restapi_containerized_deployment }
- - { role: ceph.ceph-docker-common, when: restapi_containerized_deployment }
+ - { role: ceph.ceph-common, when: not containerized_deployment }
+ - { role: ceph.ceph-docker-common, when: containerized_deployment }
---
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
- when:
- - is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
- when:
- - not is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
- when:
- - ansible_os_family == 'Debian'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
-- include: dirs_permissions.yml
- include: fetch_configs.yml
- include: start_docker_restapi.yml
---
- include: pre_requisite.yml
- when: not restapi_containerized_deployment
+ when: not containerized_deployment
- include: start_restapi.yml
- when: not restapi_containerized_deployment
+ when: not containerized_deployment
- include: ./docker/main.yml
- when: restapi_containerized_deployment
+ when: containerized_deployment
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false
-## Ceph options
-#
-cephx: true
-
# Multi-site remote pull URL variables
rgw_pull_port: "{{ radosgw_civetweb_port }}"
rgw_pull_proto: "http"
# DOCKER #
##########
-rgw_containerized_deployment: false
-rgw_containerized_deployment_with_kv: false
-kv_type: etcd
-kv_endpoint: 127.0.0.1
ceph_rgw_civetweb_port: "{{ radosgw_civetweb_port }}"
-ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest
ceph_rgw_docker_extra_env: -e CLUSTER={{ cluster }} -e RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}
-ceph_docker_on_openstack: false
ceph_config_keys: [] # DON'T TOUCH ME
rgw_config_keys: "/" # DON'T TOUCH ME
categories:
- system
dependencies:
- - { role: ceph.ceph-common, when: not rgw_containerized_deployment }
- - { role: ceph.ceph-docker-common, when: rgw_containerized_deployment }
+ - { role: ceph.ceph-common, when: not containerized_deployment }
+ - { role: ceph.ceph-docker-common, when: containerized_deployment }
+++ /dev/null
----
-- name: set config and keys paths
- set_fact:
- ceph_config_keys:
- - /etc/ceph/{{ cluster }}.client.admin.keyring
- - /etc/ceph/{{ cluster }}.conf
- - /etc/ceph/monmap-{{ cluster }}
- - /etc/ceph/{{ cluster }}.mon.keyring
- - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: stat for ceph config and keys
- stat:
- path: "{{ item }}"
- with_items: "{{ ceph_config_keys }}"
- changed_when: false
- failed_when: false
- always_run: true
- register: statleftover
-
-- name: fail if we find existing cluster files
- fail:
- msg: "looks like no cluster is running but ceph files are present, please remove them"
- with_together:
- - "{{ ceph_config_keys }}"
- - "{{ statleftover.results }}"
- when: item.1.stat.exists == true
---
-- name: check if a cluster is already running
- command: "docker ps -q -a --filter='ancestor={{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'"
- register: ceph_health
- changed_when: false
- failed_when: false
- always_run: true
-
-- include: checks.yml
- when:
- - ceph_health.rc != 0
- - not "{{ rolling_update | default(false) }}"
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
- when:
- - is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
- when:
- - not is_atomic
- - ansible_os_family == 'RedHat'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
- when:
- - ansible_os_family == 'Debian'
- - ntp_service_enabled
-
-- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
-- include: dirs_permissions.yml
-# NOTE (jimcurtis): dirs_permissions.yml must precede fetch_configs.yml
-# because it creates the directories needed by the latter.
- include: fetch_configs.yml
-- include: selinux.yml
- when: ansible_os_family == 'RedHat'
-
- include: start_docker_rgw.yml
- include: copy_configs.yml
---
- include: pre_requisite.yml
- when: not rgw_containerized_deployment
+ when: not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
static: False
- include: start_radosgw.yml
- when: not rgw_containerized_deployment
+ when: not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
static: False
- include: ./docker/main.yml
- when: rgw_containerized_deployment
+ when: containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
- {% if not rgw_containerized_deployment_with_kv -%}
+ {% if not containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
{% else -%}
# In a production deployment, these should be secret
if DOCKER then
ansible.extra_vars = ansible.extra_vars.merge({
- mon_containerized_deployment: 'true',
- osd_containerized_deployment: 'true',
- mds_containerized_deployment: 'true',
- rgw_containerized_deployment: 'true',
- nfs_containerized_deployment: 'true',
- restapi_containerized_deployment: 'true',
- rbd_mirror_containerized_deployment: 'true',
+ containerized_deployment: 'true',
ceph_mon_docker_interface: ETH,
ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
ceph_osd_docker_devices: settings['disks'],
docker: True
ceph_stable: True
-mon_containerized_deployment: True
-osd_containerized_deployment: True
-mds_containerized_deployment: True
-rgw_containerized_deployment: True
+containerized_deployment: True
cluster: test
ceph_mon_docker_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
# In a production deployment, these should be secret
if DOCKER then
ansible.extra_vars = ansible.extra_vars.merge({
- mon_containerized_deployment: 'true',
- osd_containerized_deployment: 'true',
- mds_containerized_deployment: 'true',
- rgw_containerized_deployment: 'true',
- nfs_containerized_deployment: 'true',
- restapi_containerized_deployment: 'true',
- rbd_mirror_containerized_deployment: 'true',
+ containerized_deployment: 'true',
ceph_mon_docker_interface: ETH,
ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
ceph_osd_docker_devices: settings['disks'],
docker: True
ceph_stable: True
-mon_containerized_deployment: True
-osd_containerized_deployment: True
-mds_containerized_deployment: True
-rgw_containerized_deployment: True
+containerized_deployment: True
cluster: ceph
ceph_mon_docker_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
docker: True
ceph_stable: True
-mon_containerized_deployment: True
-osd_containerized_deployment: True
-mds_containerized_deployment: True
-rgw_containerized_deployment: True
-mgr_containerized_deployment: True
+containerized_deployment: True
cluster: test
ceph_mon_docker_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"