containerized_deployment: 'true',
monitor_interface: ETH,
ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
- ceph_osd_docker_devices: settings['disks'],
devices: settings['disks'],
ceph_docker_on_openstack: BOX == 'openstack',
ceph_rgw_civetweb_port: 8080,
#ceph_config_keys: [] # DON'T TOUCH ME
# PREPARE DEVICE
-# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
-# This is why we use [0] in the example.
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
-# Examples:
-# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
-# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
-# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
-# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-#
#ceph_osd_docker_devices: "{{ devices }}"
-#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
# ACTIVATE DEVICE
-# Examples:
-# Journal collocated or Dedicated journal on Filesore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
#
-#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
+#ceph_osd_docker_extra_env:
#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
###########
# SYSTEMD #
###########
+
# ceph_osd_systemd_overrides will override the systemd settings
# for the ceph-osd services.
# For example, to set "PrivateDevices=false" you can specify:
name: "ceph-osd@{{ item | basename }}"
state: stopped
enabled: no
- with_items: "{{ ceph_osd_docker_devices }}"
+ with_items: "{{ devices }}"
ignore_errors: true
- name: remove ceph osd prepare container
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
state: absent
- with_items: "{{ ceph_osd_docker_devices }}"
+ with_items: "{{ devices }}"
ignore_errors: true
- name: remove ceph osd container
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "ceph-osd-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
state: absent
- with_items: "{{ ceph_osd_docker_devices }}"
+ with_items: "{{ devices }}"
ignore_errors: true
- name: zap ceph osd disks
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
zap_device
with_items:
- - "{{ ceph_osd_docker_devices }}"
+ - "{{ devices }}"
- "{{ dedicated_devices|default([]) }}"
- name: wait until the zap containers die
name: "ceph-osd-zap-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
state: absent
with_items:
- - "{{ ceph_osd_docker_devices }}"
+ - "{{ devices }}"
- "{{ dedicated_devices|default([]) }}"
- name: remove ceph osd service
name: ceph-osd@{{ item | basename }}
state: restarted
enabled: yes
- with_items: "{{ ceph_osd_docker_devices }}"
+ with_items: "{{ devices }}"
when:
- ansible_service_mgr == 'systemd'
- containerized_deployment
- name: collect osd devices
shell: |
blkid | awk '/ceph data/ { sub ("1:", "", $1); print $1 }'
- register: ceph_osd_docker_devices
+ register: devices
changed_when: false
- name: stop non-containerized ceph osd(s)
docker ps | grep -sq {{ item | regex_replace('/', '') }}
changed_when: false
failed_when: false
- with_items: "{{ ceph_osd_docker_devices.stdout_lines }}"
+ with_items: "{{ devices.stdout_lines }}"
register: osd_running
- name: unmount all the osd directories
changed_when: false
failed_when: false
with_together:
- - "{{ ceph_osd_docker_devices.stdout_lines }}"
+ - "{{ devices.stdout_lines }}"
- "{{ osd_running.results }}"
when:
- item.1.get("rc", 0) != 0
- - set_fact: ceph_osd_docker_devices={{ ceph_osd_docker_devices.stdout_lines }}
+ - set_fact: devices={{ devices.stdout_lines }}
roles:
- ceph-defaults
changed_when: false
with_together:
- "{{ ceph_config_keys }}"
- - "{{ statconfig.results }}"
+ - "{{ statconfig.results | default([]) }}"
when: item.1.stat.exists == true
ceph_config_keys: [] # DON'T TOUCH ME
# PREPARE DEVICE
-# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
-# This is why we use [0] in the example.
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
-# Examples:
-# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
-# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
-# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
-# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-#
ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
# ACTIVATE DEVICE
-# Examples:
-# Journal collocated or Dedicated journal on Filesore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
#
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
+ceph_osd_docker_extra_env:
ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
###########
# SYSTEMD #
###########
+
# ceph_osd_systemd_overrides will override the systemd settings
# for the ceph-osd services.
# For example, to set "PrivateDevices=false" you can specify:
--- /dev/null
+---
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+ when:
+ - osd_objectstore == 'bluestore'
+ - not dmcrypt
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+ - not containerized_deployment
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+ when:
+ - osd_objectstore == 'filestore'
+ - not dmcrypt
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+ - not containerized_deployment
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }}"
+ when:
+ - osd_objectstore == 'filestore'
+ - not dmcrypt
+    - ceph_release_num[ceph_release] < ceph_release_num.luminous
+ - not containerized_deployment
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+ when:
+ - osd_objectstore == 'bluestore'
+ - dmcrypt
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+ - not containerized_deployment
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+ when:
+ - osd_objectstore == 'filestore'
+ - dmcrypt
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+ - not containerized_deployment
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+ when:
+ - osd_objectstore == 'filestore'
+ - dmcrypt
+    - ceph_release_num[ceph_release] < ceph_release_num.luminous
+ - not containerized_deployment
+
+- set_fact:
+ docker_env_args: -e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}
+ when:
+ - containerized_deployment_with_kv
+
+- set_fact:
+ docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
+ when:
+ - containerized_deployment
+ - osd_objectstore == 'filestore'
+ - not dmcrypt
+
+- set_fact:
+ docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
+ when:
+ - containerized_deployment
+ - osd_objectstore == 'filestore'
+ - dmcrypt
+
+- set_fact:
+ docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
+ when:
+ - containerized_deployment
+ - osd_objectstore == 'bluestore'
+ - not dmcrypt
+
+- set_fact:
+ docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
+ when:
+ - containerized_deployment
+ - osd_objectstore == 'bluestore'
+ - dmcrypt
always_run: true
register: journal_partition_status
when:
- - osd_scenario == 'non-collocated'
- item.0.rc != 0
+ - osd_scenario == 'non-collocated'
-- name: fix partitions gpt header or labels of the journal devices
+- name: fix partitions gpt header or labels of the journal device(s)
shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
with_together:
- "{{ journal_partition_status.results }}"
changed_when: false
when:
- not item.0.get("skipped")
+ - item.0.get("rc", 0) != 0
- osd_scenario == 'non-collocated'
+ - not containerized_deployment
+
+- name: create gpt disk label of the journal device(s)
+ command: parted --script {{ item.1 }} mklabel gpt
+ with_together:
+ - "{{ osd_partition_status_results.results }}"
+ - "{{ dedicated_devices|unique }}"
+ changed_when: false
+ when:
+ - not item.0.get("skipped")
- item.0.get("rc", 0) != 0
+ - osd_scenario == 'non-collocated'
+ - containerized_deployment
changed_when: false
when:
- ansible_devices is defined
- - item.0.item.value.removable == 0
- - item.0.item.value.partitions|count == 0
- - item.0.item.value.holders|count == 0
- - item.0.rc != 0
+ - not item.0.get("skipped")
+ - item.0.get("rc", 0) != 0
+ - item.1.value.removable == 0
+ - item.1.value.partitions|count == 0
+ - item.1.value.holders|count == 0
+ - not containerized_deployment
+
+- name: create gpt disk label
+ command: parted --script {{ item.1 }} mklabel gpt
+ with_together:
+ - "{{ osd_partition_status_results.results }}"
+ - "{{ ansible_devices }}"
+ changed_when: false
+ when:
+ - ansible_devices is defined
+ - not item.0.get("skipped")
+ - item.0.get("rc", 0) != 0
+ - item.1.value.removable == 0
+ - item.1.value.partitions|count == 0
+ - item.1.value.holders|count == 0
+ - containerized_deployment
- name: check if a partition named 'ceph' exists (autodiscover disks)
shell: "parted --script /dev/{{ item.key }} print | egrep -sq '^ 1.*ceph'"
when:
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
+ - not containerized_deployment
+
+- name: create gpt disk label
+ command: parted --script {{ item.1 }} mklabel gpt
+ with_together:
+ - "{{ osd_partition_status_results.results }}"
+ - "{{ devices }}"
+ changed_when: false
+ when:
+ - not item.0.get("skipped")
+ - item.0.get("rc", 0) != 0
+ - containerized_deployment
- name: check if a partition named 'ceph' exists
shell: "parted --script {{ item.1 }} print | egrep -sq '^ 1.*ceph'"
state: unmounted
when: ceph_docker_on_openstack
-- name: verify if the disk was already prepared
- shell: "lsblk -o PARTLABEL {{ item }} | grep -sq 'ceph'"
- failed_when: false
- always_run: true
- with_items: "{{ ceph_osd_docker_devices }}"
- register: osd_prepared
-
-# use shell rather than docker module
-# to ensure osd disk prepare finishes before
-# starting the next task
-- name: prepare ceph osd disk
- shell: |
- docker run --net=host \
- --pid=host \
- --privileged=true \
- --name="ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.0 | regex_replace('/', '') }}" \
- -v /etc/ceph:/etc/ceph \
- -v /var/lib/ceph/:/var/lib/ceph/ \
- -v /dev:/dev \
- -v /etc/localtime:/etc/localtime:ro \
- -e "OSD_DEVICE={{ item.0 }}" \
- -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
- {{ ceph_osd_docker_prepare_env }} \
- "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- with_together:
- - "{{ ceph_osd_docker_devices }}"
- - "{{ osd_prepared.results }}"
- when:
- - item.1.get("rc", 0) != 0
- - ceph_osd_docker_prepare_env is defined
- - not containerized_deployment_with_kv
-
-- name: prepare ceph osd disk with kv_store
- shell: |
- docker run --net=host \
- --pid=host \
- --privileged=true \
- --name="ceph-osd-prepare-{{ ansible_hostname }}-dev-{{ item.0 | regex_replace('/', '') }}" \
- -v /dev:/dev \
- -v /etc/localtime:/etc/localtime:ro \
- -e "OSD_DEVICE={{ item.0 }}" \
- -e "{{ ceph_osd_docker_prepare_env }}" \
- -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
- -e KV_TYPE={{kv_type}} \
- -e KV_IP={{kv_endpoint}} \
- -e KV_PORT={{kv_port}} \
- {{ ceph_osd_docker_prepare_env }} \
- "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" \
- with_together:
- - "{{ ceph_osd_docker_devices }}"
- - "{{ osd_prepared.results }}"
- when:
- - item.1.get("rc", 0) != 0
- - ceph_osd_docker_prepare_env is defined
- - containerized_deployment_with_kv
-
- name: generate ceph osd docker run script
become: true
template:
- name: enable systemd unit file for osd instance
shell: systemctl enable ceph-osd@{{ item | basename }}.service
changed_when: false
- with_items: "{{ ceph_osd_docker_devices }}"
+ with_items: "{{ devices }}"
- name: reload systemd unit files
shell: systemctl daemon-reload
state: started
enabled: yes
changed_when: false
- with_items: "{{ ceph_osd_docker_devices }}"
+ with_items: "{{ devices }}"
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
+- include: ceph_disk_cli_options_facts.yml
+
+- include: check_devices.yml
+
- include: ./scenarios/collocated.yml
when:
- osd_scenario == 'collocated'
- - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/non-collocated.yml
when:
- osd_scenario == 'non-collocated'
- - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
---
-## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
-
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
- when:
- - osd_objectstore == 'bluestore'
- - osd_scenario == 'collocated'
- - not dmcrypt
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
- when:
- - osd_objectstore == 'filestore'
- - osd_scenario == 'collocated'
- - not dmcrypt
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }}"
- when:
- - osd_objectstore == 'filestore'
- - osd_scenario == 'collocated'
- - not dmcrypt
- - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
- when:
- - osd_objectstore == 'bluestore'
- - osd_scenario == 'collocated'
- - dmcrypt
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+# use shell rather than docker module
+# to ensure osd disk prepare finishes before
+# starting the next task
+- name: prepare ceph containerized osd disk collocated
+ shell: |
+ docker run --net=host \
+ --pid=host \
+ --privileged=true \
+ --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/lib/ceph/:/var/lib/ceph/ \
+ -v /dev:/dev \
+ -v /etc/localtime:/etc/localtime:ro \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+ -e OSD_DEVICE={{ item.1 }} \
+ {{ docker_env_args }} \
+ {{ ceph_osd_docker_prepare_env }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ with_together:
+ - "{{ parted_results.results }}"
+ - "{{ devices }}"
when:
- - osd_objectstore == 'filestore'
- - osd_scenario == 'collocated'
- - dmcrypt
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+ - not item.0.get("skipped")
+ - not osd_auto_discovery
+ - containerized_deployment
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+- name: automatic prepare ceph containerized osd disk collocated
+ shell: |
+ docker run --net=host \
+ --pid=host \
+ --privileged=true \
+ --name=ceph-osd-prepare-{{ ansible_hostname }}-devdev{{ item.key }} \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/lib/ceph/:/var/lib/ceph/ \
+ -v /dev:/dev \
+ -v /etc/localtime:/etc/localtime:ro \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+ -e OSD_DEVICE=/dev/{{ item.key }} \
+ {{ docker_env_args }} \
+ {{ ceph_osd_docker_prepare_env }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ with_dict: "{{ ansible_devices }}"
when:
- - osd_objectstore == 'filestore'
- - osd_scenario == 'collocated'
- - dmcrypt
- - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+ - ansible_devices is defined
+ - item.value.removable == "0"
+ - item.value.partitions|count == 0
+ - item.value.holders|count == 0
+ - osd_auto_discovery
+ - containerized_deployment
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
-- name: automatic prepare "{{ osd_objectstore }}" osd disk(s) without partitions with collocated osd data and journal
+- name: automatic prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) without partitions with collocated osd data and journal
command: "ceph-disk prepare {{ ceph_disk_cli_options }} /dev/{{ item.key }}"
register: prepared_osds
with_dict: "{{ ansible_devices }}"
- item.value.partitions|count == 0
- item.value.holders|count == 0
- osd_auto_discovery
+ - not containerized_deployment
-- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with collocated osd data and journal
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with collocated osd data and journal
command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }}"
with_together:
- "{{ parted_results.results }}"
- item.0.get("rc", 0) != 0
- item.1.get("rc", 0) != 0
- not osd_auto_discovery
+ - not containerized_deployment
- include: ../activate_osds.yml
---
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
- when:
- - osd_objectstore == 'bluestore'
- - not dmcrypt
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
- when:
- - osd_objectstore == 'filestore'
- - not dmcrypt
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }}"
+# use shell rather than docker module
+# to ensure osd disk prepare finishes before
+# starting the next task
+- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated
+ shell: |
+ docker run --net=host \
+ --pid=host \
+ --privileged=true \
+ --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/lib/ceph/:/var/lib/ceph/ \
+ -v /dev:/dev \
+ -v /etc/localtime:/etc/localtime:ro \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+ -e OSD_DEVICE={{ item.1 }} \
+ -e OSD_JOURNAL={{ item.2 }} \
+ {{ docker_env_args }} \
+ {{ ceph_osd_docker_prepare_env }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ with_together:
+ - "{{ parted_results.results }}"
+ - "{{ devices }}"
+ - "{{ dedicated_devices }}"
when:
+ - not item.0.get("skipped")
+ - not osd_auto_discovery
+ - containerized_deployment
- osd_objectstore == 'filestore'
- - not dmcrypt
- - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal
+ shell: |
+ docker run --net=host \
+ --pid=host \
+ --privileged=true \
+ --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/lib/ceph/:/var/lib/ceph/ \
+ -v /dev:/dev \
+ -v /etc/localtime:/etc/localtime:ro \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+ -e OSD_DEVICE={{ item.1 }} \
+ -e OSD_BLUESTORE_BLOCK_DB={{ item.2 }} \
+ -e OSD_BLUESTORE_BLOCK_WAL={{ item.3 }} \
+ {{ docker_env_args }} \
+ {{ ceph_osd_docker_prepare_env }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ with_together:
+ - "{{ parted_results.results }}"
+ - "{{ devices }}"
+ - "{{ dedicated_devices }}"
+ - "{{ bluestore_wal_devices }}"
when:
+ - not item.0.get("skipped")
+ - not osd_auto_discovery
+ - containerized_deployment
- osd_objectstore == 'bluestore'
- - dmcrypt
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
- when:
- - osd_objectstore == 'filestore'
- - dmcrypt
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
- when:
- - osd_objectstore == 'filestore'
- - dmcrypt
- - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- name: prepare filestore osd disk(s) non-collocated
+- name: prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) non-collocated
command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}"
with_together:
- "{{ parted_results.results }}"
- not item.1.get("skipped")
- item.1.get("rc", 0) != 0
- osd_objectstore == 'filestore'
- - not osd_auto_discovery
+ - not containerized_deployment
-- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with a dedicated device for db and wal
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with a dedicated device for db and wal
command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
with_together:
- "{{ parted_results.results }}"
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- osd_objectstore == 'bluestore'
- - not osd_auto_discovery
+ - not containerized_deployment
- include: ../activate_osds.yml
# {{ ansible_managed }}
+#############
+# VARIABLES #
+#############
+
+REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+
+
#############
# FUNCTIONS #
#############
-function create_dev_list {
- local regex
+
+function expose_devices {
local disks
- regex="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
# we use the prepare container to find the partitions to expose
- disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${regex} | uniq)
+ disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
for disk in $disks; do
- DEVICES="--device $disk "
+ DEVICES="--device=$disk "
done
}
-create_dev_list $1
+function expose_partitions {
+ local partition
+ for partition in Block.wal Block.db Journal; do
+ if docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo "$partition is GPT partition"; then
+ if [[ "$partition" == "Block.wal" ]]; then
+ part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+ DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_WAL=$part"
+ elif [[ "$partition" == "Block.db" ]]; then
+ part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+ DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_DB=$part"
+ elif [[ "$partition" == "Journal" ]]; then
+ part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+ DOCKER_ENV="$DOCKER_ENV -e OSD_JOURNAL=$part"
+ fi
+ fi
+ done
+}
+
+#expose_devices $1
+expose_partitions $1
########
--rm \
--net=host \
--privileged=true \
+ --pid=host \
+ -v /dev:/dev \
+ -v /etc/localtime:/etc/localtime:ro \
+ -v /var/lib/ceph:/var/lib/ceph \
+ -v /etc/ceph:/etc/ceph \
+ $DOCKER_ENV \
{% if ansible_distribution == 'Ubuntu' -%}
--security-opt apparmor:unconfined \
{% endif -%}
- --pid=host \
{% if not containerized_deployment_with_kv -%}
- -v /var/lib/ceph:/var/lib/ceph \
- -v /etc/ceph:/etc/ceph \
{% else -%}
- -e KV_TYPE={{kv_type}} \
- -e KV_IP={{kv_endpoint}} \
- -e KV_PORT={{kv_port}} \
+ -e KV_TYPE={{ kv_type }} \
+ -e KV_IP={{ kv_endpoint }} \
+ -e KV_PORT={{ kv_port }} \
{% endif -%}
- -v /dev:/dev \
- -v /etc/localtime:/etc/localtime:ro \
- --device=/dev/${1} \
- --device=/dev/${1}1 \
- {% if dedicated_devices|length > 0 -%}
- -e OSD_JOURNAL={{ dedicated_devices[0] }} \
- {% else -%}
- --device=/dev/${1}2 \
+ {% if osd_objectstore == 'filestore' and not dmcrypt -%}
+ -e OSD_FILESTORE=1 \
+ -e OSD_DMCRYPT=0 \
+ {% endif -%}
+ {% if osd_objectstore == 'filestore' and dmcrypt -%}
+ -e OSD_FILESTORE=1 \
+ -e OSD_DMCRYPT=1 \
+ {% endif -%}
+ {% if osd_objectstore == 'bluestore' and not dmcrypt -%}
+ -e OSD_BLUESTORE=1 \
+ -e OSD_DMCRYPT=0 \
{% endif -%}
+ {% if osd_objectstore == 'bluestore' and dmcrypt -%}
+ -e OSD_BLUESTORE=1 \
+ -e OSD_DMCRYPT=1 \
+ {% endif -%}
+ -e CLUSTER={{ cluster }} \
-e OSD_DEVICE=/dev/${1} \
+ -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
{{ ceph_osd_docker_extra_env }} \
--name=ceph-osd-{{ ansible_hostname }}-dev${1} \
- -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
radosgw_interface: eth1
osd_scenario: collocated
dmcrypt: true
-osd_objectstore: "bluestore"
+osd_objectstore: bluestore
devices:
- '/dev/sda'
- '/dev/sdb'
radosgw_interface: eth1
osd_scenario: collocated
dmcrypt: true
-osd_objectstore: "bluestore"
+osd_objectstore: bluestore
devices:
- '/dev/sda'
- '/dev/sdb'
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
+osd_scenario: non-collocated
+osd_objectstore: bluestore
devices:
- /dev/sda
dedicated_devices:
- /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
ceph_osd_docker_run_script_path: /var/tmp
cluster_network: "192.168.16.0/24"
osd_scenario: collocated
ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
devices:
- /dev/sda
- /dev/sdb
cluster_network: "192.168.2.0/24"
journal_size: 100
radosgw_interface: eth1
-osd_objectstore: "filestore"
+osd_objectstore: filestore
devices:
- '/dev/sda'
dedicated_devices:
monitor_interface: eth1
radosgw_interface: eth1
osd_scenario: non-collocated
-osd_objectstore: "filestore"
+dmcrypt: true
+osd_objectstore: filestore
devices:
- '/dev/sda'
dedicated_devices:
monitor_interface: eth1
radosgw_interface: eth1
osd_scenario: collocated
-osd_objectstore: "filestore"
+osd_objectstore: filestore
+dmcrypt: true
devices:
- '/dev/sda'
- '/dev/sdb'
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
osd_scenario: collocated
+osd_objectstore: filestore
dmcrypt: true
ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
devices:
- /dev/sda
- /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
+osd_objectstore: filestore
osd_scenario: non-collocated
devices:
- /dev/sda
dedicated_devices:
- /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
ceph_osd_docker_run_script_path: /var/tmp
cluster_network: "192.168.16.0/24"
osd_scenario: collocated
ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE
+osd_objectstore: filestore
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
devices:
- /dev/sda
- /dev/sdb
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
-osd_objectstore: "filestore"
+osd_objectstore: filestore
devices:
- '/dev/sda'
- '/dev/sdb'