According to ceph/ceph-container#840, the OSD_FORCE_ZAP variable is no longer needed.
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
--- /dev/null
+---
+- name: set_fact devices generate device list when osd_auto_discovery
+ set_fact:
+ devices: "{{ devices | default([]) + [ item.key | regex_replace('^', '/dev/') ] }}"
+ with_dict: "{{ ansible_devices }}"
+ when:
+ - osd_auto_discovery
+ - ansible_devices is defined
+ - item.value.removable == "0"
+ - item.value.sectors != "0"
+ - item.value.partitions|count == 0
+ - item.value.holders|count == 0
+ - "'dm-' not in item.key"
+
+- name: resolve dedicated device link(s)
+ command: readlink -f {{ item }}
+ changed_when: false
+ with_items: "{{ dedicated_devices }}"
+ register: dedicated_devices_prepare_canonicalize
+ when:
+ - osd_scenario == 'non-collocated'
+ - not osd_auto_discovery
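+# A hedged illustration: readlink -f resolves persistent names to their
+# canonical device node, e.g. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 might
+# resolve to /dev/sda (the mapping is host-specific and hypothetical here).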
+
+- name: set_fact build dedicated_devices from resolved symlinks
+ set_fact:
+ dedicated_devices_tmp: "{{ dedicated_devices_tmp | default([]) + [ item.stdout ] }}"
+ with_items: "{{ dedicated_devices_prepare_canonicalize.results }}"
+ when:
+ - osd_scenario == 'non-collocated'
+ - not osd_auto_discovery
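+# Each entry in the registered results carries the command output; item.stdout
+# is the canonical /dev/* path printed by readlink -f for that entry.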
+
+- name: set_fact build final dedicated_devices list
+ set_fact:
+ dedicated_devices: "{{ dedicated_devices_tmp | reject('search','/dev/disk') | list }}"
+ when:
+ - osd_scenario == 'non-collocated'
+ - not osd_auto_discovery
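+# A hedged end-to-end sketch (hypothetical values): a configured
+# dedicated_devices of ['/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001', '/dev/sdc']
+# would resolve to ['/dev/sda', '/dev/sdc'], and reject('search', '/dev/disk')
+# then drops any entry that still carries an unresolved persistent-name path.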
+++ /dev/null
----
-# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
-# The return code is 1, which makes sense. However, ideally, if ceph-disk detects a ceph partition,
-# it should exit with rc=0 and do nothing unless we pass something like --force.
-# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "failed_when: false";
-# I believe it's safer.
-#
-# The regex syntax uses (pat1|pat2|...|patN) for the different families of device
-# names, but shares a common expression for the partition number at the end.
-# Allow 2-digit partition numbers so fast SSDs can be shared by more than 9 disks
-# for SSD journals.
-
-- name: resolve dedicated device link(s)
- command: readlink -f {{ item }}
- changed_when: false
- with_items: "{{ dedicated_devices }}"
- register: dedicated_devices_prepare_canonicalize
- when:
- - osd_scenario == 'non-collocated'
-
-- name: set_fact build dedicated_devices from resolved symlinks
- set_fact:
- dedicated_devices_tmp: "{{ dedicated_devices_tmp | default([]) + [ item.stdout ] }}"
- with_items: "{{ dedicated_devices_prepare_canonicalize.results }}"
- when:
- - osd_scenario == 'non-collocated'
-
-- name: set_fact build final dedicated_devices list
- set_fact:
- dedicated_devices: "{{ dedicated_devices_tmp | reject('search','/dev/disk') | list }}"
- when:
- - osd_scenario == 'non-collocated'
-
-- name: include check_devices_static.yml
- include: check_devices_static.yml
-  # Hard code this so we will skip the entire file instead of individual tasks (the default isn't consistent)
- static: False
-
-- name: check the partition status of the journal devices
- command: "parted --script {{ item }} print"
- with_items:
- - "{{ dedicated_devices|unique }}"
- changed_when: false
- failed_when: false
- check_mode: no
- register: journal_partition_status
- when:
- - osd_scenario == 'non-collocated'
-
-- name: fix partitions gpt header or labels of the journal device(s)
- shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
- with_together:
- - "{{ journal_partition_status.results }}"
- - "{{ dedicated_devices|unique }}"
- changed_when: false
- when:
- - not containerized_deployment
- - osd_scenario == 'non-collocated'
- - not item.0.get("skipped")
- - item.0.get("rc", 0) != 0
-
-- name: create gpt disk label of the journal device(s)
- command: parted --script {{ item.1 }} mklabel gpt
- with_together:
- - "{{ journal_partition_status.results }}"
- - "{{ dedicated_devices|unique }}"
- changed_when: false
- when:
- - containerized_deployment
- - osd_scenario == 'non-collocated'
- - not item.0.get("skipped")
- - item.0.get("rc", 0) != 0
+++ /dev/null
----
-- name: check the partition status of the osd disks
- command: "parted --script {{ item }} print"
- with_items:
- - "{{ devices }}"
- changed_when: false
- failed_when: false
- check_mode: no
- register: osd_partition_status_results
-
-# NOTE: The following calls to sgdisk are retried because sgdisk is known to
-# fully wipe a device the first time around. There is no need to halt execution
-# of zapping the whole device so these try again. It is easier to use `||` to
-# keep the current flow of the task.
-# See: https://github.com/ceph/ceph-ansible/issues/759
-- name: fix partitions gpt header or labels of the osd disks
- shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
- with_together:
- - "{{ osd_partition_status_results.results }}"
- - "{{ devices }}"
- changed_when: false
- when:
- - not item.0.get("skipped")
- - item.0.get("rc", 0) != 0
- - not containerized_deployment
-
-- name: create gpt disk label
- command: parted --script {{ item.1 }} mklabel gpt
- with_together:
- - "{{ osd_partition_status_results.results }}"
- - "{{ devices }}"
- changed_when: false
- when:
- - not item.0.get("skipped")
- - item.0.get("rc", 0) != 0
- - containerized_deployment
- name: include ceph_disk_cli_options_facts.yml
include: ceph_disk_cli_options_facts.yml
-- name: set_fact devices generate device list when osd_auto_discovery
- set_fact:
- devices: "{{ devices | default([]) + [ item.key | regex_replace('^', '/dev/') ] }}"
- with_dict: "{{ ansible_devices }}"
- when:
- - osd_auto_discovery
- - ansible_devices is defined
- - item.value.removable == "0"
- - item.value.sectors != "0"
- - item.value.partitions|count == 0
- - item.value.holders|count == 0
- - "'dm-' not in item.key"
-
-- name: include check_devices.yml
- include: check_devices.yml
- when:
- - not osd_auto_discovery
+- name: include build_devices.yml
+ include: build_devices.yml
- name: check if a partition named 'ceph' exists
shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
monitor_interface: eth1
public_network: "192.168.35.0/24"
cluster_network: "192.168.36.0/24"
-ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd_scenario: collocated
ceph_rgw_civetweb_port: 8080
osd_objectstore: filestore
-ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
devices:
- /dev/sda
- /dev/sdb
osd_scenario: collocated
ceph_rgw_civetweb_port: 8080
osd_objectstore: filestore
-ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- /dev/sdb
monitor_interface: eth1
public_network: "192.168.55.0/24"
cluster_network: "192.168.56.0/24"
-ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
ceph_conf_overrides:
global:
osd_pool_default_size: 1