git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
common: do not run tasks in main.yml, use include (1158/head)
author Sébastien Han <seb@redhat.com>
Fri, 9 Dec 2016 13:51:35 +0000 (14:51 +0100)
committer Sébastien Han <seb@redhat.com>
Fri, 9 Dec 2016 15:00:46 +0000 (16:00 +0100)
For readability and clarity, we do not run any tasks directly in the
main.yml file. This file should only contain includes, which makes it
easier to apply conditionals to them later if we want to.
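
As a minimal sketch of the pattern this enables, a conditional can now be
attached to a whole group of tasks in one place (the when clause below is
purely illustrative and not part of this change):

    # main.yml only includes task files; a condition on the include
    # applies to every task in the included file.
    - include: configure_cluster_name.yml
      when: ansible_os_family in ['RedHat', 'Debian']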

Signed-off-by: Sébastien Han <seb@redhat.com>
.gitignore
roles/ceph-common/tasks/checks/check_socket.yml [new file with mode: 0644]
roles/ceph-common/tasks/configure_cluster_name.yml [new file with mode: 0644]
roles/ceph-common/tasks/create_rbd_client_dir.yml [new file with mode: 0644]
roles/ceph-common/tasks/facts.yml
roles/ceph-common/tasks/generate_ceph_conf.yml [new file with mode: 0644]
roles/ceph-common/tasks/generate_cluster_fsid.yml [new file with mode: 0644]
roles/ceph-common/tasks/main.yml
roles/ceph-mon/tasks/check_mandatory_vars.yml [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
index 126142a2cf45b7170198ee216788d7cf6c9b57ce..8373872df151e7018031df7e5dd1503718b91073 100644 (file)
@@ -12,7 +12,7 @@ group_vars/restapis
 group_vars/agent
 group_vars/*.yml
 *.DS_Store
-*.yml
+/*.yml
 *.pyc
 *.sw?
 .tox
diff --git a/roles/ceph-common/tasks/checks/check_socket.yml b/roles/ceph-common/tasks/checks/check_socket.yml
new file mode 100644 (file)
index 0000000..674d34c
--- /dev/null
@@ -0,0 +1,14 @@
+---
+- name: check for a ceph socket
+  shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: socket
+
+- name: check for a rados gateway socket
+  shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: socketrgw
diff --git a/roles/ceph-common/tasks/configure_cluster_name.yml b/roles/ceph-common/tasks/configure_cluster_name.yml
new file mode 100644 (file)
index 0000000..bd37af8
--- /dev/null
@@ -0,0 +1,47 @@
+---
+- name: configure cluster name
+  lineinfile:
+    dest: /etc/sysconfig/ceph
+    insertafter: EOF
+    create: yes
+    line: "CLUSTER={{ cluster }}"
+  when: ansible_os_family == "RedHat"
+
+# NOTE(leseb): we are performing the following check
+# to ensure any Jewel installation will not fail.
+# The following commit https://github.com/ceph/ceph/commit/791eba81a5467dd5de4f1680ed0deb647eb3fb8b
+# fixed a package issue where the path was wrong.
+# This fix is not yet in all the distro packages, so we are working around it.
+# Impacted versions:
+# - Jewel from UCA: https://bugs.launchpad.net/ubuntu/+source/ceph/+bug/1582773
+# - Jewel from latest Canonical 16.04 distro
+# - All previous versions from Canonical
+# - Infernalis from ceph.com
+- name: check /etc/default/ceph exist
+  stat:
+    path: /etc/default/ceph
+  register: etc_default_ceph
+  always_run: true
+  when: ansible_os_family == "Debian"
+
+- name: configure cluster name
+  lineinfile:
+    dest: /etc/default/ceph
+    insertafter: EOF
+    create: yes
+    line: "CLUSTER={{ cluster }}"
+  when:
+    - ansible_os_family == "Debian"
+    - etc_default_ceph.stat.exists
+    - not etc_default_ceph.stat.isdir
+
+- name: configure cluster name
+  lineinfile:
+    dest: /etc/default/ceph/ceph
+    insertafter: EOF
+    create: yes
+    line: "CLUSTER={{ cluster }}"
+  when:
+    - ansible_os_family == "Debian"
+    - etc_default_ceph.stat.exists
+    - etc_default_ceph.stat.isdir
diff --git a/roles/ceph-common/tasks/create_rbd_client_dir.yml b/roles/ceph-common/tasks/create_rbd_client_dir.yml
new file mode 100644 (file)
index 0000000..f893bbc
--- /dev/null
@@ -0,0 +1,12 @@
+---
+- name: create rbd client directory
+  file:
+    path: "{{ item }}"
+    state: directory
+    owner: "{{ rbd_client_directory_owner }}"
+    group: "{{ rbd_client_directory_group }}"
+    mode: "{{ rbd_client_directory_mode }}"
+  with_items:
+    - "{{ rbd_client_admin_socket_path }}"
+    - "{{ rbd_client_log_path }}"
+  when: rbd_client_directories
diff --git a/roles/ceph-common/tasks/facts.yml b/roles/ceph-common/tasks/facts.yml
index 76ae7a9a47b4b21e50d046969e8921325fc333fe..85cccfc3f25e2494a57740a8c9183271cf53104e 100644 (file)
 - set_fact:
     mds_name: "{{ ansible_fqdn }}"
   when: mds_use_fqdn
+
+- set_fact:
+    dir_owner: ceph
+    dir_group: ceph
+    dir_mode: "0755"
+  when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
+
+- set_fact:
+    dir_owner: root
+    dir_group: root
+    dir_mode: "0755"
+  when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
+
+- set_fact:
+    key_owner: root
+    key_group: root
+    key_mode: "0600"
+  when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
+
+- set_fact:
+    key_owner: ceph
+    key_group: ceph
+    key_mode: "0600"
+  when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
+
+- set_fact:
+    activate_file_owner: ceph
+    activate_file_group: ceph
+    activate_file_mode: "0644"
+  when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
+
+- set_fact:
+    activate_file_owner: root
+    activate_file_group: root
+    activate_file_mode: "0644"
+  when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
+
+- set_fact:
+    rbd_client_directory_owner: root
+  when:
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
+    - rbd_client_directory_owner is not defined
+      or not rbd_client_directory_owner
+
+- set_fact:
+    rbd_client_directory_owner: ceph
+  when:
+    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
+    - rbd_client_directory_owner is not defined
+      or not rbd_client_directory_owner
+
+- set_fact:
+    rbd_client_directory_group: root
+  when:
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
+    - rbd_client_directory_group is not defined
+      or not rbd_client_directory_group
+
+- set_fact:
+    rbd_client_directory_group: ceph
+  when:
+    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
+    - rbd_client_directory_group is not defined
+      or not rbd_client_directory_group
+
+- set_fact:
+    rbd_client_directory_mode: "1777"
+  when:
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
+    - rbd_client_directory_mode is not defined
+      or not rbd_client_directory_mode
+
+- set_fact:
+    rbd_client_directory_mode: "0770"
+  when:
+    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
+    - rbd_client_directory_mode is not defined
+      or not rbd_client_directory_mode
+
diff --git a/roles/ceph-common/tasks/generate_ceph_conf.yml b/roles/ceph-common/tasks/generate_ceph_conf.yml
new file mode 100644 (file)
index 0000000..b92d5dc
--- /dev/null
@@ -0,0 +1,34 @@
+---
+- name: create ceph conf directory
+  file:
+    path: /etc/ceph
+    state: directory
+    owner: "{{ dir_owner }}"
+    group: "{{ dir_group }}"
+    mode: "{{ dir_mode }}"
+
+- name: generate ceph configuration file
+  action: config_template
+  args:
+    src: ceph.conf.j2
+    dest: /etc/ceph/{{ cluster }}.conf
+    owner: "{{ dir_owner }}"
+    group: "{{ dir_group }}"
+    mode: "{{ activate_file_mode }}"
+    config_overrides: "{{ ceph_conf_overrides }}"
+    config_type: ini
+  notify:
+    - restart ceph mons
+    - restart ceph mons on ubuntu
+    - restart ceph mons with systemd
+    - restart ceph osds
+    - restart ceph osds on ubuntu
+    - restart ceph osds with systemd
+    - restart ceph mdss
+    - restart ceph mdss on ubuntu
+    - restart ceph mdss with systemd
+    - restart ceph rgws
+    - restart ceph rgws on ubuntu
+    - restart ceph rgws on red hat
+    - restart ceph rgws with systemd
+    - restart ceph nfss
diff --git a/roles/ceph-common/tasks/generate_cluster_fsid.yml b/roles/ceph-common/tasks/generate_cluster_fsid.yml
new file mode 100644 (file)
index 0000000..10d2ad7
--- /dev/null
@@ -0,0 +1,31 @@
+---
+- name: create a local fetch directory if it does not exist
+  local_action: file path={{ fetch_directory }} state=directory
+  changed_when: false
+  become: false
+  run_once: true
+  when: cephx or generate_fsid
+
+- name: generate cluster fsid
+  local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  register: cluster_uuid
+  become: false
+  when:
+    - generate_fsid
+    - ceph_current_fsid.rc != 0
+
+- name: reuse cluster fsid when cluster is already running
+  local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  become: false
+  when: ceph_current_fsid.rc == 0
+
+- name: read cluster fsid if it already exists
+  local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+    removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  changed_when: false
+  register: cluster_uuid
+  become: false
+  always_run: true
+  when: generate_fsid
diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml
index c69c1490faeb829e6bc6380812b8bb87f8772998..186c190da312b2104640d81b941afef613f33c5c 100644 (file)
   static: False
 
 - include: facts.yml
-
-- set_fact:
-    dir_owner: ceph
-    dir_group: ceph
-    dir_mode: "0755"
-  when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
-
-- set_fact:
-    dir_owner: root
-    dir_group: root
-    dir_mode: "0755"
-  when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
-
-- set_fact:
-    key_owner: root
-    key_group: root
-    key_mode: "0600"
-  when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
-
-- set_fact:
-    key_owner: ceph
-    key_group: ceph
-    key_mode: "0600"
-  when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
-
-- set_fact:
-    activate_file_owner: ceph
-    activate_file_group: ceph
-    activate_file_mode: "0644"
-  when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
-
-- set_fact:
-    activate_file_owner: root
-    activate_file_group: root
-    activate_file_mode: "0644"
-  when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
-
-- set_fact:
-    rbd_client_directory_owner: root
-  when:
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
-    - rbd_client_directory_owner is not defined
-      or not rbd_client_directory_owner
-
-- set_fact:
-    rbd_client_directory_owner: ceph
-  when:
-    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
-    - rbd_client_directory_owner is not defined
-      or not rbd_client_directory_owner
-
-- set_fact:
-    rbd_client_directory_group: root
-  when:
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
-    - rbd_client_directory_group is not defined
-      or not rbd_client_directory_group
-
-- set_fact:
-    rbd_client_directory_group: ceph
-  when:
-    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
-    - rbd_client_directory_group is not defined
-      or not rbd_client_directory_group
-
-- set_fact:
-    rbd_client_directory_mode: "1777"
-  when:
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
-    - rbd_client_directory_mode is not defined
-      or not rbd_client_directory_mode
-
-- set_fact:
-    rbd_client_directory_mode: "0770"
-  when:
-    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
-    - rbd_client_directory_mode is not defined
-      or not rbd_client_directory_mode
-
-- name: check for a ceph socket
-  shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: socket
-
-- name: check for a rados gateway socket
-  shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: socketrgw
-
-- name: create a local fetch directory if it does not exist
-  local_action: file path={{ fetch_directory }} state=directory
-  changed_when: false
-  become: false
-  run_once: true
-  when: cephx or generate_fsid
-
-- name: generate cluster fsid
-  local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  register: cluster_uuid
-  become: false
-  when:
-    - generate_fsid
-    - ceph_current_fsid.rc != 0
-
-- name: reuse cluster fsid when cluster is already running
-  local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  become: false
-  when: ceph_current_fsid.rc == 0
-
-- name: read cluster fsid if it already exists
-  local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-    removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  changed_when: false
-  register: cluster_uuid
-  become: false
-  always_run: true
-  when: generate_fsid
-
-- name: create ceph conf directory
-  file:
-    path: /etc/ceph
-    state: directory
-    owner: "{{ dir_owner }}"
-    group: "{{ dir_group }}"
-    mode: "{{ dir_mode }}"
-
-- name: generate ceph configuration file
-  action: config_template
-  args:
-    src: ceph.conf.j2
-    dest: /etc/ceph/{{ cluster }}.conf
-    owner: "{{ dir_owner }}"
-    group: "{{ dir_group }}"
-    mode: "{{ activate_file_mode }}"
-    config_overrides: "{{ ceph_conf_overrides }}"
-    config_type: ini
-  notify:
-    - restart ceph mons
-    - restart ceph mons on ubuntu
-    - restart ceph mons with systemd
-    - restart ceph osds
-    - restart ceph osds on ubuntu
-    - restart ceph osds with systemd
-    - restart ceph mdss
-    - restart ceph mdss on ubuntu
-    - restart ceph mdss with systemd
-    - restart ceph rgws
-    - restart ceph rgws on ubuntu
-    - restart ceph rgws on red hat
-    - restart ceph rgws with systemd
-    - restart ceph nfss
-
-- name: create rbd client directory
-  file:
-    path: "{{ item }}"
-    state: directory
-    owner: "{{ rbd_client_directory_owner }}"
-    group: "{{ rbd_client_directory_group }}"
-    mode: "{{ rbd_client_directory_mode }}"
-  with_items:
-    - "{{ rbd_client_admin_socket_path }}"
-    - "{{ rbd_client_log_path }}"
-  when: rbd_client_directories
-
-- name: configure cluster name
-  lineinfile:
-    dest: /etc/sysconfig/ceph
-    insertafter: EOF
-    create: yes
-    line: "CLUSTER={{ cluster }}"
-  when: ansible_os_family == "RedHat"
-
-# NOTE(leseb): we are performing the following check
-# to ensure any Jewel installation will not fail.
-# The following commit https://github.com/ceph/ceph/commit/791eba81a5467dd5de4f1680ed0deb647eb3fb8b
-# fixed a package issue where the path was the wrong.
-# This bug is not yet on all the distros package so we are working around it
-# Impacted versions:
-# - Jewel from UCA: https://bugs.launchpad.net/ubuntu/+source/ceph/+bug/1582773
-# - Jewel from latest Canonical 16.04 distro
-# - All previous versions from Canonical
-# - Infernalis from ceph.com
-- name: check /etc/default/ceph exist
-  stat:
-    path: /etc/default/ceph
-  register: etc_default_ceph
-  always_run: true
-  when: ansible_os_family == "Debian"
-
-- name: configure cluster name
-  lineinfile:
-    dest: /etc/default/ceph
-    insertafter: EOF
-    create: yes
-    line: "CLUSTER={{ cluster }}"
-  when:
-    - ansible_os_family == "Debian"
-    - etc_default_ceph.stat.exists
-    - not etc_default_ceph.stat.isdir
-
-- name: configure cluster name
-  lineinfile:
-    dest: /etc/default/ceph/ceph
-    insertafter: EOF
-    create: yes
-    line: "CLUSTER={{ cluster }}"
-  when:
-    - ansible_os_family == "Debian"
-    - etc_default_ceph.stat.exists
-    - etc_default_ceph.stat.isdir
+- include: ./checks/check_socket.yml
+- include: generate_cluster_fsid.yml
+- include: generate_ceph_conf.yml
+- include: create_rbd_client_dir.yml
+- include: configure_cluster_name.yml
diff --git a/roles/ceph-mon/tasks/check_mandatory_vars.yml b/roles/ceph-mon/tasks/check_mandatory_vars.yml
new file mode 100644 (file)
index 0000000..9707c14
--- /dev/null
@@ -0,0 +1,8 @@
+---
+- name: make sure monitor_interface or monitor_address or monitor_address_block is configured
+  fail:
+    msg: "Either monitor_interface, monitor_address, or monitor_address_block must be configured. Interface for the monitor to listen on or IP address of that interface"
+  when:
+    - monitor_interface == 'interface'
+    - monitor_address == '0.0.0.0'
+    - not monitor_address_block