git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
osds: move openstack pools creation in ceph-osd
author Guillaume Abrioux <gabrioux@redhat.com>
Tue, 22 May 2018 14:41:40 +0000 (16:41 +0200)
committer Sébastien Han <seb@redhat.com>
Thu, 24 May 2018 16:39:38 +0000 (09:39 -0700)
When deploying a large number of OSD nodes, creating the OpenStack pools from the
ceph-mon role can be an issue: the protection check [1] won't pass because pool
creation is attempted before all OSDs are active.

The idea here is to move the OpenStack pools creation to the end of the `ceph-osd` role.

[1] https://github.com/ceph/ceph/blob/e59258943bcfe3e52d40a59ff30df55e1e6a3865/src/mon/OSDMonitor.cc#L5673

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1578086
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
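
In practice the change boils down to two things, both visible in the hunks below: the
openstack_config.yml tasks move from ceph-mon to ceph-osd, and every ceph command in
them is delegated back to the first monitor so it still runs where the admin keyring
lives. A minimal sketch of the new tasks (mirroring the diff below, not a verbatim copy):

    # roles/ceph-osd/tasks/main.yml -- create the pools listed in openstack_pools,
    # once, from the last OSD host so all OSDs are already up
    - name: include openstack_config.yml
      include: openstack_config.yml
      when:
        - openstack_config
        - inventory_hostname == groups[osd_group_name] | last

    # roles/ceph-osd/tasks/openstack_config.yml -- each ceph command is delegated
    # to the first monitor, e.g.:
    - name: list existing pool(s)
      command: >
        {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }}
        ceph --cluster {{ cluster }} osd pool get {{ item.name }} size
      with_items: "{{ openstack_pools | unique }}"
      register: created_pools
      delegate_to: "{{ groups[mon_group_name][0] }}"
      failed_when: false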
13 files changed:
group_vars/all.yml.sample
group_vars/mons.yml.sample
group_vars/rhcs.yml.sample
roles/ceph-defaults/defaults/main.yml
roles/ceph-mon/defaults/main.yml
roles/ceph-mon/tasks/main.yml
roles/ceph-mon/tasks/openstack_config.yml [deleted file]
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/openstack_config.yml [new file with mode: 0644]
tests/functional/centos/7/cluster/group_vars/all
tests/functional/centos/7/cluster/group_vars/mons
tests/functional/centos/7/docker/group_vars/all
tests/functional/centos/7/docker/group_vars/mons

index 483cd7be4e4392791a07225e37a81d7ed75f72ef..b142bf4a01c31570758f8e78db90d0094434f42d 100644 (file)
@@ -532,3 +532,69 @@ dummy:
 #docker_pull_retry: 3
 #docker_pull_timeout: "300s"
 
+
+
+#############
+# OPENSTACK #
+#############
+#openstack_config: false
+#openstack_glance_pool:
+#  name: "images"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#openstack_cinder_pool:
+#  name: "volumes"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#openstack_nova_pool:
+#  name: "vms"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#openstack_cinder_backup_pool:
+#  name: "backups"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#openstack_gnocchi_pool:
+#  name: "metrics"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+
+#openstack_pools:
+#  - "{{ openstack_glance_pool }}"
+#  - "{{ openstack_cinder_pool }}"
+#  - "{{ openstack_nova_pool }}"
+#  - "{{ openstack_cinder_backup_pool }}"
+#  - "{{ openstack_gnocchi_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+#openstack_keys:
+#  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+#  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+#  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+#  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+#  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+
index 50a00436414a6d292e8b7586bb3784a51de6b1c5..d65547739bbcba5a0fdc23db84964c7ca1192df6 100644 (file)
@@ -69,72 +69,6 @@ dummy:
 # Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
 #create_crush_tree: false
 
-
-#############
-# OPENSTACK #
-#############
-#openstack_config: false
-#openstack_glance_pool:
-#  name: "images"
-#  pg_num: "{{ osd_pool_default_pg_num }}"
-#  pgp_num: "{{ osd_pool_default_pg_num }}"
-#  rule_name: "replicated_rule"
-#  type: 1
-#  erasure_profile: ""
-#  expected_num_objects: ""
-#openstack_cinder_pool:
-#  name: "volumes"
-#  pg_num: "{{ osd_pool_default_pg_num }}"
-#  pgp_num: "{{ osd_pool_default_pg_num }}"
-#  rule_name: "replicated_rule"
-#  type: 1
-#  erasure_profile: ""
-#  expected_num_objects: ""
-#openstack_nova_pool:
-#  name: "vms"
-#  pg_num: "{{ osd_pool_default_pg_num }}"
-#  pgp_num: "{{ osd_pool_default_pg_num }}"
-#  rule_name: "replicated_rule"
-#  type: 1
-#  erasure_profile: ""
-#  expected_num_objects: ""
-#openstack_cinder_backup_pool:
-#  name: "backups"
-#  pg_num: "{{ osd_pool_default_pg_num }}"
-#  pgp_num: "{{ osd_pool_default_pg_num }}"
-#  rule_name: "replicated_rule"
-#  type: 1
-#  erasure_profile: ""
-#  expected_num_objects: ""
-#openstack_gnocchi_pool:
-#  name: "metrics"
-#  pg_num: "{{ osd_pool_default_pg_num }}"
-#  pgp_num: "{{ osd_pool_default_pg_num }}"
-#  rule_name: "replicated_rule"
-#  type: 1
-#  erasure_profile: ""
-#  expected_num_objects: ""
-
-#openstack_pools:
-#  - "{{ openstack_glance_pool }}"
-#  - "{{ openstack_cinder_pool }}"
-#  - "{{ openstack_nova_pool }}"
-#  - "{{ openstack_cinder_backup_pool }}"
-#  - "{{ openstack_gnocchi_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-#openstack_keys:
-#  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-#  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-#  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-#  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-#  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
 ##########
 # DOCKER #
 ##########
index 73a4f5bde3051ec00aefd7d07b791205f643848a..b142bf4a01c31570758f8e78db90d0094434f42d 100644 (file)
@@ -29,7 +29,7 @@ dummy:
 #  mimic: 13
 
 # Directory to fetch cluster fsid, keys etc...
-fetch_directory: ~/ceph-ansible-keys
+#fetch_directory: fetch/
 
 # The 'cluster' variable determines the name of the cluster.
 # Changing the default value to something else means that you will
@@ -135,14 +135,14 @@ fetch_directory: ~/ceph-ansible-keys
 # - 'distro' means that no separate repo file will be added
 #  you will get whatever version of Ceph is included in your Linux distro.
 # 'local' means that the ceph binaries will be copied over from the local machine
-ceph_origin: repository
+#ceph_origin: "{{ 'repository' if ceph_rhcs or ceph_stable or ceph_dev or ceph_stable_uca or ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
 #valid_ceph_origins:
 #  - repository
 #  - distro
 #  - local
 
 
-ceph_repository: rhcs
+#ceph_repository: "{{ 'community' if ceph_stable else 'rhcs' if ceph_rhcs else 'dev' if ceph_dev else 'uca' if ceph_stable_uca else 'custom' if ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
 #valid_ceph_repository:
 #  - community
 #  - rhcs
@@ -532,3 +532,69 @@ ceph_repository: rhcs
 #docker_pull_retry: 3
 #docker_pull_timeout: "300s"
 
+
+
+#############
+# OPENSTACK #
+#############
+#openstack_config: false
+#openstack_glance_pool:
+#  name: "images"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#openstack_cinder_pool:
+#  name: "volumes"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#openstack_nova_pool:
+#  name: "vms"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#openstack_cinder_backup_pool:
+#  name: "backups"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#openstack_gnocchi_pool:
+#  name: "metrics"
+#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+
+#openstack_pools:
+#  - "{{ openstack_glance_pool }}"
+#  - "{{ openstack_cinder_pool }}"
+#  - "{{ openstack_nova_pool }}"
+#  - "{{ openstack_cinder_backup_pool }}"
+#  - "{{ openstack_gnocchi_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+#openstack_keys:
+#  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+#  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+#  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+#  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+#  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+
index 8d7c65dc9210db1d2a80265fa6e0f183b8218da4..095f578080b427c6090cfd6b733ec61199924526 100644 (file)
@@ -523,3 +523,68 @@ rolling_update: false
 #####################
 docker_pull_retry: 3
 docker_pull_timeout: "300s"
+
+
+#############
+# OPENSTACK #
+#############
+openstack_config: false
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+openstack_nova_pool:
+  name: "vms"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+openstack_cinder_backup_pool:
+  name: "backups"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+openstack_gnocchi_pool:
+  name: "metrics"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
+  - "{{ openstack_nova_pool }}"
+  - "{{ openstack_cinder_backup_pool }}"
+  - "{{ openstack_gnocchi_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+openstack_keys:
+  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
index 2de8bb1cf81608538091575d684b0eb25a3c5acd..09183253f5a72f97d07b43a7c0eb39755f327fc0 100644 (file)
@@ -61,72 +61,6 @@ crush_rules:
 # Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
 create_crush_tree: false
 
-
-#############
-# OPENSTACK #
-#############
-openstack_config: false
-openstack_glance_pool:
-  name: "images"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "replicated_rule"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-openstack_cinder_pool:
-  name: "volumes"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "replicated_rule"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-openstack_nova_pool:
-  name: "vms"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "replicated_rule"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-openstack_cinder_backup_pool:
-  name: "backups"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "replicated_rule"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-openstack_gnocchi_pool:
-  name: "metrics"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "replicated_rule"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
-  - "{{ openstack_nova_pool }}"
-  - "{{ openstack_cinder_backup_pool }}"
-  - "{{ openstack_gnocchi_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-openstack_keys:
-  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
 ##########
 # DOCKER #
 ##########
index 94cba40b6d8ee2edc7f23b4419dbaa617d31a66e..ad6b825b49680d4da33abc289dd50e606f283078 100644 (file)
   when:
     - openstack_keys_tmp is defined
 
-# Create the pools listed in openstack_pools
-- name: include openstack_config.yml
-  include: openstack_config.yml
-  when:
-    - openstack_config
-    - inventory_hostname == groups[mon_group_name] | last
-
 - name: include create_mds_filesystems.yml
   include: create_mds_filesystems.yml
   when:
diff --git a/roles/ceph-mon/tasks/openstack_config.yml b/roles/ceph-mon/tasks/openstack_config.yml
deleted file mode 100644 (file)
index f0660a8..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
----
-- name: list existing pool(s)
-  command: >
-    {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
-    osd pool get {{ item.name }} size
-  with_items: "{{ openstack_pools | unique }}"
-  register: created_pools
-  failed_when: false
-
-- name: create openstack pool(s)
-  command: >
-    {{ docker_exec_cmd }} ceph --cluster {{ cluster }}
-    osd pool create {{ item.0.name }}
-    {{ item.0.pg_num }}
-    {{ item.0.pgp_num | default(item.0.pg_num) }}
-    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
-    {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
-    {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
-    {{ item.0.erasure_profile }}
-    {%- endif %}
-    {{ item.0.expected_num_objects | default('') }}
-  with_together:
-    - "{{ openstack_pools | unique }}"
-    - "{{ created_pools.results }}"
-  changed_when: false
-  when:
-    - item.1.get('rc', 0) != 0
-
-- name: assign application to pool(s)
-  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
-  with_items: "{{ openstack_pools | unique }}"
-  changed_when: false
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
-    - item.application is defined
-
-- name: create openstack cephx key(s)
-  ceph_key:
-    state: present
-    name: "{{ item.name }}"
-    caps: "{{ item.caps }}"
-    secret: "{{ item.key | default('') }}"
-    containerized: "{{ docker_exec_cmd | default(False) }}"
-    cluster: "{{ cluster }}"
-    mode: "{{ item.mode|default(omit) }}"
-  with_items: "{{ openstack_keys }}"
-  when: cephx
-
-- name: fetch openstack cephx key(s)
-  fetch:
-    src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
-    dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
-    flat: yes
-  with_items: "{{ openstack_keys }}"
-
-- name: copy to other mons the openstack cephx key(s)
-  copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
-    dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
-    owner: "{{ ceph_uid }}"
-    group: "{{ ceph_uid }}"
-    mode: "{{ item.1.mode|default(omit) }}"
-  with_nested:
-    - "{{ groups[mon_group_name] }}"
-    - "{{ openstack_keys }}"
-  delegate_to: "{{ item.0 }}"
-  when:
-    - cephx
-    - openstack_config
-    - item.0 != groups[mon_group_name] | last
\ No newline at end of file
index 16220548af1e67c803e35fb06403c8b63d608f18..4adade3b470f7cbfdca2c825e9025604b078310b 100644 (file)
     - containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
+
+# Create the pools listed in openstack_pools
+- name: include openstack_config.yml
+  include: openstack_config.yml
+  when:
+    - openstack_config
+    - inventory_hostname == groups[osd_group_name] | last
\ No newline at end of file
diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml
new file mode 100644 (file)
index 0000000..238abd8
--- /dev/null
@@ -0,0 +1,75 @@
+---
+- name: list existing pool(s)
+  command: >
+    {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+    osd pool get {{ item.name }} size
+  with_items: "{{ openstack_pools | unique }}"
+  register: created_pools
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  failed_when: false
+
+- name: create openstack pool(s)
+  command: >
+    {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+    osd pool create {{ item.0.name }}
+    {{ item.0.pg_num }}
+    {{ item.0.pgp_num | default(item.0.pg_num) }}
+    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+    {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
+    {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
+    {{ item.0.erasure_profile }}
+    {%- endif %}
+    {{ item.0.expected_num_objects | default('') }}
+  with_together:
+    - "{{ openstack_pools | unique }}"
+    - "{{ created_pools.results }}"
+  changed_when: false
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - item.1.get('rc', 0) != 0
+
+- name: assign application to pool(s)
+  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+  with_items: "{{ openstack_pools | unique }}"
+  changed_when: false
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+    - item.application is defined
+
+- name: create openstack cephx key(s)
+  ceph_key:
+    state: present
+    name: "{{ item.name }}"
+    caps: "{{ item.caps }}"
+    secret: "{{ item.key | default('') }}"
+    containerized: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }}"
+    cluster: "{{ cluster }}"
+    mode: "{{ item.mode|default(omit) }}"
+  with_items: "{{ openstack_keys }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when: cephx
+
+- name: fetch openstack cephx key(s)
+  fetch:
+    src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
+    dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
+    flat: yes
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  with_items: "{{ openstack_keys }}"
+
+- name: copy to other mons the openstack cephx key(s)
+  copy:
+    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
+    dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
+    owner: "{{ ceph_uid }}"
+    group: "{{ ceph_uid }}"
+    mode: "{{ item.1.mode|default(omit) }}"
+  with_nested:
+    - "{{ groups[mon_group_name] }}"
+    - "{{ openstack_keys }}"
+  delegate_to: "{{ item.0 }}"
+  when:
+    - cephx
+    - openstack_config
+    - item.0 != groups[mon_group_name] | last
\ No newline at end of file
index 2ac4acf39b5c5f5ccd0ad439b52c3bac3d52a4f0..e33f448afb6d8ce4e7e915c563b44ac9ebaf8c4e 100644 (file)
@@ -20,3 +20,23 @@ devices:
 dedicated_devices:
   - '/dev/sdc'
   - '/dev/sdc'
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
\ No newline at end of file
index 0919b52cc4fca89630d15ac02198776de854e19b..fb7a9268b2a3dd78cb3ee55affe8aa307b7b4ad4 100644 (file)
@@ -11,25 +11,4 @@ crush_rule_hdd:
   type: host
   default: true
 crush_rules:
-  - "{{ crush_rule_hdd }}"
-
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-openstack_cinder_pool:
-  name: "volumes"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
+  - "{{ crush_rule_hdd }}"
\ No newline at end of file
index 795a76e02f43944508fb758fa0d19d9813a2b1ae..fb2804976b5b86c338f92ebf6c4ddba8f38c7c97 100644 (file)
@@ -21,3 +21,23 @@ ceph_conf_overrides:
 devices:
   - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
index 0919b52cc4fca89630d15ac02198776de854e19b..d4d1d4334fb4fbea6b8155c44c5f2948b7176681 100644 (file)
@@ -12,24 +12,3 @@ crush_rule_hdd:
   default: true
 crush_rules:
   - "{{ crush_rule_hdd }}"
-
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-openstack_cinder_pool:
-  name: "volumes"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"