osd: drop openstack related tasks
author     Guillaume Abrioux <gabrioux@ibm.com>
           Sat, 9 Mar 2024 09:24:23 +0000 (10:24 +0100)
committer  Guillaume Abrioux <gabrioux@ibm.com>
           Fri, 15 Mar 2024 23:51:52 +0000 (00:51 +0100)
All of this should be addressed in separate custom playbooks if needed.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
(cherry picked from commit 9c467e41b39af63dec9a0b237b550edb9c57da85)
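
For reference, a minimal standalone playbook along those lines could look like the sketch below. It reuses the ceph_pool and ceph_key modules that the deleted roles/ceph-osd/tasks/openstack_config.yml invoked; the host pattern, pool list, and key caps are illustrative assumptions (not part of this commit), and it assumes ceph-ansible's library modules are available on the Ansible module path.

---
# Sketch of a custom playbook replacing the dropped openstack_config.yml
# tasks. The "mons" group name, pool definitions and caps are assumptions.
- name: Create OpenStack pools and cephx keys
  hosts: mons[0]
  gather_facts: false
  vars:
    cluster: ceph
    openstack_pools:
      - { name: images, application: rbd }
      - { name: volumes, application: rbd }
    openstack_keys:
      - name: client.glance
        caps:
          mon: "profile rbd"
          osd: "profile rbd pool=images"
  tasks:
    - name: Create openstack pool(s)
      ceph_pool:
        name: "{{ item.name }}"
        cluster: "{{ cluster }}"
        application: "{{ item.application | default(omit) }}"
      loop: "{{ openstack_pools }}"

    - name: Create openstack cephx key(s)
      ceph_key:
        name: "{{ item.name }}"
        caps: "{{ item.caps }}"
        cluster: "{{ cluster }}"
        mode: "0600"
      loop: "{{ openstack_keys }}"
      no_log: true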

26 files changed:
group_vars/all.yml.sample
roles/ceph-defaults/defaults/main.yml
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/openstack_config.yml [deleted file]
roles/ceph-validate/tasks/check_pools.yml
tests/functional/all-in-one/container/group_vars/all
tests/functional/all-in-one/group_vars/all
tests/functional/all_daemons/container/group_vars/all
tests/functional/all_daemons/group_vars/all
tests/functional/all_daemons_ipv6/container/group_vars/all
tests/functional/all_daemons_ipv6/group_vars/all
tests/functional/docker2podman/group_vars/all
tests/functional/external_clients/container/inventory/group_vars/all
tests/functional/external_clients/inventory/group_vars/all
tests/functional/lvm-osds/container/group_vars/all
tests/functional/lvm-osds/group_vars/all
tests/functional/podman/group_vars/all
tests/functional/shrink_mds/container/group_vars/all
tests/functional/shrink_mgr/container/group_vars/all
tests/functional/shrink_mon/container/group_vars/all
tests/functional/shrink_osd/container/group_vars/all
tests/functional/shrink_osd/group_vars/all
tests/functional/shrink_rbdmirror/container/group_vars/all
tests/functional/shrink_rgw/container/group_vars/all
tests/functional/subset_update/container/group_vars/all
tests/functional/subset_update/group_vars/all

diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index 05d1b3f75ca1b2cf79d3ce707108be2350afd320..72a5bc6b8b79adb8f15e73a0e0f9284ba9888a5c 100644 (file)
@@ -554,64 +554,6 @@ dummy:
 #docker_pull_timeout: "300s"
 
 
-#############
-# OPENSTACK #
-#############
-#openstack_config: false
-# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
-# `pg_num` and `pgp_num` keys will be ignored, even if specified.
-# eg:
-#  openstack_glance_pool:
-#    name: "images"
-#    rule_name: "my_replicated_rule"
-#    application: "rbd"
-#    pg_autoscale_mode: false
-#    pg_num: 16
-#    pgp_num: 16
-#    target_size_ratio: 0.2
-#openstack_glance_pool:
-#  name: "images"
-#  application: "rbd"
-#openstack_cinder_pool:
-#  name: "volumes"
-#  application: "rbd"
-#openstack_nova_pool:
-#  name: "vms"
-#  application: "rbd"
-#openstack_cinder_backup_pool:
-#  name: "backups"
-#  application: "rbd"
-#openstack_gnocchi_pool:
-#  name: "metrics"
-#  application: "rbd"
-#openstack_cephfs_data_pool:
-#  name: "manila_data"
-#  application: "cephfs"
-#openstack_cephfs_metadata_pool:
-#  name: "manila_metadata"
-#  application: "cephfs"
-#openstack_pools:
-#  - "{{ openstack_glance_pool }}"
-#  - "{{ openstack_cinder_pool }}"
-#  - "{{ openstack_nova_pool }}"
-#  - "{{ openstack_cinder_backup_pool }}"
-#  - "{{ openstack_gnocchi_pool }}"
-#  - "{{ openstack_cephfs_data_pool }}"
-#  - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-#openstack_keys:
-#  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-#  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-#  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-#  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-#  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
 #############
 # DASHBOARD #
 #############
diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml
index 489022dc0ef88106c3ad7994294e4c7df0c99eb7..c22f63b5e6f6a107e2eb7c99ad38657dede5dc2e 100644 (file)
@@ -546,64 +546,6 @@ docker_pull_retry: 3
 docker_pull_timeout: "300s"
 
 
-#############
-# OPENSTACK #
-#############
-openstack_config: false
-# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
-# `pg_num` and `pgp_num` keys will be ignored, even if specified.
-# eg:
-#  openstack_glance_pool:
-#    name: "images"
-#    rule_name: "my_replicated_rule"
-#    application: "rbd"
-#    pg_autoscale_mode: false
-#    pg_num: 16
-#    pgp_num: 16
-#    target_size_ratio: 0.2
-openstack_glance_pool:
-  name: "images"
-  application: "rbd"
-openstack_cinder_pool:
-  name: "volumes"
-  application: "rbd"
-openstack_nova_pool:
-  name: "vms"
-  application: "rbd"
-openstack_cinder_backup_pool:
-  name: "backups"
-  application: "rbd"
-openstack_gnocchi_pool:
-  name: "metrics"
-  application: "rbd"
-openstack_cephfs_data_pool:
-  name: "manila_data"
-  application: "cephfs"
-openstack_cephfs_metadata_pool:
-  name: "manila_metadata"
-  application: "cephfs"
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
-  - "{{ openstack_nova_pool }}"
-  - "{{ openstack_cinder_backup_pool }}"
-  - "{{ openstack_gnocchi_pool }}"
-  - "{{ openstack_cephfs_data_pool }}"
-  - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-openstack_keys:
-  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
 #############
 # DASHBOARD #
 #############
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index f51fc984594cdff63db6df1dd840085592e51b5e..8987f57cfc99bae57e7351c7a52cc6a9303276e9 100644 (file)
   ansible.builtin.include_tasks: crush_rules.yml
   when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
   tags: wait_all_osds_up
-
-# Create the pools listed in openstack_pools
-- name: Include openstack_config.yml
-  ansible.builtin.include_tasks: openstack_config.yml
-  when:
-    - not add_osd | bool
-    - not rolling_update | default(False) | bool
-    - openstack_config | bool
-    - inventory_hostname == groups[osd_group_name] | last
-  tags: wait_all_osds_up
diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml
deleted file mode 100644 (file)
index 6b276c2..0000000
--- a/roles/ceph-osd/tasks/openstack_config.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-- name: Pool related tasks
-  block:
-    - name: Create openstack pool(s)
-      ceph_pool:
-        name: "{{ item.name }}"
-        cluster: "{{ cluster }}"
-        pg_num: "{{ item.pg_num | default(omit) }}"
-        pgp_num: "{{ item.pgp_num | default(omit) }}"
-        size: "{{ item.size | default(omit) }}"
-        min_size: "{{ item.min_size | default(omit) }}"
-        pool_type: "{{ item.type | default('replicated') }}"
-        rule_name: "{{ item.rule_name | default(omit) }}"
-        erasure_profile: "{{ item.erasure_profile | default(omit) }}"
-        pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
-        target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
-        application: "{{ item.application | default(omit) }}"
-      with_items: "{{ openstack_pools }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-
-- name: Create openstack cephx key(s)
-  when:
-    - cephx | bool
-    - openstack_config | bool
-  block:
-    - name: Generate keys
-      ceph_key:
-        name: "{{ item.name }}"
-        caps: "{{ item.caps }}"
-        secret: "{{ item.key | default('') }}"
-        cluster: "{{ cluster }}"
-        mode: "{{ item.mode | default(ceph_keyring_permissions) }}"
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      with_items: "{{ openstack_keys }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
-
-    - name: Get keys from monitors
-      ceph_key:
-        name: "{{ item.name }}"
-        cluster: "{{ cluster }}"
-        output_format: plain
-        state: info
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      register: _osp_keys
-      with_items: "{{ openstack_keys }}"
-      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
-
-    - name: Copy ceph key(s) if needed
-      ansible.builtin.copy:
-        dest: "/etc/ceph/{{ cluster }}.{{ item.0.item.name }}.keyring"
-        content: "{{ item.0.stdout + '\n' }}"
-        owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
-        group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
-        mode: "{{ item.0.item.mode | default(ceph_keyring_permissions) }}"
-      with_nested:
-        - "{{ _osp_keys.results }}"
-        - "{{ groups[mon_group_name] }}"
-      delegate_to: "{{ item.1 }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
diff --git a/roles/ceph-validate/tasks/check_pools.yml b/roles/ceph-validate/tasks/check_pools.yml
index bca9d26ff484fa2f2e6b0fa11fdff520bfcf71a2..0acf7c9572420c7d1a891a92f0af4282ce3d3bec 100644 (file)
@@ -3,7 +3,6 @@
   ansible.builtin.fail:
     msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
   with_items:
-    - "{{ openstack_pools | default([]) }}"
     - "{{ cephfs_pools | default([]) }}"
     - "{{ pools | default([]) }}"
   when:
diff --git a/tests/functional/all-in-one/container/group_vars/all b/tests/functional/all-in-one/container/group_vars/all
index 1a9a4c9f6c69f00f5fceb21b4d481ab8e69493ee..bed6ae593bd2202785ee5bc08f167903a9f0a1a1 100644 (file)
@@ -11,7 +11,6 @@ public_network: "192.168.19.0/24"
 cluster_network: "192.168.20.0/24"
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-openstack_config: True
 dashboard_enabled: false
 ceph_conf_overrides:
   global:
diff --git a/tests/functional/all-in-one/group_vars/all b/tests/functional/all-in-one/group_vars/all
index 1b5ddd6348e8b13303656410acbbfaf3da4a0b55..e4967a8b147e34928d0f68873e3ec5d76dd4791d 100644 (file)
@@ -4,7 +4,6 @@ ceph_origin: repository
 ceph_repository: community
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
-openstack_config: True
 dashboard_enabled: False
 public_network: "192.168.17.0/24"
 cluster_network: "192.168.18.0/24"
diff --git a/tests/functional/all_daemons/container/group_vars/all b/tests/functional/all_daemons/container/group_vars/all
index a33e13d1e3b5e06771291a018f84981a9e890273..65d71f3849a302f8bfbc9d191004ccd455908022 100644 (file)
@@ -17,18 +17,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 docker_pull_timeout: 600s
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
diff --git a/tests/functional/all_daemons/group_vars/all b/tests/functional/all_daemons/group_vars/all
index 51663bc8683c0ea486af3fbccc83c07ee6fb3ca7..75c50ecb0e0fdedd95b4f1daf743c71e0707193f 100644 (file)
@@ -11,20 +11,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 mds_max_mds: 2
diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/all b/tests/functional/all_daemons_ipv6/container/group_vars/all
index e4503e0eedbf4c2854806070e84fd18d294c9d05..4e3ef7de40561aa57bdd3a229ba5209ed1ada8a0 100644 (file)
@@ -18,18 +18,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 docker_pull_timeout: 600s
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
diff --git a/tests/functional/all_daemons_ipv6/group_vars/all b/tests/functional/all_daemons_ipv6/group_vars/all
index aa5b93f6623ca4a7009da40a6459f6ecc5c2cf4c..1748266d13db808ed7c89815edc7d7d76127be7f 100644 (file)
@@ -12,20 +12,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 mds_max_mds: 2
diff --git a/tests/functional/docker2podman/group_vars/all b/tests/functional/docker2podman/group_vars/all
index 3621722663df39dfff0b411180f68e4378a22e41..966ab68540d33c2cfee383bda5650034b6085598 100644 (file)
@@ -16,18 +16,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
-openstack_glance_pool:
-  name: "images"
-  rule_name: "HDD"
-  size: 1
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
@@ -38,4 +26,4 @@ ceph_docker_image_tag: latest-reef
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
\ No newline at end of file
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
diff --git a/tests/functional/external_clients/container/inventory/group_vars/all b/tests/functional/external_clients/container/inventory/group_vars/all
index 84474d502919019371d9e43c8e866bc873e44807..beb09c357a937f0dd19e9ecdd0662077ca7bf929 100644 (file)
@@ -11,7 +11,6 @@ public_network: "192.168.31.0/24"
 cluster_network: "192.168.32.0/24"
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-openstack_config: True
 dashboard_enabled: false
 ceph_conf_overrides:
   global:
diff --git a/tests/functional/external_clients/inventory/group_vars/all b/tests/functional/external_clients/inventory/group_vars/all
index 1152b04cef4feeccd82b5954fe45207af4705f84..29c0ed43ba4a647a1d793234ab290747e1eb4c27 100644 (file)
@@ -4,7 +4,6 @@ ceph_origin: repository
 ceph_repository: community
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
-openstack_config: True
 dashboard_enabled: False
 public_network: "192.168.31.0/24"
 cluster_network: "192.168.32.0/24"
diff --git a/tests/functional/lvm-osds/container/group_vars/all b/tests/functional/lvm-osds/container/group_vars/all
index 087d0a5e44eaaf9a2c8779e484e5db5f85a7a4b7..64286b499b2234935bb917728dd85b0c7e1a6f0c 100644 (file)
@@ -21,20 +21,6 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  type: 3
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-reef
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
diff --git a/tests/functional/lvm-osds/group_vars/all b/tests/functional/lvm-osds/group_vars/all
index c8066f0132067243a46a44a2802e252d42e8bf84..c39cfd433d16c91f22bec70204be544803119e8c 100644 (file)
@@ -16,17 +16,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  type: 3
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+
diff --git a/tests/functional/podman/group_vars/all b/tests/functional/podman/group_vars/all
index 3a8328a79cacb3b5c9bd1f31dc1e548876152add..427f6bed539af1daf016f35e83ee6066f81cc83b 100644 (file)
@@ -15,18 +15,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  rule_name: "HDD"
-  size: 1
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
@@ -37,4 +25,4 @@ ceph_docker_image_tag: latest-reef
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
\ No newline at end of file
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
diff --git a/tests/functional/shrink_mds/container/group_vars/all b/tests/functional/shrink_mds/container/group_vars/all
index 3e9787332fa8388c8b34b23f5f0c03f445e5e010..97a6ea8f43f6d90c7ee095898e0af23e43fe35b0 100644 (file)
@@ -12,7 +12,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.io
diff --git a/tests/functional/shrink_mgr/container/group_vars/all b/tests/functional/shrink_mgr/container/group_vars/all
index e38b9e43d76b3a352a7a0da409c9f2b6eeeb39cb..367c38b70bdadb13ce00753d78d2323a42f1f8aa 100644 (file)
@@ -12,7 +12,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
diff --git a/tests/functional/shrink_mon/container/group_vars/all b/tests/functional/shrink_mon/container/group_vars/all
index 8522ee71096724a5b5207a810c3293760591086d..2d75b6fa616f8eab8af599f611af6814f13aa2da 100644 (file)
@@ -12,7 +12,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
diff --git a/tests/functional/shrink_osd/container/group_vars/all b/tests/functional/shrink_osd/container/group_vars/all
index 2f778f363d217edd1ab9bc2d5b088580f9a7c0eb..5cc5f6e81fd799aec66539d47f95861854c54a11 100644 (file)
@@ -12,7 +12,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.io
diff --git a/tests/functional/shrink_osd/group_vars/all b/tests/functional/shrink_osd/group_vars/all
index d6999b17eea53f856b389eea108718dabae9f8f4..956f325f85a16992032a5820f5516a3271c4baba 100644 (file)
@@ -6,6 +6,5 @@ cluster_network: "192.168.72.0/24"
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 3
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
\ No newline at end of file
diff --git a/tests/functional/shrink_rbdmirror/container/group_vars/all b/tests/functional/shrink_rbdmirror/container/group_vars/all
index f253911dd152827bd8166e5983ab9272bd3e63d3..684d5b5c292d4a65afa0f10407350ff5be20ae70 100644 (file)
@@ -11,7 +11,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.io
diff --git a/tests/functional/shrink_rgw/container/group_vars/all b/tests/functional/shrink_rgw/container/group_vars/all
index 1451936c232caf7a130c7ee456512612919e32e7..900211e8dee0cc0b16b20524f7cddc5e3e72d186 100644 (file)
@@ -13,7 +13,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.io
diff --git a/tests/functional/subset_update/container/group_vars/all b/tests/functional/subset_update/container/group_vars/all
index 70115443aa596283b6f45718ca01f30c2322be85..a9b38103af66dca5cbcabcb7965bec0c46ad130a 100644 (file)
@@ -17,7 +17,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: false
 docker_pull_timeout: 600s
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
diff --git a/tests/functional/subset_update/group_vars/all b/tests/functional/subset_update/group_vars/all
index 13725dd32cbd21871be24f62f1ae3c62595ab2f0..4161ddea363e336f3308e78d4c27edffad46cd2c 100644 (file)
@@ -11,7 +11,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: false
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 mds_max_mds: 2