git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
allow custom pool size
authorRishabh Dave <ridave@redhat.com>
Mon, 1 Oct 2018 15:11:13 +0000 (11:11 -0400)
committerSébastien Han <seb@redhat.com>
Mon, 22 Oct 2018 14:00:21 +0000 (16:00 +0200)
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1596339
Signed-off-by: Rishabh Dave <ridave@redhat.com>
15 files changed:
group_vars/all.yml.sample
group_vars/rhcs.yml.sample
roles/ceph-client/defaults/main.yml
roles/ceph-client/tasks/create_users_keys.yml
roles/ceph-defaults/defaults/main.yml
roles/ceph-iscsi-gw/defaults/main.yml
roles/ceph-iscsi-gw/tasks/common.yml
roles/ceph-mds/tasks/create_mds_filesystems.yml
roles/ceph-osd/tasks/openstack_config.yml
roles/ceph-rgw/defaults/main.yml
roles/ceph-rgw/tasks/main.yml
tests/functional/centos/7/cluster/ceph-override.json
tests/functional/centos/7/cluster/group_vars/all
tests/functional/centos/7/cluster/group_vars/clients
tests/functional/centos/7/docker/group_vars/all

index 738efeed5eb7cf6a6e2585958d5806f55a4ba131..f2fc4d54bdd95104c3ce553360f3703af484e9eb 100644 (file)
@@ -558,6 +558,7 @@ dummy:
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 #openstack_cinder_pool:
 #  name: "volumes"
 #  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -567,6 +568,7 @@ dummy:
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 #openstack_nova_pool:
 #  name: "vms"
 #  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -576,6 +578,7 @@ dummy:
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 #openstack_cinder_backup_pool:
 #  name: "backups"
 #  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -585,6 +588,7 @@ dummy:
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 #openstack_gnocchi_pool:
 #  name: "metrics"
 #  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -594,6 +598,7 @@ dummy:
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 
 #openstack_pools:
 #  - "{{ openstack_glance_pool }}"
index b9cac0d62091198818d6f36039cec88515ef0f0e..5cc7da71579467aa09ceebd7995f0eca789f48bd 100644 (file)
@@ -558,6 +558,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 #openstack_cinder_pool:
 #  name: "volumes"
 #  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -567,6 +568,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 #openstack_nova_pool:
 #  name: "vms"
 #  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -576,6 +578,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 #openstack_cinder_backup_pool:
 #  name: "backups"
 #  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -585,6 +588,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 #openstack_gnocchi_pool:
 #  name: "metrics"
 #  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -594,6 +598,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  erasure_profile: ""
 #  expected_num_objects: ""
 #  application: "rbd"
+#  size: ""
 
 #openstack_pools:
 #  - "{{ openstack_glance_pool }}"
index ec477f2991507deb6a87402349dfc865673c544c..80f9a5552e7d29654e5f4539e49c713f7d3b2c87 100644 (file)
@@ -18,6 +18,7 @@ test:
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
+  size: ""
 test2:
   name: "test2"
   pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -26,6 +27,7 @@ test2:
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
+  size: ""
 pools:
   - "{{ test }}"
   - "{{ test2 }}"
index 72f11d0e71e4d64726b93fd069ff65606ea63003..b98f4bb88303f6e6bbf3a605b01163a4936dd0a4 100644 (file)
     - keys | length > 0
     - inventory_hostname == groups.get('_filtered_clients') | first
 
-- name: list existing pool(s)
-  command: >
-    {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
-    osd pool get {{ item.name }} size
-  with_items: "{{ pools }}"
-  register: created_pools
-  failed_when: false
-  delegate_to: "{{ delegated_node }}"
+- name: pool related tasks
   when:
     - condition_copy_admin_key
     - inventory_hostname == groups.get('_filtered_clients', []) | first
+  block:
+    - name: list existing pool(s)
+      command: >
+        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        osd pool get {{ item.name }} size
+      with_items: "{{ pools }}"
+      register: created_pools
+      failed_when: false
+      delegate_to: "{{ delegated_node }}"
 
-- name: create ceph pool(s)
-  command: >
-    {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
-    osd pool create {{ item.0.name }}
-    {{ item.0.pg_num }}
-    {{ item.0.pgp_num }}
-    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
-    {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
-    {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
-    {{ item.0.erasure_profile }}
-    {%- endif %}
-    {{ item.0.expected_num_objects | default('') }}
-  with_together:
-    - "{{ pools }}"
-    - "{{ created_pools.results }}"
-  changed_when: false
-  delegate_to: "{{ delegated_node }}"
-  when:
-    - pools | length > 0
-    - condition_copy_admin_key
-    - inventory_hostname in groups.get('_filtered_clients') | first
-    - item.1.rc != 0
+    - name: create ceph pool(s)
+      command: >
+        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        osd pool create {{ item.0.name }}
+        {{ item.0.pg_num }}
+        {{ item.0.pgp_num }}
+        {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+        {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
+        {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
+        {{ item.0.erasure_profile }}
+        {%- endif %}
+        {{ item.0.expected_num_objects | default('') }}
+      with_together:
+        - "{{ pools }}"
+        - "{{ created_pools.results }}"
+      changed_when: false
+      delegate_to: "{{ delegated_node }}"
+      when:
+        - pools | length > 0
+        - item.1.rc != 0
+
+    - name: customize pool size
+      command: >
+        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        osd pool set {{ item.name }} size {{ item.size | default('') }}
+      with_items: "{{ pools | unique }}"
+      delegate_to: "{{ delegated_node }}"
+      changed_when: false
+      when:
+        - pools | length > 0
+        - item.size | default ("") != ""
 
 - name: get client cephx keys
   copy:
index 917b8687785207de92416c83ce403e542d73ab5d..2e20d61fb37199df28a8a521d17237728ed5b7e7 100644 (file)
@@ -346,8 +346,8 @@ cephfs_data: cephfs_data # name of the data pool for a given filesystem
 cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 
 cephfs_pools:
-  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
-  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
+  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
 
 ## OSD options
 #
@@ -550,6 +550,7 @@ openstack_glance_pool:
   erasure_profile: ""
   expected_num_objects: ""
   application: "rbd"
+  size: ""
 openstack_cinder_pool:
   name: "volumes"
   pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -559,6 +560,7 @@ openstack_cinder_pool:
   erasure_profile: ""
   expected_num_objects: ""
   application: "rbd"
+  size: ""
 openstack_nova_pool:
   name: "vms"
   pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -568,6 +570,7 @@ openstack_nova_pool:
   erasure_profile: ""
   expected_num_objects: ""
   application: "rbd"
+  size: ""
 openstack_cinder_backup_pool:
   name: "backups"
   pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -577,6 +580,7 @@ openstack_cinder_backup_pool:
   erasure_profile: ""
   expected_num_objects: ""
   application: "rbd"
+  size: ""
 openstack_gnocchi_pool:
   name: "metrics"
   pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -586,6 +590,27 @@ openstack_gnocchi_pool:
   erasure_profile: ""
   expected_num_objects: ""
   application: "rbd"
+  size: ""
+openstack_cephfs_data_pool:
+  name: "manila_data"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  application: "rbd"
+  size: ""
+openstack_cephfs_metadata_pool:
+  name: "manila_metadata"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  application: "rbd"
+  size: ""
 
 openstack_pools:
   - "{{ openstack_glance_pool }}"
@@ -593,6 +618,8 @@ openstack_pools:
   - "{{ openstack_nova_pool }}"
   - "{{ openstack_cinder_backup_pool }}"
   - "{{ openstack_gnocchi_pool }}"
+  - "{{ openstack_cephfs_data_pool }}"
+  - "{{ openstack_cephfs_metadata_pool }}"
 
 
 # The value for 'key' can be a pre-generated key,
index 94309e74a8855bb8d2c6f146cccfdc09a12aa701..5f8b8a703af9858333b576e2c5e6ae045de76cfe 100644 (file)
@@ -56,6 +56,7 @@ client_connections: {}
 # Whether or not to generate secure certificate to iSCSI gateway nodes
 generate_crt: False
 
+rbd_pool_size: ""
 
 ##################
 # RBD-TARGET-API #
index 8e9383c9db249a3b6d54ba599619e9e44825c6ca..9f54ebde2909ec31cc6577dd06159746f6c01acb 100644 (file)
   register: rbd_pool_exists
   delegate_to: "{{ groups[mon_group_name][0] }}"
 
-- name: get default value for osd_pool_default_pg_num
-  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
-  changed_when: false
-  register: osd_pool_default_pg_num
-  delegate_to: "{{ groups[mon_group_name][0] }}"
+- name: rbd pool related tasks
   when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
+  block:
+    - name: get default value for osd_pool_default_pg_num
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
+      changed_when: false
+      register: osd_pool_default_pg_num
+      delegate_to: "{{ groups[mon_group_name][0] }}"
 
-- name: create a rbd pool if it doesn't exist
-  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
-  changed_when: false
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
+    - name: create a rbd pool if it doesn't exist
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
+      changed_when: false
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+
+    - name: customize pool size
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default('') }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      changed_when: false
+      when:
+        - rbd_pool_size | default ("") != ""
index 8418a5cc70ce5393b6bb0a8b78743afab0f837e7..787fbe2bd361c5d3974643fc4ac2373910bd8cc4 100644 (file)
@@ -1,10 +1,19 @@
 ---
-- name: create filesystem pools
-  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
-  changed_when: false
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  with_items:
-    - "{{ cephfs_pools }}"
+- name: filesystem pools related tasks
+  block:
+    - name: create filesystem pools
+      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
+      changed_when: false
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      with_items:
+        - "{{ cephfs_pools }}"
+
+    - name: customize pool size
+      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default('') }}"
+      with_items: "{{ cephfs_pools | unique }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      changed_when: false
+      when: item.size | default ("") != ""
 
 - name: check if ceph filesystem already exists
   command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
index 80fb571adb9099b6c4de561dd9669f1cd94cd2cf..052345aef33c954590f9513231758e9f843191a3 100644 (file)
@@ -9,42 +9,53 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   until: wait_for_all_osds_up.rc == 0
 
-- name: list existing pool(s)
-  command: >
-    {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
-    osd pool get {{ item.name }} size
-  with_items: "{{ openstack_pools | unique }}"
-  register: created_pools
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  failed_when: false
+- name: pool related tasks
+  block:
+    - name: list existing pool(s)
+      command: >
+        {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+        osd pool get {{ item.name }} size
+      with_items: "{{ openstack_pools | unique }}"
+      register: created_pools
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      failed_when: false
 
-- name: create openstack pool(s)
-  command: >
-    {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
-    osd pool create {{ item.0.name }}
-    {{ item.0.pg_num }}
-    {{ item.0.pgp_num | default(item.0.pg_num) }}
-    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
-    {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
-    {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
-    {{ item.0.erasure_profile }}
-    {%- endif %}
-    {{ item.0.expected_num_objects | default('') }}
-  with_together:
-    - "{{ openstack_pools | unique }}"
-    - "{{ created_pools.results }}"
-  changed_when: false
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - item.1.get('rc', 0) != 0
+    - name: create openstack pool(s)
+      command: >
+        {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+        osd pool create {{ item.0.name }}
+        {{ item.0.pg_num }}
+        {{ item.0.pgp_num | default(item.0.pg_num) }}
+        {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+        {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
+        {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
+        {{ item.0.erasure_profile }}
+        {%- endif %}
+        {{ item.0.expected_num_objects | default('') }}
+      with_together:
+        - "{{ openstack_pools | unique }}"
+        - "{{ created_pools.results }}"
+      changed_when: false
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      when:
+        - item.1.get('rc', 0) != 0
 
-- name: assign application to pool(s)
-  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
-  with_items: "{{ openstack_pools | unique }}"
-  changed_when: false
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - item.application is defined
+    - name: customize pool size
+      command: >
+        {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+        osd pool set {{ item.name }} size {{ item.size | default('') }}
+      with_items: "{{ openstack_pools | unique }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      changed_when: false
+      when: item.size | default ("") != ""
+
+    - name: assign application to pool(s)
+      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+      with_items: "{{ openstack_pools | unique }}"
+      changed_when: false
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      when:
+        - item.application is defined
 
 - name: create openstack cephx key(s)
   ceph_key:
@@ -81,4 +92,5 @@
   when:
     - cephx
     - openstack_config
-    - item.0 != groups[mon_group_name]
\ No newline at end of file
+    - item.0 != groups[mon_group_name]
+
index b3ff65643fd03f46738b095f530011bb465e9b5c..1e108be4457bb03a3ac07d730d7b1640133870ff 100644 (file)
@@ -37,10 +37,13 @@ rgw_pull_proto: "http"
 #rgw_create_pools:
 #  defaults.rgw.buckets.data:
 #    pg_num: 16
+#    size: ""
 #  defaults.rgw.buckets.index:
 #    pg_num: 32
+#    size: ""
 #  foo:
 #    pg_num: 4
+#    size: ""
 
 
 ##########
index 88935e0f7129e15a33e2851e6ddc03c08adc7072..9a1a65ae57dc935d36bc269e9b18f6a70c393b40 100644 (file)
   include_tasks: docker/main.yml
   when: containerized_deployment
 
-- name: create rgw pools if rgw_create_pools is defined
-  command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
-  changed_when: false
-  with_dict: "{{ rgw_create_pools }}"
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  run_once: true
+- name: rgw pool related tasks
   when:
     - rgw_create_pools is defined
+  block:
+    - name: create rgw pools if rgw_create_pools is defined
+      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+      changed_when: false
+      with_dict: "{{ rgw_create_pools }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      run_once: true
+
+    - name: customize pool size
+      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default('') }}"
+      with_dict: "{{ rgw_create_pools }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      changed_when: false
+      run_once: true
+      when: item.value.size | default ("") != ""
index 1a9600a14d29286f5edf5c8f500a3b7f3eed65b2..f2ec9711414f18aefba930ebc6526989bb5667aa 100644 (file)
@@ -1,9 +1,20 @@
 {
        "ceph_conf_overrides": {
                "global": {
-                       "osd_pool_default_pg_num": 12,
-                       "osd_pool_default_size": 1
+                       "osd_pool_default_pg_num": 12
                }
        },
+       "cephfs_pools": [
+               {
+                 "name": "cephfs_metadata",
+                 "pgs": 8,
+                 "size": 2
+               },
+               {
+                 "name": "cephfs_data",
+                 "pgs": 8,
+                 "size": 2
+               }
+       ],
   "ceph_mon_docker_memory_limit": "2g"
 }
index 46934a5b037f076ff44ea5ce5dc0ef4c013cfaaf..5f9c83052467e5616baf729454ba5f1f456c2bbf 100644 (file)
@@ -27,6 +27,7 @@ openstack_glance_pool:
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
+  size: ""
 openstack_cinder_pool:
   name: "volumes"
   pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -35,6 +36,7 @@ openstack_cinder_pool:
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
+  size: ""
 openstack_pools:
   - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+  - "{{ openstack_cinder_pool }}"
index 21e5c5691952c9fb191b9dd7582cc5a7a67bb651..55180053b1a941438abe11e4046feabb29f099a8 100644 (file)
@@ -9,6 +9,7 @@ test:
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
+  size: ""
 test2:
   name: "test2"
   pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -17,6 +18,7 @@ test2:
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
+  size: ""
 pools:
   - "{{ test }}"
   - "{{ test2 }}"
index c3b5a92d067696208faa9fcfd350ac4e17b65dd0..57e385766f94635f8b6a725fb5c505bee68bf0ba 100644 (file)
@@ -28,6 +28,7 @@ openstack_glance_pool:
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
+  size: ""
 openstack_cinder_pool:
   name: "volumes"
   pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -36,6 +37,7 @@ openstack_cinder_pool:
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
+  size: ""
 openstack_pools:
   - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+  - "{{ openstack_cinder_pool }}"