git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
rgws: rename create_pools variable to rgw_create_pools.
author: jtudelag <jtudelag@redhat.com>
Thu, 31 May 2018 15:01:44 +0000 (17:01 +0200)
committer: Guillaume Abrioux <gabrioux@redhat.com>
Tue, 5 Jun 2018 16:56:44 +0000 (18:56 +0200)
Renamed to be consistent with the role (rgw) and have a meaningful name.

Signed-off-by: Jorge Tudela <jtudelag@redhat.com>
(cherry picked from commit 600e1e2c2680e8102f4ef17855d4bcd89d6ef733)
Signed-off-by: Sébastien Han <seb@redhat.com>
15 files changed:
group_vars/rgws.yml.sample
roles/ceph-rgw/defaults/main.yml
roles/ceph-rgw/tasks/docker/main.yml
roles/ceph-rgw/tasks/docker/rgw_pool_pgs.yml
roles/ceph-rgw/tasks/main.yml
roles/ceph-rgw/tasks/rgw_pool_pgs.yml
tests/functional/centos/7/bs-osds-container/group_vars/rgws.yml
tests/functional/centos/7/bs-osds-non-container/group_vars/rgws.yml
tests/functional/centos/7/cluster/group_vars/rgws
tests/functional/centos/7/docker-collocation/group_vars/rgws
tests/functional/centos/7/docker/group_vars/rgws
tests/functional/centos/7/fs-osds-container/group_vars/rgws.yml
tests/functional/centos/7/fs-osds-non-container/group_vars/rgws.yml
tests/functional/tests/rgw/test_rgw_tuning.py
tests/functional/ubuntu/16.04/cluster/group_vars/rgws

index b756756e3375b3f5e6826fc1f48e552b2e9eec3c..a983c5011947b3f5ae696c8e7062ad7f9080eb71 100644 (file)
@@ -41,8 +41,8 @@ dummy:
 # This is important because they would be created with the default
 # of 8.
 # New pools and their corresponding pg_nums can be created
-# by adding to the create_pools dictionary (see foo).
-#create_pools:
+# by adding to the rgw_create_pools dictionary (see foo).
+#rgw_create_pools:
 #  defaults.rgw.buckets.data:
 #    pg_num: 16
 #  defaults.rgw.buckets.index:
index 75f9cc49fb08f2e65be6aec9bf04415c450f8769..617a4977356c8648e6def2d09fd54ca08a3f4c9c 100644 (file)
@@ -33,8 +33,8 @@ rgw_pull_proto: "http"
 # This is important because they would be created with the default
 # of 8.
 # New pools and their corresponding pg_nums can be created
-# by adding to the create_pools dictionary (see foo).
-#create_pools:
+# by adding to the rgw_create_pools dictionary (see foo).
+#rgw_create_pools:
 #  defaults.rgw.buckets.data:
 #    pg_num: 16
 #  defaults.rgw.buckets.index:
index df26503b84a41d9388aaee9cccad485f44a3d45f..1496a42d4e9cb642a96eba3dfa0b541c67982775 100644 (file)
@@ -13,4 +13,4 @@
 - name: include rgw_pool_pgs.yml
   include: rgw_pool_pgs.yml
   when:
-    - create_pools is defined
+    - rgw_create_pools is defined
index ba781bb76cff7280475ed1f75034071d3fcc7734..d2e8feed04e264cbbcb18c1168fa0a236a497428 100644 (file)
@@ -1,10 +1,10 @@
 ---
 # If admin key has been copied to the RGWs, we can run the command from them.
-- name: create rgw pools if create_pools is defined
+- name: create rgw pools if rgw_create_pools is defined
   command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
   changed_when: false
   run_once: true
-  with_dict: "{{ create_pools }}"
+  with_dict: "{{ rgw_create_pools }}"
   when:
     - copy_admin_key
 
     - not copy_admin_key
 
 # If no admin key has been copied to the RGWs, we have to run the command from the first monitor.
-- name: create rgw pools if create_pools is defined, delegated to first monitor
+- name: create rgw pools if rgw_create_pools is defined, delegated to first monitor
   command: "{{ docker_exec_mon_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
   changed_when: false
   run_once: true
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  with_dict: "{{ create_pools }}"
+  with_dict: "{{ rgw_create_pools }}"
   when:
     - not copy_admin_key
index dbbf7f72a929e82fca19ce140ff1654b05e60a7f..7eb05ac099167ea49594d9c21a6bf27647f51033 100644 (file)
@@ -29,7 +29,7 @@
 - name: include rgw_pool_pgs.yml
   include: rgw_pool_pgs.yml
   when:
-    - create_pools is defined
+    - rgw_create_pools is defined
     - not containerized_deployment
   static: False 
     
@@ -48,9 +48,3 @@
     - containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
-
-- name: include rgw_pool_pgs.yml
-  include: rgw_pool_pgs.yml
-  when:
-    - create_pools is defined
-  static: False
index 3675475b344e3b82af27bb07f6239faaa19ab61e..41074b67ff8ee2019f71c60898035729dce2ad0a 100644 (file)
@@ -1,9 +1,9 @@
 ---
-- name: create rgw pools if create_pools is defined
+- name: create rgw pools if rgw_create_pools is defined
   command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
   changed_when: false
   run_once: true
-  with_dict: "{{ create_pools }}"
+  with_dict: "{{ rgw_create_pools }}"
   when:
     - not containerized_deployment
 
   when:
     - containerized_deployment
 
-- name: create rgw pools if create_pools is defined
+- name: create rgw pools if rgw_create_pools is defined
   command: "{{ docker_exec_rgw_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
   changed_when: false
   run_once: true
-  with_dict: "{{ create_pools }}"
+  with_dict: "{{ rgw_create_pools }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
     - containerized_deployment
index 6aab772a07efe59c5d7aec48c83f9da473310f66..a882543144ab13e008ffb46a7cd2ea0061459711 100644 (file)
@@ -1,5 +1,5 @@
 copy_admin_key: true
-create_pools:
+rgw_create_pools:
   foo:
     pg_num: 17
   bar:
index 6aab772a07efe59c5d7aec48c83f9da473310f66..a882543144ab13e008ffb46a7cd2ea0061459711 100644 (file)
@@ -1,5 +1,5 @@
 copy_admin_key: true
-create_pools:
+rgw_create_pools:
   foo:
     pg_num: 17
   bar:
index 6aab772a07efe59c5d7aec48c83f9da473310f66..a882543144ab13e008ffb46a7cd2ea0061459711 100644 (file)
@@ -1,5 +1,5 @@
 copy_admin_key: true
-create_pools:
+rgw_create_pools:
   foo:
     pg_num: 17
   bar:
index 75c89230f69debbf1bef4d493130e1a52cd75a72..53dd034b1b7bb8678c4dd697cc428e4e6d96ab96 100644 (file)
@@ -1,5 +1,5 @@
 ---
-create_pools:
+rgw_create_pools:
   foo:
     pg_num: 17
   bar:
index 65f9b231112ff04546a29dec1ca7c4af8eecfffe..8f2a9a36826c94db739407d0324930035d152afe 100644 (file)
@@ -1,6 +1,6 @@
 ---
 copy_admin_key: True
-create_pools:
+rgw_create_pools:
   foo:
     pg_num: 17
   bar:
index 6aab772a07efe59c5d7aec48c83f9da473310f66..a882543144ab13e008ffb46a7cd2ea0061459711 100644 (file)
@@ -1,5 +1,5 @@
 copy_admin_key: true
-create_pools:
+rgw_create_pools:
   foo:
     pg_num: 17
   bar:
index 6aab772a07efe59c5d7aec48c83f9da473310f66..a882543144ab13e008ffb46a7cd2ea0061459711 100644 (file)
@@ -1,5 +1,5 @@
 copy_admin_key: true
-create_pools:
+rgw_create_pools:
   foo:
     pg_num: 17
   bar:
index 2b3d75c2559a61d09613cdc8ceb48a0fa5446cf4..7ef561e913cefd1cd0a147a6dde50307b84e75ab 100644 (file)
@@ -21,7 +21,7 @@ class TestRGWs(object):
     def test_rgw_tuning_pools_are_set(self, node, host):
         cmd = "sudo ceph --cluster={} --connect-timeout 5 osd dump".format(node["cluster_name"])
         output = host.check_output(cmd)
-        pools = node["vars"]["create_pools"]
+        pools = node["vars"]["rgw_create_pools"]
         for pool_name, pg_num in pools.items():
             assert pool_name in output
             pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
@@ -36,7 +36,7 @@ class TestRGWs(object):
             cluster=cluster
         )
         output = host.check_output(cmd)
-        pools = node["vars"]["create_pools"]
+        pools = node["vars"]["rgw_create_pools"]
         for pool_name, pg_num in pools.items():
             assert pool_name in output
             pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
index 6aab772a07efe59c5d7aec48c83f9da473310f66..a882543144ab13e008ffb46a7cd2ea0061459711 100644 (file)
@@ -1,5 +1,5 @@
 copy_admin_key: true
-create_pools:
+rgw_create_pools:
   foo:
     pg_num: 17
   bar: