git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Adds RGWs pool creation to containerized installation.
author jtudelag <jtudelag@redhat.com>
Sun, 4 Mar 2018 22:06:48 +0000 (23:06 +0100)
committer Sébastien Han <seb@redhat.com>
Mon, 4 Jun 2018 04:23:42 +0000 (06:23 +0200)
ceph command has to be executed from one of the monitor containers
if not admin copy present in RGWs. Task has to be delegated then.

Adds test to check proper RGW pool creation for Docker container scenarios.

Signed-off-by: Jorge Tudela <jtudelag@redhat.com>
roles/ceph-rgw/tasks/docker/main.yml
roles/ceph-rgw/tasks/docker/rgw_pool_pgs.yml [new file with mode: 0644]
roles/ceph-rgw/tasks/main.yml
roles/ceph-rgw/tasks/rgw_pool_pgs.yml
tests/functional/centos/7/docker-collocation/group_vars/rgws [new file with mode: 0644]
tests/functional/centos/7/docker/group_vars/rgws
tests/functional/tests/rgw/test_rgw_tuning.py

index 4850e4b7c792d1aa60d09816af6b6c74bf5ff3f8..a46797a31e7b60da3d274f825f4b74737b51c749 100644 (file)
@@ -6,3 +6,8 @@
 
 - name: include start_docker_rgw.yml
   include: start_docker_rgw.yml
+
+- name: include rgw_pool_pgs.yml
+  include: rgw_pool_pgs.yml
+  when:
+    - create_pools is defined
diff --git a/roles/ceph-rgw/tasks/docker/rgw_pool_pgs.yml b/roles/ceph-rgw/tasks/docker/rgw_pool_pgs.yml
new file mode 100644 (file)
index 0000000..ba781bb
--- /dev/null
@@ -0,0 +1,26 @@
+---
+# If admin key has been copied to the RGWs, we can run the command from them.
+- name: create rgw pools if create_pools is defined
+  command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+  changed_when: false
+  run_once: true
+  with_dict: "{{ create_pools }}"
+  when:
+    - copy_admin_key
+
+# If no admin key has been copied to the RGWs, we have to run the command from the first monitor.
+- name: set_fact docker_exec_mon_cmd
+  set_fact:
+    docker_exec_mon_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
+  when:
+    - not copy_admin_key
+
+# If no admin key has been copied to the RGWs, we have to run the command from the first monitor.
+- name: create rgw pools if create_pools is defined, delegated to first monitor
+  command: "{{ docker_exec_mon_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+  changed_when: false
+  run_once: true
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  with_dict: "{{ create_pools }}"
+  when:
+    - not copy_admin_key
index 6ae24e99782eb81611e5dbd461af4274ac756cae..f5db976913789f466733f4ef68ae77a8e623177e 100644 (file)
@@ -33,8 +33,9 @@
   include: rgw_pool_pgs.yml
   when:
     - create_pools is defined
-  static: False
-
+    - not containerized_deployment
+  static: False
+
 - name: include multisite/main.yml
   include: multisite/main.yml
   when:
@@ -50,3 +51,9 @@
     - containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
+
+- name: include rgw_pool_pgs.yml
+  include: rgw_pool_pgs.yml
+  when:
+    - create_pools is defined
+  static: False
index 4e6d38817367df06af03d6b33be2a42043c01a76..3675475b344e3b82af27bb07f6239faaa19ab61e 100644 (file)
@@ -2,7 +2,23 @@
 - name: create rgw pools if create_pools is defined
   command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
   changed_when: false
-  failed_when: false
+  run_once: true
   with_dict: "{{ create_pools }}"
   when:
-    - create_pools is defined
+    - not containerized_deployment
+
+# On first monitor.
+- name: set_fact docker_exec_rgw_cmd
+  set_fact:
+    docker_exec_rgw_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
+  when:
+    - containerized_deployment
+
+- name: create rgw pools if create_pools is defined
+  command: "{{ docker_exec_rgw_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+  changed_when: false
+  run_once: true
+  with_dict: "{{ create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - containerized_deployment
diff --git a/tests/functional/centos/7/docker-collocation/group_vars/rgws b/tests/functional/centos/7/docker-collocation/group_vars/rgws
new file mode 100644 (file)
index 0000000..75c8923
--- /dev/null
@@ -0,0 +1,6 @@
+---
+create_pools:
+  foo:
+    pg_num: 17
+  bar:
+    pg_num: 19
index faec1523329074f762f8a27259efa08b3bfb7721..65f9b231112ff04546a29dec1ca7c4af8eecfffe 100644 (file)
@@ -1,2 +1,7 @@
 ---
 copy_admin_key: True
+create_pools:
+  foo:
+    pg_num: 17
+  bar:
+    pg_num: 19
index 9140d23e5cdaf7572d3bacd98bdd167bf0bcded9..2b3d75c2559a61d09613cdc8ceb48a0fa5446cf4 100644 (file)
@@ -26,3 +26,18 @@ class TestRGWs(object):
             assert pool_name in output
             pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
             assert pg_num_str in output
+
+    @pytest.mark.docker
+    def test_docker_rgw_tuning_pools_are_set(self, node, host):
+        hostname = node["vars"]["inventory_hostname"]
+        cluster = node['cluster_name']
+        cmd = "sudo docker exec ceph-rgw-{hostname} ceph --cluster={cluster} --connect-timeout 5 osd dump".format(
+            hostname=hostname,
+            cluster=cluster
+        )
+        output = host.check_output(cmd)
+        pools = node["vars"]["create_pools"]
+        for pool_name, pg_num in pools.items():
+            assert pool_name in output
+            pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
+            assert pg_num_str in output