The ceph command has to be executed from one of the monitor containers
if no admin keyring is present on the RGWs; the task then has to be delegated.
Adds test to check proper RGW pool creation for Docker container scenarios.
Signed-off-by: Jorge Tudela <jtudelag@redhat.com>
(cherry picked from commit 8704144e3157aa253fb7563fe701d9d434bf2f3e)
Signed-off-by: Sébastien Han <seb@redhat.com>
- name: include start_docker_rgw.yml
include: start_docker_rgw.yml
+
+- name: include rgw_pool_pgs.yml
+ include: rgw_pool_pgs.yml
+ when:
+ - create_pools is defined
--- /dev/null
+---
+# If admin key has been copied to the RGWs, we can run the command from them.
+- name: create rgw pools if create_pools is defined
+ command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+ changed_when: false
+ run_once: true
+ with_dict: "{{ create_pools }}"
+ when:
+ - copy_admin_key
+
+# If no admin key has been copied to the RGWs, we have to run the command from the first monitor.
+- name: set_fact docker_exec_mon_cmd
+ set_fact:
+ docker_exec_mon_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
+ when:
+ - not copy_admin_key
+
+# Delegate the pool creation to the first monitor, running it inside that monitor's container.
+- name: create rgw pools if create_pools is defined, delegated to first monitor
+ command: "{{ docker_exec_mon_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+ changed_when: false
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_dict: "{{ create_pools }}"
+ when:
+ - not copy_admin_key
include: rgw_pool_pgs.yml
when:
- create_pools is defined
- static: False
-
+ - not containerized_deployment
+ static: False
+
- name: include multisite/main.yml
include: multisite/main.yml
when:
- containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
+
+- name: include rgw_pool_pgs.yml
+ include: rgw_pool_pgs.yml
+ when:
+ - create_pools is defined
+ static: False
- name: create rgw pools if create_pools is defined
command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
changed_when: false
- failed_when: false
+ run_once: true
with_dict: "{{ create_pools }}"
when:
- - create_pools is defined
+ - not containerized_deployment
+
+# Build the docker exec prefix targeting the first monitor's container.
+- name: set_fact docker_exec_rgw_cmd
+ set_fact:
+ docker_exec_rgw_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
+ when:
+ - containerized_deployment
+
+- name: create rgw pools if create_pools is defined
+ command: "{{ docker_exec_rgw_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+ changed_when: false
+ run_once: true
+ with_dict: "{{ create_pools }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - containerized_deployment
--- /dev/null
+---
+create_pools:
+ foo:
+ pg_num: 17
+ bar:
+ pg_num: 19
---
copy_admin_key: True
+create_pools:
+ foo:
+ pg_num: 17
+ bar:
+ pg_num: 19
assert pool_name in output
pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
assert pg_num_str in output
+
+ @pytest.mark.docker
+ def test_docker_rgw_tuning_pools_are_set(self, node, host):
+ hostname = node["vars"]["inventory_hostname"]
+ cluster = node['cluster_name']
+ cmd = "sudo docker exec ceph-rgw-{hostname} ceph --cluster={cluster} --connect-timeout 5 osd dump".format(
+ hostname=hostname,
+ cluster=cluster
+ )
+ output = host.check_output(cmd)
+ pools = node["vars"]["create_pools"]
+ for pool_name, pg_num in pools.items():
+ assert pool_name in output
+ pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
+ assert pg_num_str in output