From 1ac94c048ff1d1385de2892d0ecef7879ec563e9 Mon Sep 17 00:00:00 2001 From: guihecheng Date: Fri, 9 Nov 2018 08:56:57 +0800 Subject: [PATCH] rgw: add support for multiple rgw instances on a single host With this, we could have multiple rgw instances on a single host with a single run, don't have to use rgw-standalone.yml which does not seems able to bind ports separately. If you want to have multiple rgw instances, just change 'radosgw_instances' to the number you want, which defaults to 1. Not compatible with Multi-Site yet. Signed-off-by: guihecheng --- group_vars/all.yml.sample | 1 + infrastructure-playbooks/purge-cluster.yml | 2 +- .../purge-docker-cluster.yml | 4 +- roles/ceph-config/templates/ceph.conf.j2 | 12 ++- roles/ceph-defaults/defaults/main.yml | 1 + roles/ceph-facts/tasks/facts.yml | 7 ++ .../templates/restart_rgw_daemon.sh.j2 | 77 +++++++++++++------ roles/ceph-rgw/handlers/main.yml | 2 +- roles/ceph-rgw/tasks/common.yml | 12 ++- .../tasks/docker/start_docker_rgw.yml | 17 +++- roles/ceph-rgw/tasks/pre_requisite.yml | 18 +++-- roles/ceph-rgw/tasks/start_radosgw.yml | 5 +- .../templates/ceph-radosgw.service.j2 | 12 +-- .../centos/7/cluster/group_vars/all | 1 + .../7/docker-collocation/group_vars/all | 3 +- .../functional/centos/7/docker/group_vars/all | 1 + tests/functional/tests/rgw/test_rgw.py | 34 +++++--- tests/functional/tests/rgw/test_rgw_tuning.py | 6 +- 18 files changed, 152 insertions(+), 63 deletions(-) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index cc6481216..e69f2fda9 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -405,6 +405,7 @@ dummy: #radosgw_address: 0.0.0.0 #radosgw_address_block: subnet #radosgw_keystone_ssl: false # activate this when using keystone PKI keys +#radosgw_num_instances: 1 # Rados Gateway options #email_address: foo@bar.com diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml index 2e918e3ee..31b79235e 100644 --- 
a/infrastructure-playbooks/purge-cluster.yml +++ b/infrastructure-playbooks/purge-cluster.yml @@ -109,7 +109,7 @@ - name: stop ceph rgws with systemd service: - name: ceph-radosgw@rgw.{{ ansible_hostname }} + name: ceph-radosgw@rgw.* state: stopped enabled: no failed_when: false diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml index aa7d22af8..7ce366802 100644 --- a/infrastructure-playbooks/purge-docker-cluster.yml +++ b/infrastructure-playbooks/purge-docker-cluster.yml @@ -180,7 +180,7 @@ - name: disable ceph rgw service (new unit name) service: - name: "ceph-radosgw@rgw.{{ ansible_hostname }}" + name: "ceph-radosgw@*" state: stopped enabled: no ignore_errors: true @@ -188,7 +188,7 @@ - name: remove ceph rgw container docker_container: image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" - name: "ceph-rgw-{{ ansible_hostname }}" + name: "ceph-rgw-{{ ansible_hostname }}-*" state: absent ignore_errors: true diff --git a/roles/ceph-config/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2 index 4ce937f07..b73361b75 100644 --- a/roles/ceph-config/templates/ceph.conf.j2 +++ b/roles/ceph-config/templates/ceph.conf.j2 @@ -101,11 +101,15 @@ osd memory target = {{ _osd_memory_target | default(osd_memory_target) }} {% for host in groups[rgw_group_name] %} {% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_hostname']) %} {# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #} -[client.rgw.{{ _rgw_hostname }}] +{% if hostvars[host]['rgw_instances'] is defined %} +{% for instance in hostvars[host]['rgw_instances'] %} +[client.rgw.{{ _rgw_hostname + '.' 
+ instance['instance_name'] }}] host = {{ _rgw_hostname }} -keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring -log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log -rgw frontends = {{ radosgw_frontend_type }} {{ 'port' if radosgw_frontend_type == 'civetweb' else 'endpoint' }}={{ hostvars[host]['_radosgw_address'] }}:{{ radosgw_frontend_port }} {{ radosgw_frontend_options }} +keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring +log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] + '.' + instance['instance_name'] }}.log +rgw frontends = {{ radosgw_frontend_type }} {{ 'port' if radosgw_frontend_type == 'civetweb' else 'endpoint' }}={{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} {{ radosgw_frontend_options }} +{% endfor %} +{% endif %} {% endfor %} {% endif %} diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 0de35990b..7ff5994d1 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -397,6 +397,7 @@ radosgw_interface: interface radosgw_address: 0.0.0.0 radosgw_address_block: subnet radosgw_keystone_ssl: false # activate this when using keystone PKI keys +radosgw_num_instances: 1 # Rados Gateway options email_address: foo@bar.com diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml index eb703ce5a..3973797fc 100644 --- a/roles/ceph-facts/tasks/facts.yml +++ b/roles/ceph-facts/tasks/facts.yml @@ -264,3 +264,10 @@ import_tasks: set_radosgw_address.yml when: - inventory_hostname in groups.get(rgw_group_name, []) + +- name: set_fact rgw_instances + set_fact: + rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': _radosgw_address, 'radosgw_frontend_port': radosgw_frontend_port|int + item|int}]) }}" + with_sequence: start=0 
end={{ radosgw_num_instances|int - 1 }} + when: + - inventory_hostname in groups.get(rgw_group_name, []) diff --git a/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 index 4d970ffa8..b6f58b08b 100644 --- a/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 +++ b/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 @@ -2,20 +2,48 @@ RETRIES="{{ handler_health_rgw_check_retries }}" DELAY="{{ handler_health_rgw_check_delay }}" -RGW_NAME="{{ ansible_hostname }}" -RGW_PORT="{{ radosgw_frontend_port }}" +HOST_NAME="{{ ansible_hostname }}" +RGW_NUMS={{ radosgw_num_instances }} +RGW_BASE_PORT={{ radosgw_frontend_port }} +declare -a DOCKER_EXECS +for ((i=0; i<${RGW_NUMS}; i++)); do + DOCKER_EXECS[i]="" {% if containerized_deployment %} -DOCKER_EXEC="{{ container_binary }} exec ceph-rgw-{{ ansible_hostname }}" + CONTAINER_NAME="ceph-rgw-${HOST_NAME}-rgw${i}" + DOCKER_EXECS[i]="{{ container_binary }} exec ${CONTAINER_NAME}" {% endif %} +done +declare -a SOCKETS # Backward compatibility -$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok -$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_hostname }}.asok +for ((i=0; i<${RGW_NUMS}; i++)); do + SOCKETS[i]="EMPTY_SOCKET" + ${DOCKER_EXECS[i]} test -S /var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok && SOCKETS[i]=/var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok + ${DOCKER_EXECS[i]} test -S /var/run/ceph/{{ cluster }}-client.rgw.${HOST_NAME}.rgw${i}.asok && SOCKETS[i]=/var/run/ceph/{{ cluster }}-client.rgw.${HOST_NAME}.rgw${i}.asok +done RGW_IP={{ hostvars[inventory_hostname]['_radosgw_address'] }} +check_socket() { + local i=$1 + local succ=0 + local count=10 + # Wait and ensure the socket exists after restarting the daemon + while [ $count 
-ne 0 ]; do + ${DOCKER_EXECS[i]} test -S ${SOCKETS[i]} && succ=$((succ+1)) && break + sleep $DELAY + let count=count-1 + done + if [ $succ -ne 1 ]; then + echo "Socket file ${SOCKETS[i]} could not be found, which means Rados Gateway is not running. Showing ceph-rgw unit logs now:" + journalctl -u ceph-radosgw@rgw.${HOST_NAME}.rgw${i} + exit 1 + fi +} + check_for_curl_or_wget() { - if $DOCKER_EXEC command -v wget &>/dev/null; then + local i=$1 + if ${DOCKER_EXECS[i]} command -v wget &>/dev/null; then rgw_test_command="wget --quiet" - elif $DOCKER_EXEC command -v curl &>/dev/null; then + elif ${DOCKER_EXECS[i]} command -v curl &>/dev/null; then rgw_test_command="curl --fail --silent --output /dev/null" else echo "It seems that neither curl or wget are available on your system." @@ -25,27 +53,32 @@ check_for_curl_or_wget() { } check_rest() { - check_for_curl_or_wget + local i=$1 + check_for_curl_or_wget ${i} + local succ=0 while [ $RETRIES -ne 0 ]; do - test "$rgw_test_command http://$RGW_IP:$RGW_PORT" && exit 0 + test "$rgw_test_command http://$RGW_IP:$((RGW_BASE_PORT+i))" && succ=$((succ+1)) && break sleep $DELAY let RETRIES=RETRIES-1 done - # If we reach this point, it means there is a problem with the connection to rgw - echo "Error connecting locally to Rados Gateway service: http://$rgw_listen" - exit 1 + if [ $succ -ne 1 ]; then + # If we reach this point, it means there is a problem with the connection to rgw + echo "Error connecting locally to Rados Gateway service: http://$rgw_listen" + exit 1 + fi } # First, restart the daemon -systemctl restart ceph-radosgw@rgw.${RGW_NAME} +for ((i=0; i<${RGW_NUMS}; i++)); do + systemctl restart ceph-radosgw@rgw.${HOST_NAME}.rgw${i} +done + +# Check socket files +for ((i=0; i<${RGW_NUMS}; i++)); do + check_socket ${i} +done -COUNT=10 -# Wait and ensure the socket exists after restarting the daemon -while [ $COUNT -ne 0 ]; do - $DOCKER_EXEC test -S $SOCKET && check_rest - sleep $DELAY - let COUNT=COUNT-1 +# Check rest +for 
((i=0; i<${RGW_NUMS}; i++)); do + check_rest ${i} done -echo "Socket file ${SOCKET} could not be found, which means Rados Gateway is not running. Showing ceph-rgw unit logs now:" -journalctl -u ceph-radosgw@rgw.${RGW_NAME} -exit 1 diff --git a/roles/ceph-rgw/handlers/main.yml b/roles/ceph-rgw/handlers/main.yml index 129078767..ce906fa95 100644 --- a/roles/ceph-rgw/handlers/main.yml +++ b/roles/ceph-rgw/handlers/main.yml @@ -6,5 +6,5 @@ - name: restart rgw service: - name: ceph-radosgw@rgw.{{ ansible_hostname }} + name: ceph-radosgw@* state: restarted diff --git a/roles/ceph-rgw/tasks/common.yml b/roles/ceph-rgw/tasks/common.yml index 5c36ef19b..268a0c0bb 100644 --- a/roles/ceph-rgw/tasks/common.yml +++ b/roles/ceph-rgw/tasks/common.yml @@ -7,9 +7,19 @@ group: "{{ ceph_uid if containerized_deployment else 'ceph' }}" mode: "0755" with_items: - - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }} - "{{ rbd_client_admin_socket_path }}" +- name: create rados gateway instance directories + file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}" + state: directory + owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment else 'ceph' }}" + mode: "0755" + with_items: "{{ rgw_instances }}" + when: + - rgw_instances is defined + - name: copy ceph keyring(s) if needed copy: src: "{{ fetch_directory }}/{{ fsid }}/{{ item.name }}" diff --git a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml index 83c270d75..90db90925 100644 --- a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml +++ b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml @@ -1,4 +1,16 @@ --- +- name: generate environment file + become: true + copy: + dest: "/var/lib/ceph/radosgw/ceph-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/EnvironmentFile" + owner: "root" + group: "root" + mode: "0644" + content: | + INST_NAME={{ item.instance_name }} + 
INST_PORT={{ item.radosgw_frontend_port }} + with_items: "{{ rgw_instances }}" + - name: generate systemd unit file become: true template: @@ -26,7 +38,8 @@ - name: systemd start rgw container systemd: - name: "ceph-radosgw@rgw.{{ ansible_hostname }}.service" + name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }} state: started enabled: yes - daemon_reload: yes \ No newline at end of file + daemon_reload: yes + with_items: "{{ rgw_instances }}" diff --git a/roles/ceph-rgw/tasks/pre_requisite.yml b/roles/ceph-rgw/tasks/pre_requisite.yml index d736c8915..6219d2799 100644 --- a/roles/ceph-rgw/tasks/pre_requisite.yml +++ b/roles/ceph-rgw/tasks/pre_requisite.yml @@ -1,15 +1,19 @@ --- -- name: create rados gateway keyring - command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring +- name: create rados gateway instance keyring + command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }}.{{ item.instance_name }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring args: - creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring + creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring changed_when: false - when: cephx + with_items: "{{ rgw_instances }}" + when: + - cephx -- name: set rados gateway key permissions +- name: set rados gateway instance key permissions file: - path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring + path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring owner: "ceph" 
group: "ceph" mode: "0600" - when: cephx + with_items: "{{ rgw_instances }}" + when: + - cephx diff --git a/roles/ceph-rgw/tasks/start_radosgw.yml b/roles/ceph-rgw/tasks/start_radosgw.yml index ca6f21316..4bac51cc1 100644 --- a/roles/ceph-rgw/tasks/start_radosgw.yml +++ b/roles/ceph-rgw/tasks/start_radosgw.yml @@ -15,11 +15,12 @@ when: - ceph_rgw_systemd_overrides is defined -- name: start rgw +- name: start rgw instance service: - name: ceph-radosgw@rgw.{{ ansible_hostname }} + name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }} state: started enabled: yes + with_items: "{{ rgw_instances }}" - name: enable the ceph-radosgw.target service systemd: diff --git a/roles/ceph-rgw/templates/ceph-radosgw.service.j2 b/roles/ceph-rgw/templates/ceph-radosgw.service.j2 index 0d7944c1e..1e05a8cec 100644 --- a/roles/ceph-rgw/templates/ceph-radosgw.service.j2 +++ b/roles/ceph-rgw/templates/ceph-radosgw.service.j2 @@ -3,9 +3,9 @@ Description=Ceph RGW After=docker.service [Service] -EnvironmentFile=-/etc/environment -ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }} -ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }} +EnvironmentFile=/var/lib/ceph/radosgw/ceph-%i/EnvironmentFile +ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME} +ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}-${INST_NAME} ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \ --memory={{ ceph_rgw_docker_memory_limit }} \ {% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%} @@ -19,10 +19,12 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \ -v /etc/localtime:/etc/localtime:ro \ -e CEPH_DAEMON=RGW \ -e CLUSTER={{ cluster }} \ - --name=ceph-rgw-{{ ansible_hostname }} \ + -e RGW_NAME={{ ansible_hostname }}.${INST_NAME} \ + -e 
RGW_CIVETWEB_PORT=${INST_PORT} \ + --name=ceph-rgw-{{ ansible_hostname }}-${INST_NAME} \ {{ ceph_rgw_docker_extra_env }} \ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }} +ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME} Restart=always RestartSec=10s TimeoutStartSec=120 diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all index e5342684a..940f08e52 100644 --- a/tests/functional/centos/7/cluster/group_vars/all +++ b/tests/functional/centos/7/cluster/group_vars/all @@ -4,6 +4,7 @@ ceph_repository: community public_network: "192.168.1.0/24" cluster_network: "192.168.2.0/24" radosgw_interface: eth1 +radosgw_num_instances: 2 ceph_conf_overrides: global: osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/docker-collocation/group_vars/all b/tests/functional/centos/7/docker-collocation/group_vars/all index 71554cfd3..fe3e7ab3e 100644 --- a/tests/functional/centos/7/docker-collocation/group_vars/all +++ b/tests/functional/centos/7/docker-collocation/group_vars/all @@ -6,6 +6,7 @@ docker: True containerized_deployment: True monitor_interface: eth1 radosgw_interface: eth1 +radosgw_num_instances: 2 ceph_mon_docker_subnet: "{{ public_network }}" ceph_docker_on_openstack: False public_network: "192.168.15.0/24" @@ -15,4 +16,4 @@ rgw_bucket_default_quota_max_objects: 1638400 ceph_conf_overrides: global: osd_pool_default_pg_num: 8 - osd_pool_default_size: 1 \ No newline at end of file + osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all index f9582c127..c9d554b09 100644 --- a/tests/functional/centos/7/docker/group_vars/all +++ b/tests/functional/centos/7/docker/group_vars/all @@ -6,6 +6,7 @@ docker: True containerized_deployment: True monitor_interface: eth1 
radosgw_interface: eth1 +radosgw_num_instances: 2 ceph_mon_docker_subnet: "{{ public_network }}" ceph_docker_on_openstack: False public_network: "192.168.17.0/24" diff --git a/tests/functional/tests/rgw/test_rgw.py b/tests/functional/tests/rgw/test_rgw.py index 98f5c51e9..d2b1482fc 100644 --- a/tests/functional/tests/rgw/test_rgw.py +++ b/tests/functional/tests/rgw/test_rgw.py @@ -12,16 +12,20 @@ class TestRGWs(object): assert result def test_rgw_service_is_running(self, node, host): - service_name = "ceph-radosgw@rgw.{hostname}".format( - hostname=node["vars"]["inventory_hostname"] - ) - assert host.service(service_name).is_running + for i in range(int(node["vars"]["radosgw_num_instances"])): + service_name = "ceph-radosgw@rgw.{hostname}.rgw{seq}".format( + hostname=node["vars"]["inventory_hostname"], + seq=i + ) + assert host.service(service_name).is_running def test_rgw_service_is_enabled(self, node, host): - service_name = "ceph-radosgw@rgw.{hostname}".format( - hostname=node["vars"]["inventory_hostname"] - ) - assert host.service(service_name).is_enabled + for i in range(int(node["vars"]["radosgw_num_instances"])): + service_name = "ceph-radosgw@rgw.{hostname}.rgw{seq}".format( + hostname=node["vars"]["inventory_hostname"], + seq=i + ) + assert host.service(service_name).is_enabled def test_rgw_is_up(self, node, host): hostname = node["vars"]["inventory_hostname"] @@ -30,7 +34,7 @@ class TestRGWs(object): container_binary = 'docker' if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora': # noqa E501 container_binary = 'podman' - docker_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}'.format( # noqa E501 + docker_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}-rgw0'.format( # noqa E501 hostname=hostname, container_binary=container_binary) else: docker_exec_cmd = '' @@ -42,11 +46,17 @@ class TestRGWs(object): output = host.check_output(cmd) daemons = [i for i in json.loads( 
output)["servicemap"]["services"]["rgw"]["daemons"]] - assert hostname in daemons + for i in range(int(node["vars"]["radosgw_num_instances"])): + instance_name = "{hostname}.rgw{seq}".format( + hostname=hostname, + seq=i + ) + assert instance_name in daemons @pytest.mark.no_docker def test_rgw_http_endpoint(self, node, host): # rgw frontends ip_addr is configured on eth1 ip_addr = host.interface("eth1").addresses[0] - assert host.socket( - "tcp://{ip_addr}:{port}".format(ip_addr=ip_addr, port=8080)).is_listening # noqa E501 + for i in range(int(node["vars"]["radosgw_num_instances"])): + assert host.socket( + "tcp://{ip_addr}:{port}".format(ip_addr=ip_addr, port=(8080+i))).is_listening # noqa E501 diff --git a/tests/functional/tests/rgw/test_rgw_tuning.py b/tests/functional/tests/rgw/test_rgw_tuning.py index 3dff9cea6..556178084 100644 --- a/tests/functional/tests/rgw/test_rgw_tuning.py +++ b/tests/functional/tests/rgw/test_rgw_tuning.py @@ -13,7 +13,7 @@ class TestRGWs(object): @pytest.mark.no_docker def test_rgw_bucket_default_quota_is_applied(self, node, host): - radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring user create --uid=test --display-name Test".format( # noqa E501 + radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user create --uid=test --display-name Test".format( # noqa E501 hostname=node["vars"]["inventory_hostname"], cluster=node['cluster_name'] ) @@ -24,7 +24,7 @@ class TestRGWs(object): @pytest.mark.no_docker def test_rgw_tuning_pools_are_set(self, node, host): - cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format( # noqa E501 + cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname}.rgw0 --keyring 
/var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd dump".format( # noqa E501 hostname=node["vars"]["inventory_hostname"], cluster=node['cluster_name'] ) @@ -42,7 +42,7 @@ class TestRGWs(object): container_binary = 'docker' if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora': # noqa E501 container_binary = 'podman' - cmd = "sudo {container_binary} exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format( # noqa E501 + cmd = "sudo {container_binary} exec ceph-rgw-{hostname}-rgw0 ceph --cluster={cluster} -n client.rgw.{hostname}.rgw0 --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd dump".format( # noqa E501 hostname=hostname, cluster=cluster, container_binary=container_binary -- 2.39.5