#radosgw_address: 0.0.0.0
#radosgw_address_block: subnet
#radosgw_keystone_ssl: false # activate this when using keystone PKI keys
+#radosgw_num_instances: 1
# Rados Gateway options
#email_address: foo@bar.com
- name: stop ceph rgws with systemd
service:
- name: ceph-radosgw@rgw.{{ ansible_hostname }}
+ name: ceph-radosgw@rgw.*
state: stopped
enabled: no
failed_when: false
- name: disable ceph rgw service (new unit name)
service:
- name: "ceph-radosgw@rgw.{{ ansible_hostname }}"
+ name: "ceph-radosgw@*"
state: stopped
enabled: no
ignore_errors: true
- name: remove ceph rgw container
docker_container:
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- name: "ceph-rgw-{{ ansible_hostname }}"
+ name: "ceph-rgw-{{ ansible_hostname }}-*"
state: absent
ignore_errors: true
{% for host in groups[rgw_group_name] %}
{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_hostname']) %}
{# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
-[client.rgw.{{ _rgw_hostname }}]
+{% if hostvars[host]['rgw_instances'] is defined %}
+{% for instance in hostvars[host]['rgw_instances'] %}
+[client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
host = {{ _rgw_hostname }}
-keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
-rgw frontends = {{ radosgw_frontend_type }} {{ 'port' if radosgw_frontend_type == 'civetweb' else 'endpoint' }}={{ hostvars[host]['_radosgw_address'] }}:{{ radosgw_frontend_port }} {{ radosgw_frontend_options }}
+keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] + '.' + instance['instance_name'] }}.log
+rgw frontends = {{ radosgw_frontend_type }} {{ 'port' if radosgw_frontend_type == 'civetweb' else 'endpoint' }}={{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} {{ radosgw_frontend_options }}
+{% endfor %}
+{% endif %}
{% endfor %}
{% endif %}
radosgw_address: 0.0.0.0
radosgw_address_block: subnet
radosgw_keystone_ssl: false # activate this when using keystone PKI keys
+radosgw_num_instances: 1
# Rados Gateway options
email_address: foo@bar.com
import_tasks: set_radosgw_address.yml
when:
- inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: set_fact rgw_instances
+  # Build one fact entry per local RGW instance: instance_name "rgw<N>", the
+  # host's resolved radosgw address, and base frontend port + N (each instance
+  # listens on a consecutive port).
+  set_fact:
+    rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': _radosgw_address, 'radosgw_frontend_port': radosgw_frontend_port|int + item|int}]) }}"
+  with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
+  # NOTE(review): with_sequence errors out when radosgw_num_instances is 0
+  # (end < start) — confirm a minimum of 1 is enforced/validated elsewhere.
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, [])
RETRIES="{{ handler_health_rgw_check_retries }}"
DELAY="{{ handler_health_rgw_check_delay }}"
-RGW_NAME="{{ ansible_hostname }}"
-RGW_PORT="{{ radosgw_frontend_port }}"
+HOST_NAME="{{ ansible_hostname }}"
+RGW_NUMS={{ radosgw_num_instances }}
+RGW_BASE_PORT={{ radosgw_frontend_port }}
+declare -a DOCKER_EXECS
+for ((i=0; i<${RGW_NUMS}; i++)); do
+ DOCKER_EXECS[i]=""
{% if containerized_deployment %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-rgw-{{ ansible_hostname }}"
+ CONTAINER_NAME="ceph-rgw-${HOST_NAME}-rgw${i}"
+ DOCKER_EXECS[i]="{{ container_binary }} exec ${CONTAINER_NAME}"
{% endif %}
+done
+declare -a SOCKETS
# Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_hostname }}.asok
+for ((i=0; i<${RGW_NUMS}; i++)); do
+  # Sentinel default. The array is SOCKETS (declared above and read by
+  # check_socket as ${SOCKETS[i]}); the previous assignment to SOCKET[i]
+  # targeted a different, undeclared array, leaving SOCKETS[i] unset when
+  # neither admin socket exists.
+  SOCKETS[i]="EMPTY_SOCKET"
+  ${DOCKER_EXECS[i]} test -S /var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok && SOCKETS[i]=/var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok
+  ${DOCKER_EXECS[i]} test -S /var/run/ceph/{{ cluster }}-client.rgw.${HOST_NAME}.rgw${i}.asok && SOCKETS[i]=/var/run/ceph/{{ cluster }}-client.rgw.${HOST_NAME}.rgw${i}.asok
+done
RGW_IP={{ hostvars[inventory_hostname]['_radosgw_address'] }}
+check_socket() {
+ local i=$1
+ local succ=0
+ local count=10
+ # Wait and ensure the socket exists after restarting the daemon
+ while [ $count -ne 0 ]; do
+ ${DOCKER_EXECS[i]} test -S ${SOCKETS[i]} && succ=$((succ+1)) && break
+ sleep $DELAY
+ let count=count-1
+ done
+ if [ $succ -ne 1 ]; then
+ echo "Socket file ${SOCKETS[i]} could not be found, which means Rados Gateway is not running. Showing ceph-rgw unit logs now:"
+ journalctl -u ceph-radosgw@rgw.${HOST_NAME}.rgw${i}
+ exit 1
+ fi
+}
+
check_for_curl_or_wget() {
- if $DOCKER_EXEC command -v wget &>/dev/null; then
+ local i=$1
+ if ${DOCKER_EXECS[i]} command -v wget &>/dev/null; then
rgw_test_command="wget --quiet"
- elif $DOCKER_EXEC command -v curl &>/dev/null; then
+ elif ${DOCKER_EXECS[i]} command -v curl &>/dev/null; then
rgw_test_command="curl --fail --silent --output /dev/null"
else
echo "It seems that neither curl or wget are available on your system."
}
check_rest() {
- check_for_curl_or_wget
+  # Probe instance $1's HTTP endpoint until it answers or retries run out.
+  local i=$1
+  check_for_curl_or_wget ${i}
+  local succ=0
+  # Use a per-call counter: decrementing the shared RETRIES variable would
+  # leave fewer (possibly zero) attempts for every subsequent instance.
+  local retries=$RETRIES
- while [ $RETRIES -ne 0 ]; do
+  while [ $retries -ne 0 ]; do
- test "$rgw_test_command http://$RGW_IP:$RGW_PORT" && exit 0
+    # Run the probe command itself. The previous form
+    # test "$rgw_test_command http://..." only checked that the string was
+    # non-empty, so it always succeeded without ever contacting the gateway.
+    $rgw_test_command http://$RGW_IP:$((RGW_BASE_PORT+i)) && succ=$((succ+1)) && break
+    sleep $DELAY
+    let retries=retries-1
+  done
- sleep $DELAY
- let RETRIES=RETRIES-1
- done
- # If we reach this point, it means there is a problem with the connection to rgw
- echo "Error connecting locally to Rados Gateway service: http://$rgw_listen"
- exit 1
+  if [ $succ -ne 1 ]; then
+    # If we reach this point, it means there is a problem with the connection
+    # to rgw ($rgw_listen was never defined in this script; report the actual
+    # endpoint that was probed).
+    echo "Error connecting locally to Rados Gateway service: http://$RGW_IP:$((RGW_BASE_PORT+i))"
+    exit 1
+  fi
}
# First, restart the daemon
-systemctl restart ceph-radosgw@rgw.${RGW_NAME}
+for ((i=0; i<${RGW_NUMS}; i++)); do
+ systemctl restart ceph-radosgw@rgw.${HOST_NAME}.rgw${i}
+done
+
+# Check socket files
+for ((i=0; i<${RGW_NUMS}; i++)); do
+ check_socket ${i}
+done
-COUNT=10
-# Wait and ensure the socket exists after restarting the daemon
-while [ $COUNT -ne 0 ]; do
- $DOCKER_EXEC test -S $SOCKET && check_rest
- sleep $DELAY
- let COUNT=COUNT-1
+# Check rest
+for ((i=0; i<${RGW_NUMS}; i++)); do
+ check_rest ${i}
done
-echo "Socket file ${SOCKET} could not be found, which means Rados Gateway is not running. Showing ceph-rgw unit logs now:"
-journalctl -u ceph-radosgw@rgw.${RGW_NAME}
-exit 1
- name: restart rgw
service:
- name: ceph-radosgw@rgw.{{ ansible_hostname }}
+ name: ceph-radosgw@*
state: restarted
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
with_items:
- - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}
- "{{ rbd_client_admin_socket_path }}"
+- name: create rados gateway instance directories
+ file:
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+ mode: "0755"
+ with_items: "{{ rgw_instances }}"
+ when:
+ - rgw_instances is defined
+
- name: copy ceph keyring(s) if needed
copy:
src: "{{ fetch_directory }}/{{ fsid }}/{{ item.name }}"
---
+- name: generate environment file
+  become: true
+  copy:
+    # NOTE(review): the 'ceph' cluster prefix is hardcoded here, while the
+    # instance directory is created as "{{ cluster }}-rgw.<host>.<instance>"
+    # elsewhere, so this path only matches when cluster == "ceph". The systemd
+    # unit template reads the same hardcoded path
+    # (EnvironmentFile=/var/lib/ceph/radosgw/ceph-%i/EnvironmentFile), so both
+    # locations must be changed together to honour {{ cluster }} — confirm.
+    dest: "/var/lib/ceph/radosgw/ceph-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/EnvironmentFile"
+    owner: "root"
+    group: "root"
+    mode: "0644"
+    content: |
+      INST_NAME={{ item.instance_name }}
+      INST_PORT={{ item.radosgw_frontend_port }}
+  with_items: "{{ rgw_instances }}"
+
- name: generate systemd unit file
become: true
template:
- name: systemd start rgw container
systemd:
- name: "ceph-radosgw@rgw.{{ ansible_hostname }}.service"
+ name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
state: started
enabled: yes
- daemon_reload: yes
\ No newline at end of file
+ daemon_reload: yes
+ with_items: "{{ rgw_instances }}"
---
-- name: create rados gateway keyring
- command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
+- name: create rados gateway instance keyring
+ command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }}.{{ item.instance_name }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring
args:
- creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
+ creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring
changed_when: false
- when: cephx
+ with_items: "{{ rgw_instances }}"
+ when:
+ - cephx
-- name: set rados gateway key permissions
+- name: set rados gateway instance key permissions
file:
- path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
+ path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring
owner: "ceph"
group: "ceph"
mode: "0600"
- when: cephx
+ with_items: "{{ rgw_instances }}"
+ when:
+ - cephx
when:
- ceph_rgw_systemd_overrides is defined
-- name: start rgw
+- name: start rgw instance
service:
- name: ceph-radosgw@rgw.{{ ansible_hostname }}
+ name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
state: started
enabled: yes
+ with_items: "{{ rgw_instances }}"
- name: enable the ceph-radosgw.target service
systemd:
After=docker.service
[Service]
-EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}
+EnvironmentFile=/var/lib/ceph/radosgw/ceph-%i/EnvironmentFile
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_rgw_docker_memory_limit }} \
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
-v /etc/localtime:/etc/localtime:ro \
-e CEPH_DAEMON=RGW \
-e CLUSTER={{ cluster }} \
- --name=ceph-rgw-{{ ansible_hostname }} \
+ -e RGW_NAME={{ ansible_hostname }}.${INST_NAME} \
+ -e RGW_CIVETWEB_PORT=${INST_PORT} \
+ --name=ceph-rgw-{{ ansible_hostname }}-${INST_NAME} \
{{ ceph_rgw_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
Restart=always
RestartSec=10s
TimeoutStartSec=120
public_network: "192.168.1.0/24"
cluster_network: "192.168.2.0/24"
radosgw_interface: eth1
+radosgw_num_instances: 2
ceph_conf_overrides:
global:
osd_pool_default_size: 1
containerized_deployment: True
monitor_interface: eth1
radosgw_interface: eth1
+radosgw_num_instances: 2
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
\ No newline at end of file
+ osd_pool_default_size: 1
containerized_deployment: True
monitor_interface: eth1
radosgw_interface: eth1
+radosgw_num_instances: 2
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.17.0/24"
assert result
def test_rgw_service_is_running(self, node, host):
- service_name = "ceph-radosgw@rgw.{hostname}".format(
- hostname=node["vars"]["inventory_hostname"]
- )
- assert host.service(service_name).is_running
+ for i in range(int(node["vars"]["radosgw_num_instances"])):
+ service_name = "ceph-radosgw@rgw.{hostname}.rgw{seq}".format(
+ hostname=node["vars"]["inventory_hostname"],
+ seq=i
+ )
+ assert host.service(service_name).is_running
def test_rgw_service_is_enabled(self, node, host):
- service_name = "ceph-radosgw@rgw.{hostname}".format(
- hostname=node["vars"]["inventory_hostname"]
- )
- assert host.service(service_name).is_enabled
+ for i in range(int(node["vars"]["radosgw_num_instances"])):
+ service_name = "ceph-radosgw@rgw.{hostname}.rgw{seq}".format(
+ hostname=node["vars"]["inventory_hostname"],
+ seq=i
+ )
+ assert host.service(service_name).is_enabled
def test_rgw_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
container_binary = 'docker'
if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora': # noqa E501
container_binary = 'podman'
- docker_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}'.format( # noqa E501
+ docker_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}-rgw0'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
docker_exec_cmd = ''
output = host.check_output(cmd)
daemons = [i for i in json.loads(
output)["servicemap"]["services"]["rgw"]["daemons"]]
- assert hostname in daemons
+ for i in range(int(node["vars"]["radosgw_num_instances"])):
+ instance_name = "{hostname}.rgw{seq}".format(
+ hostname=hostname,
+ seq=i
+ )
+ assert instance_name in daemons
@pytest.mark.no_docker
def test_rgw_http_endpoint(self, node, host):
# rgw frontends ip_addr is configured on eth1
ip_addr = host.interface("eth1").addresses[0]
- assert host.socket(
- "tcp://{ip_addr}:{port}".format(ip_addr=ip_addr, port=8080)).is_listening # noqa E501
+ for i in range(int(node["vars"]["radosgw_num_instances"])):
+ assert host.socket(
+ "tcp://{ip_addr}:{port}".format(ip_addr=ip_addr, port=(8080+i))).is_listening # noqa E501
@pytest.mark.no_docker
def test_rgw_bucket_default_quota_is_applied(self, node, host):
- radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring user create --uid=test --display-name Test".format( # noqa E501
+ radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user create --uid=test --display-name Test".format( # noqa E501
hostname=node["vars"]["inventory_hostname"],
cluster=node['cluster_name']
)
@pytest.mark.no_docker
def test_rgw_tuning_pools_are_set(self, node, host):
- cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format( # noqa E501
+ cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd dump".format( # noqa E501
hostname=node["vars"]["inventory_hostname"],
cluster=node['cluster_name']
)
container_binary = 'docker'
if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora': # noqa E501
container_binary = 'podman'
- cmd = "sudo {container_binary} exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format( # noqa E501
+ cmd = "sudo {container_binary} exec ceph-rgw-{hostname}-rgw0 ceph --cluster={cluster} -n client.rgw.{hostname}.rgw0 --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd dump".format( # noqa E501
hostname=hostname,
cluster=cluster,
container_binary=container_binary