From: Keith Schincke Date: Thu, 17 Aug 2017 17:25:20 +0000 (-0400) Subject: Update ceph_rgw_docker_extra_env to add bind ip X-Git-Tag: v3.0.0rc5~1^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=eaccc1279700564b346dfa66c5d6207488a99ff4;p=ceph-ansible.git Update ceph_rgw_docker_extra_env to add bind ip This patch adds passing the RGW_CIVETWEB_IP to the docker container. This IP defaults to the value of radosgw_civetweb_bind_ip. radosgw_civetweb_bind_ip defaults to ipv4.default Without this value, the RGW container will bind to 0.0.0.0 --- diff --git a/group_vars/rgws.yml.sample b/group_vars/rgws.yml.sample index 5b274f2d7..2f9945349 100644 --- a/group_vars/rgws.yml.sample +++ b/group_vars/rgws.yml.sample @@ -25,22 +25,23 @@ dummy: #rgw_pull_port: "{{ radosgw_civetweb_port }}" #rgw_pull_proto: "http" + ######## #TUNING# ######## -# To support buckets with a very large number of objects it's +# To support buckets with a very large number of objects it's # important to split them into shards. We suggest about 100K # objects per shard as a conservative maximum. #rgw_override_bucket_index_max_shards: 16 -# + # Consider setting a quota on buckets so that exceeding this # limit will require admin intervention. #rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16 -# This dictionary will create pools with the given number of pgs. +# This dictionary will create pools with the given number of pgs. # This is important because they would be created with the default -# of 8. +# of 8. # New pools and their corresponding pg_nums can be created # by adding to the create_pools dictionary (see foo). 
#create_pools: @@ -51,12 +52,12 @@ dummy: # foo: # pg_num: 4 + ########## # DOCKER # ########## -#ceph_rgw_civetweb_port: "{{ radosgw_civetweb_port }}" -#ceph_rgw_docker_extra_env: -e CLUSTER={{ cluster }} -e RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }} +#ceph_rgw_docker_extra_env: #ceph_config_keys: [] # DON'T TOUCH ME #rgw_config_keys: "/" # DON'T TOUCH ME diff --git a/roles/ceph-rgw/defaults/main.yml b/roles/ceph-rgw/defaults/main.yml index fda6610b8..4487865dc 100644 --- a/roles/ceph-rgw/defaults/main.yml +++ b/roles/ceph-rgw/defaults/main.yml @@ -17,22 +17,23 @@ copy_admin_key: false rgw_pull_port: "{{ radosgw_civetweb_port }}" rgw_pull_proto: "http" + ######## #TUNING# ######## -# To support buckets with a very large number of objects it's +# To support buckets with a very large number of objects it's # important to split them into shards. We suggest about 100K # objects per shard as a conservative maximum. #rgw_override_bucket_index_max_shards: 16 - + # Consider setting a quota on buckets so that exceeding this # limit will require admin intervention. #rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16 -# This dictionary will create pools with the given number of pgs. +# This dictionary will create pools with the given number of pgs. # This is important because they would be created with the default -# of 8. +# of 8. # New pools and their corresponding pg_nums can be created # by adding to the create_pools dictionary (see foo). 
#create_pools: @@ -43,12 +44,12 @@ rgw_pull_proto: "http" # foo: # pg_num: 4 + ########## # DOCKER # ########## -ceph_rgw_civetweb_port: "{{ radosgw_civetweb_port }}" -ceph_rgw_docker_extra_env: -e CLUSTER={{ cluster }} -e RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }} +ceph_rgw_docker_extra_env: ceph_config_keys: [] # DON'T TOUCH ME rgw_config_keys: "/" # DON'T TOUCH ME diff --git a/roles/ceph-rgw/templates/ceph-radosgw.service.j2 b/roles/ceph-rgw/templates/ceph-radosgw.service.j2 index c959fad75..daaaf6600 100644 --- a/roles/ceph-rgw/templates/ceph-radosgw.service.j2 +++ b/roles/ceph-rgw/templates/ceph-radosgw.service.j2 @@ -7,19 +7,41 @@ EnvironmentFile=-/etc/environment ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }} ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }} ExecStart=/usr/bin/docker run --rm --net=host \ - {% if not containerized_deployment_with_kv -%} - -v /var/lib/ceph:/var/lib/ceph \ - -v /etc/ceph:/etc/ceph \ - {% else -%} - -e KV_TYPE={{kv_type}} \ - -e KV_IP={{kv_endpoint}} \ - -e KV_PORT={{kv_port}} \ - {% endif -%} - -v /etc/localtime:/etc/localtime:ro \ - -e CEPH_DAEMON=RGW \ - {{ ceph_rgw_docker_extra_env }} \ - --name=ceph-rgw-{{ ansible_hostname }} \ - {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} + {% if not containerized_deployment_with_kv -%} + -v /var/lib/ceph:/var/lib/ceph \ + -v /etc/ceph:/etc/ceph \ + {% else -%} + -e KV_TYPE={{ kv_type }} \ + -e KV_IP={{ kv_endpoint }} \ + -e KV_PORT={{ kv_port }} \ + {% endif -%} + {% if radosgw_address_block | length > 0 %} + {% if ip_version == 'ipv4' -%} + -e RGW_CIVETWEB_IP={{ hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }} \ + {%- elif ip_version == 'ipv6' -%} + -e RGW_CIVETWEB_IP=[{{ hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}] \ + {%- endif %} + {% elif 
hostvars[inventory_hostname]['radosgw_address'] is defined and hostvars[inventory_hostname]['radosgw_address'] != '0.0.0.0' -%} + {% if ip_version == 'ipv4' -%} + -e RGW_CIVETWEB_IP={{ hostvars[inventory_hostname]['radosgw_address'] }} \ + {%- elif ip_version == 'ipv6' -%} + -e RGW_CIVETWEB_IP=[{{ hostvars[inventory_hostname]['radosgw_address'] }}] \ + {% endif %} + {%- else -%} + {% set interface = ["ansible_",radosgw_interface]|join %} + {% if ip_version == 'ipv6' -%} + -e RGW_CIVETWEB_IP=[{{ hostvars[inventory_hostname][interface][ip_version][0]['address'] }}] \ + {%- elif ip_version == 'ipv4' -%} + -e RGW_CIVETWEB_IP={{ hostvars[inventory_hostname][interface][ip_version]['address'] }} \ + {% endif %} + {%- endif %} + -v /etc/localtime:/etc/localtime:ro \ + -e CEPH_DAEMON=RGW \ + -e CLUSTER={{ cluster }} \ + -e RGW_CIVETWEB_PORT={{ radosgw_civetweb_port }} \ + --name=ceph-rgw-{{ ansible_hostname }} \ + {{ ceph_rgw_docker_extra_env }} \ + {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} ExecStopPost=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }} Restart=always RestartSec=10s