]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
rgw: set tuning parameters 1458/head
authorAli Maredia <amaredia@redhat.com>
Wed, 19 Apr 2017 16:30:40 +0000 (12:30 -0400)
committerAli Maredia <amaredia@redhat.com>
Tue, 25 Apr 2017 20:01:03 +0000 (16:01 -0400)
Change civetweb_num_thread default to 100

Add capability to override number of pgs for
rgw pools.

Add ceph.conf vars to enable a default bucket
object quota, at the user's choosing, in the ceph.conf.j2
template

Resolves: rhbz#1437173
Resolves: rhbz#1391500

Signed-off-by: Ali Maredia <amaredia@redhat.com>
group_vars/all.yml.sample
group_vars/rgws.yml.sample
roles/ceph-common/defaults/main.yml
roles/ceph-common/templates/ceph.conf.j2
roles/ceph-rgw/defaults/main.yml
roles/ceph-rgw/tasks/main.yml
roles/ceph-rgw/tasks/rgw_pool_pgs.yml [new file with mode: 0644]

index fadbf2f9f9202ec7fe682d7b4e8a4c29d42da0f9..cb8b968f9a0872c402a1f793f8666d165681bc8b 100644 (file)
@@ -293,7 +293,7 @@ dummy:
 #radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
 #radosgw_civetweb_port: 8080
 #radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
-#radosgw_civetweb_num_threads: 50
+#radosgw_civetweb_num_threads: 100
 # For additional civetweb configuration options available such as SSL, logging,
 # keepalive, and timeout settings, please see the civetweb docs at
 # https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
index 21ea8d9cf5e1382f842a41804f8d74c31bf1d395..d0c51deed7a91a0fe7cf30bbd9ec9bc925b43f0b 100644 (file)
@@ -29,6 +29,32 @@ dummy:
 #rgw_pull_port: "{{ radosgw_civetweb_port }}"
 #rgw_pull_proto: "http"
 
+########
+#TUNING#
+########
+
+# To support buckets with a very large number of objects it's 
+# important to split them into shards. We suggest about 100K
+# objects per shard as a conservative maximum.
+#rgw_override_bucket_index_max_shards: 16
+# Consider setting a quota on buckets so that exceeding this
+# limit will require admin intervention.
+#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 102400 (100K) * 16
+
+# This dictionary will create pools with the given number of pgs. 
+# This is important because they would be created with the default
+# of 8. 
+# New pools and their corresponding pg_nums can be created
+# by adding to the create_pools dictionary (see the example "foo" entry below).
+#create_pools:
+#  defaults.rgw.buckets.data:
+#    pg_num: 16
+#  defaults.rgw.buckets.index:
+#    pg_num: 32
+#  foo:
+#    pg_num: 4
+
 ##########
 # DOCKER #
 ##########
index ff50c426b47a819c1f23e8e3b2d2453c855ade0f..244e612c02eb3d7d6df093ef868ab5b69cacbb77 100644 (file)
@@ -285,7 +285,7 @@ mds_max_mds: 3
 radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
 radosgw_civetweb_port: 8080
 radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
-radosgw_civetweb_num_threads: 50
+radosgw_civetweb_num_threads: 100
 # For additional civetweb configuration options available such as SSL, logging,
 # keepalive, and timeout settings, please see the civetweb docs at
 # https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
index 775003ec168c7d9814afb45ff5dc695a2cb9b930..3ad873da3625a4f2de2b2c4d3f606c6830fed66e 100644 (file)
@@ -109,6 +109,12 @@ host = {{ hostvars[host]['ansible_hostname'] }}
 {% if radosgw_dns_name is defined %}
 rgw dns name = {{ radosgw_dns_name }}
 {% endif %}
+{% if rgw_override_bucket_index_max_shards is defined %}
+rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
+{% endif %}
+{% if rgw_bucket_default_quota_max_objects is defined %}
+rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
+{% endif %}
 host = {{ hostvars[host]['ansible_hostname'] }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
 rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
index 599870f466e75af9d38b1f7b96720ed36da9a65e..2eb337b756e65b7b3c7637c441476a04362f4c53 100644 (file)
@@ -21,6 +21,32 @@ cephx: true
 rgw_pull_port: "{{ radosgw_civetweb_port }}"
 rgw_pull_proto: "http"
 
+########
+#TUNING#
+########
+
+# To support buckets with a very large number of objects it's 
+# important to split them into shards. We suggest about 100K
+# objects per shard as a conservative maximum.
+#rgw_override_bucket_index_max_shards: 16
+# Consider setting a quota on buckets so that exceeding this
+# limit will require admin intervention.
+#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 102400 (100K) * 16
+
+# This dictionary will create pools with the given number of pgs. 
+# This is important because they would be created with the default
+# of 8. 
+# New pools and their corresponding pg_nums can be created
+# by adding to the create_pools dictionary (see the example "foo" entry below).
+#create_pools:
+#  defaults.rgw.buckets.data:
+#    pg_num: 16
+#  defaults.rgw.buckets.index:
+#    pg_num: 32
+#  foo:
+#    pg_num: 4
+
 ##########
 # DOCKER #
 ##########
index a89f8416713914645096dd402a1487c9401e7cc6..4e0dc5e45160d5b9fa4971ef99fbf954e7e436b3 100644 (file)
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
+- include: rgw_pool_pgs.yml
+  when: create_pools is defined
+  static: False
+
 - name: include rgw multisite playbooks
   include: multisite/main.yml
   when:
diff --git a/roles/ceph-rgw/tasks/rgw_pool_pgs.yml b/roles/ceph-rgw/tasks/rgw_pool_pgs.yml
new file mode 100644 (file)
index 0000000..1e848b5
--- /dev/null
@@ -0,0 +1,7 @@
+---
+- name: create rgw pools if create_pools is defined
+  command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
+  changed_when: false
+  failed_when: false
+  when: create_pools is defined
+  with_dict: "{{ create_pools }}"