migrate from ceph.conf to ceph config
author     Guillaume Abrioux <gabrioux@ibm.com>
           Fri, 4 Aug 2023 07:45:28 +0000 (09:45 +0200)
committer  Guillaume Abrioux <gabrioux@ibm.com>
           Wed, 14 Feb 2024 08:54:13 +0000 (09:54 +0100)
Keep the ceph.conf very simple.
Manage common options such as `public_network` with the `ceph_config`
module.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
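
For reference, the pattern this change introduces is to push options into the cluster's
configuration database with the `ceph_config` module instead of templating them into
ceph.conf. A minimal sketch of such a task (the network value below is illustrative only,
not taken from this commit) would look like:

    - name: set public_network with ceph_config
      ceph_config:
        action: set
        who: "global"
        option: "public_network"
        value: "192.168.0.0/24"

Run against the first monitor (the playbooks add a `- hosts: mons[0]` play for this), it is
roughly equivalent to `ceph config set global public_network 192.168.0.0/24`.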
group_vars/rgws.yml.sample
roles/ceph-config/tasks/main.yml
roles/ceph-config/templates/ceph.conf.j2
roles/ceph-nfs/tasks/main.yml
roles/ceph-rgw/defaults/main.yml
roles/ceph-rgw/tasks/multisite/main.yml
roles/ceph-rgw/tasks/pre_requisite.yml
site-container.yml.sample
site.yml.sample
tests/functional/tests/mon/test_mons.py
tests/functional/tests/rgw/test_rgw_tuning.py [deleted file]

index 69b1fc0e8c987710bd7c9e61c22e2af4aa544b99..15c3bf8a58658a73b887feb161f139b9a8dad8b7 100644 (file)
@@ -23,15 +23,6 @@ dummy:
 # TUNING #
 ##########
 
-# To support buckets with a very large number of objects it's
-# important to split them into shards. We suggest about 100K
-# objects per shard as a conservative maximum.
-#rgw_override_bucket_index_max_shards: 16
-
-# Consider setting a quota on buckets so that exceeding this
-# limit will require admin intervention.
-#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
-
 # Declaring rgw_create_pools will create pools with the given number of pgs,
 # size, and type. The following are some important notes on this automatic
 # pool creation:
index 1e31f86a9221a6ad1b7c5a692f3726544079fd04..36b46f759dff25a3148aeae6c58599cd090dd35d 100644 (file)
         - "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}"
       when: item
 
-    - name: drop osd_memory_target from conf override
-      set_fact:
-        ceph_conf_overrides: "{{ ceph_conf_overrides | combine({'osd': {item: omit}}, recursive=true) }}"
-      loop:
-        - osd memory target
-        - osd_memory_target
-
     - name: set_fact _osd_memory_target
       set_fact:
         _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     mode: "0644"
-    config_overrides: "{{ ceph_conf_overrides }}"
     config_type: ini
   notify:
     - restart ceph mons
index 9208a22051c767320abf4aac11efff40131eac14..69850ed08eb149d4d59160c283044bfc97034a82 100644 (file)
@@ -2,36 +2,14 @@
 # {{ ansible_managed }}
 
 [global]
-{% if not cephx | bool %}
-auth cluster required = none
-auth service required = none
-auth client required = none
-{% endif %}
-{% if ip_version == 'ipv6'  %}
-ms bind ipv6 = true
-ms bind ipv4 = false
-{% endif %}
-{% if common_single_host_mode is defined and common_single_host_mode %}
-osd crush chooseleaf type = 0
-{% endif %}
+#{% if not cephx | bool %}
+#auth cluster required = none
+#auth service required = none
+#auth client required = none
+#{% endif %}
 {# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
 
 {% set nb_mon = groups.get(mon_group_name, []) | length | int %}
-{% set nb_client = groups.get(client_group_name, []) | length | int %}
-{% set nb_osd = groups.get(osd_group_name, []) | length | int %}
-{% if inventory_hostname in groups.get(client_group_name, []) and not inventory_hostname == groups.get(client_group_name, []) | first %}
-{% endif %}
-
-{% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
-mon initial members = {% for host in groups[mon_group_name] %}
-      {% if hostvars[host]['ansible_facts']['hostname'] is defined -%}
-        {{ hostvars[host]['ansible_facts']['hostname'] }}
-      {%- endif %}
-      {%- if not loop.last %},{% endif %}
-    {% endfor %}
-
-osd pool default crush rule = {{ osd_pool_default_crush_rule }}
-{% endif %}
 
 fsid = {{ fsid }}
 mon host = {% if nb_mon > 0 %}
@@ -46,70 +24,8 @@ mon host = {% if nb_mon > 0 %}
 {{ external_cluster_mon_ips }}
 {% endif %}
 
-{% if public_network is defined %}
-public network = {{ public_network | regex_replace(' ', '') }}
-{% endif %}
-{% if cluster_network is defined %}
-cluster network = {{ cluster_network | regex_replace(' ', '') }}
-{% endif %}
-{% if rgw_override_bucket_index_max_shards is defined %}
-rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
-{% endif %}
-{% if rgw_bucket_default_quota_max_objects is defined %}
-rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
-{% endif %}
-
 {% if inventory_hostname in groups.get(client_group_name, []) %}
 [client.libvirt]
 admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
 log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
 {% endif %}
-
-{% if inventory_hostname in groups.get(osd_group_name, []) %}
-[osd]
-osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
-{% endif %}
-
-{% if inventory_hostname in groups.get(rgw_group_name, []) %}
-{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) %}
-{# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
-{% if hostvars[inventory_hostname]['rgw_instances'] is defined %}
-{% for instance in hostvars[inventory_hostname]['rgw_instances'] %}
-[client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
-host = {{ _rgw_hostname }}
-keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + instance['instance_name'] }}.log
-{% set _rgw_binding_socket = instance['radosgw_address'] | default(_radosgw_address) | string + ':' + instance['radosgw_frontend_port'] | default(radosgw_frontend_port) | string %}
-{%- macro frontend_line(frontend_type) -%}
-{%- if frontend_type == 'civetweb' -%}
-{{ radosgw_frontend_type }} port={{ _rgw_binding_socket }}{{ 's ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
-{%- elif frontend_type == 'beast' -%}
-{{ radosgw_frontend_type }} {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
-{%- endif -%}
-{%- endmacro -%}
-rgw frontends = {{ frontend_line(radosgw_frontend_type) }} {{ radosgw_frontend_options }}
-{% if 'num_threads' not in radosgw_frontend_options %}
-rgw thread pool size = {{ radosgw_thread_pool_size }}
-{% endif %}
-{% if rgw_multisite | bool %}
-{% if ((instance['rgw_zonemaster'] | default(rgw_zonemaster) | bool) or (deploy_secondary_zones | default(True) | bool)) %}
-rgw_realm = {{ instance['rgw_realm'] }}
-rgw_zonegroup = {{ instance['rgw_zonegroup'] }}
-rgw_zone = {{ instance['rgw_zone'] }}
-{% endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if inventory_hostname in groups.get(nfs_group_name, []) and inventory_hostname not in groups.get(rgw_group_name, []) %}
-{% for host in groups[nfs_group_name] %}
-{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_facts']['hostname']) %}
-{% if nfs_obj_gw | bool %}
-[client.rgw.{{ _rgw_hostname }}]
-host = {{ _rgw_hostname }}
-keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_facts']['hostname'] }}.log
-{% endif %}
-{% endfor %}
-{% endif %}
index aa609d8c81a02f1ddb886107b2672500595f25df..624d9764484dd30584f030fd1c5e38df4114bcde 100644 (file)
   include_tasks: pre_requisite_container.yml
   when: containerized_deployment | bool
 
+- name: set_fact _rgw_hostname
+  set_fact:
+    _rgw_hostname: "{{ hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) }}"
+
+- name: set rgw parameter (log file)
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname }}"
+    option: "log file"
+    value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}.log"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ groups.get('nfss', []) }}"
+
 - name: include create_rgw_nfs_user.yml
   import_tasks: create_rgw_nfs_user.yml
   when: groups.get(mon_group_name, []) | length > 0
index a25468b8dbdbc2bfda2118e627b95237e4d9d002..e86f80ef2025b36e1e50e0e669ae2cd0ea435921 100644 (file)
@@ -15,15 +15,6 @@ copy_admin_key: false
 # TUNING #
 ##########
 
-# To support buckets with a very large number of objects it's
-# important to split them into shards. We suggest about 100K
-# objects per shard as a conservative maximum.
-#rgw_override_bucket_index_max_shards: 16
-
-# Consider setting a quota on buckets so that exceeding this
-# limit will require admin intervention.
-#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
-
 # Declaring rgw_create_pools will create pools with the given number of pgs,
 # size, and type. The following are some important notes on this automatic
 # pool creation:
index 17aff9aa74959670cf2cbd915bddf3f762b97c63..ade0bf760ecab82fa997ac11f540312125e01751 100644 (file)
@@ -1,4 +1,19 @@
 ---
+- name: set global config
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname + '.' + item.0.instance_name }}"
+    option: "{{ item.1 }}"
+    value: "{{ item.0[item.1] }}"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  run_once: true
+  with_nested:
+    - "{{ rgw_instances }}"
+    - [ 'rgw_realm', 'rgw_zonegroup', 'rgw_zone']
+
 - name: set_fact realms
   set_fact:
     realms: '{{ realms | default([]) | union([item.rgw_realm]) }}'
index 2e2ddaf6c8da9dee5556d7e1aaebad4c1ed50228..374ab854a45edf242147b62ae041cf7d8596739a 100644 (file)
@@ -1,4 +1,34 @@
 ---
+- name: set_fact _rgw_hostname
+  set_fact:
+    _rgw_hostname: "{{ hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) }}"
+
+- name: set rgw parameter (log file)
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
+    option: "log file"
+    value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + item.instance_name }}.log"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
+
+- name: set rgw parameter (rgw_frontends)
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
+    option: "rgw_frontends"
+    value: "beast port={{ item.radosgw_frontend_port | string }}"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
+  notify: restart ceph rgws
+
+# rgw_frontends
+# {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
+
 - name: create rados gateway directories
   file:
     path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
index b3b48fa39aa3cb62ef93b7da4e655155dfd223ae..4f67ff3cd7076fb0285df02b51a55543e102a0b0 100644 (file)
             status: "Complete"
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- hosts: mons[0]
+  become: True
+  gather_facts: false
+  any_errors_fatal: true
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: set global config
+      ceph_config:
+        action: set
+        who: "global"
+        option: "{{ item.key }}"
+        value: "{{ item.value }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      with_dict:
+        "{{ { 
+           'public_network': public_network | default(False),
+           'cluster_network': cluster_network | default(False),
+           'osd pool default crush rule': osd_pool_default_crush_rule,
+           'ms bind ipv6': 'true' if ip_version == 'ipv6' else 'false',
+           'ms bind ipv4': 'false' if ip_version == 'ipv6' else 'true',
+           'osd crush chooseleaf type': '0' if common_single_host_mode | default(False) | bool else False,
+        } }}"
+      when:
+        - inventory_hostname == ansible_play_hosts_all | last
+        - item.value
+
+    - name: set global config overrides
+      ceph_config:
+        action: set
+        who: "global"
+        option: "{{ item.key }}"
+        value: "{{ item.value }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      when: inventory_hostname == ansible_play_hosts_all | last
+      with_dict: "{{ ceph_conf_overrides['global'] }}"
+
+    - name: set osd_memory_target
+      ceph_config:
+        action: set
+        who: "osd.*/{{ item }}:host"
+        option: "osd_memory_target"
+        value: "{{ _osd_memory_target | default(osd_memory_target) }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      when: inventory_hostname == ansible_play_hosts_all | last
+      loop: "{{ groups[osd_group_name] | default([]) }}"
+
 - hosts: osds
   become: True
   gather_facts: false
index 26a3aa3fe9e890eff68bd1e18f12737e5475fe97..a5c2fdd2253ae473b950f8a5a2615a42b3263ad6 100644 (file)
             status: "Complete"
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- hosts: mons[0]
+  become: True
+  gather_facts: false
+  any_errors_fatal: true
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: set global config
+      ceph_config:
+        action: set
+        who: "global"
+        option: "{{ item.key }}"
+        value: "{{ item.value }}"
+      with_dict:
+        "{{ {
+           'public_network': public_network | default(False),
+           'cluster_network': cluster_network | default(False),
+           'osd pool default crush rule': osd_pool_default_crush_rule,
+           'ms bind ipv6': 'true' if ip_version == 'ipv6' else 'false',
+           'ms bind ipv4': 'false' if ip_version == 'ipv6' else 'true',
+           'osd crush chooseleaf type': '0' if common_single_host_mode | default(False) | bool else False,
+        } }}"
+      when:
+        - inventory_hostname == ansible_play_hosts_all | last
+        - item.value
+
+    - name: set global config overrides
+      ceph_config:
+        action: set
+        who: "global"
+        option: "{{ item.key }}"
+        value: "{{ item.value }}"
+      when: inventory_hostname == ansible_play_hosts_all | last
+      with_dict: "{{ ceph_conf_overrides['global'] }}"
+
+    - name: set osd_memory_target
+      ceph_config:
+        action: set
+        who: "osd.*/{{ item }}:host"
+        option: "osd_memory_target"
+        value: "{{ _osd_memory_target | default(osd_memory_target) }}"
+      when: inventory_hostname == ansible_play_hosts_all | last
+      loop: "{{ groups[osd_group_name] | default([]) }}"
+
 - hosts: osds
   gather_facts: false
   become: True
index 791eb8d0bb329dec27de1d34d91fad541dd4829c..fd09c0d69177b16d67fdd19a5ed0752ff17d4b79 100644 (file)
@@ -29,14 +29,3 @@ class TestMons(object):
         output = host.check_output(cmd)
         assert output.strip().startswith("cluster")
 
-    def test_ceph_config_has_inital_members_line(self, node, host, setup):
-        assert host.file(setup["conf_path"]).contains("^mon initial members = .*$")
-
-    def test_initial_members_line_has_correct_value(self, node, host, setup):
-        mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name']))  # noqa E501
-        result = True
-        for host in node["vars"]["groups"]["mons"]:
-            pattern = re.compile(host)
-            if pattern.search(mon_initial_members_line) == None:  # noqa E501
-                result = False
-                assert result
diff --git a/tests/functional/tests/rgw/test_rgw_tuning.py b/tests/functional/tests/rgw/test_rgw_tuning.py
deleted file mode 100644 (file)
index 15fa2c0..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-import pytest
-import json
-
-
-class TestRGWs(object):
-
-    @pytest.mark.no_docker
-    def test_rgw_bucket_default_quota_is_set(self, node, host, setup):
-        assert host.file(setup["conf_path"]).contains(
-            "rgw override bucket index max shards")
-        assert host.file(setup["conf_path"]).contains(
-            "rgw bucket default quota max objects")
-
-    @pytest.mark.no_docker
-    def test_rgw_bucket_default_quota_is_applied(self, node, host, setup):
-        radosgw_admin_cmd = "timeout --foreground -s KILL 5 sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user info --uid=test --format=json".format(  # noqa E501
-            hostname=node["vars"]["inventory_hostname"],
-            cluster=setup['cluster_name']
-        )
-        radosgw_admin_output = host.run(radosgw_admin_cmd)
-        if radosgw_admin_output.rc == 22:
-            radosgw_admin_cmd = "timeout --foreground -s KILL 5 sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user create --uid=test --display-name Test".format(  # noqa E501
-                hostname=node["vars"]["inventory_hostname"],
-                cluster=setup['cluster_name']
-            )
-            radosgw_admin_output = host.run(radosgw_admin_cmd)
-        radosgw_admin_output_json = json.loads(radosgw_admin_output.stdout)
-        assert radosgw_admin_output_json["bucket_quota"]["enabled"] == True  # noqa E501
-        assert radosgw_admin_output_json["bucket_quota"]["max_objects"] == 1638400  # noqa E501
-
-    @pytest.mark.no_docker
-    def test_rgw_tuning_pools_are_set(self, node, host, setup):
-        pools = node["vars"]["rgw_create_pools"]
-        if pools is None:
-            pytest.skip('rgw_create_pools not defined, nothing to test')
-        for pool_name in pools.keys():
-            cmd = host.run("sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd pool get {pool_name} size".format(  # noqa E501
-                hostname=node["vars"]["inventory_hostname"],
-                cluster=setup['cluster_name'],
-                pool_name=pool_name
-            ))
-            assert cmd.rc == 0
-
-    @pytest.mark.docker
-    def test_docker_rgw_tuning_pools_are_set(self, node, host, setup):
-        hostname = node["vars"]["inventory_hostname"]
-        cluster = setup['cluster_name']
-        container_binary = setup["container_binary"]
-        pools = node["vars"].get("rgw_create_pools")
-        if pools is None:
-            pytest.skip('rgw_create_pools not defined, nothing to test')
-        for pool_name in pools.keys():
-            cmd = host.run("sudo {container_binary} exec ceph-rgw-{hostname}-rgw0 ceph --cluster={cluster} -n client.rgw.{hostname}.rgw0 --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd pool get {pool_name} size".format(  # noqa E501
-                hostname=hostname,
-                cluster=cluster,
-                pool_name=pool_name,
-                container_binary=container_binary
-            ))
-            assert cmd.rc == 0