]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
ceph_pool: refact module
author Guillaume Abrioux <gabrioux@redhat.com>
Mon, 28 Sep 2020 21:27:47 +0000 (23:27 +0200)
committer Guillaume Abrioux <gabrioux@redhat.com>
Fri, 2 Oct 2020 05:42:40 +0000 (07:42 +0200)
remove complexity about current defaults in running cluster

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
library/ceph_pool.py
roles/ceph-client/tasks/create_users_keys.yml
roles/ceph-iscsi-gw/tasks/common.yml
roles/ceph-mds/tasks/create_mds_filesystems.yml
roles/ceph-osd/tasks/openstack_config.yml
roles/ceph-rgw/tasks/rgw_create_pools.yml
tests/library/test_ceph_pool.py

index 2abce26970674129582c44308c9422d9546fec00..9088899d3d7a34f91869177c2a7716da8cee3849 100644 (file)
@@ -51,8 +51,8 @@ options:
             If 'present' is used, the module creates a pool if it doesn't exist
             or update it if it already exists.
             If 'absent' is used, the module will simply delete the pool.
-            If 'list' is used, the module will return all details about the existing pools
-            (json formatted).
+            If 'list' is used, the module will return all details about the
+            existing pools (json formatted).
         required: false
         choices: ['present', 'absent', 'list']
         default: present
@@ -247,27 +247,7 @@ def generate_get_config_cmd(param, cluster, user, user_key, container_image=None
     return cmd
 
 
-def get_default_running_config(module, cluster, user, user_key, output_format='json', container_image=None):  # noqa E501
-    '''
-    Get some default values set in the cluster
-    '''
-
-    params = ['osd_pool_default_size', 'osd_pool_default_min_size', 'osd_pool_default_pg_num', 'osd_pool_default_pgp_num']  # noqa E501
-
-    default_running_values = {}
-
-    for param in params:
-        rc, cmd, out, err = exec_commands(module, generate_get_config_cmd(param, cluster, user, user_key, container_image=container_image))  # noqa E501
-
-        if rc == 0:
-            default_running_values[param] = out.strip()
-        else:
-            return rc, cmd, out, err
-
-    return rc, cmd, default_running_values, err
-
-
-def get_application_pool(cluster, name, user, user_key, output_format='json', container_image=None):  # noqa E501
+def get_application_pool(cluster, name, user, user_key, output_format='json', container_image=None):
     '''
     Get application type enabled on a given pool
     '''
@@ -319,6 +299,23 @@ def get_pool_details(module, cluster, name, user, user_key, output_format='json'
 
     _rc, _cmd, application_pool, _err = exec_commands(module, get_application_pool(cluster, name, user, user_key, container_image=container_image))  # noqa E501
 
+    # This is a trick because "target_size_ratio" isn't present at the same level in the dict
+    # ie:
+    # {
+    # 'pg_num': 8,
+    # 'pgp_num': 8,
+    # 'pg_autoscale_mode': 'on',
+    #     'options': {
+    #          'target_size_ratio': 0.1
+    #     }
+    # }
+    # If 'target_size_ratio' is present in 'options', we copy it up one level;
+    # this way we end up with a dict containing all needed keys at the same level.
+    if 'target_size_ratio' in out['options'].keys():
+        out['target_size_ratio'] = out['options']['target_size_ratio']
+    else:
+        out['target_size_ratio'] = None
+
     application = list(json.loads(application_pool.strip()).keys())
 
     if len(application) == 0:
@@ -335,15 +332,12 @@ def compare_pool_config(user_pool_config, running_pool_details):
     '''
 
     delta = {}
-    filter_keys = ['pg_num', 'pg_placement_num', 'size', 'pg_autoscale_mode']
+    filter_keys = [ 'pg_num', 'pg_placement_num', 'size', 'pg_autoscale_mode', 'target_size_ratio']
     for key in filter_keys:
-        if str(running_pool_details[key]) != user_pool_config[key]['value']:
+        if str(running_pool_details[key]) != user_pool_config[key]['value'] and user_pool_config[key]['value']:
             delta[key] = user_pool_config[key]
 
-    if str(running_pool_details['options'].get('target_size_ratio')) != user_pool_config['target_size_ratio']['value'] and user_pool_config['target_size_ratio']['value'] is not None:  # noqa E501
-        delta['target_size_ratio'] = user_pool_config['target_size_ratio']
-
-    if running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value'] is not None:  # noqa E501
+    if running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value'] != None:
         delta['application'] = {}
         delta['application']['new_application'] = user_pool_config['application']['value']  # noqa E501
         # to be improved (for update_pools()...)
@@ -375,10 +369,16 @@ def create_pool(cluster, name, user, user_key, user_pool_config, container_image
     Create a new pool
     '''
 
-    args = ['create', user_pool_config['pool_name']['value'], '--pg_num', user_pool_config['pg_num']['value'], '--pgp_num', user_pool_config['pgp_num']['value'], user_pool_config['type']['value']]  # noqa E501
+    args = [ 'create', user_pool_config['pool_name']['value'], user_pool_config['type']['value'] ]
+
+    if user_pool_config['pg_autoscale_mode']['value'] != 'on':
+        args.extend(['--pg_num', user_pool_config['pg_num']['value'], '--pgp_num', user_pool_config['pgp_num']['value']])
 
     if user_pool_config['type']['value'] == 'replicated':
-        args.extend([user_pool_config['crush_rule']['value'], '--expected_num_objects', user_pool_config['expected_num_objects']['value'], '--size', user_pool_config['size']['value'], '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value']])  # noqa E501
+        args.extend([ user_pool_config['crush_rule']['value'], '--expected_num_objects', user_pool_config['expected_num_objects']['value'], '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value'] ])
+
+    if user_pool_config['size']['value'] and user_pool_config['type']['value'] == "replicated":
+        args.extend(['--size', user_pool_config['size']['value']])
 
     elif user_pool_config['type']['value'] == 'erasure':
         args.extend([user_pool_config['erasure_profile']['value']])
@@ -462,8 +462,8 @@ def run_module():
         details=dict(type='bool', required=False, default=False),
         size=dict(type='str', required=False),
         min_size=dict(type='str', required=False),
-        pg_num=dict(type='str', required=False, default=None),
-        pgp_num=dict(type='str', required=False, default=None),
+        pg_num=dict(type='str', required=False),
+        pgp_num=dict(type='str', required=False),
         pg_autoscale_mode=dict(type='str', required=False, default='on'),
         target_size_ratio=dict(type='str', required=False, default=None),
         pool_type=dict(type='str', required=False, default='replicated', choices=['replicated', 'erasure', '1', '3']),  # noqa E501
@@ -475,7 +475,7 @@ def run_module():
 
     module = AnsibleModule(
         argument_spec=module_args,
-        supports_check_mode=True,
+        supports_check_mode=True
     )
 
     # Gather module parameters in variables
@@ -483,8 +483,10 @@ def run_module():
     name = module.params.get('name')
     state = module.params.get('state')
     details = module.params.get('details')
-    pg_num = module.params.get('pg')
-    pgp_num = module.params.get('pgp')
+    size = module.params.get('size')
+    min_size = module.params.get('min_size')
+    pg_num = module.params.get('pg_num')
+    pgp_num = module.params.get('pgp_num')
     pg_autoscale_mode = module.params.get('pg_autoscale_mode')
     target_size_ratio = module.params.get('target_size_ratio')
     application = module.params.get('application')
@@ -512,16 +514,18 @@ def run_module():
     expected_num_objects = module.params.get('expected_num_objects')
 
     user_pool_config = {
-        'pool_name': {'value': name},
-        'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'},
-        'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'},
-        'pg_autoscale_mode': {'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode'},  # noqa E501
-        'target_size_ratio': {'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio'},  # noqa E501
-        'application': {'value': application},
-        'type': {'value': pool_type},
-        'erasure_profile': {'value': erasure_profile},
-        'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'},
-        'expected_num_objects': {'value': expected_num_objects}
+        'pool_name': { 'value': name },
+        'pg_num': { 'value': pg_num, 'cli_set_opt': 'pg_num' },
+        'pgp_num': { 'value': pgp_num, 'cli_set_opt': 'pgp_num' },
+        'pg_autoscale_mode': { 'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode' },
+        'target_size_ratio': { 'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio' },
+        'application': {'value': application },
+        'type': { 'value': pool_type },
+        'erasure_profile': { 'value': erasure_profile },
+        'crush_rule': { 'value': rule_name, 'cli_set_opt': 'crush_rule' },
+        'expected_num_objects': { 'value': expected_num_objects },
+        'size': { 'value': size },
+        'min_size': { 'value': min_size, 'cli_set_opt': 'size' }
     }
 
     if module.check_mode:
@@ -545,49 +549,25 @@ def run_module():
     keyring_filename = cluster + '.' + user + '.keyring'
     user_key = os.path.join("/etc/ceph/", keyring_filename)
 
-    def_opt = {
-        'size': {
-            'conf_name': 'osd_pool_default_size',
-            'cli_set_opt': 'size'
-        },
-        'min_size': {
-            'conf_name': 'osd_pool_default_min_size',
-            'cli_set_opt': 'min_size'
-        },
-        'pg_num': {
-            'conf_name': 'osd_pool_default_pg_num',
-            'cli_set_opt': 'pg_num'
-        },
-        'pgp_num': {
-            'conf_name': 'osd_pool_default_pgp_num',
-            'cli_set_opt': 'pgp_num'
-        }
-    }
-
     if state == "present":
-        rc, cmd, default_running_ceph_config, err = get_default_running_config(module, cluster, user, user_key, container_image=container_image)  # noqa E501
+        rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))
         if rc == 0:
-            for k, v in def_opt.items():
-                if module.params[k] is None:
-                    user_pool_config[k] = {'value': default_running_ceph_config[v['conf_name']], 'cli_set_opt': v['cli_set_opt']}  # noqa E501
-                else:
-                    user_pool_config[k] = {'value': module.params.get(k), 'cli_set_opt': v['cli_set_opt']}  # noqa E501
-            rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))  # noqa E501
-            if rc == 0:
-                running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image)  # noqa E501
-                user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' }  # noqa E501
-                delta = compare_pool_config(user_pool_config, running_pool_details[2])  # noqa E501
-                if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == "" and 'size' not in delta.keys():  # noqa E501
-                    rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image)  # noqa E501
-                    if rc == 0:
-                        changed = True
-                else:
-                    out = "Pool {} already exists and there is nothing to update.".format(name)  # noqa E501
+            running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image)
+            user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' }
+            delta = compare_pool_config(user_pool_config, running_pool_details[2])
+            if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == "" and 'size' not in delta.keys():
+                rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image)
+                if rc == 0:
+                    changed = True
             else:
-                rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image))  # noqa E501
-                if user_pool_config['application']['value'] is not None:
-                    _rc, _cmd, _out, _err = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image))  # noqa E501
-                changed = True
+                out = "Pool {} already exists and there is nothing to update.".format(name)
+        else:
+            rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image))
+            if user_pool_config['application']['value'] != None:
+                rc, _, _, _ = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image))
+            if user_pool_config['min_size']['value'] != None:
+                pass
+            changed = True
 
     elif state == "list":
         rc, cmd, out, err = exec_commands(module, list_pools(cluster, name, user, user_key, details, container_image=container_image))  # noqa E501
@@ -595,15 +575,25 @@ def run_module():
             out = "Couldn't list pool(s) present on the cluster"
 
     elif state == "absent":
-        rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))
+        rc, cmd, out, err = exec_commands(module,
+                                        check_pool_exist(cluster,
+                                        name, user,
+                                        user_key, container_image=container_image))
         if rc == 0:
-            rc, cmd, out, err = exec_commands(module, remove_pool(cluster, name, user, user_key, container_image=container_image))
+            rc, cmd, out, err = exec_commands(module,
+                                              remove_pool(cluster,
+                                                          name,
+                                                          user,
+                                                          user_key,
+                                                          container_image=
+                                                          container_image))
             changed = True
         else:
             rc = 0
             out = "Skipped, since pool {} doesn't exist".format(name)
 
-    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)  # noqa E501
+    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd,
+                changed=changed)
 
 
 def main():
index 47cc60bb2166fc5abf7dddc687f98134080a0dcf..0752001c74b00e4f5a8cd8fe2030d131cecb63e4 100644 (file)
@@ -76,8 +76,8 @@
       ceph_pool:
         name: "{{ item.name }}"
         cluster: "{{ cluster }}"
-        pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
-        pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
+        pg_num: "{{ item.pg_num | default(omit) }}"
+        pgp_num: "{{ item.pgp_num | default(omit) }}"
         size: "{{ item.size | default(omit) }}"
         min_size: "{{ item.min_size | default(omit) }}"
         pool_type: "{{ item.type | default('replicated') }}"
index ed81b5106f670a4b0897533c6ac67d7a049971b9..70ad0bfb864b3efd932a1f314fc9768bccad584d 100644 (file)
@@ -56,8 +56,7 @@
   ceph_pool:
     name: "{{ iscsi_pool_name }}"
     cluster: "{{ cluster }}"
-    pg_num: "{{ osd_pool_default_pg_num }}"
-    size: "{{ iscsi_pool_size | default(osd_pool_default_size) }}"
+    size: "{{ iscsi_pool_size | default(omit) }}"
     application: "rbd"
   run_once: true
   delegate_to: "{{ groups[mon_group_name][0] }}"
index 93ec880d45f49c0a09f20ff4a1d55130fb97ef59..ca6b300a7f5cb2dc1db3e2ec300e278aabeaa878 100644 (file)
@@ -7,8 +7,8 @@
   ceph_pool:
     name: "{{ item.name }}"
     cluster: "{{ cluster }}"
-    pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
-    pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
+    pg_num: "{{ item.pg_num | default(omit) }}"
+    pgp_num: "{{ item.pgp_num | default(omit) }}"
     size: "{{ item.size | default(omit) }}"
     min_size: "{{ item.min_size | default(omit) }}"
     pool_type: "{{ item.type | default('replicated') }}"
index fbf4408e6eefa27220ab1d42efd4320bb333ae62..8a16ca02568c55e9cb391c105f4593e815c87c41 100644 (file)
@@ -5,8 +5,8 @@
       ceph_pool:
         name: "{{ item.name }}"
         cluster: "{{ cluster }}"
-        pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
-        pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
+        pg_num: "{{ item.pg_num | default(omit) }}"
+        pgp_num: "{{ item.pgp_num | default(omit) }}"
         size: "{{ item.size | default(omit) }}"
         min_size: "{{ item.min_size | default(omit) }}"
         pool_type: "{{ item.type | default('replicated') }}"
index 751512b6fc1a43ab399a635c5e62a084eb00df28..74a3a398ab4a53daa3d822c3c854fbcc20520b52 100644 (file)
@@ -32,8 +32,9 @@
     name: "{{ item.key }}"
     state: present
     cluster: "{{ cluster }}"
-    pg_num: "{{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
-    pgp_num: "{{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
+    pg_num: "{{ item.value.pg_num | default(omit) }}"
+    pgp_num: "{{ item.value.pgp_num | default(omit) }}"
+    size: "{{ item.value.size | default(omit) }}"
     pool_type: erasure
     erasure_profile: "{{ item.value.ec_profile }}"
     application: rgw
     name: "{{ item.key }}"
     state: present
     cluster: "{{ cluster }}"
-    pg_num: "{{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
-    pgp_num: "{{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
-    size: "{{ item.value.size | default(osd_pool_default_size) }}"
+    pg_num: "{{ item.value.pg_num | default(omit) }}"
+    pgp_num: "{{ item.value.pgp_num | default(omit) }}"
+    size: "{{ item.value.size | default(omit) }}"
+    min_size: "{{ item.value.min_size | default(omit) }}"
     pool_type: replicated
     rule_name: "{{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
     application: rgw
index b6288af6b063124ca096fa5a2c342e002beffe8e..b0851e54a8feb98cb987def77068d7b8deaddde4 100644 (file)
@@ -81,6 +81,10 @@ class TestCephPoolModule(object):
             'expected_num_objects': 0,
             'fast_read': False,
             'options': {},
+            # 'target_size_ratio' normally lives nested under the 'options'
+            # dict above; get_pool_details() flattens it to the top level.
+            # See the comment in get_pool_details() for more details.
+            'target_size_ratio': None,
             'application_metadata': {
                 'rbd': {}
             },