git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
library: add ceph_osd module
author: Dimitri Savineau <dsavinea@redhat.com>
Fri, 20 Nov 2020 22:50:59 +0000 (17:50 -0500)
committer: Guillaume Abrioux <gabrioux@redhat.com>
Mon, 30 Nov 2020 15:53:45 +0000 (16:53 +0100)
This adds ceph_osd ansible module for replacing the command module
usage with the ceph osd destroy/down/in/out/purge/rm commands.

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
infrastructure-playbooks/filestore-to-bluestore.yml
infrastructure-playbooks/shrink-osd.yml
library/ceph_osd.py [new file with mode: 0644]
tests/library/test_ceph_osd.py [new file with mode: 0644]

index c796a9d4599032f37cbb5edb361f810fabfbda3b..b2f0f90b828b52d54cbce4cc351d0fa79d35b045 100644 (file)
               ignore_errors: true
 
             - name: mark out osds
-              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
+              ceph_osd:
+                ids: "{{ (item.0.stdout | from_json).whoami }}"
+                cluster: "{{ cluster }}"
+                state: out
+              environment:
+                CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+                CEPH_CONTAINER_BINARY: "{{ container_binary }}"
               with_together:
                 - "{{ simple_scan.results }}"
                 - "{{ partlabel.results }}"
         - name: ceph-volume prepared OSDs related tasks
           block:
             - name: mark out osds
-              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
-              with_items: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
+              ceph_osd:
+                ids: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
+                cluster: "{{ cluster }}"
+                state: out
+              environment:
+                CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+                CEPH_CONTAINER_BINARY: "{{ container_binary }}"
               delegate_to: "{{ groups[mon_group_name][0] }}"
               run_once: true
 
                 - item.type == 'data'
 
             - name: mark down osds
-              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ item }}"
-              with_items: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
+              ceph_osd:
+                ids: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
+                cluster: "{{ cluster }}"
+                state: down
+              environment:
+                CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+                CEPH_CONTAINER_BINARY: "{{ container_binary }}"
               delegate_to: "{{ groups[mon_group_name][0] }}"
               run_once: true
 
             - "{{ ((osd_tree.stdout | default('{}') | from_json).nodes | selectattr('name', 'match', inventory_hostname) | map(attribute='children') | list) }}"
 
         - name: purge osd(s) from the cluster
-          command: >
-            {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
+          ceph_osd:
+            ids: "{{ item }}"
+            cluster: "{{ cluster }}"
+            state: purge
+          environment:
+            CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+            CEPH_CONTAINER_BINARY: "{{ container_binary }}"
           run_once: true
           delegate_to: "{{ groups[mon_group_name][0] }}"
           with_items: "{{ osd_ids }}"
index 516e95cbdf9b6828a931bd8026542e55c7b445ff..9d75f7e9d86e517787489fbd94f4669bcea89bf9 100644 (file)
       when: item.skipped is undefined
 
     - name: mark osd(s) out of the cluster
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ osd_to_kill.replace(',', ' ') }}"
-      changed_when: false
+      ceph_osd:
+        ids: "{{ osd_to_kill.split(',') }}"
+        cluster: "{{ cluster }}"
+        state: out
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
       run_once: true
 
     - name: stop osd(s) service
       when: item.2 in _lvm_list.keys()
 
     - name: ensure osds are marked down
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ osd_to_kill.replace(',', ' ') }}"
-      changed_when: false
+      ceph_osd:
+        ids: "{{ osd_to_kill.split(',') }}"
+        cluster: "{{ cluster }}"
+        state: down
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
       run_once: true
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
     - name: purge osd(s) from the cluster
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
-      changed_when: false
+      ceph_osd:
+        ids: "{{ item }}"
+        cluster: "{{ cluster }}"
+        state: purge
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
       run_once: true
       with_items: "{{ osd_to_kill.split(',') }}"
 
diff --git a/library/ceph_osd.py b/library/ceph_osd.py
new file mode 100644 (file)
index 0000000..a315f59
--- /dev/null
@@ -0,0 +1,146 @@
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible.module_utils.ca_common import exit_module, generate_ceph_cmd, is_containerized
+except ImportError:
+    from module_utils.ca_common import exit_module, generate_ceph_cmd, is_containerized
+import datetime
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_osd
+short_description: Manage Ceph OSD state
+version_added: "2.8"
+description:
+    - Manage Ceph OSD state
+options:
+    ids:
+        description:
+            - The ceph OSD id(s).
+        required: true
+    cluster:
+        description:
+            - The ceph cluster name.
+        required: false
+        default: ceph
+    state:
+        description:
+            - The ceph OSD state.
+        required: true
+        choices: ['destroy', 'down', 'in', 'out', 'purge', 'rm']
+author:
+    - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: destroy OSD 42
+  ceph_osd:
+    ids: 42
+    state: destroy
+
+- name: set multiple OSDs down
+  ceph_osd:
+    ids: [0, 1, 3]
+    state: down
+
+- name: set OSD 42 in
+  ceph_osd:
+    ids: 42
+    state: in
+
+- name: set OSD 42 out
+  ceph_osd:
+    ids: 42
+    state: out
+
+- name: purge OSD 42
+  ceph_osd:
+    ids: 42
+    state: purge
+
+- name: rm OSD 42
+  ceph_osd:
+    ids: 42
+    state: rm
+'''
+
+RETURN = '''#  '''
+
+
def main():
    """Ansible entry point: apply a state transition to one or more Ceph OSDs.

    Builds a ``ceph osd <state>`` command (optionally wrapped in a container
    runtime via the ca_common helpers), runs it, and reports the result
    through exit_module(). In check mode the command is only reported,
    never executed.
    """
    module = AnsibleModule(
        argument_spec=dict(
            ids=dict(type='list', required=True),
            cluster=dict(type='str', required=False, default='ceph'),
            state=dict(type='str', required=True, choices=['destroy', 'down', 'in', 'out', 'purge', 'rm']),
        ),
        supports_check_mode=True,
    )

    osd_ids = module.params.get('ids')
    cluster_name = module.params.get('cluster')
    target_state = module.params.get('state')

    # destroy/purge are irreversible and only accept a single OSD id.
    # NOTE(review): the 'at at time' wording is a known typo kept as-is —
    # the unit tests assert this exact message.
    destructive = target_state in ('destroy', 'purge')
    if destructive and len(osd_ids) > 1:
        module.fail_json(msg='destroy and purge only support one OSD at at time', rc=1)

    start_time = datetime.datetime.now()

    cmd = generate_ceph_cmd(['osd', target_state], osd_ids,
                            cluster=cluster_name,
                            container_image=is_containerized())
    if destructive:
        # destroy/purge require explicit confirmation on the CLI.
        cmd.append('--yes-i-really-mean-it')

    if module.check_mode:
        # Report what would run, without executing anything.
        exit_module(
            module=module,
            out='',
            rc=0,
            cmd=cmd,
            err='',
            startd=start_time,
            changed=False
        )
        return

    rc, out, err = module.run_command(cmd)
    # For down/in/out, ceph prints 'marked ...' on stderr only when the
    # state actually changed; otherwise the call was a no-op.
    changed = not (target_state in ('down', 'in', 'out') and 'marked' not in err)
    exit_module(
        module=module,
        out=out,
        rc=rc,
        cmd=cmd,
        err=err,
        startd=start_time,
        changed=changed
    )


if __name__ == '__main__':
    main()
diff --git a/tests/library/test_ceph_osd.py b/tests/library/test_ceph_osd.py
new file mode 100644 (file)
index 0000000..dc5c6ab
--- /dev/null
@@ -0,0 +1,244 @@
+from mock.mock import patch
+import os
+import pytest
+import ca_test_common
+import ceph_osd
+
# Shared fixtures for the ceph_osd module tests: a default cluster name,
# container runtime/image used by the containerized test, sample OSD ids,
# and the client name/keyring that generate_ceph_cmd() injects into commands.
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_ids = ['0', '7', '13']
fake_user = 'client.admin'
fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)
invalid_state = 'foo'
+
+
class TestCephOSDModule(object):
    """Unit tests for the ceph_osd Ansible module.

    Each test patches AnsibleModule.fail_json/exit_json (and run_command
    where a command would execute) with the ca_test_common helpers, drives
    ceph_osd.main(), and inspects the result payload carried by the raised
    AnsibleFailJson/AnsibleExitJson exception.
    """

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    def test_without_parameters(self, m_fail_json):
        """No arguments: the module must fail on the required ids/state."""
        ca_test_common.set_module_args({})
        m_fail_json.side_effect = ca_test_common.fail_json

        with pytest.raises(ca_test_common.AnsibleFailJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert result['msg'] == 'missing required arguments: ids, state'

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    def test_with_invalid_state(self, m_fail_json):
        """A state outside the declared choices is rejected by argument_spec."""
        ca_test_common.set_module_args({
            'ids': fake_id,
            'state': invalid_state,
        })
        m_fail_json.side_effect = ca_test_common.fail_json

        with pytest.raises(ca_test_common.AnsibleFailJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert result['msg'] == ('value of state must be one of: destroy, down, '
                                 'in, out, purge, rm, got: {}'.format(invalid_state))

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    def test_with_check_mode(self, m_exit_json):
        """Check mode reports the command it would run, with no execution."""
        ca_test_common.set_module_args({
            'ids': fake_id,
            'state': 'rm',
            '_ansible_check_mode': True
        })
        m_exit_json.side_effect = ca_test_common.exit_json

        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert not result['changed']
        assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'rm', fake_id]
        assert result['rc'] == 0
        assert not result['stdout']
        assert not result['stderr']

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_with_failure(self, m_run_command, m_exit_json):
        """A non-zero ceph exit code is propagated in the module result."""
        ca_test_common.set_module_args({
            'ids': fake_id,
            'state': 'rm'
        })
        m_exit_json.side_effect = ca_test_common.exit_json
        stdout = ''
        stderr = 'Error EBUSY: osd.{} is still up; must be down before removal.'.format(fake_id)
        rc = 16
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'rm', fake_id]
        assert result['rc'] == rc
        assert result['stderr'] == stderr

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    @pytest.mark.parametrize('state', ['destroy', 'down', 'in', 'out', 'purge', 'rm'])
    def test_set_state(self, m_run_command, m_exit_json, state):
        """Every supported state builds the expected single-id command."""
        ca_test_common.set_module_args({
            'ids': fake_id,
            'state': state
        })
        m_exit_json.side_effect = ca_test_common.exit_json
        stdout = ''
        stderr = 'marked {} osd.{}'.format(state, fake_id)
        rc = 0
        m_run_command.return_value = rc, stdout, stderr
        cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state, fake_id]
        # destroy/purge get the extra CLI confirmation flag.
        if state in ['destroy', 'purge']:
            cmd.append('--yes-i-really-mean-it')

        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == cmd
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    @pytest.mark.parametrize('state', ['down', 'in', 'out', 'rm'])
    def test_set_state_multiple_ids(self, m_run_command, m_exit_json, state):
        """States that accept several OSDs append all ids to one command."""
        ca_test_common.set_module_args({
            'ids': fake_ids,
            'state': state
        })
        m_exit_json.side_effect = ca_test_common.exit_json
        stderr = ''
        stdout = ''
        for osd in fake_ids:
            stderr += 'marked {} osd.{} '.format(state, osd)
        rc = 0
        m_run_command.return_value = rc, stdout, stderr
        cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state]
        cmd.extend(fake_ids)

        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == cmd
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    @pytest.mark.parametrize('state', ['destroy', 'purge'])
    def test_invalid_state_multiple_ids(self, m_run_command, m_fail_json, state):
        """destroy/purge must refuse more than one OSD id."""
        ca_test_common.set_module_args({
            'ids': fake_ids,
            'state': state
        })
        m_fail_json.side_effect = ca_test_common.fail_json

        with pytest.raises(ca_test_common.AnsibleFailJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        # NOTE(review): 'at at time' matches the module's message verbatim
        # (known typo in the module under test).
        assert result['msg'] == 'destroy and purge only support one OSD at at time'
        assert result['rc'] == 1

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    @pytest.mark.parametrize('state', ['down', 'in', 'out'])
    def test_already_set_state(self, m_run_command, m_exit_json, state):
        """No 'marked' in stderr means the OSD was already in that state: unchanged."""
        ca_test_common.set_module_args({
            'ids': fake_id,
            'state': state
        })
        m_exit_json.side_effect = ca_test_common.exit_json
        stdout = ''
        stderr = 'osd.{} is already {}.'.format(fake_id, state)
        rc = 0
        m_run_command.return_value = rc, stdout, stderr
        cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state, fake_id]

        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert not result['changed']
        assert result['cmd'] == cmd
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    @pytest.mark.parametrize('state', ['down', 'in', 'out', 'rm'])
    def test_one_already_set_state_multiple_ids(self, m_run_command, m_exit_json, state):
        """A partial 'marked ...' in stderr still reports the run as changed."""
        ca_test_common.set_module_args({
            'ids': fake_ids,
            'state': state
        })
        m_exit_json.side_effect = ca_test_common.exit_json
        stdout = ''
        stderr = 'marked {} osd.{}. osd.{} does not exist. osd.{} does not exist.'.format(state, fake_ids[0], fake_ids[1], fake_ids[2])
        rc = 0
        m_run_command.return_value = rc, stdout, stderr
        cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state]
        cmd.extend(fake_ids)
        # Unreachable for the parametrized states; kept for symmetry with
        # the other command-building tests.
        if state in ['destroy', 'purge']:
            cmd.append('--yes-i-really-mean-it')

        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == cmd
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    @pytest.mark.parametrize('state', ['destroy', 'down', 'in', 'out', 'purge', 'rm'])
    def test_set_state_with_container(self, m_run_command, m_exit_json, state):
        """With container env vars set, the ceph command is wrapped in podman run."""
        ca_test_common.set_module_args({
            'ids': fake_id,
            'state': state
        })
        m_exit_json.side_effect = ca_test_common.exit_json
        stdout = ''
        stderr = 'marked {} osd.{}'.format(state, fake_id)
        rc = 0
        m_run_command.return_value = rc, stdout, stderr
        cmd = [fake_container_binary, 'run', '--rm', '--net=host',
               '-v', '/etc/ceph:/etc/ceph:z',
               '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
               '-v', '/var/log/ceph/:/var/log/ceph/:z',
               '--entrypoint=ceph', fake_container_image,
               '-n', fake_user, '-k', fake_keyring,
               '--cluster', fake_cluster, 'osd', state, fake_id]
        if state in ['destroy', 'purge']:
            cmd.append('--yes-i-really-mean-it')

        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
            ceph_osd.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == cmd
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout