git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
library: add ceph_volume_simple_{activate,scan}
authorDimitri Savineau <dsavinea@redhat.com>
Tue, 17 Nov 2020 14:22:34 +0000 (09:22 -0500)
committerGuillaume Abrioux <gabrioux@redhat.com>
Wed, 25 Nov 2020 09:09:42 +0000 (10:09 +0100)
This adds ceph_volume_simple_{activate,scan} ansible modules for replacing
the command module usage with the ceph-volume simple activate/scan commands.

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
infrastructure-playbooks/filestore-to-bluestore.yml
infrastructure-playbooks/rolling_update.yml
library/ceph_volume_simple_activate.py [new file with mode: 0644]
library/ceph_volume_simple_scan.py [new file with mode: 0644]
tests/library/test_ceph_volume_simple_activate.py [new file with mode: 0644]
tests/library/test_ceph_volume_simple_scan.py [new file with mode: 0644]

index f009682087215c3f2dd13854f4417c308c75e829..c796a9d4599032f37cbb5edb361f810fabfbda3b 100644 (file)
               with_items: "{{ ceph_disk_osds_devices | default([]) }}"
 
             - name: get simple scan data
-              command: "{{ container_run_cmd }} --cluster {{ cluster }} simple scan {{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }} --stdout"
+              ceph_volume_simple_scan:
+                path: "{{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }}"
+                cluster: "{{ cluster }}"
+                stdout: true
+              environment:
+                CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+                CEPH_CONTAINER_BINARY: "{{ container_binary }}"
               register: simple_scan
               with_items: "{{ partlabel.results | default([]) }}"
               when: item.stdout == 'ceph data'
index f381fd4b348b5cb552bfa8ee08eb44611c11e581..f391b7aeeb328bfa623f6320a7d99975fb5da0b9 100644 (file)
         name: ceph-osd
 
     - name: scan ceph-disk osds with ceph-volume if deploying nautilus
-      command: "ceph-volume --cluster={{ cluster }} simple scan --force"
+      ceph_volume_simple_scan:
+        cluster: "{{ cluster }}"
+        force: true
       environment:
         CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
       when: not containerized_deployment | bool
 
     - name: activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus
-      command: "ceph-volume --cluster={{ cluster }} simple activate --all"
+      ceph_volume_simple_activate:
+        cluster: "{{ cluster }}"
+        osd_all: true
       environment:
         CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
       when: not containerized_deployment | bool
diff --git a/library/ceph_volume_simple_activate.py b/library/ceph_volume_simple_activate.py
new file mode 100644 (file)
index 0000000..43efd97
--- /dev/null
@@ -0,0 +1,203 @@
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+import datetime
+import os
+
+
# Module metadata consumed by Ansible tooling (ansible-doc and friends).
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

# Reference documentation rendered by ansible-doc; YAML payload must stay
# valid YAML, so only this surrounding comment is added.
DOCUMENTATION = '''
---
module: ceph_volume_simple_activate
short_description: Activate legacy OSD with ceph-volume
version_added: "2.8"
description:
    - Activate legacy OSD with ceph-volume by providing the JSON file from
      the scan operation or by passing the OSD ID and OSD FSID.
options:
    cluster:
        description:
            - The ceph cluster name.
        required: false
        default: ceph
    path:
        description:
            - The OSD metadata as JSON file in /etc/ceph/osd directory, it
              must exist.
        required: false
    osd_id:
        description:
            - The legacy OSD ID.
        required: false
    osd_fsid:
        description:
            - The legacy OSD FSID.
        required: false
    osd_all:
        description:
            - Activate all legacy OSDs.
        required: false
    systemd:
        description:
            - Using systemd unit during the OSD activation.
        required: false
        default: true
author:
    - Dimitri Savineau <dsavinea@redhat.com>
'''

# Usage examples rendered by ansible-doc.
EXAMPLES = '''
- name: activate all legacy OSDs
  ceph_volume_simple_activate:
    cluster: ceph
    osd_all: true

- name: activate a legacy OSD via OSD ID and OSD FSID
  ceph_volume_simple_activate:
    cluster: ceph
    osd_id: 3
    osd_fsid: 0c4a7eca-0c2a-4c12-beff-08a80f064c52

- name: activate a legacy OSD via the JSON file
  ceph_volume_simple_activate:
    cluster: ceph
    path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json

- name: activate a legacy OSD via the JSON file without systemd
  ceph_volume_simple_activate:
    cluster: ceph
    path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json
    systemd: false
'''

# No documented return values beyond the standard command-result keys.
RETURN = '''#  '''
+
+
def exit_module(module, out, rc, cmd, err, startd, changed=False):
    """Build the standard command-result payload and finish the module run.

    Records the executed command, start/end/delta timing, return code and
    trimmed stdout/stderr, then hands everything to ``module.exit_json``,
    which terminates execution.
    """
    finished = datetime.datetime.now()
    module.exit_json(**{
        'cmd': cmd,
        'start': str(startd),
        'end': str(finished),
        'delta': str(finished - startd),
        'rc': rc,
        'stdout': out.rstrip("\r\n"),
        'stderr': err.rstrip("\r\n"),
        'changed': changed,
    })
+
+
def main():
    """Entry point: run ``ceph-volume simple activate`` for a legacy OSD.

    The OSD to activate is selected either with ``osd_all`` (--all), with a
    JSON metadata file (``path``), or with the (``osd_id``, ``osd_fsid``)
    pair.  When both CEPH_CONTAINER_IMAGE and CEPH_CONTAINER_BINARY are set
    in the environment, ceph-volume is executed inside a container.
    """
    module = AnsibleModule(
        argument_spec=dict(
            cluster=dict(type='str', required=False, default='ceph'),
            path=dict(type='path', required=False),
            systemd=dict(type='bool', required=False, default=True),
            osd_id=dict(type='str', required=False),
            osd_fsid=dict(type='str', required=False),
            osd_all=dict(type='bool', required=False),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ('osd_all', 'osd_id'),
            ('osd_all', 'osd_fsid'),
            ('path', 'osd_id'),
            ('path', 'osd_fsid'),
        ],
        required_together=[
            ('osd_id', 'osd_fsid')
        ],
        required_one_of=[
            ('path', 'osd_id', 'osd_all'),
            ('path', 'osd_fsid', 'osd_all'),
        ],
    )

    params = module.params
    json_path = params.get('path')
    cluster_name = params.get('cluster')
    use_systemd = params.get('systemd')
    osd_id = params.get('osd_id')
    osd_fsid = params.get('osd_fsid')
    activate_all = params.get('osd_all')

    # The JSON metadata file must already exist when one is given.
    if json_path and not os.path.exists(json_path):
        module.fail_json(msg='{} does not exist'.format(json_path), rc=1)

    startd = datetime.datetime.now()

    # Wrap ceph-volume in a container run when the playbook exported both
    # environment variables; otherwise call the host binary directly.
    image = os.getenv('CEPH_CONTAINER_IMAGE')
    binary = os.getenv('CEPH_CONTAINER_BINARY')
    if binary and image:
        cmd = [
            binary, 'run', '--rm', '--privileged',
            '--ipc=host', '--net=host',
            '-v', '/etc/ceph:/etc/ceph:z',
            '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
            '-v', '/var/log/ceph/:/var/log/ceph/:z',
            '-v', '/run/lvm/:/run/lvm/',
            '-v', '/run/lock/lvm/:/run/lock/lvm/',
            '--entrypoint=ceph-volume', image,
        ]
    else:
        cmd = ['ceph-volume']

    cmd += ['--cluster', cluster_name, 'simple', 'activate']

    # Target selection: --all wins, then the JSON file, then the id/fsid pair
    # (required_together guarantees both are present in the last case).
    if activate_all:
        cmd.append('--all')
    elif json_path:
        cmd += ['--file', json_path]
    else:
        cmd += [osd_id, osd_fsid]

    if not use_systemd:
        cmd.append('--no-systemd')

    # Check mode only reports the would-be command; a real run always
    # reports changed=True and surfaces ceph-volume's rc/stdout/stderr.
    if module.check_mode:
        rc, out, err, changed = 0, '', '', False
    else:
        rc, out, err = module.run_command(cmd)
        changed = True

    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err,
                startd=startd, changed=changed)


if __name__ == '__main__':
    main()
diff --git a/library/ceph_volume_simple_scan.py b/library/ceph_volume_simple_scan.py
new file mode 100644 (file)
index 0000000..5087d9a
--- /dev/null
@@ -0,0 +1,176 @@
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+import datetime
+import os
+
+
# Module metadata consumed by Ansible tooling (ansible-doc and friends).
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

# Reference documentation rendered by ansible-doc; YAML payload must stay
# valid YAML, so only this surrounding comment is added.
DOCUMENTATION = '''
---
module: ceph_volume_simple_scan
short_description: Scan legacy OSD with ceph-volume
version_added: "2.8"
description:
    - Scan legacy OSD with ceph-volume and store the output as JSON file
      in /etc/ceph/osd directory with {OSD_ID}-{OSD_FSID}.json format.
options:
    cluster:
        description:
            - The ceph cluster name.
        required: false
        default: ceph
    path:
        description:
            - The OSD directory or metadata partition. The directory or
              partition must exist.
        required: false
    force:
        description:
            - Force re-scanning an OSD and overwriting the JSON content.
        required: false
        default: false
    stdout:
        description:
            - Do not store the output to JSON file but stdout instead.
        required: false
        default: false
author:
    - Dimitri Savineau <dsavinea@redhat.com>
'''

# Usage examples rendered by ansible-doc.
EXAMPLES = '''
- name: scan all running OSDs
  ceph_volume_simple_scan:
    cluster: ceph

- name: scan an OSD with the directory
  ceph_volume_simple_scan:
    cluster: ceph
    path: /var/lib/ceph/osd/ceph-3

- name: scan an OSD with the partition
  ceph_volume_simple_scan:
    cluster: ceph
    path: /dev/sdb1

- name: rescan an OSD and print the result on stdout
  ceph_volume_simple_scan:
    cluster: ceph
    path: /dev/nvme0n1p1
    force: true
    stdout: true
'''

# No documented return values beyond the standard command-result keys.
RETURN = '''#  '''
+
+
def exit_module(module, out, rc, cmd, err, startd, changed=False):
    """Report the command outcome through Ansible and finish the run.

    Packs command, timing, return code and trimmed output streams into the
    conventional result keys before calling ``module.exit_json``.
    """
    endd = datetime.datetime.now()
    elapsed = endd - startd
    module.exit_json(
        cmd=cmd,
        start=str(startd),
        end=str(endd),
        delta=str(elapsed),
        rc=rc,
        stdout=out.rstrip("\r\n"),
        stderr=err.rstrip("\r\n"),
        changed=changed,
    )
+
+
def main():
    """Entry point: run ``ceph-volume simple scan`` on legacy OSDs.

    Without ``path`` every running OSD is scanned; with it only the given
    OSD directory or metadata partition is.  When both CEPH_CONTAINER_IMAGE
    and CEPH_CONTAINER_BINARY are set in the environment, ceph-volume is
    executed inside a container.
    """
    module = AnsibleModule(
        argument_spec=dict(
            cluster=dict(type='str', required=False, default='ceph'),
            path=dict(type='path', required=False),
            force=dict(type='bool', required=False, default=False),
            stdout=dict(type='bool', required=False, default=False),
        ),
        supports_check_mode=True,
    )

    params = module.params
    target = params.get('path')
    cluster_name = params.get('cluster')

    # The OSD directory/partition must already exist when one is given.
    if target and not os.path.exists(target):
        module.fail_json(msg='{} does not exist'.format(target), rc=1)

    startd = datetime.datetime.now()

    # Wrap ceph-volume in a container run when the playbook exported both
    # environment variables; otherwise call the host binary directly.
    image = os.getenv('CEPH_CONTAINER_IMAGE')
    binary = os.getenv('CEPH_CONTAINER_BINARY')
    if binary and image:
        cmd = [
            binary, 'run', '--rm', '--privileged',
            '--ipc=host', '--net=host',
            '-v', '/etc/ceph:/etc/ceph:z',
            '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
            '-v', '/var/log/ceph/:/var/log/ceph/:z',
            '-v', '/run/lvm/:/run/lvm/',
            '-v', '/run/lock/lvm/:/run/lock/lvm/',
            '--entrypoint=ceph-volume', image,
        ]
    else:
        cmd = ['ceph-volume']

    cmd += ['--cluster', cluster_name, 'simple', 'scan']

    # Optional flags precede the positional path, matching the CLI layout.
    if params.get('force'):
        cmd.append('--force')
    if params.get('stdout'):
        cmd.append('--stdout')
    if target:
        cmd.append(target)

    # Check mode only reports the would-be command; a real run always
    # reports changed=True and surfaces ceph-volume's rc/stdout/stderr.
    if module.check_mode:
        rc, out, err, changed = 0, '', '', False
    else:
        rc, out, err = module.run_command(cmd)
        changed = True

    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err,
                startd=startd, changed=changed)


if __name__ == '__main__':
    main()
diff --git a/tests/library/test_ceph_volume_simple_activate.py b/tests/library/test_ceph_volume_simple_activate.py
new file mode 100644 (file)
index 0000000..99fc62b
--- /dev/null
@@ -0,0 +1,199 @@
+from mock.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import json
+import os
+import pytest
+import sys
+sys.path.append('./library')
+import ceph_volume_simple_activate  # noqa : E402
+
# Shared fixture values used by every test case below.
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
+
+
def set_module_args(args):
    """Feed *args* to the next AnsibleModule by pre-serializing them the
    same way Ansible does on stdin."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)
+
+
class AnsibleExitJson(Exception):
    """Raised in place of AnsibleModule.exit_json() so tests can intercept
    a successful module exit and inspect its result kwargs."""
    pass


class AnsibleFailJson(Exception):
    """Raised in place of AnsibleModule.fail_json() so tests can intercept
    a module failure and inspect its result kwargs."""
    pass


def exit_json(*args, **kwargs):
    """side_effect for the patched exit_json: surface the result as an exception."""
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):
    """side_effect for the patched fail_json: surface the failure as an exception."""
    raise AnsibleFailJson(kwargs)
+
+
class TestCephVolumeSimpleActivateModule(object):
    """Unit tests for library/ceph_volume_simple_activate.py.

    Each test patches AnsibleModule.exit_json/fail_json (and run_command
    where a real execution would happen) and asserts on the exact command
    list the module builds.
    """

    @patch.object(basic.AnsibleModule, 'exit_json')
    def test_with_check_mode(self, m_exit_json):
        """Check mode reports the would-be command without executing it."""
        set_module_args({
            'osd_id': fake_id,
            'osd_fsid': fake_uuid,
            '_ansible_check_mode': True
        })
        m_exit_json.side_effect = exit_json

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_activate.main()

        result = result.value.args[0]
        assert not result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
        assert result['rc'] == 0
        assert not result['stdout']
        assert not result['stderr']

    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_with_failure(self, m_run_command, m_exit_json):
        """A non-zero ceph-volume rc is surfaced via exit_json, not fail_json."""
        set_module_args({
            'osd_id': fake_id,
            'osd_fsid': fake_uuid
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = 'error'
        rc = 2
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_activate.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
        assert result['rc'] == rc
        assert result['stderr'] == stderr

    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_activate_all_osds(self, m_run_command, m_exit_json):
        """osd_all maps to the --all flag."""
        set_module_args({
            'osd_all': True
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = ''
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_activate.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch.object(os.path, 'exists', return_value=True)
    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
        """An existing JSON metadata file maps to --file <path>."""
        set_module_args({
            'path': fake_path
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = ''
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_activate.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch.object(os.path, 'exists', return_value=False)
    @patch.object(basic.AnsibleModule, 'fail_json')
    def test_activate_path_not_exists(self, m_fail_json, m_os_path):
        """A missing JSON metadata file fails fast with rc=1."""
        set_module_args({
            'path': fake_path
        })
        m_fail_json.side_effect = fail_json

        with pytest.raises(AnsibleFailJson) as result:
            ceph_volume_simple_activate.main()

        result = result.value.args[0]
        assert result['msg'] == '{} does not exist'.format(fake_path)
        assert result['rc'] == 1

    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_activate_without_systemd(self, m_run_command, m_exit_json):
        """systemd=False appends --no-systemd after the target arguments."""
        set_module_args({
            'osd_id': fake_id,
            'osd_fsid': fake_uuid,
            'systemd': False
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = ''
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_activate.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_activate_with_container(self, m_run_command, m_exit_json):
        """Both container env vars set: ceph-volume runs inside a container."""
        set_module_args({
            'osd_id': fake_id,
            'osd_fsid': fake_uuid,
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = ''
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_activate.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == [fake_container_binary,
                                 'run', '--rm', '--privileged',
                                 '--ipc=host', '--net=host',
                                 '-v', '/etc/ceph:/etc/ceph:z',
                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                 '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                 '-v', '/run/lvm/:/run/lvm/',
                                 '-v', '/run/lock/lvm/:/run/lock/lvm/',
                                 '--entrypoint=ceph-volume', fake_container_image,
                                 '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout
diff --git a/tests/library/test_ceph_volume_simple_scan.py b/tests/library/test_ceph_volume_simple_scan.py
new file mode 100644 (file)
index 0000000..0dd8e97
--- /dev/null
@@ -0,0 +1,191 @@
+from mock.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import json
+import os
+import pytest
+import sys
+sys.path.append('./library')
+import ceph_volume_simple_scan  # noqa : E402
+
# Shared fixture values used by every test case below.
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_path = '/var/lib/ceph/osd/ceph-0'
+
+
def set_module_args(args):
    """Inject *args* as the module's input, mimicking how Ansible passes
    parameters to AnsibleModule."""
    wrapped = {'ANSIBLE_MODULE_ARGS': args}
    basic._ANSIBLE_ARGS = to_bytes(json.dumps(wrapped))
+
+
class AnsibleExitJson(Exception):
    """Raised in place of AnsibleModule.exit_json() so tests can intercept
    a successful module exit and inspect its result kwargs."""
    pass


class AnsibleFailJson(Exception):
    """Raised in place of AnsibleModule.fail_json() so tests can intercept
    a module failure and inspect its result kwargs."""
    pass


def exit_json(*args, **kwargs):
    """side_effect for the patched exit_json: surface the result as an exception."""
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):
    """side_effect for the patched fail_json: surface the failure as an exception."""
    raise AnsibleFailJson(kwargs)
+
+
class TestCephVolumeSimpleScanModule(object):
    """Unit tests for library/ceph_volume_simple_scan.py.

    Each test patches AnsibleModule.exit_json/fail_json (and run_command
    where a real execution would happen) and asserts on the exact command
    list the module builds.
    """

    @patch.object(basic.AnsibleModule, 'exit_json')
    def test_with_check_mode(self, m_exit_json):
        """Check mode reports the would-be command without executing it."""
        set_module_args({
            '_ansible_check_mode': True
        })
        m_exit_json.side_effect = exit_json

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_scan.main()

        result = result.value.args[0]
        assert not result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan']
        assert result['rc'] == 0
        assert not result['stdout']
        assert not result['stderr']

    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_with_failure(self, m_run_command, m_exit_json):
        """A non-zero ceph-volume rc is surfaced via exit_json, not fail_json."""
        set_module_args({
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = 'error'
        rc = 2
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_scan.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan']
        assert result['rc'] == rc
        assert result['stderr'] == stderr

    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_scan_all_osds(self, m_run_command, m_exit_json):
        """No path argument: scan all running OSDs."""
        set_module_args({
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = ''
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_scan.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan']
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch.object(os.path, 'exists', return_value=True)
    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_scan_path_exists(self, m_run_command, m_exit_json, m_os_path):
        """An existing path is passed as the positional scan target."""
        set_module_args({
            'path': fake_path
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = ''
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_scan.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', fake_path]
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch.object(os.path, 'exists', return_value=False)
    @patch.object(basic.AnsibleModule, 'fail_json')
    def test_scan_path_not_exists(self, m_fail_json, m_os_path):
        """A missing path fails fast with rc=1."""
        set_module_args({
            'path': fake_path
        })
        m_fail_json.side_effect = fail_json

        with pytest.raises(AnsibleFailJson) as result:
            ceph_volume_simple_scan.main()

        result = result.value.args[0]
        assert result['msg'] == '{} does not exist'.format(fake_path)
        assert result['rc'] == 1

    @patch.object(os.path, 'exists', return_value=True)
    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_scan_path_stdout_force(self, m_run_command, m_exit_json, m_os_path):
        """force/stdout flags precede the positional path in the command."""
        set_module_args({
            'path': fake_path,
            'force': True,
            'stdout': True
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = ''
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_scan.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', '--force', '--stdout', fake_path]
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
    @patch.object(basic.AnsibleModule, 'exit_json')
    @patch.object(basic.AnsibleModule, 'run_command')
    def test_scan_with_container(self, m_run_command, m_exit_json):
        """Both container env vars set: ceph-volume runs inside a container."""
        set_module_args({
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = ''
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_volume_simple_scan.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == [fake_container_binary,
                                 'run', '--rm', '--privileged',
                                 '--ipc=host', '--net=host',
                                 '-v', '/etc/ceph:/etc/ceph:z',
                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                 '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                 '-v', '/run/lvm/:/run/lvm/',
                                 '-v', '/run/lock/lvm/:/run/lock/lvm/',
                                 '--entrypoint=ceph-volume', fake_container_image,
                                 '--cluster', fake_cluster, 'simple', 'scan']
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout