ceph_volume: refactor
author     Sébastien Han <seb@redhat.com>
           Wed, 3 Oct 2018 17:52:42 +0000 (19:52 +0200)
committer  Guillaume Abrioux <gabrioux@redhat.com>
           Wed, 10 Oct 2018 20:08:41 +0000 (16:08 -0400)
This commit does a couple of things:

* Avoid code duplication
* Clarify the code
* add more unit tests
* add myself as an author of the module

Signed-off-by: Sébastien Han <seb@redhat.com>
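The refactor's central move is that each action now maps to a command-builder function (`build_ceph_volume_cmd`, `prepare_or_create_osd`, `list_osd`, `activate_osd`, `zap_devices`, `batch`) that only returns an argv list, while `run_module()` alone executes commands and assembles the result. A minimal sketch of why this shape is easy to unit test (assuming the module is importable as `ceph_volume`, as the new tests below do with `from . import ceph_volume`):

    import ceph_volume  # assumes library/ is on sys.path

    def test_builder_returns_plain_argv():
        # no docker and no LVM touched: we only assert on the list
        argv = ceph_volume.build_ceph_volume_cmd('list',
                                                 container_image=None,
                                                 cluster='ceph')
        assert argv == ['ceph-volume', '--cluster', 'ceph', 'lvm', 'list']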
library/ceph_volume.py
library/test_ceph_volume.py
roles/ceph-osd/tasks/scenarios/lvm.yml
tests/functional/centos/7/bs-lvm-osds-container/group_vars/all
tests/functional/centos/7/bs-lvm-osds/group_vars/all
tests/functional/centos/7/lvm-batch-container/group_vars/all
tests/functional/centos/7/lvm-osds-container/group_vars/all
tox.ini

diff --git a/library/ceph_volume.py b/library/ceph_volume.py
index cca05cab6a1cd6caac2d59871a409cab1e3f0eda..416867e6b145d2e3efaa9d24b5eb3d567917d4f3 100644 (file)
--- a/library/ceph_volume.py
+++ b/library/ceph_volume.py
@@ -123,9 +123,14 @@ options:
             assigned or not depending on how the playbook runs.
         required: false
         default: None
+    list:
+        description:
+            - List potential Ceph LVM metadata on a device
+        required: false
 
 author:
     - Andrew Schoen (@andrewschoen)
+    - Sebastien Han <seb@redhat.com>
 '''
 
 EXAMPLES = '''
@@ -144,7 +149,7 @@ EXAMPLES = '''
     action: create
 
 
-- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db  # noqa E501
+- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db  # noqa e501
   ceph_volume:
     objectstore: bluestore
     data: data-lv
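The newly documented `list` action (added to the options block above) is served by the `list_osd()` builder introduced in the next hunk. A sketch of the argv it produces on a bare-metal cluster, mirroring the values the commit's new unit tests use:

    from ansible.compat.tests.mock import MagicMock
    import ceph_volume  # assumes library/ is on sys.path

    fake_module = MagicMock()
    fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
    cmd = ceph_volume.list_osd(fake_module, None)  # None: not containerized
    assert cmd == ['ceph-volume', '--cluster', 'ceph', 'lvm', 'list',
                   '/dev/sda', '--format=json']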
@@ -158,211 +163,165 @@ EXAMPLES = '''
 from ansible.module_utils.basic import AnsibleModule  # noqa 4502
 
 
+def fatal(message, module):
+    '''
+    Report a fatal error and exit
+    '''
+
+    if module:
+        module.fail_json(msg=message, changed=False, rc=1)
+    else:
+        raise(Exception(message))
+
+
 def container_exec(binary, container_image):
     '''
-    Build the CLI to run a command inside a container
+    Build the docker CLI to run a command inside a container
     '''
 
-    command_exec = ["docker", "run", "--rm", "--privileged", "--net=host",
-                    "-v", "/dev:/dev", "-v", "/etc/ceph:/etc/ceph:z",
-                    "-v", "/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket",
-                    "-v", "/var/lib/ceph/:/var/lib/ceph/:z",
-                    os.path.join("--entrypoint=" + binary),
+    command_exec = ['docker', 'run', '--rm', '--privileged', '--net=host',
+                    '-v', '/run/lock/lvm:/run/lock/lvm:z',
+                    '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',
+                    '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',
+                    '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                    os.path.join('--entrypoint=' + binary),
                     container_image]
     return command_exec
 
 
+def build_ceph_volume_cmd(action, container_image, cluster=None):
+    '''
+    Build the ceph-volume command
+    '''
+
+    if container_image:
+        binary = 'ceph-volume'
+        cmd = container_exec(
+            binary, container_image)
+    else:
+        binary = ['ceph-volume']
+        cmd = binary
+
+    if cluster:
+        cmd.extend(['--cluster', cluster])
+
+    cmd.append('lvm')
+    cmd.append(action)
+
+    return cmd
+
+
+def exec_command(module, cmd):
+    '''
+    Execute command
+    '''
+
+    rc, out, err = module.run_command(cmd)
+    return rc, cmd, out, err
+
+
+def is_containerized():
+    '''
+    Check if we are running on a containerized cluster
+    '''
+
+    if 'CEPH_CONTAINER_IMAGE' in os.environ:
+        container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+    else:
+        container_image = None
+
+    return container_image
+
+
 def get_data(data, data_vg):
     if data_vg:
-        data = "{0}/{1}".format(data_vg, data)
+        data = '{0}/{1}'.format(data_vg, data)
     return data
 
 
 def get_journal(journal, journal_vg):
     if journal_vg:
-        journal = "{0}/{1}".format(journal_vg, journal)
+        journal = '{0}/{1}'.format(journal_vg, journal)
     return journal
 
 
 def get_db(db, db_vg):
     if db_vg:
-        db = "{0}/{1}".format(db_vg, db)
+        db = '{0}/{1}'.format(db_vg, db)
     return db
 
 
 def get_wal(wal, wal_vg):
     if wal_vg:
-        wal = "{0}/{1}".format(wal_vg, wal)
+        wal = '{0}/{1}'.format(wal_vg, wal)
     return wal
 
 
-def _list(module):
-    cmd = [
-        'ceph-volume',
-        'lvm',
-        'list',
-        '--format=json',
-    ]
-
-    result = dict(
-        changed=False,
-        cmd=cmd,
-        stdout='',
-        stderr='',
-        rc='',
-        start='',
-        end='',
-        delta='',
-    )
-
-    if module.check_mode:
-        return result
-
-    startd = datetime.datetime.now()
-
-    rc, out, err = module.run_command(cmd, encoding=None)
-
-    endd = datetime.datetime.now()
-    delta = endd - startd
-
-    result = dict(
-        cmd=cmd,
-        stdout=out.rstrip(b"\r\n"),
-        stderr=err.rstrip(b"\r\n"),
-        rc=rc,
-        start=str(startd),
-        end=str(endd),
-        delta=str(delta),
-        changed=True,
-    )
-
-    if rc != 0:
-        module.fail_json(msg='non-zero return code', **result)
-
-    module.exit_json(**result)
-
+def batch(module, container_image):
+    '''
+    Batch prepare OSD devices
+    '''
 
-def batch(module):
+    # get module variables
     cluster = module.params['cluster']
     objectstore = module.params['objectstore']
-    batch_devices = module.params['batch_devices']
+    batch_devices = module.params.get('batch_devices', None)
     crush_device_class = module.params.get('crush_device_class', None)
-    dmcrypt = module.params['dmcrypt']
-    osds_per_device = module.params['osds_per_device']
-    journal_size = module.params['journal_size']
-    block_db_size = module.params['block_db_size']
-    report = module.params['report']
-    subcommand = 'batch'
+    journal_size = module.params.get('journal_size', None)
+    block_db_size = module.params.get('block_db_size', None)
+    dmcrypt = module.params.get('dmcrypt', None)
+    osds_per_device = module.params.get('osds_per_device', None)
 
-    if not batch_devices:
-        module.fail_json(
-            msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)  # noqa 4502
+    if not osds_per_device:
+        fatal('osds_per_device must be provided if action is "batch"', module)
 
-    if "CEPH_CONTAINER_IMAGE" in os.environ:
-        container_image = os.getenv("CEPH_CONTAINER_IMAGE")
-    else:
-        container_image = None
+    if osds_per_device < 1:
+        fatal('osds_per_device must be greater than 0 if action is "batch"', module)  # noqa E501
+
+    if not batch_devices:
+        fatal('batch_devices must be provided if action is "batch"', module)
 
-    cmd = ceph_volume_cmd(subcommand, container_image, cluster)
-    cmd.extend(["--%s" % objectstore])
-    cmd.extend("--yes")
-    cmd.extend("--no-systemd ")
+    # Build the CLI
+    action = 'batch'
+    cmd = build_ceph_volume_cmd(action, container_image, cluster)
+    cmd.extend(['--%s' % objectstore])
+    cmd.append('--yes')
 
     if crush_device_class:
-        cmd.extend(["--crush-device-class", crush_device_class])
+        cmd.extend(['--crush-device-class', crush_device_class])
 
     if dmcrypt:
-        cmd.append("--dmcrypt")
+        cmd.append('--dmcrypt')
 
     if osds_per_device > 1:
-        cmd.extend(["--osds-per-device", osds_per_device])
-
-    if objectstore == "filestore":
-        cmd.extend(["--journal-size", journal_size])
+        cmd.extend(['--osds-per-device', osds_per_device])
 
-    if objectstore == "bluestore" and block_db_size != "-1":
-        cmd.extend(["--block-db-size", block_db_size])
+    if objectstore == 'filestore':
+        cmd.extend(['--journal-size', journal_size])
 
-    report_flags = [
-        "--report",
-        "--format=json",
-    ]
+    if objectstore == 'bluestore' and block_db_size != '-1':
+        cmd.extend(['--block-db-size', block_db_size])
 
     cmd.extend(batch_devices)
 
-    result = dict(
-        changed=False,
-        cmd=cmd,
-        stdout='',
-        stderr='',
-        rc='',
-        start='',
-        end='',
-        delta='',
-    )
-
-    if module.check_mode:
-        return result
-
-    startd = datetime.datetime.now()
-
-    report_cmd = copy.copy(cmd)
-    report_cmd.extend(report_flags)
-
-    rc, out, err = module.run_command(report_cmd, encoding=None)
-    try:
-        report_result = json.loads(out)
-    except ValueError:
-        result = dict(
-            cmd=report_cmd,
-            stdout=out.rstrip(b"\r\n"),
-            stderr=err.rstrip(b"\r\n"),
-            rc=rc,
-            changed=True,
-        )
-        module.fail_json(msg='non-zero return code', **result)
-
-    if not report:
-        rc, out, err = module.run_command(cmd, encoding=None)
-    else:
-        cmd = report_cmd
-
-    endd = datetime.datetime.now()
-    delta = endd - startd
-
-    changed = True
-    if not report:
-        changed = report_result['changed']
-
-    result = dict(
-        cmd=cmd,
-        stdout=out.rstrip(b"\r\n"),
-        stderr=err.rstrip(b"\r\n"),
-        rc=rc,
-        start=str(startd),
-        end=str(endd),
-        delta=str(delta),
-        changed=changed,
-    )
-
-    if rc != 0:
-        module.fail_json(msg='non-zero return code', **result)
-
-    module.exit_json(**result)
+    return cmd
 
 
 def ceph_volume_cmd(subcommand, container_image, cluster=None):
+    '''
+    Build ceph-volume initial command
+    '''
 
     if container_image:
-        binary = "ceph-volume"
+        binary = 'ceph-volume'
         cmd = container_exec(
             binary, container_image)
     else:
-        binary = ["ceph-volume"]
+        binary = ['ceph-volume']
         cmd = binary
 
     if cluster:
-        cmd.extend(["--cluster", cluster])
+        cmd.extend(['--cluster', cluster])
 
     cmd.append('lvm')
     cmd.append(subcommand)
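Two small helpers above deserve a note: `is_containerized()` keys everything off the `CEPH_CONTAINER_IMAGE` environment variable that the playbook exports, and `fatal()` gives every code path a single failure routine that still works without an `AnsibleModule` handle. A quick sketch of both behaviours (the image name is only an example):

    import os
    import ceph_volume  # assumes library/ is on sys.path

    os.environ['CEPH_CONTAINER_IMAGE'] = 'docker.io/ceph/daemon:latest-luminous'
    assert ceph_volume.is_containerized() == 'docker.io/ceph/daemon:latest-luminous'

    del os.environ['CEPH_CONTAINER_IMAGE']
    assert ceph_volume.is_containerized() is None  # bare metal: plain ceph-volume

    try:
        ceph_volume.fatal('boom', None)  # no module handle: degrades to raising
    except Exception as e:
        assert str(e) == 'boom'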
@@ -370,19 +329,17 @@ def ceph_volume_cmd(subcommand, container_image, cluster=None):
     return cmd
 
 
-def activate_osd(module, container_image=None):
-    subcommand = "activate"
-    cmd = ceph_volume_cmd(subcommand)
-    cmd.append("--all")
-
-    return True
-
+def prepare_or_create_osd(module, action, container_image):
+    '''
+    Prepare or create OSD devices
+    '''
 
-def prepare_osd(module):
+    # get module variables
     cluster = module.params['cluster']
     objectstore = module.params['objectstore']
     data = module.params['data']
     data_vg = module.params.get('data_vg', None)
+    data = get_data(data, data_vg)
     journal = module.params.get('journal', None)
     journal_vg = module.params.get('journal_vg', None)
     db = module.params.get('db', None)
@@ -390,105 +347,79 @@ def prepare_osd(module):
     wal = module.params.get('wal', None)
     wal_vg = module.params.get('wal_vg', None)
     crush_device_class = module.params.get('crush_device_class', None)
-    dmcrypt = module.params['dmcrypt']
-    subcommand = "prepare"
+    dmcrypt = module.params.get('dmcrypt', None)
 
-    if "CEPH_CONTAINER_IMAGE" in os.environ:
-        container_image = os.getenv("CEPH_CONTAINER_IMAGE")
-    else:
-        container_image = None
-
-    cmd = ceph_volume_cmd(subcommand, container_image, cluster)
-    cmd.extend(["--%s" % objectstore])
-    cmd.append("--data")
-
-    data = get_data(data, data_vg)
+    # Build the CLI
+    cmd = build_ceph_volume_cmd(action, container_image, cluster)
+    cmd.extend(['--%s' % objectstore])
+    cmd.append('--data')
     cmd.append(data)
 
     if journal:
         journal = get_journal(journal, journal_vg)
-        cmd.extend(["--journal", journal])
+        cmd.extend(['--journal', journal])
 
     if db:
         db = get_db(db, db_vg)
-        cmd.extend(["--block.db", db])
+        cmd.extend(['--block.db', db])
 
     if wal:
         wal = get_wal(wal, wal_vg)
-        cmd.extend(["--block.wal", wal])
+        cmd.extend(['--block.wal', wal])
 
     if crush_device_class:
-        cmd.extend(["--crush-device-class", crush_device_class])
+        cmd.extend(['--crush-device-class', crush_device_class])
 
     if dmcrypt:
-        cmd.append("--dmcrypt")
+        cmd.append('--dmcrypt')
 
-    result = dict(
-        changed=False,
-        cmd=cmd,
-        stdout='',
-        stderr='',
-        rc='',
-        start='',
-        end='',
-        delta='',
-    )
+    return cmd
 
-    if module.check_mode:
-        return result
 
-    # check to see if osd already exists
-    # FIXME: this does not work when data is a raw device
-    # support for 'lvm list' and raw devices
-    # was added with https://github.com/ceph/ceph/pull/20620 but
-    # has not made it to a luminous release as of 12.2.4
-    ceph_volume_list_cmd_args = ["lvm", "list", data]
-    if container_image:
-        binary = "ceph-volume"
-        ceph_volume_list_cmd = container_exec(
-            binary, container_image) + ceph_volume_list_cmd_args
-    else:
-        binary = ["ceph-volume"]
-        ceph_volume_list_cmd = binary + ceph_volume_list_cmd_args
+def list_osd(module, container_image):
+    '''
+    List will detect whether or not a device has Ceph LVM Metadata
+    '''
 
-    rc, out, err = module.run_command(ceph_volume_list_cmd, encoding=None)
-    if rc == 0:
-        result["stdout"] = "skipped, since {0} is already used for an osd".format(  # noqa E501
-            data)
-        result['rc'] = 0
-        module.exit_json(**result)
+    # get module variables
+    cluster = module.params['cluster']
+    data = module.params.get('data', None)
+    data_vg = module.params.get('data_vg', None)
+    data = get_data(data, data_vg)
 
-    startd = datetime.datetime.now()
+    # Build the CLI
+    action = 'list'
+    cmd = build_ceph_volume_cmd(action, container_image, cluster)
+    if data:
+        cmd.append(data)
+    cmd.append('--format=json')
 
-    rc, out, err = module.run_command(cmd, encoding=None)
+    return cmd
 
-    endd = datetime.datetime.now()
-    delta = endd - startd
 
-    result = dict(
-        cmd=cmd,
-        stdout=out.rstrip(b"\r\n"),
-        stderr=err.rstrip(b"\r\n"),
-        rc=rc,
-        start=str(startd),
-        end=str(endd),
-        delta=str(delta),
-        changed=True,
-    )
+def activate_osd():
+    '''
+    Activate all the OSDs on a machine
+    '''
 
-    if rc != 0:
-        module.fail_json(msg='non-zero return code', **result)
+    # build the CLI
+    action = 'activate'
+    container_image = None
+    cmd = build_ceph_volume_cmd(action, container_image)
+    cmd.append('--all')
 
-    module.exit_json(**result)
+    return cmd
 
 
-def zap_devices(module):
-    """
+def zap_devices(module, container_image):
+    '''
     Will run 'ceph-volume lvm zap' on all devices, lvs and partitions
     used to create the OSD. The --destroy flag is always passed so that
     if an OSD was originally created with a raw device or partition for
     'data' then any lvs that were created by ceph-volume are removed.
-    """
+    '''
+
+    # get module variables
     data = module.params['data']
     data_vg = module.params.get('data_vg', None)
     journal = module.params.get('journal', None)
@@ -497,65 +428,27 @@ def zap_devices(module):
     db_vg = module.params.get('db_vg', None)
     wal = module.params.get('wal', None)
     wal_vg = module.params.get('wal_vg', None)
-
-    base_zap_cmd = [
-        'ceph-volume',
-        'lvm',
-        'zap',
-        # for simplicity always --destroy. It will be needed
-        # for raw devices and will noop for lvs.
-        '--destroy',
-    ]
-
-    commands = []
-
     data = get_data(data, data_vg)
 
-    commands.append(base_zap_cmd + [data])
+    # build the CLI
+    action = 'zap'
+    cmd = build_ceph_volume_cmd(action, container_image)
+    cmd.append('--destroy')
+    cmd.append(data)
 
     if journal:
         journal = get_journal(journal, journal_vg)
-        commands.append(base_zap_cmd + [journal])
+        cmd.extend([journal])
 
     if db:
         db = get_db(db, db_vg)
-        commands.append(base_zap_cmd + [db])
+        cmd.extend([db])
 
     if wal:
         wal = get_wal(wal, wal_vg)
-        commands.append(base_zap_cmd + [wal])
-
-    result = dict(
-        changed=True,
-        rc=0,
-    )
-    command_results = []
-    for cmd in commands:
-        startd = datetime.datetime.now()
-
-        rc, out, err = module.run_command(cmd, encoding=None)
-
-        endd = datetime.datetime.now()
-        delta = endd - startd
-
-        cmd_result = dict(
-            cmd=cmd,
-            stdout_lines=out.split("\n"),
-            stderr_lines=err.split("\n"),
-            rc=rc,
-            start=str(startd),
-            end=str(endd),
-            delta=str(delta),
-        )
-
-        if rc != 0:
-            module.fail_json(msg='non-zero return code', **cmd_result)
-
-        command_results.append(cmd_result)
+        cmd.extend([wal])
 
-    result["commands"] = command_results
-
-    module.exit_json(**result)
+    return cmd
 
 
 def run_module():
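One behavioural note on the zap path just above: `zap_devices()` now returns a single `ceph-volume lvm zap --destroy` invocation carrying every device, where the old code looped and ran a separate command per device. A sketch of the argv for an OSD that also has a journal (names borrowed from the module's EXAMPLES, so purely illustrative):

    from ansible.compat.tests.mock import MagicMock
    import ceph_volume  # assumes library/ is on sys.path

    fake_module = MagicMock()
    fake_module.params = {'data': 'data-lv', 'data_vg': 'data-vg',
                          'journal': '/dev/sdb1'}
    cmd = ceph_volume.zap_devices(fake_module, None)
    assert cmd == ['ceph-volume', 'lvm', 'zap', '--destroy',
                   'data-vg/data-lv', '/dev/sdb1']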
@@ -577,8 +470,8 @@ def run_module():
         dmcrypt=dict(type='bool', required=False, default=False),
         batch_devices=dict(type='list', required=False, default=[]),
         osds_per_device=dict(type='int', required=False, default=1),
-        journal_size=dict(type='str', required=False, default="5120"),
-        block_db_size=dict(type='str', required=False, default="-1"),
+        journal_size=dict(type='str', required=False, default='5120'),
+        block_db_size=dict(type='str', required=False, default='-1'),
         report=dict(type='bool', required=False, default=False),
         containerized=dict(type='str', required=False, default=False),
     )
@@ -588,24 +481,140 @@ def run_module():
         supports_check_mode=True
     )
 
+    result = dict(
+        changed=False,
+        stdout='',
+        stderr='',
+        rc='',
+        start='',
+        end='',
+        delta='',
+    )
+
+    if module.check_mode:
+        return result
+
+    # start execution
+    startd = datetime.datetime.now()
+
+    # get the desired action
     action = module.params['action']
 
-    if action == "create":
-        prepare_osd(module)
-        activate_osd(module)
-    elif action == "prepare":
-        prepare_osd(module)
-    elif action == "activate":
-        activate_osd(module)
-    elif action == "zap":
-        zap_devices(module)
-    elif action == "batch":
-        batch(module)
-    elif action == "list":
-        _list(module)
-
-    module.fail_json(
-        msg='State must either be "present" or "absent".', changed=False, rc=1)
+    # will return either the image name or None
+    container_image = is_containerized()
+
+    # Assume the task's status will be 'changed'
+    changed = True
+
+    if action == 'create' or action == 'prepare':
+        # First test if the device has Ceph LVM Metadata
+        rc, cmd, out, err = exec_command(
+            module, list_osd(module, container_image))
+
+        # list_osd returns a dict, if the dict is empty this means
+        # we can not check the return code since it's not consistent
+        # with the plain output
+        # see: http://tracker.ceph.com/issues/36329
+        # FIXME: it's probably less confusing to check for rc
+
+        # convert out to json, ansible return a string...
+        out_dict = json.loads(out)
+        if out_dict:
+            data = module.params['data']
+            result['stdout'] = 'skipped, since {0} is already used for an osd'.format(  # noqa E501
+                data)
+            result['rc'] = 0
+            module.exit_json(**result)
+
+        # Prepare or create the OSD
+        rc, cmd, out, err = exec_command(
+            module, prepare_or_create_osd(module, action, container_image))
+
+    elif action == 'activate':
+        if container_image:
+            fatal(
+                "This is not how container's activation happens, nothing to activate", module)  # noqa E501
+
+        # Activate the OSD
+        rc, cmd, out, err = exec_command(
+            module, activate_osd())
+
+    elif action == 'zap':
+        # Zap the OSD
+        rc, cmd, out, err = exec_command(
+            module, zap_devices(module, container_image))
+
+    elif action == 'list':
+        # List Ceph LVM Metadata on a device
+        rc, cmd, out, err = exec_command(
+            module, list_osd(module, container_image))
+
+    elif action == 'batch':
+        # Batch prepare AND activate OSDs
+        if container_image:
+            fatal(
+                'Batch operation is currently not supported on containerized deployment (https://tracker.ceph.com/issues/36363)', module)  # noqa E501
+
+        report = module.params.get('report', None)
+
+        # Add --report flag for the idempotency test
+        report_flags = [
+            '--report',
+            '--format=json',
+        ]
+
+        cmd = batch(module, container_image)
+        batch_report_cmd = copy.copy(cmd)
+        batch_report_cmd.extend(report_flags)
+
+        # Run batch --report to see what's going to happen
+        # Do not run the batch command if there is nothing to do
+        rc, cmd, out, err = exec_command(
+            module, batch_report_cmd)
+        try:
+            report_result = json.loads(out)
+        except ValueError:
+            result = dict(
+                cmd=cmd,
+                stdout=out.rstrip(b"\r\n"),
+                stderr=err.rstrip(b"\r\n"),
+                rc=rc,
+                changed=changed,
+            )
+            module.fail_json(msg='non-zero return code', **result)
+
+        if not report:
+            # if not asking for a report, let's just run the batch command
+            changed = report_result['changed']
+            if changed:
+                # Batch prepare the OSD
+                rc, cmd, out, err = exec_command(
+                    module, batch(module, container_image))
+        else:
+            cmd = batch_report_cmd
+
+    else:
+        module.fail_json(
+            msg='State must either be "create" or "prepare" or "activate" or "list" or "zap" or "batch".', changed=False, rc=1)  # noqa E501
+
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        rc=rc,
+        stdout=out.rstrip(b'\r\n'),
+        stderr=err.rstrip(b'\r\n'),
+        changed=changed,
+    )
+
+    if rc != 0:
+        module.fail_json(msg='non-zero return code', **result)
+
+    module.exit_json(**result)
 
 
 def main():
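For `batch`, `run_module()` now performs the idempotency check itself: it copies the builder's argv, appends `--report --format=json`, and only executes the real command when the JSON report says something would change. A condensed sketch of that flow (assuming `module` is a live AnsibleModule and `container_image` came from `is_containerized()`):

    import copy
    import json

    cmd = ceph_volume.batch(module, container_image)
    report_cmd = copy.copy(cmd)
    report_cmd.extend(['--report', '--format=json'])

    rc, out, err = module.run_command(report_cmd)   # dry run
    if json.loads(out)['changed']:
        rc, out, err = module.run_command(cmd)      # real run, only when needed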
diff --git a/library/test_ceph_volume.py b/library/test_ceph_volume.py
index 46feff0d5d644c2b9a2cac08fcd2c7c9a339952d..2f090a6ac30f912aac60bca265ac2f8e07e168e6 100644 (file)
--- a/library/test_ceph_volume.py
+++ b/library/test_ceph_volume.py
@@ -1,4 +1,5 @@
 from . import ceph_volume
+from ansible.compat.tests.mock import MagicMock
 
 
 class TestCephVolumeModule(object):
@@ -34,3 +35,236 @@ class TestCephVolumeModule(object):
     def test_wal_with_vg(self):
         result = ceph_volume.get_wal("wal-lv", "wal-vg")
         assert result == "wal-vg/wal-lv"
+
+    def test_container_exec(self):
+        fake_binary = "ceph-volume"
+        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host',  # noqa E501
+                                 '-v', '/run/lock/lvm:/run/lock/lvm:z',
+                                 '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',  # noqa E501
+                                 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',  # noqa E501
+                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                                 '--entrypoint=ceph-volume',
+                                 'docker.io/ceph/daemon:latest-luminous']
+        result = ceph_volume.container_exec(fake_binary, fake_container_image)
+        assert result == expected_command_list
+
+    def test_zap_osd_container(self):
+        fake_module = MagicMock()
+        fake_module.params = {'data': '/dev/sda'}
+        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host',  # noqa E501
+                                 '-v', '/run/lock/lvm:/run/lock/lvm:z',
+                                 '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',  # noqa E501
+                                 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',  # noqa E501
+                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                                 '--entrypoint=ceph-volume',
+                                 'docker.io/ceph/daemon:latest-luminous',
+                                 'lvm',
+                                 'zap',
+                                 '--destroy',
+                                 '/dev/sda']
+        result = ceph_volume.zap_devices(fake_module, fake_container_image)
+        assert result == expected_command_list
+
+    def test_zap_osd(self):
+        fake_module = MagicMock()
+        fake_module.params = {'data': '/dev/sda'}
+        fake_container_image = None
+        expected_command_list = ['ceph-volume',
+                                 'lvm',
+                                 'zap',
+                                 '--destroy',
+                                 '/dev/sda']
+        result = ceph_volume.zap_devices(fake_module, fake_container_image)
+        assert result == expected_command_list
+
+    def test_activate_osd(self):
+        expected_command_list = ['ceph-volume',
+                                 'lvm',
+                                 'activate',
+                                 '--all']
+        result = ceph_volume.activate_osd()
+        assert result == expected_command_list
+
+    def test_list_osd(self):
+        fake_module = MagicMock()
+        fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
+        fake_container_image = None
+        expected_command_list = ['ceph-volume',
+                                 '--cluster',
+                                 'ceph',
+                                 'lvm',
+                                 'list',
+                                 '/dev/sda',
+                                 '--format=json',
+                                 ]
+        result = ceph_volume.list_osd(fake_module, fake_container_image)
+        assert result == expected_command_list
+
+    def test_list_osd_container(self):
+        fake_module = MagicMock()
+        fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
+        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host',  # noqa E501
+                                 '-v', '/run/lock/lvm:/run/lock/lvm:z',
+                                 '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',  # noqa E501
+                                 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',  # noqa E501
+                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                                 '--entrypoint=ceph-volume',
+                                 'docker.io/ceph/daemon:latest-luminous',
+                                 '--cluster',
+                                 'ceph',
+                                 'lvm',
+                                 'list',
+                                 '/dev/sda',
+                                 '--format=json',
+                                 ]
+        result = ceph_volume.list_osd(fake_module, fake_container_image)
+        assert result == expected_command_list
+
+    def test_create_osd_container(self):
+        fake_module = MagicMock()
+        fake_module.params = {'data': '/dev/sda',
+                              'objectstore': 'filestore',
+                              'cluster': 'ceph', }
+
+        fake_action = "create"
+        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host',  # noqa E501
+                                 '-v', '/run/lock/lvm:/run/lock/lvm:z',
+                                 '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',  # noqa E501
+                                 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',  # noqa E501
+                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                                 '--entrypoint=ceph-volume',
+                                 'docker.io/ceph/daemon:latest-luminous',
+                                 '--cluster',
+                                 'ceph',
+                                 'lvm',
+                                 'create',
+                                 '--filestore',
+                                 '--data',
+                                 '/dev/sda']
+        result = ceph_volume.prepare_or_create_osd(
+            fake_module, fake_action, fake_container_image)
+        assert result == expected_command_list
+
+    def test_create_osd(self):
+        fake_module = MagicMock()
+        fake_module.params = {'data': '/dev/sda',
+                              'objectstore': 'filestore',
+                              'cluster': 'ceph', }
+
+        fake_container_image = None
+        fake_action = "create"
+        expected_command_list = ['ceph-volume',
+                                 '--cluster',
+                                 'ceph',
+                                 'lvm',
+                                 'create',
+                                 '--filestore',
+                                 '--data',
+                                 '/dev/sda']
+        result = ceph_volume.prepare_or_create_osd(
+            fake_module, fake_action, fake_container_image)
+        assert result == expected_command_list
+
+    def test_prepare_osd_container(self):
+        fake_module = MagicMock()
+        fake_module.params = {'data': '/dev/sda',
+                              'objectstore': 'filestore',
+                              'cluster': 'ceph', }
+
+        fake_action = "prepare"
+        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host',  # noqa E501
+                                 '-v', '/run/lock/lvm:/run/lock/lvm:z',
+                                 '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',  # noqa E501
+                                 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',  # noqa E501
+                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                                 '--entrypoint=ceph-volume',
+                                 'docker.io/ceph/daemon:latest-luminous',
+                                 '--cluster',
+                                 'ceph',
+                                 'lvm',
+                                 'prepare',
+                                 '--filestore',
+                                 '--data',
+                                 '/dev/sda']
+        result = ceph_volume.prepare_or_create_osd(
+            fake_module, fake_action, fake_container_image)
+        assert result == expected_command_list
+
+    def test_prepare_osd(self):
+        fake_module = MagicMock()
+        fake_module.params = {'data': '/dev/sda',
+                              'objectstore': 'filestore',
+                              'cluster': 'ceph', }
+
+        fake_container_image = None
+        fake_action = "prepare"
+        expected_command_list = ['ceph-volume',
+                                 '--cluster',
+                                 'ceph',
+                                 'lvm',
+                                 'prepare',
+                                 '--filestore',
+                                 '--data',
+                                 '/dev/sda']
+        result = ceph_volume.prepare_or_create_osd(
+            fake_module, fake_action, fake_container_image)
+        assert result == expected_command_list
+
+    def test_batch_osd_container(self):
+        fake_module = MagicMock()
+        fake_module.params = {'data': '/dev/sda',
+                              'objectstore': 'filestore',
+                              'journal_size': '100',
+                              'cluster': 'ceph',
+                              'batch_devices': ["/dev/sda", "/dev/sdb"]}
+
+        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host',  # noqa E501
+                                 '-v', '/run/lock/lvm:/run/lock/lvm:z',
+                                 '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',  # noqa E501
+                                 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',  # noqa E501
+                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                                 '--entrypoint=ceph-volume',
+                                 'docker.io/ceph/daemon:latest-luminous',
+                                 '--cluster',
+                                 'ceph',
+                                 'lvm',
+                                 'batch',
+                                 '--filestore',
+                                 '--yes',
+                                 '--journal-size',
+                                 '100',
+                                 '/dev/sda',
+                                 '/dev/sdb']
+        result = ceph_volume.batch(
+            fake_module, fake_container_image)
+        assert result == expected_command_list
+
+    def test_batch_osd(self):
+        fake_module = MagicMock()
+        fake_module.params = {'data': '/dev/sda',
+                              'objectstore': 'filestore',
+                              'journal_size': '100',
+                              'cluster': 'ceph',
+                              'batch_devices': ["/dev/sda", "/dev/sdb"]}
+
+        fake_container_image = None
+        expected_command_list = ['ceph-volume',
+                                 '--cluster',
+                                 'ceph',
+                                 'lvm',
+                                 'batch',
+                                 '--filestore',
+                                 '--yes',
+                                 '--journal-size',
+                                 '100',
+                                 '/dev/sda',
+                                 '/dev/sdb']
+        result = ceph_volume.batch(
+            fake_module, fake_container_image)
+        assert result == expected_command_list
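Every new test follows the same pattern: a `MagicMock` stands in for the AnsibleModule (only its `params` dict is read), the builder is called directly, and the returned argv is compared verbatim. The pattern extends naturally to other combinations; for instance, a hypothetical extra case (not part of this commit) covering bluestore with a block.db lv:

    def test_prepare_bluestore_with_db(self):
        fake_module = MagicMock()
        fake_module.params = {'cluster': 'ceph',
                              'objectstore': 'bluestore',
                              'data': 'data-lv', 'data_vg': 'data-vg',
                              'db': 'journal1', 'db_vg': 'journals'}
        expected_command_list = ['ceph-volume',
                                 '--cluster', 'ceph',
                                 'lvm', 'prepare',
                                 '--bluestore',
                                 '--data', 'data-vg/data-lv',
                                 '--block.db', 'journals/journal1']
        result = ceph_volume.prepare_or_create_osd(fake_module, 'prepare', None)
        assert result == expected_command_list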
diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml
index bff1cf4cbcbbf4dabdd39f3726b6d2c8cb75ca29..3e759ce532edf2b2c9575f284222f1e8c4c93109 100644 (file)
--- a/roles/ceph-osd/tasks/scenarios/lvm.yml
+++ b/roles/ceph-osd/tasks/scenarios/lvm.yml
@@ -17,4 +17,5 @@
   environment:
     CEPH_VOLUME_DEBUG: 1
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
-  with_items: "{{ lvm_volumes }}"
\ No newline at end of file
+  with_items: "{{ lvm_volumes }}"
+  tags: prepare_osd
\ No newline at end of file
diff --git a/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all b/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all
index 1de1598e61f7d9202083b5dff8f35dd25a6369a8..6f4e785ccba90ba0fafeb822dd35a6fc4644c1e2 100644 (file)
--- a/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all
+++ b/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all
@@ -1,9 +1,12 @@
 ---
 
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
 ceph_origin: repository
 ceph_repository: community
 containerized_deployment: True
-cluster: test
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
 monitor_interface: eth1
@@ -23,3 +26,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
+ceph_osd_docker_run_script_path: /var/tmp
\ No newline at end of file
diff --git a/tests/functional/centos/7/bs-lvm-osds/group_vars/all b/tests/functional/centos/7/bs-lvm-osds/group_vars/all
index f30393671fe5ae8b8dc1b0720e473c1773f807eb..78e47df516e88ed257f0a0036d0ab0f125c1739d 100644 (file)
--- a/tests/functional/centos/7/bs-lvm-osds/group_vars/all
+++ b/tests/functional/centos/7/bs-lvm-osds/group_vars/all
@@ -2,7 +2,6 @@
 
 ceph_origin: repository
 ceph_repository: community
-cluster: test
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
 monitor_interface: eth1
diff --git a/tests/functional/centos/7/lvm-batch-container/group_vars/all b/tests/functional/centos/7/lvm-batch-container/group_vars/all
index 0555ffa38dcf7240bfdfd5e079abb8eea5732170..6ed0becf0c5f70f4984ea97a20cc505daa64f8be 100644 (file)
--- a/tests/functional/centos/7/lvm-batch-container/group_vars/all
+++ b/tests/functional/centos/7/lvm-batch-container/group_vars/all
@@ -1,5 +1,9 @@
 ---
 
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
 containerized_deployment: True
 ceph_origin: repository
 ceph_repository: community
@@ -14,10 +18,11 @@ crush_device_class: test
 osd_scenario: lvm
 copy_admin_key: true
 devices:
-  - /dev/sdb 
+  - /dev/sdb
   - /dev/sdc
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
+ceph_osd_docker_run_script_path: /var/tmp
\ No newline at end of file
diff --git a/tests/functional/centos/7/lvm-osds-container/group_vars/all b/tests/functional/centos/7/lvm-osds-container/group_vars/all
index 0a29dff680f755afe0da56d8328663d254525f97..9b826bd7f40bbc92e842bfca15767881d844f78d 100644 (file)
--- a/tests/functional/centos/7/lvm-osds-container/group_vars/all
+++ b/tests/functional/centos/7/lvm-osds-container/group_vars/all
@@ -1,5 +1,9 @@
 ---
 
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
 ceph_origin: repository
 ceph_repository: community
 cluster: ceph
@@ -26,3 +30,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
+ceph_osd_docker_run_script_path: /var/tmp
\ No newline at end of file
diff --git a/tox.ini b/tox.ini
index ae1baf68b209680a6d4bf4a2eaa9c6020ff30ccd..0d1f2ddb90cc31f070f5dcf83bb35377bfd1b36b 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -204,6 +204,9 @@ setenv=
   lvm_osds_container: PLAYBOOK = site-docker.yml.sample
   bluestore_lvm_osds: CEPH_STABLE_RELEASE = luminous
   bluestore_lvm_osds_container: CEPH_STABLE_RELEASE = luminous
+  bluestore_lvm_osds_container: PLAYBOOK = site-docker.yml.sample
+  lvm_batch_container: PLAYBOOK = site-docker.yml.sample
+  lvm_batch_container: CEPH_STABLE_RELEASE = luminous
   update_cluster: ROLLING_UPDATE = True
   update_docker_cluster: ROLLING_UPDATE = True
 deps= -r{toxinidir}/tests/requirements.txt
@@ -253,6 +256,9 @@ commands=
   lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
   lvm_osds_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
   bluestore_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  bluestore_lvm_osds_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  lvm_batch: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  lvm_batch_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
   purge_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
 
   rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"