[defaults]
ansible_managed = Please do not change this file directly since it is managed by Ansible and will be overwritten
library = ./library
+module_utils = ./module_utils
action_plugins = plugins/actions
callback_plugins = plugins/callback
filter_plugins = plugins/filter
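For context: the new module_utils entry points Ansible at ./module_utils, where the shared ca_common helpers now live, so the custom modules can import them under the ansible.module_utils namespace at runtime while still importing them plainly when loaded outside Ansible (for example under pytest). A minimal sketch of that dual-import pattern, assuming ca_common.py sits in ./module_utils, is the try/except fallback each module below adopts:

try:
    # Resolved by Ansible, which bundles the referenced module_utils with the module payload
    from ansible.module_utils.ca_common import exec_command, exit_module
except ImportError:
    # Resolved directly from ./module_utils when the module is imported as-is (e.g. unit tests)
    from module_utils.ca_common import exec_command, exit_module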
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import generate_ceph_cmd, \
+ is_containerized, \
+ exec_command, \
+ exit_module
+except ImportError:
+ from module_utils.ca_common import generate_ceph_cmd, is_containerized, exec_command, exit_module
+
import datetime
import json
-import os
ANSIBLE_METADATA = {
RETURN = '''# '''
-def container_exec(binary, container_image):
- '''
- Build the docker CLI to run a command inside a container
- '''
-
- container_binary = os.getenv('CEPH_CONTAINER_BINARY')
- command_exec = [container_binary,
- 'run',
- '--rm',
- '--net=host',
- '-v', '/etc/ceph:/etc/ceph:z',
- '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
- '-v', '/var/log/ceph/:/var/log/ceph/:z',
- '--entrypoint=' + binary, container_image]
- return command_exec
-
-
-def is_containerized():
- '''
- Check if we are running on a containerized cluster
- '''
-
- if 'CEPH_CONTAINER_IMAGE' in os.environ:
- container_image = os.getenv('CEPH_CONTAINER_IMAGE')
- else:
- container_image = None
-
- return container_image
-
-
-def pre_generate_ceph_cmd(container_image=None):
- '''
- Generate ceph prefix comaand
- '''
- if container_image:
- cmd = container_exec('ceph', container_image)
- else:
- cmd = ['ceph']
-
- return cmd
-
-
-def generate_ceph_cmd(cluster, args, container_image=None):
- '''
- Generate 'ceph' command line to execute
- '''
-
- cmd = pre_generate_ceph_cmd(container_image=container_image)
-
- base_cmd = [
- '--cluster',
- cluster,
- 'dashboard'
- ]
-
- cmd.extend(base_cmd + args)
-
- return cmd
-
-
-def exec_commands(module, cmd):
- '''
- Execute command(s)
- '''
-
- rc, out, err = module.run_command(cmd)
-
- return rc, cmd, out, err
-
-
def create_user(module, container_image=None):
'''
Create a new user
args = ['ac-user-create', name, password]
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image)
return cmd
args.extend(roles)
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image)
return cmd
args = ['ac-user-set-password', name, password]
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image)
return cmd
args = ['ac-user-show', name, '--format=json']
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image)
return cmd
args = ['ac-user-delete', name]
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image)
return cmd
-def exit_module(module, out, rc, cmd, err, startd, changed=False):
- endd = datetime.datetime.now()
- delta = endd - startd
-
- result = dict(
- cmd=cmd,
- start=str(startd),
- end=str(endd),
- delta=str(delta),
- rc=rc,
- stdout=out.rstrip("\r\n"),
- stderr=err.rstrip("\r\n"),
- changed=changed,
- )
- module.exit_json(**result)
-
-
def run_module():
module_args = dict(
cluster=dict(type='str', required=False, default='ceph'),
container_image = is_containerized()
if state == "present":
- rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image))
if rc == 0:
user = json.loads(out)
user['roles'].sort()
roles.sort()
if user['roles'] != roles:
- rc, cmd, out, err = exec_commands(module, set_roles(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image))
changed = True
- rc, cmd, out, err = exec_commands(module, set_password(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, set_password(module, container_image=container_image))
else:
- rc, cmd, out, err = exec_commands(module, create_user(module, container_image=container_image))
- rc, cmd, out, err = exec_commands(module, set_roles(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, create_user(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image))
changed = True
elif state == "absent":
- rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image))
if rc == 0:
- rc, cmd, out, err = exec_commands(module, remove_user(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, remove_user(module, container_image=container_image))
changed = True
else:
rc = 0
out = "Dashboard User {} doesn't exist".format(name)
elif state == "info":
- rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image))
exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import is_containerized, \
+ exec_command, \
+ generate_ceph_cmd, \
+ exit_module
+except ImportError:
+ from module_utils.ca_common import is_containerized, \
+ exec_command, \
+ generate_ceph_cmd, \
+ exit_module
+
import datetime
import json
-import os
ANSIBLE_METADATA = {
RETURN = '''# '''
-def container_exec(binary, container_image):
- '''
- Build the docker CLI to run a command inside a container
- '''
-
- container_binary = os.getenv('CEPH_CONTAINER_BINARY')
- command_exec = [container_binary,
- 'run',
- '--rm',
- '--net=host',
- '-v', '/etc/ceph:/etc/ceph:z',
- '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
- '-v', '/var/log/ceph/:/var/log/ceph/:z',
- '--entrypoint=' + binary, container_image]
- return command_exec
-
-
-def is_containerized():
- '''
- Check if we are running on a containerized cluster
- '''
-
- if 'CEPH_CONTAINER_IMAGE' in os.environ:
- container_image = os.getenv('CEPH_CONTAINER_IMAGE')
- else:
- container_image = None
-
- return container_image
-
-
-def pre_generate_ceph_cmd(container_image=None):
- '''
- Generate ceph prefix comaand
- '''
- if container_image:
- cmd = container_exec('ceph', container_image)
- else:
- cmd = ['ceph']
-
- return cmd
-
-
-def generate_ceph_cmd(cluster, args, container_image=None):
- '''
- Generate 'ceph' command line to execute
- '''
-
- cmd = pre_generate_ceph_cmd(container_image=container_image)
-
- base_cmd = [
- '--cluster',
- cluster,
- 'fs'
- ]
-
- cmd.extend(base_cmd + args)
-
- return cmd
-
-
-def exec_commands(module, cmd):
- '''
- Execute command(s)
- '''
-
- rc, out, err = module.run_command(cmd)
-
- return rc, cmd, out, err
-
-
def create_fs(module, container_image=None):
'''
Create a new fs
args = ['new', name, metadata, data]
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image)
return cmd
args = ['get', name, '--format=json']
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image)
return cmd
args = ['rm', name, '--yes-i-really-mean-it']
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image)
return cmd
args = ['fail', name]
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image)
return cmd
args = ['set', name, 'max_mds', str(max_mds)]
- cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image)
+ cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image)
return cmd
-def exit_module(module, out, rc, cmd, err, startd, changed=False):
- endd = datetime.datetime.now()
- delta = endd - startd
-
- result = dict(
- cmd=cmd,
- start=str(startd),
- end=str(endd),
- delta=str(delta),
- rc=rc,
- stdout=out.rstrip("\r\n"),
- stderr=err.rstrip("\r\n"),
- changed=changed,
- )
- module.exit_json(**result)
-
-
def run_module():
module_args = dict(
cluster=dict(type='str', required=False, default='ceph'),
container_image = is_containerized()
if state == "present":
- rc, cmd, out, err = exec_commands(module, get_fs(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image))
if rc == 0:
fs = json.loads(out)
if max_mds and fs["mdsmap"]["max_mds"] != max_mds:
- rc, cmd, out, err = exec_commands(module, set_fs(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, set_fs(module, container_image=container_image))
if rc == 0:
changed = True
else:
- rc, cmd, out, err = exec_commands(module, create_fs(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, create_fs(module, container_image=container_image))
if max_mds and max_mds > 1:
- exec_commands(module, set_fs(module, container_image=container_image))
+ exec_command(module, set_fs(module, container_image=container_image))
if rc == 0:
changed = True
elif state == "absent":
- rc, cmd, out, err = exec_commands(module, get_fs(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image))
if rc == 0:
- exec_commands(module, fail_fs(module, container_image=container_image))
- rc, cmd, out, err = exec_commands(module, remove_fs(module, container_image=container_image))
+ exec_command(module, fail_fs(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, remove_fs(module, container_image=container_image))
if rc == 0:
changed = True
else:
out = "Ceph File System {} doesn't exist".format(name)
elif state == "info":
- rc, cmd, out, err = exec_commands(module, get_fs(module, container_image=container_image))
+ rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image))
exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import is_containerized, container_exec
+except ImportError:
+ from module_utils.ca_common import is_containerized, container_exec
import datetime
import json
import os
raise(Exception(message))
-def container_exec(binary, container_image):
- '''
- Build the docker CLI to run a command inside a container
- '''
-
- container_binary = os.getenv('CEPH_CONTAINER_BINARY')
- command_exec = [container_binary,
- 'run',
- '--rm',
- '--net=host',
- '-v', '/etc/ceph:/etc/ceph:z',
- '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
- '-v', '/var/log/ceph/:/var/log/ceph/:z',
- '--entrypoint=' + binary, container_image]
- return command_exec
-
-
-def is_containerized():
- '''
- Check if we are running on a containerized cluster
- '''
-
- if 'CEPH_CONTAINER_IMAGE' in os.environ:
- container_image = os.getenv('CEPH_CONTAINER_IMAGE')
- else:
- container_image = None
-
- return container_image
-
-
def generate_secret():
'''
Generate a CephX secret
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import generate_ceph_cmd, \
+ pre_generate_ceph_cmd, \
+ is_containerized, \
+ exec_command, \
+ exit_module
+except ImportError:
+ from module_utils.ca_common import generate_ceph_cmd, \
+ pre_generate_ceph_cmd, \
+ is_containerized, \
+ exec_command, \
+ exit_module
+
+
import datetime
import json
import os
RETURN = '''# '''
-def container_exec(binary, container_image):
- '''
- Build the docker CLI to run a command inside a container
- '''
-
- container_binary = os.getenv('CEPH_CONTAINER_BINARY')
- command_exec = [container_binary,
- 'run',
- '--rm',
- '--net=host',
- '-v', '/etc/ceph:/etc/ceph:z',
- '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
- '-v', '/var/log/ceph/:/var/log/ceph/:z',
- '--entrypoint=' + binary, container_image]
- return command_exec
-
-
-def is_containerized():
- '''
- Check if we are running on a containerized cluster
- '''
-
- if 'CEPH_CONTAINER_IMAGE' in os.environ:
- container_image = os.getenv('CEPH_CONTAINER_IMAGE')
- else:
- container_image = None
-
- return container_image
-
-
-def pre_generate_ceph_cmd(container_image=None):
- if container_image:
- binary = 'ceph'
- cmd = container_exec(
- binary, container_image)
- else:
- binary = ['ceph']
- cmd = binary
-
- return cmd
-
-
-def generate_ceph_cmd(cluster, args, user, user_key, container_image=None):
- '''
- Generate 'ceph' command line to execute
- '''
-
- cmd = pre_generate_ceph_cmd(container_image=container_image)
-
- base_cmd = [
- '-n',
- user,
- '-k',
- user_key,
- '--cluster',
- cluster,
- 'osd',
- 'pool'
- ]
-
- cmd.extend(base_cmd + args)
-
- return cmd
-
-
-def exec_commands(module, cmd):
- '''
- Execute command(s)
- '''
-
- rc, out, err = module.run_command(cmd)
-
- return rc, cmd, out, err
-
-
def check_pool_exist(cluster,
name,
user,
args = ['stats', name, '-f', output_format]
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
args = ['application', 'get', name, '-f', output_format]
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
args = ['application', 'enable', name, application]
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
application, '--yes-i-really-mean-it']
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
args = ['ls', 'detail', '-f', output_format]
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
container_image=container_image)
- rc, cmd, out, err = exec_commands(module, cmd)
+ rc, cmd, out, err = exec_command(module, cmd)
if rc == 0:
out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0]
- _rc, _cmd, application_pool, _err = exec_commands(module,
- get_application_pool(cluster, # noqa: E501
- name, # noqa: E501
- user, # noqa: E501
- user_key, # noqa: E501
- container_image=container_image)) # noqa: E501
+ _rc, _cmd, application_pool, _err = exec_command(module,
+ get_application_pool(cluster, # noqa: E501
+ name, # noqa: E501
+ user, # noqa: E501
+ user_key, # noqa: E501
+ container_image=container_image)) # noqa: E501
# This is a trick because "target_size_ratio" isn't present at the same level in the dict
# ie:
args.extend(['-f', output_format])
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
user_pool_config['pg_autoscale_mode']['value']])
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
args = ['rm', name, name, '--yes-i-really-really-mean-it']
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
delta[key]['value']]
cmd = generate_ceph_cmd(cluster=cluster,
+ sub_cmd=['osd', 'pool'],
args=args,
user=user,
user_key=user_key,
container_image=container_image)
- rc, cmd, out, err = exec_commands(module, cmd)
+ rc, cmd, out, err = exec_command(module, cmd)
if rc != 0:
return rc, cmd, out, err
else:
- rc, cmd, out, err = exec_commands(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa: E501
if rc != 0:
return rc, cmd, out, err
- rc, cmd, out, err = exec_commands(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa: E501
if rc != 0:
return rc, cmd, out, err
return rc, cmd, out, err
-def exit_module(module, out, rc, cmd, err, startd, changed=False):
- endd = datetime.datetime.now()
- delta = endd - startd
-
- result = dict(
- cmd=cmd,
- start=str(startd),
- end=str(endd),
- delta=str(delta),
- rc=rc,
- stdout=out.rstrip("\r\n"),
- stderr=err.rstrip("\r\n"),
- changed=changed,
- )
- module.exit_json(**result)
-
-
def run_module():
module_args = dict(
cluster=dict(type='str', required=False, default='ceph'),
user_key = os.path.join("/etc/ceph/", keyring_filename)
if state == "present":
- rc, cmd, out, err = exec_commands(module,
- check_pool_exist(cluster,
- name,
- user,
- user_key,
- container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(module,
+ check_pool_exist(cluster,
+ name,
+ user,
+ user_key,
+ container_image=container_image)) # noqa: E501
if rc == 0:
running_pool_details = get_pool_details(module,
cluster,
else:
out = "Pool {} already exists and there is nothing to update.".format(name) # noqa: E501
else:
- rc, cmd, out, err = exec_commands(module,
- create_pool(cluster,
- name,
- user,
- user_key,
- user_pool_config=user_pool_config, # noqa: E501
- container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(module,
+ create_pool(cluster,
+ name,
+ user,
+ user_key,
+ user_pool_config=user_pool_config, # noqa: E501
+ container_image=container_image)) # noqa: E501
if user_pool_config['application']['value']:
- rc, _, _, _ = exec_commands(module,
- enable_application_pool(cluster,
- name,
- user_pool_config['application']['value'], # noqa: E501
- user,
- user_key,
- container_image=container_image)) # noqa: E501
+ rc, _, _, _ = exec_command(module,
+ enable_application_pool(cluster,
+ name,
+ user_pool_config['application']['value'], # noqa: E501
+ user,
+ user_key,
+ container_image=container_image)) # noqa: E501
if user_pool_config['min_size']['value']:
# not implemented yet
pass
changed = True
elif state == "list":
- rc, cmd, out, err = exec_commands(module,
- list_pools(cluster,
- name, user,
- user_key,
- details,
- container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(module,
+ list_pools(cluster,
+ name, user,
+ user_key,
+ details,
+ container_image=container_image)) # noqa: E501
if rc != 0:
out = "Couldn't list pool(s) present on the cluster"
elif state == "absent":
- rc, cmd, out, err = exec_commands(module,
- check_pool_exist(cluster,
- name, user,
- user_key,
- container_image=container_image)) # noqa: E501
- if rc == 0:
- rc, cmd, out, err = exec_commands(module,
- remove_pool(cluster,
- name,
- user,
+ rc, cmd, out, err = exec_command(module,
+ check_pool_exist(cluster,
+ name, user,
user_key,
container_image=container_image)) # noqa: E501
+ if rc == 0:
+ rc, cmd, out, err = exec_command(module,
+ remove_pool(cluster,
+ name,
+ user,
+ user_key,
+ container_image=container_image)) # noqa: E501
changed = True
else:
rc = 0
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exec_command, is_containerized
+except ImportError:
+ from module_utils.ca_common import exec_command, is_containerized
import datetime
import copy
import json
return cmd
-def exec_command(module, cmd):
- '''
- Execute command
- '''
-
- rc, out, err = module.run_command(cmd)
- return rc, cmd, out, err
-
-
-def is_containerized():
- '''
- Check if we are running on a containerized cluster
- '''
-
- if 'CEPH_CONTAINER_IMAGE' in os.environ:
- container_image = os.getenv('CEPH_CONTAINER_IMAGE')
- else:
- container_image = None
-
- return container_image
-
-
def get_data(data, data_vg):
if data_vg:
data = '{0}/{1}'.format(data_vg, data)
--- /dev/null
+import os
+import datetime
+
+
+def generate_ceph_cmd(cluster, sub_cmd, args, user='client.admin', user_key='/etc/ceph/ceph.client.admin.keyring', container_image=None):
+ '''
+ Generate 'ceph' command line to execute
+ '''
+
+ cmd = pre_generate_ceph_cmd(container_image=container_image)
+
+ base_cmd = [
+ '-n',
+ user,
+ '-k',
+ user_key,
+ '--cluster',
+ cluster
+ ]
+ base_cmd.extend(sub_cmd)
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+def container_exec(binary, container_image):
+ '''
+ Build the docker CLI to run a command inside a container
+ '''
+
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ command_exec = [container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + binary, container_image]
+ return command_exec
+
+
+def is_containerized():
+ '''
+ Check if we are running on a containerized cluster
+ '''
+
+ if 'CEPH_CONTAINER_IMAGE' in os.environ:
+ container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ else:
+ container_image = None
+
+ return container_image
+
+
+def pre_generate_ceph_cmd(container_image=None):
+ '''
+ Generate the ceph command prefix
+ '''
+ if container_image:
+ cmd = container_exec('ceph', container_image)
+ else:
+ cmd = ['ceph']
+
+ return cmd
+
+
+def exec_command(module, cmd):
+ '''
+ Execute command(s)
+ '''
+
+ rc, out, err = module.run_command(cmd)
+
+ return rc, cmd, out, err
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False):
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ changed=changed,
+ )
+ module.exit_json(**result)
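
A minimal usage sketch (illustrative, not part of the patch) of the shared helper on a non-containerized cluster; the dashboard sub-command and arguments mirror the module above, and the user name 'admin' is only an example:

from module_utils.ca_common import generate_ceph_cmd

cmd = generate_ceph_cmd(cluster='ceph',
                        sub_cmd=['dashboard'],
                        args=['ac-user-show', 'admin', '--format=json'])
# With container_image=None this builds:
# ['ceph', '-n', 'client.admin', '-k', '/etc/ceph/ceph.client.admin.keyring',
#  '--cluster', 'ceph', 'dashboard', 'ac-user-show', 'admin', '--format=json']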
--- /dev/null
+from mock.mock import patch
+import os
+import ca_common
+import pytest
+
+fake_binary = 'ceph'
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'docker.io/ceph/daemon:latest'
+fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + fake_binary,
+ fake_container_image
+]
+
+
+class TestCommon(object):
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_container_exec(self):
+ cmd = ca_common.container_exec(fake_binary, fake_container_image)
+ assert cmd == fake_container_cmd
+
+ def test_not_is_containerized(self):
+ assert ca_common.is_containerized() is None
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ def test_is_containerized(self):
+ assert ca_common.is_containerized() == fake_container_image
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_pre_generate_ceph_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ assert ca_common.pre_generate_ceph_cmd(image) == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_ceph_cmd(self, image):
+ sub_cmd = ['osd', 'pool']
+ args = ['create', 'foo']
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ expected_cmd.extend([
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ fake_cluster,
+ 'osd', 'pool',
+ 'create', 'foo'
+ ])
+ assert ca_common.generate_ceph_cmd(fake_cluster, sub_cmd, args, container_image=image) == expected_cmd
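
Because this test imports ca_common directly, ./module_utils has to be importable when pytest runs; one possible way (an assumption, the patch does not show how the path is wired up) is a small conftest.py next to the test:

# conftest.py (hypothetical): put ./module_utils on the import path for these tests
import os
import sys

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'module_utils'))  # adjust to the repo layout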
-import os
-import sys
-from mock.mock import patch, MagicMock
-import pytest
-sys.path.append('./library')
-import ceph_dashboard_user # noqa: E402
+from mock.mock import MagicMock
+import ceph_dashboard_user
fake_binary = 'ceph'
fake_cluster = 'ceph'
class TestCephDashboardUserModule(object):
- @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
- def test_container_exec(self):
- cmd = ceph_dashboard_user.container_exec(fake_binary, fake_container_image)
- assert cmd == fake_container_cmd
-
- def test_not_is_containerized(self):
- assert ceph_dashboard_user.is_containerized() is None
-
- @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
- def test_is_containerized(self):
- assert ceph_dashboard_user.is_containerized() == fake_container_image
-
- @pytest.mark.parametrize('image', [None, fake_container_image])
- @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
- def test_pre_generate_ceph_cmd(self, image):
- if image:
- expected_cmd = fake_container_cmd
- else:
- expected_cmd = [fake_binary]
-
- assert ceph_dashboard_user.pre_generate_ceph_cmd(image) == expected_cmd
-
- @pytest.mark.parametrize('image', [None, fake_container_image])
- @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
- def test_generate_ceph_cmd(self, image):
- if image:
- expected_cmd = fake_container_cmd
- else:
- expected_cmd = [fake_binary]
-
- expected_cmd.extend([
- '--cluster',
- fake_cluster,
- 'dashboard'
- ])
- assert ceph_dashboard_user.generate_ceph_cmd(fake_cluster, [], image) == expected_cmd
-
def test_create_user(self):
fake_module = MagicMock()
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'dashboard', 'ac-user-create',
fake_user,
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'dashboard', 'ac-user-set-roles',
fake_user
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'dashboard', 'ac-user-set-password',
fake_user,
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'dashboard', 'ac-user-show',
fake_user,
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'dashboard', 'ac-user-delete',
fake_user
-import os
-import sys
-from mock.mock import patch, MagicMock
-import pytest
-sys.path.append('./library')
-import ceph_fs # noqa : E402
+from mock.mock import MagicMock
+import ceph_fs
fake_binary = 'ceph'
class TestCephFsModule(object):
- @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
- def test_container_exec(self):
- cmd = ceph_fs.container_exec(fake_binary, fake_container_image)
- assert cmd == fake_container_cmd
-
- def test_not_is_containerized(self):
- assert ceph_fs.is_containerized() is None
-
- @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
- def test_is_containerized(self):
- assert ceph_fs.is_containerized() == fake_container_image
-
- @pytest.mark.parametrize('image', [None, fake_container_image])
- @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
- def test_pre_generate_ceph_cmd(self, image):
- if image:
- expected_cmd = fake_container_cmd
- else:
- expected_cmd = [fake_binary]
-
- assert ceph_fs.pre_generate_ceph_cmd(image) == expected_cmd
-
- @pytest.mark.parametrize('image', [None, fake_container_image])
- @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
- def test_generate_ceph_cmd(self, image):
- if image:
- expected_cmd = fake_container_cmd
- else:
- expected_cmd = [fake_binary]
-
- expected_cmd.extend([
- '--cluster',
- fake_cluster,
- 'fs'
- ])
- assert ceph_fs.generate_ceph_cmd(fake_cluster, [], image) == expected_cmd
-
def test_create_fs(self):
fake_module = MagicMock()
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'fs', 'new',
fake_fs,
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'fs', 'set',
fake_fs,
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'fs', 'get',
fake_fs,
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'fs', 'rm',
fake_fs,
fake_module.params = fake_params
expected_cmd = [
fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
'--cluster', fake_cluster,
'fs', 'fail',
fake_fs
import pytest
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
-
-sys.path.append('./library')
-import ceph_key # noqa: E402
+import ceph_key
# From ceph-ansible documentation