default: unlimited
bucketmaxobjects:
description:
- - with bucket quota enabled specify maximum number of objects # noqa E501
+ - with bucket quota enabled specify maximum number of objects # noqa: E501
required: false
default: unlimited
buckets:
returned: always
type: list
sample: [
- "test2: could not modify user: unable to modify user, cannot add duplicate email\n" # noqa E501
+ "test2: could not modify user: unable to modify user, cannot add duplicate email\n" # noqa: E501
]
failed_users:
if email:
if autogenkey:
try:
- rgw.create_user(username, fullname, email=email, key_type='s3', # noqa E501
+ rgw.create_user(username, fullname, email=email, key_type='s3', # noqa: E501
generate_key=autogenkey,
- max_buckets=maxbucket, suspended=suspend) # noqa E501
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501
except radosgw.exception.RadosGWAdminError as e:
- result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
fail_flag = True
else:
try:
- rgw.create_user(username, fullname, email=email, key_type='s3', # noqa E501
- access_key=accesskey, secret_key=secretkey, # noqa E501
- max_buckets=maxbucket, suspended=suspend) # noqa E501
+ rgw.create_user(username, fullname, email=email, key_type='s3', # noqa: E501
+ access_key=accesskey, secret_key=secretkey, # noqa: E501
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501
except radosgw.exception.RadosGWAdminError as e:
- result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
fail_flag = True
else:
if autogenkey:
try:
rgw.create_user(username, fullname, key_type='s3',
generate_key=autogenkey,
- max_buckets=maxbucket, suspended=suspend) # noqa E501
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501
except radosgw.exception.RadosGWAdminError as e:
- result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
fail_flag = True
else:
try:
rgw.create_user(username, fullname, key_type='s3',
- access_key=accesskey, secret_key=secretkey, # noqa E501
- max_buckets=maxbucket, suspended=suspend) # noqa E501
+ access_key=accesskey, secret_key=secretkey, # noqa: E501
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501
except radosgw.exception.RadosGWAdminError as e:
- result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
fail_flag = True
if not fail_flag and userquota:
rgw.set_quota(username, 'user', max_objects=usermaxobjects,
max_size_kb=usermaxsize, enabled=True)
except radosgw.exception.RadosGWAdminError as e:
- result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
fail_flag = True
if not fail_flag and bucketquota:
try:
- rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects, # noqa E501
+ rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects, # noqa: E501
max_size_kb=bucketmaxsize, enabled=True)
except radosgw.exception.RadosGWAdminError as e:
- result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
fail_flag = True
if fail_flag:
except radosgw.exception.RadosGWAdminError:
pass
failed_buckets.append(bucket)
- result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user) # noqa E501
+ result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user) # noqa: E501
else:
# something went wrong
failed_buckets.append(bucket)
- result['error_messages'].append(bucket + ' could not be created') # noqa E501
+ result['error_messages'].append(bucket + ' could not be created') # noqa: E501
result['added_buckets'] = ", ".join(added_buckets)
result['failed_buckets'] = ", ".join(failed_buckets)
host=rgw._connection[0],
port=rgw.port,
is_secure=rgw.is_secure,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(), # noqa E501
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(), # noqa: E501
)
try:
admin_access_key=dict(type='str', required=True),
admin_secret_key=dict(type='str', required=True),
buckets=dict(type='list', required=False, elements='dict',
- options=dict(bucket=dict(type='str', required=True), # noqa E501
- user=dict(type='str', required=True))), # noqa E501
+ options=dict(bucket=dict(type='str', required=True), # noqa: E501
+ user=dict(type='str', required=True))), # noqa: E501
users=dict(type='list', required=False, elements='dict',
- options=dict(username=dict(type='str', required=True), # noqa E501
- fullname=dict(type='str', required=True), # noqa E501
- email=dict(type='str', required=False), # noqa E501
- maxbucket=dict(type='int', required=False, default=1000), # noqa E501
- suspend=dict(type='bool', required=False, default=False), # noqa E501
- autogenkey=dict(type='bool', required=False, default=True), # noqa E501
- accesskey=dict(type='str', required=False), # noqa E501
- secretkey=dict(type='str', required=False), # noqa E501
- userquota=dict(type='bool', required=False, default=False), # noqa E501
- usermaxsize=dict(type='str', required=False, default='-1'), # noqa E501
- usermaxobjects=dict(type='int', required=False, default=-1), # noqa E501
- bucketquota=dict(type='bool', required=False, default=False), # noqa E501
- bucketmaxsize=dict(type='str', required=False, default='-1'), # noqa E501
- bucketmaxobjects=dict(type='int', required=False, default=-1)))) # noqa E501
+ options=dict(username=dict(type='str', required=True), # noqa: E501
+ fullname=dict(type='str', required=True), # noqa: E501
+ email=dict(type='str', required=False), # noqa: E501
+ maxbucket=dict(type='int', required=False, default=1000), # noqa: E501
+ suspend=dict(type='bool', required=False, default=False), # noqa: E501
+ autogenkey=dict(type='bool', required=False, default=True), # noqa: E501
+ accesskey=dict(type='str', required=False), # noqa: E501
+ secretkey=dict(type='str', required=False), # noqa: E501
+ userquota=dict(type='bool', required=False, default=False), # noqa: E501
+ usermaxsize=dict(type='str', required=False, default='-1'), # noqa: E501
+ usermaxobjects=dict(type='int', required=False, default=-1), # noqa: E501
+ bucketquota=dict(type='bool', required=False, default=False), # noqa: E501
+ bucketmaxsize=dict(type='str', required=False, default='-1'), # noqa: E501
+ bucketmaxobjects=dict(type='int', required=False, default=-1)))) # noqa: E501
# the AnsibleModule object
module = AnsibleModule(argument_spec=fields,
# radosgw connection
rgw = radosgw.connection.RadosGWAdminConnection(host=rgw_host,
port=port,
- access_key=admin_access_key, # noqa E501
- secret_key=admin_secret_key, # noqa E501
+ access_key=admin_access_key, # noqa: E501
+ secret_key=admin_secret_key, # noqa: E501
aws_signature='AWS4',
is_secure=is_secure)
"region",
"root",
]
- return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0])) # noqa E501
+ return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0])) # noqa: E501
except ValueError as error:
- fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module) # noqa E501
+ fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module) # noqa: E501
def create_and_move_buckets_list(cluster, location, containerized=None):
for item in location:
bucket_type, bucket_name = item
# ceph osd crush add-bucket maroot root
- cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized)) # noqa E501
+ cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized)) # noqa: E501
if previous_bucket:
# ceph osd crush move monrack root=maroot
- cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized)) # noqa E501
+ cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized)) # noqa: E501
previous_bucket = item[1]
return cmd_list
startd = datetime.datetime.now()
# run the Ceph command to add buckets
- rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized)) # noqa E501
+ rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized)) # noqa: E501
endd = datetime.datetime.now()
delta = endd - startd
description:
- Fetch client.admin and bootstrap key.
This is only needed for Nautilus and above.
- Writes down to the filesystem the initial keys generated by the monitor. # noqa E501
+ Writes down to the filesystem the initial keys generated by the monitor. # noqa: E501
This command can ONLY run from a monitor node.
required: false
default: false
EXAMPLES = '''
keys_to_create:
- - { name: client.key, key: "AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==", caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" } # noqa e501
- - { name: client.cle, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" } # noqa e501
+ - { name: client.key, key: "AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==", caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" } # noqa: E501
+ - { name: client.cle, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" } # noqa: E501
caps:
mon: "allow rwx"
RETURN = '''# '''
-from ansible.module_utils.basic import AnsibleModule # noqa E402
-import datetime # noqa E402
-import grp # noqa E402
-import json # noqa E402
-import os # noqa E402
-import pwd # noqa E402
-import stat # noqa E402
-import struct # noqa E402
-import time # noqa E402
-import base64 # noqa E402
-import socket # noqa E402
+from ansible.module_utils.basic import AnsibleModule # noqa: E402
+import datetime # noqa: E402
+import grp # noqa: E402
+import json # noqa: E402
+import os # noqa: E402
+import pwd # noqa: E402
+import stat # noqa: E402
+import struct # noqa: E402
+import time # noqa: E402
+import base64 # noqa: E402
+import socket # noqa: E402
-CEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa E501
- 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa E501
+CEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa: E501
+ 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa: E501
def str_to_bool(val):
return cmd
-def generate_ceph_authtool_cmd(cluster, name, secret, caps, dest, container_image=None): # noqa E501
+def generate_ceph_authtool_cmd(cluster, name, secret, caps, dest, container_image=None): # noqa: E501
'''
Generate 'ceph-authtool' command line to execute
'''
return cmd
-def create_key(module, result, cluster, user, user_key_path, name, secret, caps, import_key, dest, container_image=None): # noqa E501
+def create_key(module, result, cluster, user, user_key_path, name, secret, caps, import_key, dest, container_image=None): # noqa: E501
'''
Create a CephX key
'''
return cmd_list
-def info_key(cluster, name, user, user_key_path, output_format, container_image=None): # noqa E501
+def info_key(cluster, name, user, user_key_path, output_format, container_image=None): # noqa: E501
'''
Get information about a CephX key
'''
try:
out_dict = json.loads(out)
except ValueError as e:
- fatal("Could not decode 'ceph auth list' json output: {}".format(e), module) # noqa E501
+ fatal("Could not decode 'ceph auth list' json output: {}".format(e), module) # noqa: E501
entities = []
if "auth_dump" in out_dict:
if v in CEPH_INITIAL_KEYS:
entities.append(v)
else:
- fatal("'auth_dump' key not present in json output:", module) # noqa E501
+ fatal("'auth_dump' key not present in json output:", module) # noqa: E501
- if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)): # noqa E501
+ if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)): # noqa: E501
# must be missing in auth_dump, as if it were in CEPH_INITIAL_KEYS
# it'd be in entities from the above test. Report what's missing.
missing = []
for e in CEPH_INITIAL_KEYS:
if e not in entities:
missing.append(e)
- fatal("initial keyring does not contain keys: " + ' '.join(missing), module) # noqa E501
+ fatal("initial keyring does not contain keys: " + ' '.join(missing), module) # noqa: E501
return entities
module_args = dict(
cluster=dict(type='str', required=False, default='ceph'),
name=dict(type='str', required=False),
- state=dict(type='str', required=False, default='present', choices=['present', 'update', 'absent',
- 'list', 'info', 'fetch_initial_keys', 'generate_secret']),
+ state=dict(type='str', required=False, default='present', choices=['present', 'update', 'absent', # noqa: E501
+ 'list', 'info', 'fetch_initial_keys', 'generate_secret']), # noqa: E501
caps=dict(type='dict', required=False, default=None),
secret=dict(type='str', required=False, default=None, no_log=True),
import_key=dict(type='bool', required=False, default=True),
if import_key:
_info_key = []
rc, cmd, out, err = exec_commands(
- module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa E501
+ module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa: E501
key_exist = rc
if not caps and key_exist != 0:
- fatal("Capabilities must be provided when state is 'present'", module) # noqa E501
+ fatal("Capabilities must be provided when state is 'present'", module) # noqa: E501
if key_exist != 0 and secret is None and caps is None:
- fatal("Keyring doesn't exist, you must provide 'secret' and 'caps'", module) # noqa E501
+ fatal("Keyring doesn't exist, you must provide 'secret' and 'caps'", module) # noqa: E501
if key_exist == 0:
_info_key = json.loads(out)
if not secret:
_caps = _info_key[0]['caps']
if secret == _secret and caps == _caps:
if not os.path.isfile(file_path):
- rc, cmd, out, err = exec_commands(module, get_key(cluster, user, user_key_path, name, file_path, container_image)) # noqa E501
+ rc, cmd, out, err = exec_commands(module, get_key(cluster, user, user_key_path, name, file_path, container_image)) # noqa: E501
result["rc"] = rc
if rc != 0:
- result["stdout"] = "Couldn't fetch the key {0} at {1}.".format(name, file_path) # noqa E501
+ result["stdout"] = "Couldn't fetch the key {0} at {1}.".format(name, file_path) # noqa: E501
module.exit_json(**result)
- result["stdout"] = "fetched the key {0} at {1}.".format(name, file_path) # noqa E501
+ result["stdout"] = "fetched the key {0} at {1}.".format(name, file_path) # noqa: E501
- result["stdout"] = "{0} already exists and doesn't need to be updated.".format(name) # noqa E501
+ result["stdout"] = "{0} already exists and doesn't need to be updated.".format(name) # noqa: E501
result["rc"] = 0
module.set_fs_attributes_if_different(file_args, False)
module.exit_json(**result)
else:
if os.path.isfile(file_path) and not secret or not caps:
- result["stdout"] = "{0} already exists in {1} you must provide secret *and* caps when import_key is {2}".format(name, dest, import_key) # noqa E501
+ result["stdout"] = "{0} already exists in {1} you must provide secret *and* caps when import_key is {2}".format(name, dest, import_key) # noqa: E501
result["rc"] = 0
module.exit_json(**result)
- if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0:
+ if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0: # noqa: E501
rc, cmd, out, err = exec_commands(module, create_key(
- module, result, cluster, user, user_key_path, name, secret, caps, import_key, file_path, container_image)) # noqa E501
+ module, result, cluster, user, user_key_path, name, secret, caps, import_key, file_path, container_image)) # noqa: E501
if rc != 0:
result["stdout"] = "Couldn't create or update {0}".format(name)
result["stderr"] = err
elif state == "absent":
if key_exist == 0:
rc, cmd, out, err = exec_commands(
- module, delete_key(cluster, user, user_key_path, name, container_image))
+ module, delete_key(cluster, user, user_key_path, name, container_image)) # noqa: E501
if rc == 0:
changed = True
else:
elif state == "info":
rc, cmd, out, err = exec_commands(
- module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa E501
+ module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa: E501
+ if rc != 0:
+ result["stdout"] = "skipped, since {0} does not exist".format(name)
+ result['rc'] = 0
+ module.exit_json(**result)
elif state == "list":
rc, cmd, out, err = exec_commands(
# we use info_cmd[0] because info_cmd is an array made of an array
info_cmd[0].extend(extra_args)
rc, cmd, out, err = exec_commands(
- module, info_cmd) # noqa E501
+ module, info_cmd) # noqa: E501
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = key_path
description:
- name of the ceph OSD flag.
required: true
- choices: ['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']
+ choices: ['noup', 'nodown', 'noout', 'nobackfill', 'norebalance',
+ 'norecover', 'noscrub', 'nodeep-scrub']
cluster:
description:
- The ceph cluster name.
def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(type='str', required=True, choices=['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']),
+ name=dict(type='str', required=True, choices=['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']), # noqa: E501
cluster=dict(type='str', required=False, default='ceph'),
- state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
+ state=dict(type='str', required=False, default='present', choices=['present', 'absent']), # noqa: E501
),
supports_check_mode=True,
)
container_image = is_containerized()
if state == 'present':
- cmd = generate_ceph_cmd(['osd', 'set'], [name], cluster=cluster, container_image=container_image)
+ cmd = generate_ceph_cmd(['osd', 'set'], [name], cluster=cluster, container_image=container_image) # noqa: E501
else:
- cmd = generate_ceph_cmd(['osd', 'unset'], [name], cluster=cluster, container_image=container_image)
+ cmd = generate_ceph_cmd(['osd', 'unset'], [name], cluster=cluster, container_image=container_image) # noqa: E501
if module.check_mode:
exit_module(
required: false
db_vg:
description:
- - If db is a lv, this must be the name of the volume group it belongs to. # noqa E501
+ - If db is a lv, this must be the name of the volume group it belongs to. # noqa: E501
- Only applicable if objectstore is 'bluestore'.
required: false
wal:
required: false
wal_vg:
description:
- - If wal is a lv, this must be the name of the volume group it belongs to. # noqa E501
+ - If wal is a lv, this must be the name of the volume group it belongs to. # noqa: E501
- Only applicable if objectstore is 'bluestore'.
required: false
crush_device_class:
action: create
-- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa e501
+- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa: E501
ceph_volume:
objectstore: bluestore
data: data-lv
'''
-from ansible.module_utils.basic import AnsibleModule # noqa 4502
+from ansible.module_utils.basic import AnsibleModule # noqa: E402
def fatal(message, module):
fatal('osds_per_device must be provided if action is "batch"', module)
if osds_per_device < 1:
- fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa E501
+ fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa: E501
if not batch_devices:
fatal('batch_devices must be provided if action is "batch"', module)
Check if an LV exists
'''
- args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)] # noqa E501
+ args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)] # noqa: E501
cmd = build_cmd(args, container_image, binary='lvs')
'bluestore', 'filestore'], default='bluestore'),
action=dict(type='str', required=False, choices=[
'create', 'zap', 'batch', 'prepare', 'activate', 'list',
- 'inventory'], default='create'), # noqa 4502
+ 'inventory'], default='create'), # noqa: E501
data=dict(type='str', required=False),
data_vg=dict(type='str', required=False),
journal=dict(type='str', required=False),
try:
out_dict = json.loads(out)
except ValueError:
- fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa E501
+ fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa: E501
if out_dict:
data = module.params['data']
- result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa E501
+ result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa: E501
result['rc'] = 0
module.exit_json(**result)
elif action == 'activate':
if container_image:
fatal(
- "This is not how container's activation happens, nothing to activate", module) # noqa E501
+ "This is not how container's activation happens, nothing to activate", module) # noqa: E501
# Activate the OSD
rc, cmd, out, err = exec_command(
skip = []
for device_type in ['journal', 'data', 'db', 'wal']:
# 1/ if we passed vg/lv
- if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa E501
+ if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501
# 2/ check this is an actual lv/vg
- ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa E501
+ ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa: E501
skip.append(ret)
# 3/ This isn't a lv/vg device
if not ret:
module.params['{}_vg'.format(device_type)] = False
module.params[device_type] = False
- # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device # noqa E501
- elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa E501
+ # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device # noqa: E501
+ elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501
skip.append(True)
cmd = zap_devices(module, container_image)
else:
module.fail_json(
- msg='State must either be "create" or "prepare" or "activate" or "list" or "zap" or "batch" or "inventory".', changed=False, rc=1) # noqa E501
+ msg='State must either be "create" or "prepare" or "activate" or "list" or "zap" or "batch" or "inventory".', changed=False, rc=1) # noqa: E501
endd = datetime.datetime.now()
delta = endd - startd
"""
-import os # noqa E402
-import logging # noqa E402
-from logging.handlers import RotatingFileHandler # noqa E402
-from ansible.module_utils.basic import * # noqa E402
+import os # noqa: E402
+import logging # noqa: E402
+from logging.handlers import RotatingFileHandler # noqa: E402
+from ansible.module_utils.basic import * # noqa: E402,F403
-from ceph_iscsi_config.client import GWClient # noqa E402
-import ceph_iscsi_config.settings as settings # noqa E402
+from ceph_iscsi_config.client import GWClient # noqa: E402
+import ceph_iscsi_config.settings as settings # noqa: E402
# the main function is called ansible_main to allow the call stack
},
}
- module = AnsibleModule(argument_spec=fields, # noqa F405
+ module = AnsibleModule(argument_spec=fields, # noqa: F405
supports_check_mode=False)
client_iqn = module.params['client_iqn']
"""
-import os # noqa E402
-import logging # noqa E402
+import os # noqa: E402
+import logging # noqa: E402
-from logging.handlers import RotatingFileHandler # noqa E402
-from ansible.module_utils.basic import * # noqa E402
+from logging.handlers import RotatingFileHandler # noqa: E402
+from ansible.module_utils.basic import * # noqa: E402,F403
-import ceph_iscsi_config.settings as settings # noqa E402
-from ceph_iscsi_config.common import Config # noqa E402
+import ceph_iscsi_config.settings as settings # noqa: E402
+from ceph_iscsi_config.common import Config # noqa: E402
-from ceph_iscsi_config.gateway import GWTarget # noqa E402
-from ceph_iscsi_config.utils import valid_ip # noqa E402
+from ceph_iscsi_config.gateway import GWTarget # noqa: E402
+from ceph_iscsi_config.utils import valid_ip # noqa: E402
# the main function is called ansible_main to allow the call stack
}
}
- module = AnsibleModule(argument_spec=fields, # noqa F405
+ module = AnsibleModule(argument_spec=fields, # noqa: F405
supports_check_mode=False)
cfg = Config(logger)
short_description: Manage ceph rbd images to present as iscsi LUNs to clients
description:
- This module calls the 'lun' configuration management module installed
- on the iscsi gateway node(s). The lun module handles the creation and resize # noqa E501
+ on the iscsi gateway node(s). The lun module handles the creation and resize # noqa: E501
of rbd images, and then maps these rbd devices to the gateway node(s) to be
exposed through the kernel's LIO target.
- To support module debugging, this module logs to /var/log/ansible-module-igw_config.log # noqa E501
+ To support module debugging, this module logs to /var/log/ansible-module-igw_config.log # noqa: E501
on the target machine(s).
option:
"""
-import os # noqa E402
-import logging # noqa E402
-from logging.handlers import RotatingFileHandler # noqa E402
+import os # noqa: E402
+import logging # noqa: E402
+from logging.handlers import RotatingFileHandler # noqa: E402
-from ansible.module_utils.basic import * # noqa E402
+from ansible.module_utils.basic import * # noqa: E402,F403
-from ceph_iscsi_config.lun import LUN # noqa E402
-from ceph_iscsi_config.utils import valid_size # noqa E402
-import ceph_iscsi_config.settings as settings # noqa E402
+from ceph_iscsi_config.lun import LUN # noqa: E402
+from ceph_iscsi_config.utils import valid_size # noqa: E402
+import ceph_iscsi_config.settings as settings # noqa: E402
# the main function is called ansible_main to allow the call stack
}
# not supporting check mode currently
- module = AnsibleModule(argument_spec=fields, # noqa F405
+ module = AnsibleModule(argument_spec=fields, # noqa: F405
supports_check_mode=False)
pool = module.params["pool"]
"""
-import os # noqa E402
-import logging # noqa E402
-import socket # noqa E402
-import rados # noqa E402
-import rbd # noqa E402
+import os # noqa: E402
+import logging # noqa: E402
+import socket # noqa: E402,F401
+import rados # noqa: E402
+import rbd # noqa: E402
-from logging.handlers import RotatingFileHandler # noqa E402
-from ansible.module_utils.basic import * # noqa E402
+from logging.handlers import RotatingFileHandler # noqa: E402
+from ansible.module_utils.basic import * # noqa: E402,F403
-import ceph_iscsi_config.settings as settings # noqa E402
-from ceph_iscsi_config.common import Config # noqa E402
-from ceph_iscsi_config.lun import RBDDev # noqa E402
+import ceph_iscsi_config.settings as settings # noqa: E402
+from ceph_iscsi_config.common import Config # noqa: E402
+from ceph_iscsi_config.lun import RBDDev # noqa: E402
__author__ = 'pcuzner@redhat.com'
if rbd_dev.error:
if rbd_dev.error_msg:
- logger.error("Could not remove {}. Error: {}. Manually run the " # noqa E501
+ logger.error("Could not remove {}. Error: {}. Manually run the " # noqa: E501
"rbd command line tool to delete.".
format(image, rbd_dev.error_msg))
else:
}
}
- module = AnsibleModule(argument_spec=fields, # noqa F405
+ module = AnsibleModule(argument_spec=fields, # noqa: F405
supports_check_mode=False)
run_mode = module.params['mode']