    description:
-        - The action to take. Either creating OSDs or zapping devices.
+        - The action to take. Creating OSDs, zapping devices, or creating OSDs in batch mode.
    required: true
-    choices: ['create', 'zap']
+    choices: ['create', 'zap', 'batch']
    default: create
data:
    description:
        - The logical volume name or device to use for the OSD data.
dmcrypt:
    description:
        - If set to True the OSD will be encrypted with dmcrypt.
    required: false
+batch_devices:
+    description:
+        - A list of devices to pass to the 'ceph-volume lvm batch' subcommand.
+        - Only applicable if action is 'batch'.
+    required: false
author:
return wal
+def batch(module):
+    cluster = module.params['cluster']
+    objectstore = module.params['objectstore']
+    batch_devices = module.params['batch_devices']
+    crush_device_class = module.params.get('crush_device_class', None)
+    dmcrypt = module.params['dmcrypt']
+
+    if not batch_devices:
+        module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)
+
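+    # Build the base command, e.g. 'ceph-volume --cluster ceph lvm batch --bluestore --yes'.
+    # '--yes' answers the confirmation prompt that 'ceph-volume lvm batch'
+    # shows after printing its OSD layout report.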
+    cmd = [
+        'ceph-volume',
+        '--cluster',
+        cluster,
+        'lvm',
+        'batch',
+        '--%s' % objectstore,
+        '--yes',
+    ]
+
+    if crush_device_class:
+        cmd.extend(["--crush-device-class", crush_device_class])
+
+    if dmcrypt:
+        cmd.append("--dmcrypt")
+
+    cmd.extend(batch_devices)
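+    # With objectstore='bluestore', dmcrypt=True and hypothetical devices
+    # ['/dev/sdb', '/dev/sdc'], cmd is now:
+    #   ceph-volume --cluster ceph lvm batch --bluestore --yes --dmcrypt /dev/sdb /dev/sdc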
+
+    result = dict(
+        changed=False,
+        cmd=cmd,
+        stdout='',
+        stderr='',
+        rc='',
+        start='',
+        end='',
+        delta='',
+    )
+
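+    # In check mode, report the command that would run without executing it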
+    if module.check_mode:
+        module.exit_json(**result)
+
+    startd = datetime.datetime.now()
+
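+    # encoding=None makes run_command return bytes, hence the b"\r\n"
+    # rstrips on stdout/stderr below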
+    rc, out, err = module.run_command(cmd, encoding=None)
+
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        stdout=out.rstrip(b"\r\n"),
+        stderr=err.rstrip(b"\r\n"),
+        rc=rc,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        changed=True,
+    )
+
+    if rc != 0:
+        module.fail_json(msg='non-zero return code', **result)
+
+    module.exit_json(**result)
+
+
def create_osd(module):
    cluster = module.params['cluster']
    objectstore = module.params['objectstore']
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'),
-        action=dict(type='str', required=False, choices=['create', 'zap'], default='create'),
-        data=dict(type='str', required=True),
+        action=dict(type='str', required=False, choices=['create', 'zap', 'batch'], default='create'),
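+        # 'data' is relaxed to optional because the batch action takes its
+        # devices from 'batch_devices' rather than 'data'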
+        data=dict(type='str', required=False),
        data_vg=dict(type='str', required=False),
        journal=dict(type='str', required=False),
        journal_vg=dict(type='str', required=False),
        wal_vg=dict(type='str', required=False),
        crush_device_class=dict(type='str', required=False),
        dmcrypt=dict(type='bool', required=False, default=False),
+        batch_devices=dict(type='list', required=False, default=[]),
    )
    module = AnsibleModule(
        create_osd(module)
    elif action == "zap":
        zap_devices(module)
+    elif action == "batch":
+        batch(module)
-        module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1)
+        module.fail_json(msg='Action must be one of "create", "zap" or "batch".', changed=False, rc=1)
        notario.validate(host_vars, non_collocated_osd_scenario, defined_keys=True)
    if host_vars["osd_scenario"] == "lvm":
-        if notario_store['osd_objectstore'] == 'filestore':
+        if host_vars.get("devices"):
+            notario.validate(host_vars, lvm_batch_scenario, defined_keys=True)
+        elif notario_store['osd_objectstore'] == 'filestore':
            notario.validate(host_vars, lvm_filestore_scenario, defined_keys=True)
        elif notario_store['osd_objectstore'] == 'bluestore':
            notario.validate(host_vars, lvm_bluestore_scenario, defined_keys=True)
    ("devices", iterables.AllItems(types.string)),
)
+lvm_batch_scenario = ("devices", iterables.AllItems(types.string))
+
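+# A hedged usage sketch: host_vars such as
+#   {"osd_scenario": "lvm", "devices": ["/dev/sdb", "/dev/sdc"]}
+# pass notario.validate(host_vars, lvm_batch_scenario, defined_keys=True),
+# while a non-string entry in 'devices' raises notario's Invalid error.
+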
lvm_filestore_scenario = ("lvm_volumes", iterables.AllItems((
    (optional('crush_device_class'), types.string),
    ('data', types.string),
  include: scenarios/lvm.yml
  when:
    - osd_scenario == 'lvm'
+    - lvm_volumes is defined
+    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
+  static: False
+
+- name: include scenarios/lvm-batch.yml
+  include: scenarios/lvm-batch.yml
+  when:
+    - osd_scenario == 'lvm'
+    - devices is defined
    - not containerized_deployment
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False
--- /dev/null
+---
+
+- name: "use ceph-volume lvm batch to create {{ osd_objectstore }} osds"
+  ceph_volume:
+    cluster: "{{ cluster }}"
+    objectstore: "{{ osd_objectstore }}"
+    batch_devices: "{{ devices }}"
+    dmcrypt: "{{ dmcrypt|default(omit) }}"
+    action: "batch"
+  environment:
+    CEPH_VOLUME_DEBUG: 1
  when:
    - osd_group_name in group_names
    - not osd_auto_discovery | default(False)
-    - osd_scenario != "lvm"
+    - osd_scenario != "lvm" and devices is not defined
- name: include check_eth_mon.yml
  include: check_eth_mon.yml
    - rgw_group_name in group_names
    - radosgw_interface != "dummy"
    - radosgw_address == "0.0.0.0"
-    - radosgw_address_block == "subnet"
\ No newline at end of file
+    - radosgw_address_block == "subnet"
    group_names = ansible_vars["group_names"]
    docker = ansible_vars.get("docker")
    osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
-    lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
+    osd_scenario = ansible_vars.get("osd_scenario")
+    lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
    ceph_release_num = {
        'jewel': 10,
        'kraken': 11,
--- /dev/null
+../../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+{
+    "ceph_conf_overrides": {
+        "global": {
+            "osd_pool_default_pg_num": 12,
+            "osd_pool_default_size": 1
+        }
+    },
+    "ceph_mon_docker_memory_limit": "2g"
+}
--- /dev/null
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.39.0/24"
+cluster_network: "192.168.40.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+copy_admin_key: true
+devices:
+  - /dev/sdb
+  - /dev/sdc
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_size: 1
--- /dev/null
+[mons]
+mon0
+
+[mgrs]
+mon0
+
+[osds]
+osd0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
[tox]
envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation}
-    {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container}
+    {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch}
skipsdist = True
    update_cluster: {toxinidir}/tests/functional/centos/7/cluster
    switch_to_containers: {toxinidir}/tests/functional/centos/7/cluster
    lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
+    lvm_batch: {toxinidir}/tests/functional/centos/7/lvm-batch
    bluestore_lvm_osds: {toxinidir}/tests/functional/centos/7/bs-lvm-osds
    purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
    ooo_collocation: {toxinidir}/tests/functional/centos/7/ooo-collocation