From 6d431ec22d048324c2fd234de8685d79edb0ae7d Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Fri, 3 Aug 2018 11:15:58 -0500
Subject: [PATCH] ceph-volume: implement the 'lvm batch' subcommand

This adds the action 'batch' to the ceph-volume module so that we can
run the new 'ceph-volume lvm batch' subcommand. A functional test is
also included.

If devices is defined and osd_scenario is lvm, then the 'ceph-volume
lvm batch' command will be used to create the OSDs.

Signed-off-by: Andrew Schoen
---
 library/ceph_volume.py                        | 80 ++++++++++++++++++-
 plugins/actions/validate.py                   |  6 +-
 roles/ceph-osd/tasks/main.yml                 | 10 +++
 roles/ceph-osd/tasks/scenarios/lvm-batch.yml  | 11 +++
 roles/ceph-validate/tasks/main.yml            |  4 +-
 tests/conftest.py                             |  3 +-
 .../functional/centos/7/lvm-batch/Vagrantfile |  1 +
 .../centos/7/lvm-batch/ceph-override.json     |  9 +++
 .../centos/7/lvm-batch/group_vars/all         | 21 +++++
 tests/functional/centos/7/lvm-batch/hosts     |  8 ++
 .../centos/7/lvm-batch/vagrant_variables.yml  | 73 +++++++++++++++++
 tox.ini                                       |  3 +-
 12 files changed, 221 insertions(+), 8 deletions(-)
 create mode 100644 roles/ceph-osd/tasks/scenarios/lvm-batch.yml
 create mode 120000 tests/functional/centos/7/lvm-batch/Vagrantfile
 create mode 100644 tests/functional/centos/7/lvm-batch/ceph-override.json
 create mode 100644 tests/functional/centos/7/lvm-batch/group_vars/all
 create mode 100644 tests/functional/centos/7/lvm-batch/hosts
 create mode 100644 tests/functional/centos/7/lvm-batch/vagrant_variables.yml

diff --git a/library/ceph_volume.py b/library/ceph_volume.py
index e95c97949..b7b4495fc 100644
--- a/library/ceph_volume.py
+++ b/library/ceph_volume.py
@@ -36,7 +36,7 @@ options:
         description:
             - The action to take. Either creating OSDs or zapping devices.
         required: true
-        choices: ['create', 'zap']
+        choices: ['create', 'zap', 'batch']
         default: create
     data:
         description:
@@ -84,6 +84,11 @@ options:
         description:
             - If set to True the OSD will be encrypted with dmcrypt.
         required: false
+    batch_devices:
+        description:
+            - A list of devices to pass to the 'ceph-volume lvm batch' subcommand.
+            - Only applicable if action is 'batch'.
+        required: false
 
 author:
@@ -140,6 +145,72 @@ def get_wal(wal, wal_vg):
     return wal
 
 
+def batch(module):
+    cluster = module.params['cluster']
+    objectstore = module.params['objectstore']
+    batch_devices = module.params['batch_devices']
+    crush_device_class = module.params.get('crush_device_class', None)
+    dmcrypt = module.params['dmcrypt']
+
+    if not batch_devices:
+        module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)
+
+    cmd = [
+        'ceph-volume',
+        '--cluster',
+        cluster,
+        'lvm',
+        'batch',
+        '--%s' % objectstore,
+        '--yes',
+    ]
+
+    if crush_device_class:
+        cmd.extend(["--crush-device-class", crush_device_class])
+
+    if dmcrypt:
+        cmd.append("--dmcrypt")
+
+    cmd.extend(batch_devices)
+
+    result = dict(
+        changed=False,
+        cmd=cmd,
+        stdout='',
+        stderr='',
+        rc='',
+        start='',
+        end='',
+        delta='',
+    )
+
+    if module.check_mode:
+        return result
+
+    startd = datetime.datetime.now()
+
+    rc, out, err = module.run_command(cmd, encoding=None)
+
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        stdout=out.rstrip(b"\r\n"),
+        stderr=err.rstrip(b"\r\n"),
+        rc=rc,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        changed=True,
+    )
+
+    if rc != 0:
+        module.fail_json(msg='non-zero return code', **result)
+
+    module.exit_json(**result)
+
+
 def create_osd(module):
     cluster = module.params['cluster']
     objectstore = module.params['objectstore']
@@ -313,8 +384,8 @@ def run_module():
     module_args = dict(
         cluster=dict(type='str', required=False, default='ceph'),
         objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'),
-        action=dict(type='str', required=False, choices=['create', 'zap'], default='create'),
-        data=dict(type='str', required=True),
+        action=dict(type='str', required=False, choices=['create', 'zap', 'batch'], default='create'),
+        data=dict(type='str', required=False),
         data_vg=dict(type='str', required=False),
         journal=dict(type='str', required=False),
         journal_vg=dict(type='str', required=False),
@@ -324,6 +395,7 @@
         wal_vg=dict(type='str', required=False),
         crush_device_class=dict(type='str', required=False),
         dmcrypt=dict(type='bool', required=False, default=False),
+        batch_devices=dict(type='list', required=False, default=[]),
     )
 
     module = AnsibleModule(
@@ -337,6 +409,8 @@
         create_osd(module)
     elif action == "zap":
         zap_devices(module)
+    elif action == "batch":
+        batch(module)
 
     module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1)
 
diff --git a/plugins/actions/validate.py b/plugins/actions/validate.py
index cdccfb9a0..12667404e 100644
--- a/plugins/actions/validate.py
+++ b/plugins/actions/validate.py
@@ -82,7 +82,9 @@ class ActionModule(ActionBase):
                 notario.validate(host_vars, non_collocated_osd_scenario, defined_keys=True)
 
             if host_vars["osd_scenario"] == "lvm":
-                if notario_store['osd_objectstore'] == 'filestore':
+                if host_vars.get("devices"):
+                    notario.validate(host_vars, lvm_batch_scenario, defined_keys=True)
+                elif notario_store['osd_objectstore'] == 'filestore':
                     notario.validate(host_vars, lvm_filestore_scenario, defined_keys=True)
                 elif notario_store['osd_objectstore'] == 'bluestore':
                     notario.validate(host_vars, lvm_bluestore_scenario, defined_keys=True)
@@ -222,6 +224,8 @@ non_collocated_osd_scenario = (
     ("devices", iterables.AllItems(types.string)),
 )
 
+lvm_batch_scenario = ("devices", iterables.AllItems(types.string))
+
 lvm_filestore_scenario = ("lvm_volumes", iterables.AllItems((
     (optional('crush_device_class'), types.string),
     ('data', types.string),
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 60136faed..115260f68 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -54,6 +54,16 @@
   include: scenarios/lvm.yml
   when:
     - osd_scenario == 'lvm'
+    - lvm_volumes is defined
+    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
+  static: False
+
+- name: include scenarios/lvm-batch.yml
+  include: scenarios/lvm-batch.yml
+  when:
+    - osd_scenario == 'lvm'
+    - devices is defined
     - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
diff --git a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml
new file mode 100644
index 000000000..4381eece2
--- /dev/null
+++ b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml
@@ -0,0 +1,11 @@
+---
+
+- name: "use ceph-volume lvm batch to create {{ osd_objectstore }} osds"
+  ceph_volume:
+    cluster: "{{ cluster }}"
+    objectstore: "{{ osd_objectstore }}"
+    batch_devices: "{{ devices }}"
+    dmcrypt: "{{ dmcrypt|default(omit) }}"
+    action: "batch"
+  environment:
+    CEPH_VOLUME_DEBUG: 1
diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml
index c4b77e74a..a865fc4f9 100644
--- a/roles/ceph-validate/tasks/main.yml
+++ b/roles/ceph-validate/tasks/main.yml
@@ -58,7 +58,7 @@
   when:
     - osd_group_name in group_names
    - not osd_auto_discovery | default(False)
-    - osd_scenario != "lvm"
+    - osd_scenario != "lvm" and devices is not defined
 
 - name: include check_eth_mon.yml
   include: check_eth_mon.yml
@@ -74,4 +74,4 @@
     - rgw_group_name in group_names
     - radosgw_interface != "dummy"
     - radosgw_address == "0.0.0.0"
-    - radosgw_address_block == "subnet"
\ No newline at end of file
+    - radosgw_address_block == "subnet"
diff --git a/tests/conftest.py b/tests/conftest.py
index f24c8751f..f35783a61 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -22,7 +22,8 @@ def node(host, request):
     group_names = ansible_vars["group_names"]
     docker = ansible_vars.get("docker")
     osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
-    lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
+    osd_scenario = ansible_vars.get("osd_scenario")
+    lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
     ceph_release_num = {
         'jewel': 10,
         'kraken': 11,
diff --git a/tests/functional/centos/7/lvm-batch/Vagrantfile b/tests/functional/centos/7/lvm-batch/Vagrantfile
new file mode 120000
index 000000000..dfd7436c9
--- /dev/null
+++ b/tests/functional/centos/7/lvm-batch/Vagrantfile
@@ -0,0 +1 @@
+../../../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/centos/7/lvm-batch/ceph-override.json b/tests/functional/centos/7/lvm-batch/ceph-override.json
new file mode 100644
index 000000000..1a9600a14
--- /dev/null
+++ b/tests/functional/centos/7/lvm-batch/ceph-override.json
@@ -0,0 +1,9 @@
+{
+  "ceph_conf_overrides": {
+    "global": {
+      "osd_pool_default_pg_num": 12,
+      "osd_pool_default_size": 1
+    }
+  },
+  "ceph_mon_docker_memory_limit": "2g"
+}
diff --git a/tests/functional/centos/7/lvm-batch/group_vars/all b/tests/functional/centos/7/lvm-batch/group_vars/all
new file mode 100644
index 000000000..8ec12080c
--- /dev/null
+++ b/tests/functional/centos/7/lvm-batch/group_vars/all
@@ -0,0 +1,21 @@
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +monitor_interface: eth1 +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +copy_admin_key: true +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/lvm-batch/hosts b/tests/functional/centos/7/lvm-batch/hosts new file mode 100644 index 000000000..d6c89012a --- /dev/null +++ b/tests/functional/centos/7/lvm-batch/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 diff --git a/tests/functional/centos/7/lvm-batch/vagrant_variables.yml b/tests/functional/centos/7/lvm-batch/vagrant_variables.yml new file mode 100644 index 000000000..342ce5f9c --- /dev/null +++ b/tests/functional/centos/7/lvm-batch/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# Deploy RESTAPI on each of the Monitors +restapi: true + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
diff --git a/tox.ini b/tox.ini
index af5c27758..b3c6902b9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation}
-  {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container}
+  {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch}
 
 skipsdist = True
 
@@ -197,6 +197,7 @@ changedir=
   update_cluster: {toxinidir}/tests/functional/centos/7/cluster
   switch_to_containers: {toxinidir}/tests/functional/centos/7/cluster
   lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
+  lvm_batch: {toxinidir}/tests/functional/centos/7/lvm-batch
  bluestore_lvm_osds: {toxinidir}/tests/functional/centos/7/bs-lvm-osds
   purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
   ooo_collocation: {toxinidir}/tests/functional/centos/7/ooo-collocation
-- 
2.39.5
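
For reference, with the lvm-batch test settings above (osd_objectstore "bluestore", devices /dev/sdb and /dev/sdc, cluster "ceph", dmcrypt left unset so the flag is omitted), the batch() helper added to library/ceph_volume.py assembles roughly the following command, which the new task runs with CEPH_VOLUME_DEBUG=1 in its environment; ceph-volume itself decides how the listed devices are split into OSDs:

    ceph-volume --cluster ceph lvm batch --bluestore --yes /dev/sdb /dev/sdc

The lvm_batch factor added to tox.ini points the functional run at tests/functional/centos/7/lvm-batch, so the scenario is expected to be exercised through an env name such as luminous-lvm_batch.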