]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
ceph-volume: implement the 'lvm batch' subcommand
authorAndrew Schoen <aschoen@redhat.com>
Fri, 3 Aug 2018 16:15:58 +0000 (11:15 -0500)
committerAlfredo Deza <alfredo@deza.pe>
Thu, 9 Aug 2018 13:41:58 +0000 (09:41 -0400)
This adds the action 'batch' to the ceph-volume module so that we can
run the new 'ceph-volume lvm batch' subcommand. A functional test is
also included.

If devices is defined and osd_scenario is lvm then the 'ceph-volume lvm
batch' command will be used to create the OSDs.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
12 files changed:
library/ceph_volume.py
plugins/actions/validate.py
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/scenarios/lvm-batch.yml [new file with mode: 0644]
roles/ceph-validate/tasks/main.yml
tests/conftest.py
tests/functional/centos/7/lvm-batch/Vagrantfile [new symlink]
tests/functional/centos/7/lvm-batch/ceph-override.json [new file with mode: 0644]
tests/functional/centos/7/lvm-batch/group_vars/all [new file with mode: 0644]
tests/functional/centos/7/lvm-batch/hosts [new file with mode: 0644]
tests/functional/centos/7/lvm-batch/vagrant_variables.yml [new file with mode: 0644]
tox.ini

index e95c9794989209fe3bb27cb3312626aa9cb2dffb..b7b4495fc20f81495a51da399b32aadb932b95f9 100644 (file)
@@ -36,7 +36,7 @@ options:
         description:
             - The action to take. Either creating OSDs or zapping devices.
         required: true
-        choices: ['create', 'zap']
+        choices: ['create', 'zap', 'batch']
         default: create
     data:
         description:
@@ -84,6 +84,11 @@ options:
         description:
             - If set to True the OSD will be encrypted with dmcrypt.
         required: false
+    batch_devices:
+        description:
+            - A list of devices to pass to the 'ceph-volume lvm batch' subcommand.
+            - Only applicable if action is 'batch'.
+        required: false
 
 
 author:
@@ -140,6 +145,72 @@ def get_wal(wal, wal_vg):
     return wal
 
 
+def batch(module):
+    cluster = module.params['cluster']
+    objectstore = module.params['objectstore']
+    batch_devices = module.params['batch_devices']
+    crush_device_class = module.params.get('crush_device_class', None)
+    dmcrypt = module.params['dmcrypt']
+
+    if not batch_devices:
+        module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)
+
+    cmd = [
+        'ceph-volume',
+        '--cluster',
+        cluster,
+        'lvm',
+        'batch',
+        '--%s' % objectstore,
+        '--yes',
+    ]
+
+    if crush_device_class:
+        cmd.extend(["--crush-device-class", crush_device_class])
+
+    if dmcrypt:
+        cmd.append("--dmcrypt")
+
+    cmd.extend(batch_devices)
+
+    result = dict(
+        changed=False,
+        cmd=cmd,
+        stdout='',
+        stderr='',
+        rc='',
+        start='',
+        end='',
+        delta='',
+    )
+
+    if module.check_mode:
+        return result
+
+    startd = datetime.datetime.now()
+
+    rc, out, err = module.run_command(cmd, encoding=None)
+
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        stdout=out.rstrip(b"\r\n"),
+        stderr=err.rstrip(b"\r\n"),
+        rc=rc,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        changed=True,
+    )
+
+    if rc != 0:
+        module.fail_json(msg='non-zero return code', **result)
+
+    module.exit_json(**result)
+
+
 def create_osd(module):
     cluster = module.params['cluster']
     objectstore = module.params['objectstore']
@@ -313,8 +384,8 @@ def run_module():
     module_args = dict(
         cluster=dict(type='str', required=False, default='ceph'),
         objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'),
-        action=dict(type='str', required=False, choices=['create', 'zap'], default='create'),
-        data=dict(type='str', required=True),
+        action=dict(type='str', required=False, choices=['create', 'zap', 'batch'], default='create'),
+        data=dict(type='str', required=False),
         data_vg=dict(type='str', required=False),
         journal=dict(type='str', required=False),
         journal_vg=dict(type='str', required=False),
@@ -324,6 +395,7 @@ def run_module():
         wal_vg=dict(type='str', required=False),
         crush_device_class=dict(type='str', required=False),
         dmcrypt=dict(type='bool', required=False, default=False),
+        batch_devices=dict(type='list', required=False, default=[]),
     )
 
     module = AnsibleModule(
@@ -337,6 +409,8 @@ def run_module():
         create_osd(module)
     elif action == "zap":
         zap_devices(module)
+    elif action == "batch":
+        batch(module)
 
     module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1)
 
index cdccfb9a05f52260d6f5151c3e9e76ca4c37ca47..12667404e4309360d806703f43314470b0ca0837 100644 (file)
@@ -82,7 +82,9 @@ class ActionModule(ActionBase):
                     notario.validate(host_vars, non_collocated_osd_scenario, defined_keys=True)
 
                 if host_vars["osd_scenario"] == "lvm":
-                    if notario_store['osd_objectstore'] == 'filestore':
+                    if host_vars.get("devices"):
+                        notario.validate(host_vars, lvm_batch_scenario, defined_keys=True)
+                    elif notario_store['osd_objectstore'] == 'filestore':
                         notario.validate(host_vars, lvm_filestore_scenario, defined_keys=True)
                     elif notario_store['osd_objectstore'] == 'bluestore':
                         notario.validate(host_vars, lvm_bluestore_scenario, defined_keys=True)
@@ -222,6 +224,8 @@ non_collocated_osd_scenario = (
     ("devices", iterables.AllItems(types.string)),
 )
 
+lvm_batch_scenario = ("devices", iterables.AllItems(types.string))
+
 lvm_filestore_scenario = ("lvm_volumes", iterables.AllItems((
     (optional('crush_device_class'), types.string),
     ('data', types.string),
index 60136faed2943976c5ef13dca4bfc8b757475e37..115260f681a6f01bf81e7a8ddf93229f38844e3b 100644 (file)
   include: scenarios/lvm.yml
   when:
     - osd_scenario == 'lvm'
+    - lvm_volumes is defined
+    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
+  static: False
+
+- name: include scenarios/lvm-batch.yml
+  include: scenarios/lvm-batch.yml
+  when:
+    - osd_scenario == 'lvm'
+    - devices is defined
     - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
diff --git a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml
new file mode 100644 (file)
index 0000000..4381eec
--- /dev/null
@@ -0,0 +1,11 @@
+---
+
+- name: "use ceph-volume lvm batch to create {{ osd_objectstore }} osds"
+  ceph_volume:
+    cluster: "{{ cluster }}"
+    objectstore: "{{ osd_objectstore }}"
+    batch_devices: "{{ devices }}"
+    dmcrypt: "{{ dmcrypt|default(omit) }}"
+    action: "batch"
+  environment:
+    CEPH_VOLUME_DEBUG: 1
index c4b77e74a85b52a8f4d6b9bd5522218e2e89e324..a865fc4f9c25302b034867d9c7a0494021414a60 100644 (file)
@@ -58,7 +58,7 @@
   when:
     - osd_group_name in group_names
     - not osd_auto_discovery | default(False)
-    - osd_scenario != "lvm"
+    - osd_scenario != "lvm" and devices is not defined
 
 - name: include check_eth_mon.yml
   include: check_eth_mon.yml
@@ -74,4 +74,4 @@
     - rgw_group_name in group_names
     - radosgw_interface != "dummy"
     - radosgw_address == "0.0.0.0"
-    - radosgw_address_block == "subnet"
\ No newline at end of file
+    - radosgw_address_block == "subnet"
index f24c8751f24c10ac863a620486332033de91d62c..f35783a6151344cd6251557c28c933b2b40877ce 100644 (file)
@@ -22,7 +22,8 @@ def node(host, request):
     group_names = ansible_vars["group_names"]
     docker = ansible_vars.get("docker")
     osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
-    lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
+    osd_scenario = ansible_vars.get("osd_scenario")
+    lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
     ceph_release_num = {
         'jewel': 10,
         'kraken': 11,
diff --git a/tests/functional/centos/7/lvm-batch/Vagrantfile b/tests/functional/centos/7/lvm-batch/Vagrantfile
new file mode 120000 (symlink)
index 0000000..dfd7436
--- /dev/null
@@ -0,0 +1 @@
+../../../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/centos/7/lvm-batch/ceph-override.json b/tests/functional/centos/7/lvm-batch/ceph-override.json
new file mode 100644 (file)
index 0000000..1a9600a
--- /dev/null
@@ -0,0 +1,9 @@
+{
+       "ceph_conf_overrides": {
+               "global": {
+                       "osd_pool_default_pg_num": 12,
+                       "osd_pool_default_size": 1
+               }
+       },
+  "ceph_mon_docker_memory_limit": "2g"
+}
diff --git a/tests/functional/centos/7/lvm-batch/group_vars/all b/tests/functional/centos/7/lvm-batch/group_vars/all
new file mode 100644 (file)
index 0000000..8ec1208
--- /dev/null
@@ -0,0 +1,21 @@
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.39.0/24"
+cluster_network: "192.168.40.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+copy_admin_key: true
+devices:
+  - /dev/sdb 
+  - /dev/sdc
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_size: 1
diff --git a/tests/functional/centos/7/lvm-batch/hosts b/tests/functional/centos/7/lvm-batch/hosts
new file mode 100644 (file)
index 0000000..d6c8901
--- /dev/null
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[mgrs]
+mon0
+
+[osds]
+osd0
diff --git a/tests/functional/centos/7/lvm-batch/vagrant_variables.yml b/tests/functional/centos/7/lvm-batch/vagrant_variables.yml
new file mode 100644 (file)
index 0000000..342ce5f
--- /dev/null
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location.  vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
diff --git a/tox.ini b/tox.ini
index af5c27758292d1d0bebe738b3f10d0f146bef2ff..b3c6902b996fd5362aed013ef20d9cb810c62173 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation}
-  {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container}
+  {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch}
 
 skipsdist = True
 
@@ -197,6 +197,7 @@ changedir=
   update_cluster: {toxinidir}/tests/functional/centos/7/cluster
   switch_to_containers: {toxinidir}/tests/functional/centos/7/cluster
   lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
+  lvm_batch: {toxinidir}/tests/functional/centos/7/lvm-batch
   bluestore_lvm_osds: {toxinidir}/tests/functional/centos/7/bs-lvm-osds
   purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
   ooo_collocation: {toxinidir}/tests/functional/centos/7/ooo-collocation