--- /dev/null
+- name: creates logical volumes for the bucket index and fs journals on a single nvme device and prepares for use of osd_scenario=lvm
+  hosts:
+    - osds
+
+  tasks:
+
+    - name: include vars of lv_vars.yaml
+      include_vars:
+        file: lv_vars.yaml
+
+    # ensure lvm2 is installed
+    - name: install lvm2
+      package:
+        name: lvm2
+        state: present
+
+    # Make entire nvme device a VG
+    - name: add nvme device as lvm pv
+      lvg:
+        force: yes
+        pvs: "{{ nvme_device }}"
+        pesize: 4
+        state: present
+        vg: "{{ nvme_vg_name }}"
+
+    - name: create lvs for fs journals for the bucket index on the nvme device
+      lvol:
+        lv: "{{ item.journal_name }}"
+        vg: "{{ nvme_vg_name }}"
+        size: "{{ journal_size }}"
+        pvs: "{{ nvme_device }}"
+      with_items:
+        - "{{ nvme_device_lvs }}"
+
+    - name: create lvs for fs journals for hdd devices
+      lvol:
+        lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
+        vg: "{{ nvme_vg_name }}"
+        size: "{{ journal_size }}"
+      with_items:
+        - "{{ hdd_devices }}"
+
+    - name: create the lv for data portion of the bucket index on the nvme device
+      lvol:
+        lv: "{{ item.lv_name }}"
+        vg: "{{ nvme_vg_name }}"
+        size: "{{ item.size }}"
+        pvs: "{{ nvme_device }}"
+      with_items:
+        - "{{ nvme_device_lvs }}"
+
+    # Make sure all hdd devices have a unique volume group
+    - name: create vgs for all hdd devices
+      lvg:
+        force: yes
+        pvs: "{{ item }}"
+        pesize: 4
+        state: present
+        vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+      with_items:
+        - "{{ hdd_devices }}"
+
+    - name: create lvs for the data portion on hdd devices
+      lvol:
+        lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
+        vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+        size: "{{ hdd_lv_size }}"
+        pvs: "{{ item }}"
+      with_items:
+        - "{{ hdd_devices }}"
+
+    # Write the resulting configuration to a logfile
+    - name: write output for osds.yml to logfile
+      template:
+        src: templates/logfile.j2
+        dest: /tmp/logfile.txt
+      delegate_to: localhost
+
+    - name: print closing message
+      debug:
+        msg: "Wrote yaml for osds.yml to /tmp/logfile.txt"
+      delegate_to: localhost
--- /dev/null
+- name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
+  hosts:
+    - osds
+
+  vars_prompt:
+    - name: ireallymeanit
+      prompt: Are you sure you want to tear down the logical volumes?
+      default: 'no'
+      private: no
+
+  tasks:
+    - name: exit playbook, if user did not mean to tear down logical volumes
+      fail:
+        msg: >
+          Exiting lv-teardown playbook, logical volumes were NOT torn down.
+          To tear down the logical volumes, either say 'yes' at the prompt or
+          use `-e ireallymeanit=yes` on the command line when invoking the playbook.
+      when: ireallymeanit != 'yes'
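+    # For a non-interactive run, pass the confirmation on the command line instead, e.g.
+    # (the playbook filename here is an assumption; use whatever name this file is saved under):
+    #   ansible-playbook lv-teardown.yml -e ireallymeanit=yes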
+
+    - name: include vars of lv_vars.yaml
+      include_vars:
+        file: lv_vars.yaml
+
+    # ensure lvm2 is installed
+    - name: install lvm2
+      package:
+        name: lvm2
+        state: present
+
+# BEGIN TEARDOWN
+    - name: find any existing osd filesystems
+      shell: "grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'"
+      register: old_osd_filesystems
+
+    - name: tear down any existing osd filesystems
+      command: "umount -v {{ item }}"
+      with_items: "{{ old_osd_filesystems.stdout_lines }}"
+
+    - name: kill any lvm commands that may have hung
+      command: "killall -q lvcreate pvcreate vgcreate lvconvert"
+      failed_when: false
+
+    ## Logical Volumes
+    - name: tear down existing lv for bucket index
+      lvol:
+        lv: "{{ item.lv_name }}"
+        vg: "{{ nvme_vg_name }}"
+        state: absent
+        force: yes
+      with_items:
+        - "{{ nvme_device_lvs }}"
+
+    - name: tear down any existing hdd data lvs
+      lvol:
+        lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
+        vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+        state: absent
+        force: yes
+      with_items:
+        - "{{ hdd_devices }}"
+
+    - name: tear down any existing lv of journal for bucket index
+      lvol:
+        lv: "{{ item.journal_name }}"
+        vg: "{{ nvme_vg_name }}"
+        state: absent
+        force: yes
+      with_items:
+        - "{{ nvme_device_lvs }}"
+
+    - name: tear down any existing lvs of hdd journals
+      lvol:
+        lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
+        vg: "{{ nvme_vg_name }}"
+        state: absent
+        force: yes
+      with_items:
+        - "{{ hdd_devices }}"
+
+    ## Volume Groups
+    - name: remove vg on nvme device
+      lvg:
+        vg: "{{ nvme_vg_name }}"
+        state: absent
+        force: yes
+
+    - name: remove vg for each hdd device
+      lvg:
+        vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+        state: absent
+        force: yes
+      with_items:
+        - "{{ hdd_devices }}"
+
+    ## Physical Volumes
+    - name: tear down pv for nvme device
+      command: "pvremove --force --yes {{ nvme_device }}"
+
+    - name: tear down pv for each hdd device
+      command: "pvremove --force --yes {{ item }}"
+      with_items:
+        - "{{ hdd_devices }}"
--- /dev/null
+Suggested cut and paste into "group_vars/osds.yml" for configuring with osd_scenario=lvm
+----------------------------------------------------------------------------------------
+lvm_volumes:
+{% for lv in nvme_device_lvs %}
+  - data: {{ lv.lv_name }}
+    data_vg: {{ nvme_vg_name }}
+    journal: {{ lv.journal_name }}
+    journal_vg: {{ nvme_vg_name }}
+{% endfor %}
+{% for hdd in hdd_devices %}
+  - data: {{ hdd_lv_prefix }}-{{ hdd.split('/')[-1] }}
+    data_vg: {{ hdd_vg_prefix }}-{{ hdd.split('/')[-1] }}
+    journal: {{ hdd_journal_prefix }}-{{ hdd.split('/')[-1] }}
+    journal_vg: {{ nvme_vg_name }}
+{% endfor %}
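+{#
+Illustrative rendering of this template with the defaults from lv_vars.yaml
+(nvme_device=/dev/nvme0n1, hdd_devices /dev/sdd through /dev/sdh). Jinja strips this
+comment block, so it never appears in the generated logfile:
+
+lvm_volumes:
+  - data: ceph-bucket-index-1
+    data_vg: ceph-nvme-vg-nvme0n1
+    journal: ceph-journal-bucket-index-1-nvme0n1
+    journal_vg: ceph-nvme-vg-nvme0n1
+  - data: ceph-hdd-lv-sdd
+    data_vg: ceph-hdd-vg-sdd
+    journal: ceph-journal-sdd
+    journal_vg: ceph-nvme-vg-nvme0n1
+  (...one such entry follows for each remaining hdd device)
+#}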
--- /dev/null
+# This file configures logical volume creation for FS journals on NVMe, an NVMe-based bucket index, and HDD-based OSDs.
+# The playbook configures one NVMe device at a time. If your OSD systems contain multiple NVMe devices, edit the key variables ("nvme_device", "hdd_devices") for each run.
+# It is meant to be used when osd_objectstore=filestore, and it outputs the necessary input for group_vars/osds.yml when configured with osd_scenario=lvm.
+# The LVs for journals are created first, then the LVs for data. Each journal LV corresponds to one data LV.
+#
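+# A typical workflow (the playbook filename below is an assumption; substitute whatever name the
+# create playbook is saved under):
+#
+#   1. Edit "nvme_device" and "hdd_devices" below for the NVMe device being configured.
+#   2. Run the create playbook, e.g.:  ansible-playbook lv-create.yml
+#   3. Paste the generated /tmp/logfile.txt contents into group_vars/osds.yml.
+#   4. Repeat the above for any additional NVMe devices.
+#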
+## CHANGE THESE VARS ##
+
+# Path of nvme device primed for LV creation for journals and data. Only one NVMe device is allowed at a time.
+nvme_device: /dev/nvme0n1
+
+# Path of hdd devices designated for LV creation.
+hdd_devices:
+  - /dev/sdd
+  - /dev/sde
+  - /dev/sdf
+  - /dev/sdg
+  - /dev/sdh
+
+# Per the lvol module documentation, "journal_size", "hdd_lv_size", and the per-LV "size" values below are passed as the size of the logical volume, according to lvcreate(8) --size.
+# By default this is in megabytes, or optionally with one of the [bBsSkKmMgGtTpPeE] units; alternatively, according to lvcreate(8) --extents, it can be a percentage of [VG|PVS|FREE]. Float values must begin with a digit.
+# For further reading and examples see: https://docs.ansible.com/ansible/2.6/modules/lvol_module.html
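+#
+# A few illustrative size formats accepted by the lvol module (values here are examples only):
+#   journal_size: 5500      # 5500 megabytes
+#   journal_size: 10G       # 10 gigabytes
+#   journal_size: 20%VG     # 20% of the volume group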
+
+# Suggested journal size is 5500
+journal_size: 5500
+
+# This var is a list of bucket index LVs created on the NVMe device. We recommend creating one, but you can add others (see the commented-out example below).
+nvme_device_lvs:
+  - lv_name: "ceph-bucket-index-1"
+    size: 100%FREE
+    journal_name: "ceph-journal-bucket-index-1-{{ nvme_device_basename }}"
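+#
+# An illustrative second entry (commented out; the names here are examples only). If you add more
+# LVs, the sizes can no longer all be 100%FREE, since the first LV would already consume the
+# remaining free space in the volume group:
+#
+#  - lv_name: "ceph-bucket-index-2"
+#    size: 50G
+#    journal_name: "ceph-journal-bucket-index-2-{{ nvme_device_basename }}"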
+
+## TYPICAL USERS WILL NOT NEED TO CHANGE VARS FROM HERE DOWN ##
+
+# All HDDs must be the same size; the LVs created on them are dedicated to OSD data.
+hdd_lv_size: 100%FREE
+
+# Since this playbook can be run multiple times across different devices, {{ var.split('/')[-1] }} is used frequently throughout it.
+# This strips the device name from its path (e.g. sdc from /dev/sdc) to differentiate the names of vgs, journals, and lvs when the prefixes are not changed across multiple runs.
+nvme_device_basename: "{{ nvme_device.split('/')[-1] }}"
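+# For example, with the default nvme_device of /dev/nvme0n1 this evaluates to "nvme0n1", so
+# nvme_vg_name below becomes "ceph-nvme-vg-nvme0n1".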
+
+# Only one volume group is created in the playbook for all the LVs on NVMe. This volume group takes up the entire device specified in "nvme_device".
+nvme_vg_name: "ceph-nvme-vg-{{ nvme_device_basename }}"
+
+hdd_vg_prefix: "ceph-hdd-vg"
+hdd_lv_prefix: "ceph-hdd-lv"
+hdd_journal_prefix: "ceph-journal"
+
+# Journals are created on the NVMe device
+journal_device: "{{ nvme_device }}"
+