git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
osd: remove variable osd_scenario
author     Guillaume Abrioux <gabrioux@redhat.com>
           Thu, 11 Apr 2019 08:01:15 +0000 (10:01 +0200)
committer  mergify[bot] <mergify[bot]@users.noreply.github.com>
           Fri, 12 Apr 2019 00:45:21 +0000 (00:45 +0000)
As of stable-4.0, the only valid scenario is `lvm`,
so this variable is no longer needed.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 4d35e9eeed283a7c4d5cc2f61184b5ca8c55e2b2)
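
With the variable gone, an OSD host is driven by `devices` and/or `lvm_volumes`
alone. A minimal sketch of the resulting group_vars, reusing the illustrative
values from the functional tests updated below:

    # group_vars/osds.yml -- no osd_scenario line anymore
    osd_objectstore: "bluestore"

    # Either hand whole devices to 'ceph-volume lvm batch' ...
    devices:
      - /dev/sdb

    # ... or point ceph-volume at pre-created logical volumes instead:
    # lvm_volumes:
    #   - data: data-lv1
    #     data_vg: test_group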

45 files changed:
Vagrantfile
group_vars/osds.yml.sample
group_vars/rhcs.yml.sample
infrastructure-playbooks/lv-create.yml
infrastructure-playbooks/purge-cluster.yml
infrastructure-playbooks/purge-docker-cluster.yml
infrastructure-playbooks/vars/lv_vars.yaml.sample
roles/ceph-config/tasks/main.yml
roles/ceph-handler/templates/restart_osd_daemon.sh.j2
roles/ceph-osd/defaults/main.yml
roles/ceph-osd/tasks/build_devices.yml [deleted file]
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/start_osds.yml
roles/ceph-osd/templates/ceph-osd-run.sh.j2
roles/ceph-osd/templates/ceph-osd.service.j2
roles/ceph-validate/tasks/check_system.yml
tests/conftest.py
tests/functional/add-mdss/container/group_vars/all
tests/functional/add-mdss/group_vars/all
tests/functional/add-osds/container/group_vars/all
tests/functional/add-osds/group_vars/all
tests/functional/all_daemons/container/group_vars/osds
tests/functional/all_daemons/group_vars/osds
tests/functional/bs-lvm-osds/container/group_vars/all
tests/functional/bs-lvm-osds/group_vars/all
tests/functional/collocation/container/group_vars/osds
tests/functional/collocation/group_vars/osds
tests/functional/lvm-auto-discovery/container/group_vars/all
tests/functional/lvm-auto-discovery/group_vars/all
tests/functional/lvm-batch/container/group_vars/all
tests/functional/lvm-batch/group_vars/all
tests/functional/lvm-osds/container/group_vars/all
tests/functional/lvm-osds/group_vars/all
tests/functional/lvm_setup.yml
tests/functional/ooo-collocation/hosts
tests/functional/ooo_rhel8/group_vars/osds
tests/functional/podman/group_vars/osds
tests/functional/rgw-multisite/container/group_vars/all
tests/functional/rgw-multisite/container/secondary/group_vars/all
tests/functional/rgw-multisite/group_vars/all
tests/functional/rgw-multisite/secondary/group_vars/all
tests/functional/shrink_mon/container/group_vars/osds
tests/functional/shrink_mon/group_vars/osds
tests/functional/shrink_osd/container/group_vars/osds
tests/functional/shrink_osd/group_vars/osds

index f86968ec4662daa0b39444ae0c99e54d41f68ce4..7916593cfaefbb22da1afa14c980477a9dd53714 100644 (file)
@@ -93,7 +93,6 @@ ansible_provision = proc do |ansible|
     ansible.extra_vars = ansible.extra_vars.merge({
       cluster_network: "#{CLUSTER_SUBNET}.0/16",
       devices: ['/dev/sdc'], # hardcode leftover disk
-      osd_scenario: 'collocated',
       monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
       radosgw_address_block: "#{PUBLIC_SUBNET}.0/16",
       public_network: "#{PUBLIC_SUBNET}.0/16",
index 53aeb3b88ff0d8472929b8ed2d262d36423f7aea..f64cd524fd1f92b54cf982e131c2fd6f405092c4 100644 (file)
@@ -52,20 +52,13 @@ dummy:
 #osd_auto_discovery: false
 
 # Encrypt your OSD device using dmcrypt
-# If set to True, no matter which osd_objecstore and osd_scenario you use the data will be encrypted
+# If set to True, no matter which osd_objecstore you use the data will be encrypted
 #dmcrypt: False
 
 
-#osd_scenario: dummy
-#valid_osd_scenarios:
-#  - lvm
-
-
 #dedicated_devices: []
 
-# III. Use ceph-volume to create OSDs from logical volumes.
-# Use 'osd_scenario: lvm' to enable this scenario.
-# when using lvm, not collocated journals.
+# Use ceph-volume to create OSDs from logical volumes.
 # lvm_volumes is a list of dictionaries.
 #
 # Filestore: Each dictionary must contain a data, journal and vg_name key. Any
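
To illustrate the comment above, a hedged sketch of the two lvm_volumes forms;
the names are the sample/test values used elsewhere in this commit, not
defaults, and the Filestore form only applies when osd_objectstore is
"filestore":

    # Bluestore: each entry needs at least a data LV (plus its VG when the LV
    # is given by name rather than by full path)
    lvm_volumes:
      - data: data-lv1
        data_vg: test_group

    # Filestore: each entry additionally needs a journal (an LV or a raw
    # partition such as /dev/sdc1)
    # lvm_volumes:
    #   - data: data-lv1
    #     data_vg: test_group
    #     journal: /dev/sdc1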
index 639314060200e8199ef2bb91a9aa0a33775aebbd..be3ca9f09ce9cf38f6a477578b99c557e6a2e3c9 100644 (file)
@@ -387,7 +387,7 @@ ceph_rhcs_version: 3
 
 ## Rados Gateway options
 #
-#radosgw_frontend_type: beast # For additional frontends see: http://docs.ceph.com/docs/nautilus/radosgw/frontends/
+#radosgw_frontend_type: beast # For additionnal frontends see: http://docs.ceph.com/docs/nautilus/radosgw/frontends/
 
 #radosgw_civetweb_port: 8080
 #radosgw_civetweb_num_threads: 512
index a05ce15a017b5bc9383873b20f6abe8c961edab1..ec3c72b87ea8bd9bb81ce3efa5d71247668e944f 100644 (file)
@@ -1,11 +1,11 @@
-- name: creates logical volumes for the bucket index or fs journals on a single device and prepares for use of osd_scenario=lvm
+- name: creates logical volumes for the bucket index or fs journals on a single device. 
   become: true
   hosts:
   - osds
 
   vars:
     logfile: |
-      Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml" for configuring with osd_scenario=lvm
+      Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml"
       -----------------------------------------------------------------------------------------------------------
       {% for lv in nvme_device_lvs %}
         - data: {{ lv.lv_name }}
index b4aa40e3debbd765df1a8ab700627872fb01d3cd..eb544bd79be54868955ce47c741275630b4a60b2 100644 (file)
     with_items: "{{ lvm_volumes }}"
     when:
       - lvm_volumes | default([]) | length > 0
-      - osd_scenario == "lvm"
       - ceph_volume_present.rc == 0
 
   - name: zap and destroy osds created by ceph-volume with devices
     with_items: "{{ devices | default([]) }}"
     when:
       - devices | default([]) | length > 0
-      - osd_scenario == "lvm"
       - ceph_volume_present.rc == 0
 
   - name: get ceph block partitions
index 2d9ef70072dfb1336e12d378c16c9e16104cfb4e..ad1938bc505caf1fbad79af93df1fc41856e4287 100644 (file)
     register: remove_osd_mountpoints
     ignore_errors: true
 
-  - name: for ceph-disk based deployment
-    block:
-      - name: get prepare container
-        command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
-        register: prepare_containers
-        ignore_errors: true
-
-      - name: remove ceph osd prepare container
-        command: "docker rm -f {{ item }}"
-        with_items: "{{ prepare_containers.stdout_lines }}"
-        ignore_errors: true
-
-      # NOTE(leseb): hope someone will find a more elegant way one day...
-      - name: see if encrypted partitions are present
-        shell: |
-          blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
-        register: encrypted_ceph_partuuid
-
-      - name: get ceph data partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph data"
-        failed_when: false
-        register: ceph_data_partition_to_erase_path
-
-      - name: get ceph lockbox partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph lockbox"
-        failed_when: false
-        register: ceph_lockbox_partition_to_erase_path
-
-      - name: get ceph block partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph block"
-        failed_when: false
-        register: ceph_block_partition_to_erase_path
-
-      - name: get ceph journal partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph journal"
-        failed_when: false
-        register: ceph_journal_partition_to_erase_path
-
-      - name: get ceph db partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph block.db"
-        failed_when: false
-        register: ceph_db_partition_to_erase_path
-
-      - name: get ceph wal partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph block.wal"
-        failed_when: false
-        register: ceph_wal_partition_to_erase_path
-
-      - name: set_fact combined_devices_list
-        set_fact:
-          combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_block_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_db_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
-
-      - name: resolve parent device
-        command: lsblk --nodeps -no pkname "{{ item }}"
-        register: tmp_resolved_parent_device
-        with_items:
-          - "{{ combined_devices_list }}"
-
-      - name: set_fact resolved_parent_device
-        set_fact:
-          resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
-
-      - name: zap ceph osd disks
-        shell: |
-          docker run --rm \
-          --privileged=true \
-          --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
-          -v /dev/:/dev/ \
-          -e OSD_DEVICE=/dev/{{ item }} \
-          {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-          zap_device
-        with_items:
-          - "{{ resolved_parent_device }}"
-
-      - name: wait until the zap containers die
-        shell: |
-          docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
-        register: zap_alive
-        failed_when: false
-        until: zap_alive.rc != 0
-        retries: 5
-        delay: 10
-
-      - name: remove ceph osd zap disk container
-        docker_container:
-          image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-          name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
-          state: absent
-        with_items:
-          - "{{ resolved_parent_device }}"
-
-      - name: remove ceph osd service
-        file:
-          path: /etc/systemd/system/ceph-osd@.service
-          state: absent
-    when:
-      - osd_scenario != "lvm"
-
-  - name: for ceph-volume based deployments
-    block:
-      - name: zap and destroy osds created by ceph-volume with lvm_volumes
-        ceph_volume:
-          data: "{{ item.data }}"
-          data_vg: "{{ item.data_vg|default(omit) }}"
-          journal: "{{ item.journal|default(omit) }}"
-          journal_vg: "{{ item.journal_vg|default(omit) }}"
-          db: "{{ item.db|default(omit) }}"
-          db_vg: "{{ item.db_vg|default(omit) }}"
-          wal: "{{ item.wal|default(omit) }}"
-          wal_vg: "{{ item.wal_vg|default(omit) }}"
-          action: "zap"
-        environment:
-          CEPH_VOLUME_DEBUG: 1
-          CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-          CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-        with_items: "{{ lvm_volumes }}"
-        when: lvm_volumes | default([]) | length > 0
-
-      - name: zap and destroy osds created by ceph-volume with devices
-        ceph_volume:
-          data: "{{ item }}"
-          action: "zap"
-        environment:
-          CEPH_VOLUME_DEBUG: 1
-          CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-          CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-        with_items: "{{ devices | default([]) }}"
-        when: devices | default([]) | length > 0
-    when:
-      - osd_scenario == "lvm"
+  - name: zap and destroy osds created by ceph-volume with lvm_volumes
+    ceph_volume:
+      data: "{{ item.data }}"
+      data_vg: "{{ item.data_vg|default(omit) }}"
+      journal: "{{ item.journal|default(omit) }}"
+      journal_vg: "{{ item.journal_vg|default(omit) }}"
+      db: "{{ item.db|default(omit) }}"
+      db_vg: "{{ item.db_vg|default(omit) }}"
+      wal: "{{ item.wal|default(omit) }}"
+      wal_vg: "{{ item.wal_vg|default(omit) }}"
+      action: "zap"
+    environment:
+      CEPH_VOLUME_DEBUG: 1
+      CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+      CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+    with_items: "{{ lvm_volumes }}"
+    when: lvm_volumes | default([]) | length > 0
+
+  - name: zap and destroy osds created by ceph-volume with devices
+    ceph_volume:
+      data: "{{ item }}"
+      action: "zap"
+    environment:
+      CEPH_VOLUME_DEBUG: 1
+      CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+      CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+    with_items: "{{ devices | default([]) }}"
+    when: devices | default([]) | length > 0
 
   - name: remove ceph osd image
     docker_image:
index ba618a10ab71236b57065592437637843569735e..790d277aed37ae3ce17707f725bfe95cd8195ba2 100644 (file)
@@ -1,6 +1,6 @@
 # This file configures logical volume creation for FS Journals on NVMe, a NVMe based bucket index, and HDD based OSDs.
 # This playbook configures one NVMe device at a time. If your OSD systems contain multiple NVMe devices, you will need to edit the key variables ("nvme_device", "hdd_devices") for each run.
-# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml when configured with osd_scenario=lvm.
+# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml.
 # The LVs for journals are created first then the LVs for data. All LVs for journals correspond to a LV for data.
 #
 ## CHANGE THESE VARS ##
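
The key variables that comment asks you to edit look roughly like this (device
names below are placeholders, not defaults):

    nvme_device: /dev/nvme0n1      # device carrying the journals / bucket index
    hdd_devices:                   # one data OSD per HDD listed here
      - /dev/sdb
      - /dev/sdc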
index 360ddbdd9010ddd173b430a6d48168eca5131f49..dd9200e62bbbe7e5db8fc2d83e39cfdacf27d2b0 100644 (file)
@@ -4,20 +4,15 @@
   when:
     - containerized_deployment|bool
 
-- block:
-  - name: count number of osds for ceph-disk scenarios
-    set_fact:
-      num_osds: "{{ devices | length | int }}"
-    when:
-      - devices | default([]) | length > 0
-      - osd_scenario in ['collocated', 'non-collocated']
-
+- name: config file operations related to OSDs
+  when:
+    - inventory_hostname in groups.get(osd_group_name, [])
+  block:
   - name: count number of osds for lvm scenario
     set_fact:
       num_osds: "{{ lvm_volumes | length | int }}"
     when:
       - lvm_volumes | default([]) | length > 0
-      - osd_scenario == 'lvm'
 
   - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
     ceph_volume:
       PYTHONIOENCODING: utf-8
     when:
       - devices | default([]) | length > 0
-      - osd_scenario == 'lvm'
 
   - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
     set_fact:
       num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}"
     when:
       - devices | default([]) | length > 0
-      - osd_scenario == 'lvm'
       - (lvm_batch_report.stdout | from_json).changed
 
   - name: run 'ceph-volume lvm list' to see how many osds have already been created
@@ -58,7 +51,6 @@
       PYTHONIOENCODING: utf-8
     when:
       - devices | default([]) | length > 0
-      - osd_scenario == 'lvm'
       - not (lvm_batch_report.stdout | from_json).changed
 
   - name: set_fact num_osds from the output of 'ceph-volume lvm list'
@@ -66,7 +58,6 @@
       num_osds: "{{ lvm_list.stdout | from_json | length | int }}"
     when:
       - devices | default([]) | length > 0
-      - osd_scenario == 'lvm'
       - not (lvm_batch_report.stdout | from_json).changed
 
   when:
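
The report parsing above relies on only two top-level keys of the JSON that
'ceph-volume lvm batch --report' prints; a hedged sketch of just that part
(anything else in the report, including the per-OSD layout, is
version-dependent and not used here):

    changed: true      # false when the batch would create nothing new
    osds:              # num_osds is set to the length of this list
      - {}             # one entry per OSD that would be created
      - {}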
index 76ed0d5ea7285b4613daf47071abe5e405df7bdd..6bddbf74d8ffdc1b348b1ef3e64a1e12e41c39e3 100644 (file)
@@ -66,13 +66,7 @@ for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-
   # We need to wait because it may take some time for the socket to actually exists
   COUNT=10
   # Wait and ensure the socket exists after restarting the daemon
-  {% if containerized_deployment and osd_scenario != 'lvm' -%}
-  id=$(get_dev_name "$unit")
-  container_id=$(get_container_id_from_dev_name "$id")
-  wait_for_socket_in_container "$container_id"
-  osd_id=$whoami
-  container_exec="{{ container_binary }} exec $container_id"
-  {% elif containerized_deployment and osd_scenario == 'lvm' %}
+  {% if containerized_deployment %}
   osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
   container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}")
   container_exec="{{ container_binary }} exec $container_id"
index 9d603c26552adc989537c5c16ef2e6b088d752f2..545685c6fc1719116752d1ac5e35ec05420bb351 100644 (file)
@@ -44,20 +44,13 @@ devices: []
 osd_auto_discovery: false
 
 # Encrypt your OSD device using dmcrypt
-# If set to True, no matter which osd_objecstore and osd_scenario you use the data will be encrypted
+# If set to True, no matter which osd_objecstore you use the data will be encrypted
 dmcrypt: False
 
 
-osd_scenario: lvm
-valid_osd_scenarios:
-  - lvm
-
-
 dedicated_devices: []
 
-# III. Use ceph-volume to create OSDs from logical volumes.
-# Use 'osd_scenario: lvm' to enable this scenario.
-# when using lvm, not collocated journals.
+# Use ceph-volume to create OSDs from logical volumes.
 # lvm_volumes is a list of dictionaries.
 #
 # Filestore: Each dictionary must contain a data, journal and vg_name key. Any
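
For the device-based path, the defaults above end up looking like the
lvm-batch and lvm-auto-discovery functional tests in this commit (values
illustrative only):

    # explicit device list consumed by 'ceph-volume lvm batch' ...
    devices:
      - /dev/sdb
      - /dev/sdc

    # ... or let ceph-ansible discover unused disks by itself
    # osd_auto_discovery: true

    # dmcrypt: True    # encrypts the OSDs whatever the objectstore is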
diff --git a/roles/ceph-osd/tasks/build_devices.yml b/roles/ceph-osd/tasks/build_devices.yml
deleted file mode 100644 (file)
index 350e085..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: resolve dedicated device link(s)
-  command: readlink -f {{ item }}
-  changed_when: false
-  with_items: "{{ dedicated_devices }}"
-  register: dedicated_devices_prepare_canonicalize
-  when:
-    - osd_scenario == 'non-collocated'
-    - not osd_auto_discovery
-
-- name: set_fact build dedicated_devices from resolved symlinks
-  set_fact:
-    dedicated_devices_tmp: "{{ dedicated_devices_tmp | default([]) + [ item.stdout ] }}"
-  with_items: "{{ dedicated_devices_prepare_canonicalize.results }}"
-  when:
-    - osd_scenario == 'non-collocated'
-    - not osd_auto_discovery
-
-- name: set_fact build final dedicated_devices list
-  set_fact:
-    dedicated_devices: "{{ dedicated_devices_tmp | reject('search','/dev/disk') | list }}"
-  when:
-    - osd_scenario == 'non-collocated'
-    - not osd_auto_discovery
index 10354244098dabac188fc13993973ce403f157f7..32b707584484e79d6aed0c246552188c3d7eb32a 100644 (file)
@@ -29,7 +29,6 @@
   register: result
   until: result is succeeded
   when:
-    - osd_scenario == 'lvm'
     - not is_atomic
   tags:
     - with_pkg
@@ -40,9 +39,6 @@
 - name: include container_options_facts.yml
   include_tasks: container_options_facts.yml
 
-- name: include build_devices.yml
-  include_tasks: build_devices.yml
-
 - name: read information about the devices
   parted:
     device: "{{ item }}"
@@ -53,7 +49,6 @@
 - name: include_tasks scenarios/lvm.yml
   include_tasks: scenarios/lvm.yml
   when:
-    - osd_scenario == 'lvm'
     - lvm_volumes|length > 0
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 - name: include_tasks scenarios/lvm-batch.yml
   include_tasks: scenarios/lvm-batch.yml
   when:
-    - osd_scenario == 'lvm'
     - devices|length > 0
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
-- name: include_tasks activate_osds.yml
-  include_tasks: activate_osds.yml
-  when:
-    - not containerized_deployment
-    - osd_scenario != 'lvm'
-
 - name: include_tasks start_osds.yml
   include_tasks: start_osds.yml
 
index 06215f20ff0a3ac5076c5d3121ea49e5e84538ef..a4af564b3637bdeefd7df794a3e0edd1a6da3037 100644 (file)
     when:
       - ceph_docker_on_openstack
 
-  - name: test if the container image has directory {{ container_bin_path }}
-    command: "{{ container_binary }} run --rm --net=host --entrypoint=test {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -d {{ container_bin_path }}"
-    changed_when: false
-    failed_when: false
-    register: test_container_bin_path
-    when:
-      - osd_scenario != 'lvm'
-
-  - name: test if the container image has the disk_list function
-    command: "{{ container_binary }} run --rm --net=host --entrypoint=stat {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} {{ container_bin_path + '/disk_list.sh' if test_container_bin_path.rc == 0 else 'disk_list.sh' }}"
-    changed_when: false
-    failed_when: false
-    register: disk_list
-    when:
-      - osd_scenario != 'lvm'
-
   - name: generate ceph osd docker run script
     become: true
     template:
 
 - name: systemd start osd
   systemd:
-    name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' and containerized_deployment else item }}
+    name: ceph-osd@{{ item }}
     state: started
     enabled: yes
     masked: no
     daemon_reload: yes
-  with_items: "{{ devices if osd_scenario != 'lvm' and containerized_deployment else ((ceph_osd_ids.stdout | from_json).keys() | list) if osd_scenario == 'lvm' and not containerized_deployment else osd_ids_non_container.stdout_lines }}"
+  with_items: "{{ ((ceph_osd_ids.stdout | from_json).keys() | list) if not containerized_deployment else osd_ids_non_container.stdout_lines }}"
 
 - name: ensure systemd service override directory exists
   file:
index 79779b354f40e0dffa0d4167bdb3d71a281fdb6f..02c2d1f7c2f7c5022c1befffdc2cbbeb5879e73f 100644 (file)
@@ -2,70 +2,6 @@
 # {{ ansible_managed }}
 
 
-{% if osd_scenario != 'lvm' -%}
-{% if disk_list.get('rc') == 0 -%}
-#############
-# VARIABLES #
-#############
-DOCKER_ENV=""
-
-#############
-# FUNCTIONS #
-#############
-function expose_partitions () {
-DOCKER_ENV=$({{ container_binary }} run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
-}
-{% else -%}
-# NOTE(leseb): maintains backwards compatibility with old ceph-docker Jewel images
-# Jewel images prior to https://github.com/ceph/ceph-docker/pull/797
-REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
-function expose_partitions {
-  if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
-    if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
-      {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
-    fi
-  fi
-  if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
-    if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
-      {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
-    fi
-  fi
-  if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
-    part=$(grep "Journal is GPT partition" {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
-    DOCKER_ENV="-e OSD_JOURNAL=$part"
-  fi
-  if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
-    part=$(grep "Journal is GPT partition" {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
-    DOCKER_ENV="-e OSD_JOURNAL=$part"
-  fi
-  if [[ -z $DOCKER_ENV ]]; then
-    # NOTE(leseb): if we arrive here this probably means we just switched from non-containers to containers.
-    # This is tricky as we don't have any info on the type of OSD, this is 'only' a problem for non-collocated scenarios
-    # We can't assume that the 'ceph' is still present so calling Docker exec instead
-    part=$({{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
-    DOCKER_ENV="-e OSD_JOURNAL=$part"
-  fi
-  # if empty, the previous command didn't find anything so we fail
-  if [[ -z $DOCKER_ENV ]]; then
-    echo "ERROR: could not discover ceph partitions"
-    exit 1
-  fi
-}
-
-{% endif -%}
-
-expose_partitions "$1"
-
-# discover osd_objectstore for ceph-disk based osds
-if [[ $DOCKER_ENV =~ "BLUESTORE" ]]; then
-  DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE=1"
-elif [[ $DOCKER_ENV =~ "JOURNAL" ]]; then
-  DOCKER_ENV="$DOCKER_ENV -e OSD_FILESTORE=1"
-fi
-
-{% endif -%}
-
-
 ########
 # MAIN #
 ########
@@ -112,17 +48,9 @@ numactl \
   {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
   -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
   {% endif -%}
-  {% if osd_scenario == 'lvm' -%}
   -v /run/lvm/:/run/lvm/ \
   -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
   -e OSD_ID="$1" \
   --name=ceph-osd-"$1" \
-  {% else -%}
-  $DOCKER_ENV \
-  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
-  -e OSD_DEVICE=/dev/"${1}" \
-  -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-  --name=ceph-osd-{{ ansible_hostname }}-"${1}" \
-  {% endif -%}
   {{ ceph_osd_docker_extra_env }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
index 31e117c689019326b25f2fe815a495060e520118..ea67df29b97e6ff25aa3990a0f4a6adfe0ec8055 100644 (file)
@@ -7,19 +7,10 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-{% if osd_scenario == 'lvm' -%}
 ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
 ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
-{% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
-ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-{{ ansible_hostname }}-%i
-{% endif -%}
 ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
-{% if osd_scenario == 'lvm' -%}
 ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
-{% else %}
-ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
-{% endif -%}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
index aa107cb5a2f70991e39e31a8485af44a9c183634..25d299c1acd586a6fbe9bd2f7617a7ebda389bcd 100644 (file)
   when:
     - iscsi_gw_group_name in group_names
 
-- name: warn users that ceph-disk scenarios will be removed on 3.3
-  debug:
-    msg: |
-        osd_scenario is set to {{ osd_scenario }}, this variable is not used anymore and defaults to 'lvm'.
-        If you have something different than 'lvm', this means you want ceph-ansible to manage your ceph-disk OSDs.
-        So basically, ceph-ansible can still start your ceph-disk osd services
-  run_once: true
-  when:
-    - osd_group_name in group_names
-    - osd_scenario != 'lvm'
index 113d741b37c2a4fd715c4931edda6d47a52cc22b..302634c2cec457d36b2acf19003437ab5e399bf1 100644 (file)
@@ -97,9 +97,7 @@ def node(host, request):
     rolling_update = os.environ.get("ROLLING_UPDATE", "False")
     group_names = ansible_vars["group_names"]
     docker = ansible_vars.get("docker")
-    osd_scenario = ansible_vars.get("osd_scenario")
     radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1)
-    lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
     ceph_release_num = {
         'jewel': 10,
         'kraken': 11,
@@ -121,12 +119,6 @@ def node(host, request):
             request.function, group_names)
         pytest.skip(reason)
 
-    if request.node.get_closest_marker("no_lvm_scenario") and lvm_scenario:
-        pytest.skip("Not a valid test for lvm scenarios")
-
-    if not lvm_scenario and request.node.get_closest_marker("lvm_scenario"):
-        pytest.skip("Not a valid test for non-lvm scenarios")
-
     if request.node.get_closest_marker("no_docker") and docker:
         pytest.skip(
             "Not a valid test for containerized deployments or atomic hosts")
@@ -135,11 +127,6 @@ def node(host, request):
         pytest.skip(
             "Not a valid test for non-containerized deployments or atomic hosts")  # noqa E501
 
-    journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated"
-    if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test:  # noqa E501
-        pytest.skip("Scenario is not using journal collocation")
-
-
 
     data = dict(
         vars=ansible_vars,
index fdd8ac490bf002a5e55db1de72143d16e8af430d..389bf9e14c5f6da53cf51ef111555f27d3694aca 100644 (file)
@@ -10,7 +10,6 @@ monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index 6c0561398c64f14b673e063fbeca856a31f78be9..4ce40af30b70ebc8d5dcc3bd95c2b3246347f006 100644 (file)
@@ -8,7 +8,6 @@ monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index 87a64289c7a6c708307b0e8f20d2ad58c4b2d857..8c484f9ab074d03c945bb2320c70e951d1102a02 100644 (file)
@@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index 99d4aeb33eedafdf255199dc5f644cae1bda3ee7..7e6c1311250fc7e120fd574072efd68a9caca172 100644 (file)
@@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index 672a0f956e891a7fa4789a4bbb7557f2476adfdb..9cea91d6a3646229fa4abac8ec254b94445d9adb 100644 (file)
@@ -1,7 +1,6 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     data_vg: test_group
index e27c474223beeb57b3a5626aeeb52713fb355250..3ec1d6e4c1867094562734c6dccb7ee75a713f52 100644 (file)
@@ -1,7 +1,6 @@
 ---
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
-osd_scenario: lvm
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1
index 8d41cbb17a6c0d013b1ffd3fac9c851f2161dbd6..490f8e591f3be3c8d14411d249784948c560b372 100644 (file)
@@ -11,7 +11,6 @@ public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
 monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index eae5a497c06a75882d6a20d05ba08f66befe25bf..ea2c10f8bd2e98a1a94bb11b74e60d1f2d6900f1 100644 (file)
@@ -6,7 +6,6 @@ public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
 monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index 988ec50e2fab2d33074eec1dea9ca59f2c5a0cbf..9cea91d6a3646229fa4abac8ec254b94445d9adb 100644 (file)
@@ -1,6 +1,5 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
-osd_scenario: lvm
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1
index 988ec50e2fab2d33074eec1dea9ca59f2c5a0cbf..9cea91d6a3646229fa4abac8ec254b94445d9adb 100644 (file)
@@ -1,6 +1,5 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
-osd_scenario: lvm
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1
index 8f095e9a0e1000f465a90344f950f0d5dc81acbb..7cc7ce5741264b14e3a3ef0626112fed30dd815a 100644 (file)
@@ -15,7 +15,6 @@ radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 journal_size: 100
 osd_objectstore: "bluestore"
 crush_device_class: test
-osd_scenario: lvm
 copy_admin_key: true
 osd_auto_discovery: true
 os_tuning_params:
index f2d1c14172b29bae2142f5f9bb075d1747a6a8c7..99dc63fe060769ab92407358db81b0bad0f9927d 100644 (file)
@@ -9,7 +9,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 crush_device_class: test
-osd_scenario: lvm
 copy_admin_key: true
 osd_auto_discovery: true
 os_tuning_params:
index 81b853c45f403eaa61d7768a49f7fc64c1824af4..5265e2cf1496144704514700eea7f4855497fdc9 100644 (file)
@@ -15,7 +15,6 @@ radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 journal_size: 100
 osd_objectstore: "bluestore"
 crush_device_class: test
-osd_scenario: lvm
 copy_admin_key: true
 devices:
   - /dev/sdb
index cde0eaa22b0809b68e646a399a843ff7051b2a66..fb6611b329aea2f84a465ff3182b0f2dc25108f8 100644 (file)
@@ -9,7 +9,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 crush_device_class: test
-osd_scenario: lvm
 copy_admin_key: true
 devices:
   - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00002
index f080d7db0d38ce332f077816ff07b31c94cf0112..83e87a486ba49c45a7e9a3379677e40cb71321d2 100644 (file)
@@ -15,7 +15,6 @@ osd_objectstore: "filestore"
 copy_admin_key: true
 containerized_deployment: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     journal: /dev/sdc1
index c23c858b07eccedb67b0a7d437e623e470fb29bf..a9d14cb948850cbf516aef7471f2a389be115c2f 100644 (file)
@@ -10,7 +10,6 @@ journal_size: 100
 osd_objectstore: "filestore"
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     journal: /dev/sdc1
index 86e1fbee071f3324950d975e20768dbcd0ef364e..2505079834f417d245827bf0ecd6ba48b64eddf6 100644 (file)
@@ -9,73 +9,60 @@
   gather_facts: false
   become: yes
   tasks:
-
-    - block:
-      - name: check if it is atomic host
-        stat:
-          path: /run/ostree-booted
-        register: stat_ostree
-        tags:
-          - always
-
-      - name: set_fact is_atomic
-        set_fact:
-          is_atomic: '{{ stat_ostree.stat.exists }}'
-        tags:
-          - always
-
-      # Some images may not have lvm2 installed
-      - name: install lvm2
-        package:
-          name: lvm2
-          state: present
-        register: result
-        until: result is succeeded
-        when:
-          - not is_atomic
-
-      - name: create physical volume
-        command: pvcreate /dev/sdb
-        failed_when: false
-
-      - name: create volume group
-        command: vgcreate test_group /dev/sdb
-        failed_when: false
-
-      - name: create logical volume 1
-        command: lvcreate --yes -l 50%FREE -n data-lv1 test_group
-        failed_when: false
-
-      - name: create logical volume 2
-        command: lvcreate --yes -l 50%FREE -n data-lv2 test_group
-        failed_when: false
-
-      - name: partition /dev/sdc for journals
-        parted:
-          device: /dev/sdc
-          number: 1
-          part_start: 0%
-          part_end: 50%
-          unit: '%'
-          label: gpt
-          state: present
-
-      - name: partition /dev/sdc for journals
-        parted:
-          device: /dev/sdc
-          number: 2
-          part_start: 50%
-          part_end: 100%
-          unit: '%'
-          state: present
-          label: gpt
-
-      - name: create journals vg from /dev/sdc2
-        lvg:
-          vg: journals
-          pvs: /dev/sdc2
-
-      - name: create journal1 lv
-        command: lvcreate --yes -l 100%FREE -n journal1 journals
-        failed_when: false
-      when: osd_scenario == 'lvm'
+    - name: check if it is atomic host
+      stat:
+        path: /run/ostree-booted
+      register: stat_ostree
+      tags:
+        - always
+    - name: set_fact is_atomic
+      set_fact:
+        is_atomic: '{{ stat_ostree.stat.exists }}'
+      tags:
+        - always
+    # Some images may not have lvm2 installed
+    - name: install lvm2
+      package:
+        name: lvm2
+        state: present
+      register: result
+      until: result is succeeded
+      when:
+        - not is_atomic
+    - name: create physical volume
+      command: pvcreate /dev/sdb
+      failed_when: false
+    - name: create volume group
+      command: vgcreate test_group /dev/sdb
+      failed_when: false
+    - name: create logical volume 1
+      command: lvcreate --yes -l 50%FREE -n data-lv1 test_group
+      failed_when: false
+    - name: create logical volume 2
+      command: lvcreate --yes -l 50%FREE -n data-lv2 test_group
+      failed_when: false
+    - name: partition /dev/sdc for journals
+      parted:
+        device: /dev/sdc
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+    - name: partition /dev/sdc for journals
+      parted:
+        device: /dev/sdc
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+    - name: create journals vg from /dev/sdc2
+      lvg:
+        vg: journals
+        pvs: /dev/sdc2
+    - name: create journal1 lv
+      command: lvcreate --yes -l 100%FREE -n journal1 journals
+      failed_when: false
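
For context, the LVs and the journal partition created above are exactly what
the lvm_volumes entries in the test group_vars point at; a sketch of that
mapping (journal keys only apply to the filestore-based jobs):

    lvm_volumes:
      - data: data-lv1
        data_vg: test_group
        journal: /dev/sdc1       # raw partition created above
      - data: data-lv2
        data_vg: test_group
        journal: journal1        # LV inside the 'journals' VG created above
        journal_vg: journals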
index ae671a1be2d837977c7fb41f872949714a5bdb02..e4321f41cd798adfc67379b34e011f57ecf74ea3 100644 (file)
@@ -49,7 +49,6 @@ all:
     - {name: vms, pg_num: 8, rule_name: ''}
     - {name: volumes, pg_num: 8, rule_name: ''}
     osd_objectstore: filestore
-    osd_scenario: collocated
     ceph_osd_docker_run_script_path: /opt
     pools: []
     public_network: 192.168.95.0/24
index 672a0f956e891a7fa4789a4bbb7557f2476adfdb..9cea91d6a3646229fa4abac8ec254b94445d9adb 100644 (file)
@@ -1,7 +1,6 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     data_vg: test_group
index 672a0f956e891a7fa4789a4bbb7557f2476adfdb..9cea91d6a3646229fa4abac8ec254b94445d9adb 100644 (file)
@@ -1,7 +1,6 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     data_vg: test_group
index 59346718a67cda5b719966af9bfc0511ecc0d36f..c451e157c8342de447e7bfc5cff9252618abec8c 100644 (file)
@@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index af43d618f50f11bca98b338e0f8c4a2df7bf1c47..2c42970c6f26173c39cbd3aa32fe27a8e4fc3445 100644 (file)
@@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index ab53a5f407c1a657b53d97d13b3da4970ade4658..b89bd8206059acec8b7d4435ebdd53f8c3b1b4a7 100644 (file)
@@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index 70ba003d909b3d5e5effbb60e597141501c898ff..0fb25fb51e93d0428c865a00ec24032bdad705b7 100644 (file)
@@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
index 672a0f956e891a7fa4789a4bbb7557f2476adfdb..9cea91d6a3646229fa4abac8ec254b94445d9adb 100644 (file)
@@ -1,7 +1,6 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     data_vg: test_group
index e27c474223beeb57b3a5626aeeb52713fb355250..3ec1d6e4c1867094562734c6dccb7ee75a713f52 100644 (file)
@@ -1,7 +1,6 @@
 ---
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
-osd_scenario: lvm
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1
index 7a4bf927609df7419112c9c2cfbe9e0b1ab0b559..2558deb1be2e117460ed267b27f9eccfc9c63d63 100644 (file)
@@ -2,7 +2,6 @@
 ceph_osd_docker_run_script_path: /var/tmp
 journal_size: 100
 osd_objectstore: "filestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     journal: /dev/sdc1
index d07a317fff43731f094b3963415a3350a6b1db9f..59fd2abf8450c436d2b2412867ca5decb2a47f59 100644 (file)
@@ -1,7 +1,6 @@
 ---
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
-osd_scenario: lvm
 journal_size: 100
 osd_objectstore: "filestore"
 lvm_volumes: