 - name: wait for all osd to be up
   command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd stat -f json"
   register: wait_for_all_osds_up
-  retries: "{{ nb_retry_wait_osd_up }}"
-  delay: "{{ delay_wait_osd_up }}"
+  retries: "86400"
+  delay: "1"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   until:
     - (wait_for_all_osds_up.stdout | from_json)["num_osds"] | int > 0
     - (wait_for_all_osds_up.stdout | from_json)["num_osds"] == (wait_for_all_osds_up.stdout | from_json)["num_up_osds"]
   when:
     - inventory_hostname == ansible_play_hosts_all | last
   tags: wait_all_osds_up
+- name: sleep 365d
+  command: sleep 365d
+  when:
+    - not ansible_check_mode
+    - inventory_hostname == ansible_play_hosts_all | last
+    - sleep_after_fs2bs | default(False) | bool
+
 - name: include crush_rules.yml
   include_tasks: crush_rules.yml
   when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
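
The bumped retry budget works out to roughly 24 hours (86400 retries at a 1-second delay), and the new `sleep 365d` task holds the last play host after the migration when `sleep_after_fs2bs` is set, presumably to keep a CI environment alive for inspection. A minimal sketch of what the `until` conditions test, assuming a two-OSD cluster and a Ceph release that reports `num_osds` at the top level of `ceph osd stat -f json` (older releases nest these fields under `osdmap`); the `jq` rendering is illustrative, not part of the patch:

    # The task re-runs the command until every registered OSD reports up; on a
    # healthy cluster the JSON resembles:
    #   {"epoch": 42, "num_osds": 2, "num_up_osds": 2, "num_in_osds": 2, ...}
    # jq -e exits non-zero while the condition is false, mirroring the retry loop.
    ceph --cluster ceph osd stat -f json \
        | jq -e '.num_osds > 0 and .num_osds == .num_up_osds'
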
 mon0
 [osds]
-osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
-osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
-osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=1024
-osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
-osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
-osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=1024
\ No newline at end of file
+osd0 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
+osd1 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
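
The simplified inventory keeps only two filestore hosts, both on raw devices with journal partitions, which is what lets the partition-only `lvm_setup.yml` run below suffice. As a hedged sketch, each `{'data': ..., 'journal': ...}` pair in `lvm_volumes` corresponds to one filestore OSD, roughly equivalent to these manual `ceph-volume` calls (the role drives `ceph-volume` itself; the commands are illustrative only):

    # One filestore OSD per data/journal pair from the new osd0/osd1 entries:
    ceph-volume lvm create --filestore --data /dev/sda --journal /dev/sdc1
    ceph-volume lvm create --filestore --data /dev/sdb --journal /dev/sdc2
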
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 1
-osd_vms: 6
+osd_vms: 2
 mds_vms: 0
 rgw_vms: 0
 nfs_vms: 0
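
`osd_vms` drops from 6 to 2 to match the trimmed `[osds]` inventory above; the Vagrantfile derives VM names from these counts, so a stale value would boot machines no play ever touches. Assuming a libvirt or virtualbox provider, the resulting topology should look like this (abridged, illustrative output):

    vagrant status
    # mon0    running
    # osd0    running
    # osd1    running
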
   ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1'
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd3:osd4' --tags partitions
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1' --tags partitions
   # deploy the cluster
   ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
   ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osds --extra-vars "\
       delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
       ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
+      sleep_after_fs2bs=True \
   "
   bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"