config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
settings=YAML.load_file(config_file)
-NMONS = settings['mon_vms']
-NOSDS = settings['osd_vms']
-NMDSS = settings['mds_vms']
-NRGWS = settings['rgw_vms']
-NNFSS = settings['nfs_vms']
-RESTAPI = settings['restapi']
-CLIENTS = settings['client_vms']
-SUBNET = settings['subnet']
-BOX = settings['vagrant_box']
-BOX_URL = settings['vagrant_box_url']
-MEMORY = settings['memory']
-STORAGECTL = settings['vagrant_storagectl']
-ETH = settings['eth']
-DOCKER = settings['docker']
+NMONS = settings['mon_vms']
+NOSDS = settings['osd_vms']
+NMDSS = settings['mds_vms']
+NRGWS = settings['rgw_vms']
+NNFSS = settings['nfs_vms']
+RESTAPI = settings['restapi']
+NRBD_MIRRORS = settings['rbd_mirror_vms']
+CLIENTS = settings['client_vms']
+SUBNET = settings['subnet']
+BOX = settings['vagrant_box']
+BOX_URL = settings['vagrant_box_url']
+MEMORY = settings['memory']
+STORAGECTL = settings['vagrant_storagectl']
+ETH = settings['eth']
+DOCKER = settings['docker']
if BOX == 'openstack'
require 'vagrant-openstack-provider'
# these aren't supported by Vagrant, see
# https://github.com/mitchellh/vagrant/issues/3539
ansible.groups = {
- 'mons' => (0..NMONS - 1).map { |j| "#{OSPREFIX}mon#{j}" },
- 'osds' => (0..NOSDS - 1).map { |j| "#{OSPREFIX}osd#{j}" },
- 'mdss' => (0..NMDSS - 1).map { |j| "#{OSPREFIX}mds#{j}" },
- 'rgws' => (0..NRGWS - 1).map { |j| "#{OSPREFIX}rgw#{j}" },
- 'nfss' => (0..NNFSS - 1).map { |j| "#{OSPREFIX}nfs#{j}" },
- 'clients' => (0..CLIENTS - 1).map { |j| "#{OSPREFIX}client#{j}" }
+ 'mons' => (0..NMONS - 1).map { |j| "#{OSPREFIX}mon#{j}" },
+ 'osds' => (0..NOSDS - 1).map { |j| "#{OSPREFIX}osd#{j}" },
+ 'mdss' => (0..NMDSS - 1).map { |j| "#{OSPREFIX}mds#{j}" },
+ 'rgws' => (0..NRGWS - 1).map { |j| "#{OSPREFIX}rgw#{j}" },
+ 'nfss' => (0..NNFSS - 1).map { |j| "#{OSPREFIX}nfs#{j}" },
+ 'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{OSPREFIX}rbd_mirror#{j}" },
+ 'clients' => (0..CLIENTS - 1).map { |j| "#{OSPREFIX}client#{j}" }
}
if RESTAPI then
rgw_containerized_deployment: 'true',
nfs_containerized_deployment: 'true',
restapi_containerized_deployment: 'true',
+ rbd_mirror_containerized_deployment: 'true',
ceph_mon_docker_interface: ETH,
ceph_mon_docker_subnet: "#{SUBNET}.0/24",
ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK,OSD_JOURNAL_SIZE=100",
end
end
+ (0..NRBD_MIRRORS - 1).each do |i|
+ config.vm.define "#{OSPREFIX}rbd_mirror#{i}" do |rbd_mirror|
+ rbd_mirror.vm.hostname = "#{OSPREFIX}ceph-rbd-mirror#{i}"
+ if !OSVM
+ rbd_mirror.vm.network :private_network, ip: "#{SUBNET}.8#{i}"
+ end
+ # Virtualbox
+ rbd_mirror.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ rbd_mirror.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ rbd_mirror.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ end
+ # Parallels
+ rbd_mirror.vm.provider "parallels" do |prl|
+ prl.name = "ceph-rbd-mirror#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+ end
+ end
+
(0..NMONS - 1).each do |i|
config.vm.define "#{OSPREFIX}mon#{i}" do |mon|
mon.vm.hostname = "#{OSPREFIX}ceph-mon#{i}"
##########
# GLOBAL #
##########
+#docker: true
#ceph_docker_dev_image: false
#######
#######
# MDS #
#######
-#mds_containerized_deployment: false
+#mds_containerized_deployment: true
#mds_containerized_deployment_with_kv: false
#kv_type: etcd
#kv_endpoint: 127.0.0.1
###########
# RESTAPI #
###########
-#restapi_containerized_deployment: false
+#restapi_containerized_deployment: true
#ceph_restapi_docker_interface: eth0
#ceph_restapi_port: 5000
#ceph_restapi_docker_username: ceph
#ceph_restapi_docker_imagename: daemon
#ceph_restapi_docker_image_tag: latest
#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
+
+
+##############
+# RBD MIRROR #
+##############
+#rbd_mirror_containerized_deployment: true
+#rbd_mirror_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_rbd_mirror_docker_username: ceph
+#ceph_rbd_mirror_docker_imagename: daemon
+#ceph_rbd_mirror_docker_image_tag: latest
+#ceph_docker_on_openstack: false
+
#osd_containerized_deployment_with_kv: false
#kv_type: etcd
#kv_endpoint: 127.0.0.1
+#kv_port: 4001
#ceph_osd_docker_prepare_env: "OSD_FORCE_ZAP=1"
#ceph_osd_docker_username: ceph
#ceph_osd_docker_imagename: daemon
# Thus if cluster names are identical we can not have them under /etc/ceph
#ceph_rbd_mirror_remote_cluster: ""
+
#################
# CONFIGURATION #
#################
#ceph_rbd_mirror_configure: false
#ceph_rbd_mirror_pool: ""
+
+##########
+# DOCKER #
+##########
+
+#rbd_mirror_containerized_deployment: false
+#rbd_mirror_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_rbd_mirror_docker_username: ceph
+#ceph_rbd_mirror_docker_imagename: daemon
+#ceph_rbd_mirror_docker_image_tag: latest
+#ceph_docker_on_openstack: false
+
# Thus if cluster names are identical we can not have them under /etc/ceph
ceph_rbd_mirror_remote_cluster: ""
+
#################
# CONFIGURATION #
#################
ceph_rbd_mirror_configure: false
ceph_rbd_mirror_pool: ""
+
+
+##########
+# DOCKER #
+##########
+
+rbd_mirror_containerized_deployment: false
+rbd_mirror_containerized_deployment_with_kv: false
+kv_type: etcd
+kv_endpoint: 127.0.0.1
+ceph_rbd_mirror_docker_username: ceph
+ceph_rbd_mirror_docker_imagename: daemon
+ceph_rbd_mirror_docker_image_tag: latest
+ceph_docker_on_openstack: false
categories:
- system
dependencies:
- - { role: ceph.ceph-common }
+ - { role: ceph.ceph-common, when: not rbd_mirror_containerized_deployment }
--- /dev/null
+---
+- name: set config and keys paths
+ set_fact:
+ ceph_config_keys:
+ - /etc/ceph/ceph.client.admin.keyring
+ - /etc/ceph/ceph.conf
+ - /etc/ceph/monmap
+ - /etc/ceph/ceph.mon.keyring
+ - /var/lib/ceph/bootstrap-osd/ceph.keyring
+ - /var/lib/ceph/bootstrap-rgw/ceph.keyring
+ - /var/lib/ceph/bootstrap-mds/ceph.keyring
+
+- name: stat for ceph config and keys
+ stat:
+ path: "{{ item }}"
+ with_items: ceph_config_keys
+ changed_when: false
+ failed_when: false
+ register: statleftover
+
+- name: fail if we find existing cluster files
+ fail:
+ msg: "looks like no cluster is running but ceph files are present, please remove them"
+ with_together:
+ - ceph_config_keys
+ - statleftover.results
+ when: item.1.stat.exists == true
--- /dev/null
+---
+# NOTE (leseb): we cannot use docker inspect with the 'format' field because of
+# https://github.com/ansible/ansible/issues/10156
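+# (the Go template braces that --format expects would themselves be expanded by
+# Jinja2, so instead the CEPH_VERSION value is pulled out of the image
+# environment with awk)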
+- name: inspect ceph version
+ shell: docker inspect "docker.io/{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
+ changed_when: false
+ failed_when: false
+ run_once: true
+ register: ceph_version
+
+- set_fact:
+    after_hammer: true
+  when: ceph_version.stdout not in ['firefly', 'giant', 'hammer']
+- set_fact:
+    after_hammer: false
+  when: ceph_version.stdout in ['firefly', 'giant', 'hammer']
+
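+# NOTE: from infernalis onwards the containerized daemons run as the
+# unprivileged ceph user (uid/gid 64045 inside the ceph/daemon image), hence
+# the different ownership below; hammer and earlier run as root.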
+- name: create bootstrap directories (for hammer and before)
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: root
+ group: root
+ mode: "0755"
+ with_items:
+ - /etc/ceph/
+ when: not after_hammer
+
+- name: create bootstrap directories (after hammer)
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "64045"
+ group: "64045"
+ mode: "0755"
+ with_items:
+ - /etc/ceph/
+ when: after_hammer
--- /dev/null
+---
+# NOTE: the rbd mirror container needs the admin key
+# so it can connect to the cluster
+- name: set config and keys paths
+ set_fact:
+ ceph_config_keys:
+ - /etc/ceph/ceph.conf
+ - /etc/ceph/ceph.client.admin.keyring
+
+- name: stat for ceph config and keys
+ local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
+ with_items: ceph_config_keys
+ changed_when: false
+ become: false
+ failed_when: false
+ register: statconfig
+
+- name: try to fetch ceph config and keys
+ copy:
+ src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
+ dest: "{{ item.0 }}"
+ owner: root
+ group: root
+ mode: 0644
+ changed_when: false
+ with_together:
+ - ceph_config_keys
+ - statconfig.results
+ when: item.1.stat.exists == true
--- /dev/null
+---
+- name: check if a cluster is already running
+ shell: "docker ps | grep -sq 'ceph/daemon'"
+ register: ceph_health
+ changed_when: false
+ failed_when: false
+
+- name: check if it is Atomic host
+ stat: path=/run/ostree-booted
+ register: stat_ostree
+
+- name: set fact for using Atomic host
+ set_fact:
+ is_atomic='{{ stat_ostree.stat.exists }}'
+
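+# only look for leftover cluster files when no ceph container is running yet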
+- include: checks.yml
+ when: ceph_health.rc != 0
+
+- include: pre_requisite.yml
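+# the shared fetch_image task from ceph-common expects the generic
+# ceph_docker_* variables, so map the rbd mirror specific ones onto them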
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
+ vars:
+ ceph_docker_username: "{{ ceph_rbd_mirror_docker_username }}"
+ ceph_docker_imagename: "{{ ceph_rbd_mirror_docker_imagename }}"
+ ceph_docker_image_tag: "{{ ceph_rbd_mirror_docker_image_tag }}"
+- include: dirs_permissions.yml
+- include: fetch_configs.yml
+
+- include: selinux.yml
+ when: ansible_os_family == 'RedHat'
+
+- include: start_docker_rbd_mirror.yml
--- /dev/null
+---
+- name: install pip and docker on ubuntu
+ apt:
+ name: "{{ item }}"
+ state: present
+ update_cache: yes
+ with_items:
+ - python-pip
+ - docker
+ - docker.io
+ when: ansible_distribution == 'Ubuntu'
+ tags:
+ with_pkg
+
+- name: install pip and docker on debian
+ apt:
+ name: "{{ item }}"
+ state: present
+ update_cache: yes
+ with_items:
+ - python-pip
+ - docker-engine
+ when: ansible_distribution == 'Debian'
+ tags:
+ with_pkg
+
+# install epel for pip
+- name: install epel on redhat
+ yum:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - epel-release
+ when:
+ - ansible_os_family == 'RedHat'
+ - ansible_pkg_mgr == "yum"
+ tags:
+ with_pkg
+ failed_when: false
+
+- name: install pip on redhat
+ yum:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - python-pip
+ when:
+ - ansible_os_family == 'RedHat'
+ - ansible_pkg_mgr == "yum"
+ tags:
+ with_pkg
+
+- name: install docker-engine on redhat
+ yum:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - docker-engine
+ when:
+ - ansible_os_family == 'RedHat'
+ - ansible_pkg_mgr == "yum"
+ tags:
+ with_pkg
+ failed_when: false
+
+# for CentOS
+- name: install docker on redhat
+ yum:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - docker
+ when:
+ - ansible_os_family == 'RedHat'
+ - ansible_pkg_mgr == "yum"
+ tags:
+ with_pkg
+ failed_when: false
+
+# the docker package could be either docker-engine or docker
+- name: install pip and docker on redhat
+ dnf:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - python-pip
+ - docker-engine
+ when:
+ - ansible_os_family == 'RedHat'
+ - ansible_pkg_mgr == "dnf"
+ tags:
+ with_pkg
+
+- name: start docker service
+ service:
+ name: docker
+ state: started
+ enabled: yes
+ tags:
+ with_pkg
+
+# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
+# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
+- name: install six
+ pip:
+ name: six
+ version: 1.9.0
+ tags:
+ with_pkg
+
+# NOTE (leseb): pin docker-py to 1.1.0 because of https://github.com/ansible/ansible-modules-core/issues/1227
+- name: install docker-py
+ pip:
+ name: docker-py
+ version: 1.1.0
+ tags:
+ with_pkg
+ when: ansible_version['full'] | version_compare('2.1.0.0', '<')
+
+- name: install docker-py
+ pip:
+ name: docker-py
+ state: latest
+ tags:
+ with_pkg
+ when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
--- /dev/null
+---
+- name: check if selinux is enabled
+ command: getenforce
+ register: sestatus
+ changed_when: false
+
+- name: set selinux permissions
+ shell: chcon -Rt svirt_sandbox_file_t {{ item }}
+ with_items:
+ - /etc/ceph
+ - /var/lib/ceph
+ changed_when: false
+ when: sestatus.stdout != 'Disabled'
--- /dev/null
+---
+# Use systemd to manage the container on RedHat family (including Atomic) and CoreOS hosts
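+# (render the unit into /var/lib/ceph, link it into the multi-user target,
+# enable it, reload systemd and finally start the container)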
+- name: generate systemd unit file
+ become: true
+ template:
+ src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
+ dest: /var/lib/ceph/ceph-rbd-mirror@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS'
+
+- name: link systemd unit file for rbd mirror instance
+ file:
+ src: /var/lib/ceph/ceph-rbd-mirror@.service
+ dest: /etc/systemd/system/multi-user.target.wants/ceph-rbd-mirror@{{ ansible_hostname }}.service
+ state: link
+ when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS'
+
+- name: enable systemd unit file for rbd mirror instance
+ command: systemctl enable /etc/systemd/system/multi-user.target.wants/ceph-rbd-mirror@{{ ansible_hostname }}.service
+ failed_when: false
+ changed_when: false
+ when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS'
+
+- name: reload systemd unit files
+ command: systemctl daemon-reload
+ changed_when: false
+ failed_when: false
+ when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS'
+
+- name: systemd start rbd mirror container
+ service:
+ name: ceph-rbd-mirror@{{ ansible_hostname }}
+ state: started
+ enabled: yes
+ changed_when: false
+ when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS'
+
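+# on every other distribution just run the container directly through the
+# Ansible docker module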
+- name: run the ceph rbd mirror docker image
+ docker:
+ image: "{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}"
+ name: ceph-{{ ansible_hostname }}-rbd-mirror
+ net: host
+ state: running
+ volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
+ when: ansible_os_family != 'RedHat' and ansible_os_family != 'CoreOS'
---
- include: pre_requisite.yml
+ when: not rbd_mirror_containerized_deployment
+
- include: start_rbd_mirror.yml
+ when: not rbd_mirror_containerized_deployment
+
- include: configure_mirroring.yml
- when: ceph_rbd_mirror_configure
+ when:
+ - ceph_rbd_mirror_configure
+ - not rbd_mirror_containerized_deployment
+
+- include: ./docker/main.yml
+ when: rbd_mirror_containerized_deployment
--- /dev/null
+[Unit]
+Description=Ceph RBD mirror
+After=docker.service
+
+[Service]
+EnvironmentFile=-/etc/environment
+ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }}
+ExecStartPre=-/usr/bin/docker rm {{ ansible_hostname }}
+ExecStart=/usr/bin/docker run --rm --net=host \
+ {% if not rbd_mirror_containerized_deployment_with_kv -%}
+ -v /var/lib/ceph:/var/lib/ceph \
+ -v /etc/ceph:/etc/ceph \
+ {% else -%}
+ -e KV_TYPE={{kv_type}} \
+ -e KV_IP={{kv_endpoint}} \
+ {% endif -%}
+ --privileged \
+ -e CEPH_DAEMON=RBD_MIRROR \
+ --name={{ ansible_hostname }} \
+ {{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}
+ExecStopPost=-/usr/bin/docker stop {{ ansible_hostname }}
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+
+[Install]
+WantedBy=multi-user.target
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
+rbd_mirror_vms: 0
client_vms: 0
# Deploy RESTAPI on each of the Monitors