git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Add role definitions of ceph-rgw-loadbalancer
author: guihecheng <guihecheng@cmiot.chinamobile.com>
Thu, 4 Apr 2019 02:54:41 +0000 (10:54 +0800)
committer: mergify[bot] <mergify[bot]@users.noreply.github.com>
Thu, 6 Jun 2019 19:44:30 +0000 (19:44 +0000)
This adds support for an rgw loadbalancer based on HAProxy and Keepalived.
We define a single role ceph-rgw-loadbalancer and include HAProxy and
Keepalived configurations all in this.

A single haproxy backend is used to balance all RGW instances and
a single frontend is exported via a single port, default 80.

Keepalived is used to maintain the high availability of all haproxy
instances. You are free to use any number of VIPs. A single VIP is
shared across all keepalived instances and there will be one
master for one VIP, selected sequentially, and others serve as
backups.
This assumes that each keepalived instance is on the same node as
one haproxy instance and we use a simple check script to detect
the state of each haproxy instance and trigger the VIP failover
upon its failure.

Signed-off-by: guihecheng <guihecheng@cmiot.chinamobile.com>
(cherry picked from commit 35d40c65f8c7f785a53978210c54f642e1384feb)

14 files changed:
generate_group_vars_sample.sh
group_vars/all.yml.sample
group_vars/rgwloadbalancers.yml.sample [new file with mode: 0644]
group_vars/rhcs.yml.sample
roles/ceph-defaults/defaults/main.yml
roles/ceph-infra/tasks/configure_firewall.yml
roles/ceph-rgw-loadbalancer/defaults/main.yml [new file with mode: 0644]
roles/ceph-rgw-loadbalancer/handlers/main.yml [new file with mode: 0644]
roles/ceph-rgw-loadbalancer/meta/main.yml [new file with mode: 0644]
roles/ceph-rgw-loadbalancer/tasks/main.yml [new file with mode: 0644]
roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml [new file with mode: 0644]
roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml [new file with mode: 0644]
roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2 [new file with mode: 0644]
roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2 [new file with mode: 0644]

index 96a910ffe14fea745801a1a42ddc4af93f46df51..dea3a23bbabe7e19d7fad1fe4d2c0cb7607a5c52 100755 (executable)
@@ -70,6 +70,8 @@ for role in "$basedir"/roles/ceph-*; do
     output="rbdmirrors.yml.sample"
   elif [[ $rolename == "ceph-iscsi-gw" ]]; then
     output="iscsigws.yml.sample"
+  elif [[ $rolename == "ceph-rgw-loadbalancer" ]]; then
+    output="rgwloadbalancers.yml.sample"
   else
     output="${rolename:5}s.yml.sample"
   fi
index 8c3dec90e08457779bcb0021ca6fe3553039f6c6..de3ee1d397d2e3ccaf3ff5d4aa2d08dab1fe4a3d 100644 (file)
@@ -54,6 +54,7 @@ dummy:
 #client_group_name: clients
 #iscsi_gw_group_name: iscsigws
 #mgr_group_name: mgrs
+#rgwloadbalancer_group_name: rgwloadbalancers
 
 # If configure_firewall is true, then ansible will try to configure the
 # appropriate firewalling rules so that Ceph daemons can communicate
@@ -70,6 +71,7 @@ dummy:
 #ceph_rbdmirror_firewall_zone: public
 #ceph_iscsi_firewall_zone: public
 #ceph_dashboard_firewall_zone: public
+#ceph_rgwloadbalancer_firewall_zone: public
 
 # Generate local ceph.conf in fetch directory
 #ceph_conf_local: false
diff --git a/group_vars/rgwloadbalancers.yml.sample b/group_vars/rgwloadbalancers.yml.sample
new file mode 100644 (file)
index 0000000..05e9e57
--- /dev/null
@@ -0,0 +1,24 @@
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+#haproxy_frontend_port: 80
+#
+#virtual_ips:
+#   - 192.168.238.250
+#   - 192.168.238.251
+#
+#virtual_ip_netmask: 24
+#virtual_ip_interface: ens33
+
index 4cd9c45b6c8b4d3e62bc8af6e203667811f44e48..0b7c020737e4fed77b08934147a0fd41619e695b 100644 (file)
@@ -54,6 +54,7 @@ fetch_directory: ~/ceph-ansible-keys
 #client_group_name: clients
 #iscsi_gw_group_name: iscsigws
 #mgr_group_name: mgrs
+#rgwloadbalancer_group_name: rgwloadbalancers
 
 # If configure_firewall is true, then ansible will try to configure the
 # appropriate firewalling rules so that Ceph daemons can communicate
@@ -70,6 +71,7 @@ fetch_directory: ~/ceph-ansible-keys
 #ceph_rbdmirror_firewall_zone: public
 #ceph_iscsi_firewall_zone: public
 #ceph_dashboard_firewall_zone: public
+#ceph_rgwloadbalancer_firewall_zone: public
 
 # Generate local ceph.conf in fetch directory
 #ceph_conf_local: false
index 03b0e175b7391c8f4ba3cc1b287970da046e7cdc..50e686d9e3c009b799d03cf66d0ebb930c0bc107 100644 (file)
@@ -46,6 +46,7 @@ rbdmirror_group_name: rbdmirrors
 client_group_name: clients
 iscsi_gw_group_name: iscsigws
 mgr_group_name: mgrs
+rgwloadbalancer_group_name: rgwloadbalancers
 
 # If configure_firewall is true, then ansible will try to configure the
 # appropriate firewalling rules so that Ceph daemons can communicate
@@ -62,6 +63,7 @@ ceph_nfs_firewall_zone: public
 ceph_rbdmirror_firewall_zone: public
 ceph_iscsi_firewall_zone: public
 ceph_dashboard_firewall_zone: public
+ceph_rgwloadbalancer_firewall_zone: public
 
 # Generate local ceph.conf in fetch directory
 ceph_conf_local: false
index 3f1dbb6b413652cae3a43659415d7bd00a442e93..a4431600254a0731d0685347a3641c3bd7f48f19 100644 (file)
           state: enabled
     when: dashboard_enabled
 
+  - name: open haproxy ports
+    firewalld:
+      port: "{{ haproxy_frontend_port | default(80) }}/tcp"
+      zone: "{{ ceph_rgwloadbalancer_firewall_zone }}"
+      source: "{{ public_network }}"
+      permanent: true
+      immediate: true
+      state: enabled
+    when:
+      - rgwloadbalancer_group_name is defined
+      - rgwloadbalancer_group_name in group_names
+    tags:
+      - firewall
+
+  - name: add rich rule for keepalived vrrp
+    firewalld:
+      rich_rule: 'rule protocol value="vrrp" accept'
+      permanent: true
+      immediate: true
+      state: enabled
+    when:
+      - rgwloadbalancer_group_name is defined
+      - rgwloadbalancer_group_name in group_names
+    tags:
+      - firewall
+
 - meta: flush_handlers
diff --git a/roles/ceph-rgw-loadbalancer/defaults/main.yml b/roles/ceph-rgw-loadbalancer/defaults/main.yml
new file mode 100644 (file)
index 0000000..69370c2
--- /dev/null
@@ -0,0 +1,15 @@
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+haproxy_frontend_port: 80
+#
+#virtual_ips:
+#   - 192.168.238.250
+#   - 192.168.238.251
+#
+#virtual_ip_netmask: 24
+#virtual_ip_interface: ens33
diff --git a/roles/ceph-rgw-loadbalancer/handlers/main.yml b/roles/ceph-rgw-loadbalancer/handlers/main.yml
new file mode 100644 (file)
index 0000000..b75d933
--- /dev/null
@@ -0,0 +1,10 @@
+---
+- name: restart haproxy
+  service:
+    name: haproxy
+    state: restarted
+
+- name: restart keepalived
+  service:
+    name: keepalived
+    state: restarted
diff --git a/roles/ceph-rgw-loadbalancer/meta/main.yml b/roles/ceph-rgw-loadbalancer/meta/main.yml
new file mode 100644 (file)
index 0000000..be90117
--- /dev/null
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+  author: Gui Hecheng
+  description: Config HAProxy & Keepalived
+  license: Apache
+  min_ansible_version: 2.8
+  platforms:
+    - name: EL
+      versions:
+        - 7
+  categories:
+    - system
+dependencies: []
diff --git a/roles/ceph-rgw-loadbalancer/tasks/main.yml b/roles/ceph-rgw-loadbalancer/tasks/main.yml
new file mode 100644 (file)
index 0000000..2229e77
--- /dev/null
@@ -0,0 +1,6 @@
+---
+- name: include_tasks pre_requisite.yml
+  include_tasks: pre_requisite.yml
+
+- name: include_tasks start_rgw_loadbalancer.yml
+  include_tasks: start_rgw_loadbalancer.yml
diff --git a/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml b/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml
new file mode 100644 (file)
index 0000000..7d3c33a
--- /dev/null
@@ -0,0 +1,35 @@
+---
+- name: install haproxy and keepalived
+  package:
+    name: ['haproxy', 'keepalived']
+    state: present
+  register: result
+  until: result is succeeded
+
+- name: "generate haproxy configuration file: haproxy.cfg"
+  template:
+    src: haproxy.cfg.j2
+    dest: /etc/haproxy/haproxy.cfg
+    owner: "root"
+    group: "root"
+    mode: "0644"
+    validate: "haproxy -f %s -c"
+  notify:
+    - restart haproxy
+
+- name: set_fact vip to vrrp_instance
+  set_fact:
+      vrrp_instances: "{{ vrrp_instances | default([]) | union([{ 'name': 'VI_' + index|string , 'vip': item, 'master': groups[rgwloadbalancer_group_name][index] }]) }}"
+  loop: "{{ virtual_ips | flatten(levels=1) }}"
+  loop_control:
+      index_var: index
+
+- name: "generate keepalived configuration file: keepalived.conf"
+  template:
+    src: keepalived.conf.j2
+    dest: /etc/keepalived/keepalived.conf
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify:
+    - restart keepalived
diff --git a/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml b/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml
new file mode 100644 (file)
index 0000000..344fe19
--- /dev/null
@@ -0,0 +1,12 @@
+---
+- name: start haproxy
+  service:
+    name: haproxy
+    state: started
+    enabled: yes
+
+- name: start keepalived
+  service:
+    name: keepalived
+    state: started
+    enabled: yes
diff --git a/roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2 b/roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2
new file mode 100644 (file)
index 0000000..31fc3dd
--- /dev/null
@@ -0,0 +1,43 @@
+# {{ ansible_managed  }}
+global
+    log         127.0.0.1 local2
+
+    chroot      /var/lib/haproxy
+    pidfile     /var/run/haproxy.pid
+    maxconn     8000
+    user        haproxy
+    group       haproxy
+    daemon
+    stats socket /var/lib/haproxy/stats
+
+defaults
+    mode                    http
+    log                     global
+    option                  httplog
+    option                  dontlognull
+    option http-server-close
+    option forwardfor       except 127.0.0.0/8
+    option                  redispatch
+    retries                 3
+    timeout http-request    10s
+    timeout queue           1m
+    timeout connect         10s
+    timeout client          1m
+    timeout server          1m
+    timeout http-keep-alive 10s
+    timeout check           10s
+    maxconn                 8000
+
+frontend rgw-frontend
+    bind *:{{ haproxy_frontend_port }}
+    default_backend rgw-backend
+
+backend rgw-backend
+    option forwardfor
+    balance static-rr
+    option httpchk GET /
+{% for host in groups[rgw_group_name] %}
+{% for instance in hostvars[host]['rgw_instances'] %}
+       server {{ 'server-' + hostvars[host]['ansible_hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100
+{% endfor %}
+{% endfor %}
diff --git a/roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2 b/roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2
new file mode 100644 (file)
index 0000000..0c5bd61
--- /dev/null
@@ -0,0 +1,35 @@
+# {{ ansible_managed }}
+! Configuration File for keepalived
+
+global_defs {
+   router_id CEPH_RGW
+}
+
+vrrp_script check_haproxy {
+    script "killall -0 haproxy"
+    weight -20
+    interval 2
+    rise 2
+    fall 2
+}
+
+{% for instance in vrrp_instances %}
+vrrp_instance {{ instance['name'] }} {
+    state {{ 'MASTER' if ansible_hostname == instance['master'] else 'BACKUP' }}
+    priority {{ '100' if ansible_hostname == instance['master'] else '90' }}
+    interface {{ virtual_ip_interface }}
+    virtual_router_id {{ 50 + loop.index }}
+    advert_int 1
+    authentication {
+        auth_type PASS
+        auth_pass 1234
+    }
+    virtual_ipaddress {
+        {{ instance['vip'] }}/{{ virtual_ip_netmask }} dev {{ virtual_ip_interface }}
+    }
+    track_script {
+        check_haproxy
+    }
+}
+
+{% endfor %}