cephadm: HA for RGW endpoints 38615/head
author    Daniel-Pivonka <dpivonka@redhat.com>
          Mon, 26 Oct 2020 18:34:39 +0000 (14:34 -0400)
committer Adam King <adking@redhat.com>
          Fri, 8 Jan 2021 22:00:51 +0000 (17:00 -0500)
Cephadm deploys keepalived and HAProxy to provide high availability for RGW endpoints

Fixes: https://tracker.ceph.com/issues/45116
Signed-off-by: Daniel-Pivonka <dpivonka@redhat.com>
Signed-off-by: Adam King <adking@redhat.com>
Signed-off-by: Juan Miguel Olmo Martínez <jolmomar@redhat.com>
15 files changed:
doc/images/HAProxy_for_RGW.svg [new file with mode: 0644]
doc/mgr/orchestrator.rst
src/cephadm/cephadm
src/pybind/mgr/cephadm/inventory.py
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/schedule.py
src/pybind/mgr/cephadm/serve.py
src/pybind/mgr/cephadm/services/cephadmservice.py
src/pybind/mgr/cephadm/services/ha_rgw.py [new file with mode: 0644]
src/pybind/mgr/cephadm/templates/services/haproxy/haproxy.cfg.j2 [new file with mode: 0644]
src/pybind/mgr/cephadm/templates/services/keepalived/keepalived.conf.j2 [new file with mode: 0644]
src/pybind/mgr/cephadm/tests/test_spec.py
src/pybind/mgr/orchestrator/_interface.py
src/pybind/mgr/orchestrator/module.py
src/python-common/ceph/deployment/service_spec.py

diff --git a/doc/images/HAProxy_for_RGW.svg b/doc/images/HAProxy_for_RGW.svg
new file mode 100644 (file)
index 0000000..c2fc95f
--- /dev/null
@@ -0,0 +1 @@
+[SVG diagram omitted: RGW clients reach a keepalived-managed virtual IP (one keepalived master, the others backup) fronting HAProxy instances on Hosts 1-3, which load-balance requests across the RGW daemons]
index 8940d463efdb506c4a5fa4edb537b4f00c168ddd..f22a4a684b09dddb459e61969e99a6a29bce4b63 100644 (file)
@@ -177,7 +177,7 @@ Example::
 
 When the parameter ``all-available-devices`` or a DriveGroup specification is used, a cephadm service is created.
 This service guarantees that all available devices or devices included in the DriveGroup will be used for OSDs.
-Note that the effect of ``--all-available-devices`` is persistent; that is, drives which are added to the system 
+Note that the effect of ``--all-available-devices`` is persistent; that is, drives which are added to the system
 or become available (say, by zapping) after the command is complete will be automatically found and added to the cluster.
 
 That is, after using::
@@ -312,7 +312,7 @@ error if it doesn't know how to do this transition.
 Update the number of monitor hosts::
 
     ceph orch apply mon --placement=<placement> [--dry-run]
-    
+
 Where ``placement`` is a :ref:`orchestrator-cli-placement-spec`.
 
 Each host can optionally specify a network for the monitor to listen on.
@@ -320,7 +320,7 @@ Each host can optionally specify a network for the monitor to listen on.
 Update the number of manager hosts::
 
     ceph orch apply mgr --placement=<placement> [--dry-run]
-    
+
 Where ``placement`` is a :ref:`orchestrator-cli-placement-spec`.
 
 ..
@@ -413,6 +413,174 @@ Service Commands::
 
     ceph orch <start|stop|restart|redeploy|reconfig> <service_name>
 
+
+.. _orchestrator-haproxy-service-spec:
+
+High availability service for RGW
+=================================
+
+This service allows the user to create a highly available RGW service
+with a minimal set of configuration options.
+
+The orchestrator will automatically deploy and configure several HAProxy and
+Keepalived containers to ensure continuity of the RGW service as long as the
+Ceph cluster has at least one RGW daemon running.
+
+The following diagram shows how this service works:
+
+.. image:: ../images/HAProxy_for_RGW.svg
+
+There are N hosts where the HA RGW service is deployed. This means that an
+HAProxy and a keepalived daemon are running on each of these hosts.
+Keepalived is used to provide a "virtual IP" bound to these hosts. All RGW
+clients use this "virtual IP" to connect to the RGW service.
+
+Each keepalived daemon checks every few seconds whether the HAProxy daemon
+running on the same host is responding. It also checks that the "master"
+keepalived daemon is running without problems.
+
+If the "master" keepalived daemon or the Active HAproxy is not responding, one
+of the keeplived daemons running in backup mode will be elected as master, and
+the "virtual ip" will be moved to that node.
+
+The active HAProxy also acts as a load balancer, distributing all RGW requests
+among the available RGW daemons.
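+
+The following is not the ``keepalived.conf.j2`` template added by this change
+(the template itself is not reproduced in this diff); it is only a hand-written
+sketch of the mechanism, using values from the example specification further
+down::
+
+    vrrp_script check_haproxy {
+        script "killall -0 haproxy"    # succeeds while an haproxy process runs
+        interval 2
+        weight -20
+    }
+
+    vrrp_instance RGW_VIP {
+        state MASTER                   # BACKUP on the other hosts
+        interface eth0                 # virtual_ip_interface
+        virtual_router_id 51
+        priority 100                   # lower priority on the BACKUP hosts
+        virtual_ipaddress {
+            192.168.20.1/24            # virtual_ip_address
+        }
+        track_script {
+            check_haproxy              # demote this node if HAProxy stops responding
+        }
+    }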
+
+**Prerequisites:**
+
+* At least two RGW daemons running in the Ceph cluster
+* Operating system prerequisites:
+  In order for the Keepalived service to forward network packets properly to the
+  real servers, each router node must have IP forwarding turned on in the kernel.
+  To enable it, set the following kernel parameter::
+
+    net.ipv4.ip_forward = 1
+
+  Load balancing with HAProxy and Keepalived at the same time also requires the
+  ability to bind to an IP address that is nonlocal, meaning one that is not
+  assigned to a device on the local system. This allows a running load balancer
+  instance to bind to a non-local IP for failover.
+  To allow this, set the following kernel parameter::
+
+    net.ipv4.ip_nonlocal_bind = 1
+
+  Be sure to set both options in the file ``/etc/sysctl.conf`` so that the
+  values persist across host restarts, as shown in the sketch below.
+  These configuration changes must be applied on all the hosts where the
+  HAProxy for RGW service is going to be deployed.
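+
+  A minimal sketch of persisting both settings and applying them without a
+  reboot::
+
+    # appended to /etc/sysctl.conf
+    net.ipv4.ip_forward = 1
+    net.ipv4.ip_nonlocal_bind = 1
+
+  followed by::
+
+    sysctl -p /etc/sysctl.conf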
+
+
+**Deploying the high availability service for RGW**
+
+Use the command::
+
+    ceph orch apply -i <service_spec_file>
+
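+For example, assuming the specification shown below is saved in a file named
+``ha-rgw.yaml`` (the file name is only an example), the service is created
+with::
+
+    ceph orch apply -i ha-rgw.yaml
+
+The resulting service and its daemons can then be inspected with ``ceph orch
+ls`` and ``ceph orch ps``.
+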
+**Service specification file:**
+
+It is a YAML-format file with the following properties:
+
+.. code-block:: yaml
+
+    service_type: ha-rgw
+    service_id: haproxy_for_rgw
+    placement:
+      hosts:
+        - host1
+        - host2
+        - host3
+    spec:
+      virtual_ip_interface: <string> # ex: eth0
+      virtual_ip_address: <string>/<string> # ex: 192.168.20.1/24
+      frontend_port: <integer>  # ex: 8080
+      ha_proxy_port: <integer> # ex: 1967
+      ha_proxy_stats_enabled: <boolean> # ex: true
+      ha_proxy_stats_user: <string> # ex: admin
+      ha_proxy_stats_password: <string> # ex: admin
+      ha_proxy_enable_prometheus_exporter: <boolean> # ex: true
+      ha_proxy_monitor_uri: <string> # ex: /haproxy_health
+      keepalived_user: <string> # ex: admin
+      keepalived_password: <string> # ex: admin
+      ha_proxy_frontend_ssl_certificate: <optional string> # ex:
+        [
+          "-----BEGIN CERTIFICATE-----",
+          "MIIDZTCCAk2gAwIBAgIUClb9dnseOsgJWAfhPQvrZw2MP2kwDQYJKoZIhvcNAQEL",
+          ....
+          "-----END CERTIFICATE-----",
+          "-----BEGIN PRIVATE KEY-----",
+          ....
+          "sCHaZTUevxb4h6dCEk1XdPr2O2GdjV0uQ++9bKahAy357ELT3zPE8yYqw7aUCyBO",
+          "aW5DSCo8DgfNOgycVL/rqcrc",
+          "-----END PRIVATE KEY-----"
+        ]
+      ha_proxy_frontend_ssl_port: <optional integer> # ex: 8090
+      ha_proxy_ssl_dh_param: <optional integer> # ex: 1024
+      ha_proxy_ssl_ciphers: <optional string> # ex: ECDH+AESGCM:!MD5
+      ha_proxy_ssl_options: <optional string> # ex: no-sslv3
+      haproxy_container_image: <optional string> # ex: haproxy:2.4-dev3-alpine
+      keepalived_container_image: <optional string> # ex: arcts/keepalived:1.2.2
+
+where the properties of this service specification are:
+
+* ``service_type``
+    Mandatory and set to "ha-rgw"
+* ``service_id``
+    The name of the service.
+* ``placement hosts``
+    The hosts where the HA daemons are to run. An HAProxy and a Keepalived
+    container will be deployed on each of these hosts.
+    The RGW daemons may run on these hosts or on different ones.
+* ``virtual_ip_interface``
+    The physical network interface to which the virtual IP will be bound
+* ``virtual_ip_address``
+    The virtual IP (and network) where the HA RGW service will be available.
+    All your RGW clients must point to this IP in order to use the HA RGW
+    service.
+* ``frontend_port``
+    The port used to access the HA RGW service
+* ``ha_proxy_port``
+    The port used by HAProxy containers
+* ``ha_proxy_stats_enabled``
+    Whether to enable the statistics URL in the HAProxy daemons
+* ``ha_proxy_stats_user``
+    User needed to access the HAProxy statistics URL
+* ``ha_proxy_stats_password``
+    The password needed to access the HAProxy statistics URL
+* ``ha_proxy_enable_prometheus_exporter``
+    Whether to enable the Prometheus exporter in HAProxy. This allows RGW
+    service metrics to be consumed from Grafana.
+* ``ha_proxy_monitor_uri``:
+    The API endpoint where the health of the HAProxy daemon is exposed
+* ``keepalived_user``
+    User needed to access keepalived daemons
+* ``keepalived_password``:
+    The password needed to access keepalived daemons
+* ``ha_proxy_frontend_ssl_certificate``:
+    SSL certificate. You must paste the content of your ``.pem`` file
+* ``ha_proxy_frontend_ssl_port``:
+    The https port used by HAProxy containers
+* ``ha_proxy_ssl_dh_param``:
+    Value used for the ``tune.ssl.default-dh-param`` setting in the HAProxy
+    config file
+* ``ha_proxy_ssl_ciphers``:
+    Value used for the ``ssl-default-bind-ciphers`` setting in HAProxy config
+    file.
+* ``ha_proxy_ssl_options``:
+    Value used for the ``ssl-default-bind-options`` setting in HAProxy config
+    file.
+* ``haproxy_container_image``:
+    HAProxy image location used to pull the image
+* ``keepalived_container_image``:
+    Keepalived image location used to pull the image
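+
+The following is not the ``haproxy.cfg.j2`` template added by this change (the
+template itself is not reproduced in this diff); it is only a hand-written
+sketch showing how several of the spec properties above relate to an HAProxy
+configuration, with illustrative addresses for the RGW daemons::
+
+    defaults
+        mode http
+        timeout connect 5s
+        timeout client  30s
+        timeout server  30s
+
+    frontend rgw-frontend
+        bind *:8080                        # frontend_port
+        monitor-uri /haproxy_health        # ha_proxy_monitor_uri
+        default_backend rgw-backend
+
+    backend rgw-backend
+        balance roundrobin
+        server rgw0 192.168.20.11:80 check # one line per available RGW daemon
+        server rgw1 192.168.20.12:80 check
+
+    listen stats
+        bind *:1967                        # ha_proxy_port
+        stats enable
+        stats uri /stats
+        stats auth admin:admin             # ha_proxy_stats_user:ha_proxy_stats_password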
+
+**Useful hints for the RGW service:**
+
+* It is good to have at least 3 RGW daemons.
+* Use at least 3 hosts for the HAProxy for RGW service.
+* On each host an HAProxy and a Keepalived daemon will be deployed. These
+  daemons can be managed as systemd services, as shown below.
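+
+A minimal sketch of inspecting those units on one of the hosts (the fsid and
+daemon id below are placeholders)::
+
+    systemctl status ceph-<fsid>@haproxy.<daemon_id>.service
+    systemctl status ceph-<fsid>@keepalived.<daemon_id>.service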
+
+
 Deploying custom containers
 ===========================
 
index ea58389c1491e1ada4c229a70af5e5ab0ee89a57..4cd676bfa14a351cba7fbd7e1cea2554f1c1beaf 100755 (executable)
@@ -218,7 +218,17 @@ class Monitoring(object):
     }  # type: ignore
 
 ##################################
-
+def populate_files(config_dir, config_files, uid, gid):
+    # type: (str, Dict, int, int) -> None
+    """create config files for different services"""
+    for fname in config_files:
+        config_file = os.path.join(config_dir, fname)
+        config_content = dict_get_join(config_files, fname)
+        logger.info('Write file: %s' % (config_file))
+        with open(config_file, 'w') as f:
+            os.fchown(f.fileno(), uid, gid)
+            os.fchmod(f.fileno(), 0o600)
+            f.write(config_content)
 
 class NFSGanesha(object):
     """Defines a NFS-Ganesha container"""
@@ -343,14 +353,7 @@ class NFSGanesha(object):
         makedirs(config_dir, uid, gid, 0o755)
 
         # populate files from the config-json
-        for fname in self.files:
-            config_file = os.path.join(config_dir, fname)
-            config_content = dict_get_join(self.files, fname)
-            logger.info('Write file: %s' % (config_file))
-            with open(config_file, 'w') as f:
-                os.fchown(f.fileno(), uid, gid)
-                os.fchmod(f.fileno(), 0o600)
-                f.write(config_content)
+        populate_files(config_dir, self.files, uid, gid)
 
         # write the RGW keyring
         if self.rgw:
@@ -491,14 +494,7 @@ class CephIscsi(object):
         makedirs(configfs_dir, uid, gid, 0o755)
 
         # populate files from the config-json
-        for fname in self.files:
-            config_file = os.path.join(data_dir, fname)
-            config_content = dict_get_join(self.files, fname)
-            logger.info('Write file: %s' % (config_file))
-            with open(config_file, 'w') as f:
-                os.fchown(f.fileno(), uid, gid)
-                os.fchmod(f.fileno(), 0o600)
-                f.write(config_content)
+        populate_files(data_dir, self.files, uid, gid)
 
     @staticmethod
     def configfs_mount_umount(data_dir, mount=True):
@@ -524,6 +520,163 @@ class CephIscsi(object):
 
 ##################################
 
+class HAproxy(object):
+    """Defines an HAproxy container"""
+    daemon_type = 'haproxy'
+    required_files = ['haproxy.cfg']
+    default_image = 'haproxy'
+
+    def __init__(self, fsid: str, daemon_id: Union[int, str],
+                 config_json: Dict, image: str) -> None:
+        self.fsid = fsid
+        self.daemon_id = daemon_id
+        self.image = image
+
+        # config-json options
+        self.files = dict_get(config_json, 'files', {})
+
+        self.validate()
+
+    @classmethod
+    def init(cls, fsid: str, daemon_id: Union[int, str]) -> 'HAproxy':
+        return cls(fsid, daemon_id, get_parm(args.config_json), args.image)
+
+    def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None:
+        """Create files under the container data dir"""
+        if not os.path.isdir(data_dir):
+            raise OSError('data_dir is not a directory: %s' % (data_dir))
+
+        # create additional directories in data dir for HAproxy to use
+        if not os.path.isdir(os.path.join(data_dir, 'haproxy')):
+            makedirs(os.path.join(data_dir, 'haproxy'), uid, gid, DATA_DIR_MODE)
+
+        data_dir = os.path.join(data_dir, 'haproxy')
+        populate_files(data_dir, self.files, uid, gid)
+
+    def get_daemon_args(self) -> List[str]:
+        return ['haproxy', '-f', '/var/lib/haproxy/haproxy.cfg']
+
+    def validate(self):
+        # type: () -> None
+        if not is_fsid(self.fsid):
+            raise Error('not an fsid: %s' % self.fsid)
+        if not self.daemon_id:
+            raise Error('invalid daemon_id: %s' % self.daemon_id)
+        if not self.image:
+            raise Error('invalid image: %s' % self.image)
+
+        # check for the required files
+        if self.required_files:
+            for fname in self.required_files:
+                if fname not in self.files:
+                    raise Error('required file missing from config-json: %s' % fname)
+
+    def get_daemon_name(self):
+        # type: () -> str
+        return '%s.%s' % (self.daemon_type, self.daemon_id)
+
+    def get_container_name(self, desc=None):
+        # type: (Optional[str]) -> str
+        cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name())
+        if desc:
+            cname = '%s-%s' % (cname, desc)
+        return cname
+
+    def extract_uid_gid_haproxy(self):
+        # better directory for this?
+        return extract_uid_gid(file_path='/var/lib')
+
+    @staticmethod
+    def get_container_mounts(data_dir: str) -> Dict[str, str]:
+        mounts = dict()
+        mounts[os.path.join(data_dir,'haproxy')] = '/var/lib/haproxy'
+        return mounts
+
+##################################
+
+
+class Keepalived(object):
+    """Defines an Keepalived container"""
+    daemon_type = 'keepalived'
+    required_files = ['keepalived.conf']
+    default_image = 'arcts/keepalived'
+
+    def __init__(self, fsid: str, daemon_id: Union[int, str],
+                 config_json: Dict, image: str) -> None:
+        self.fsid = fsid
+        self.daemon_id = daemon_id
+        self.image = image
+
+        # config-json options
+        self.files = dict_get(config_json, 'files', {})
+
+        self.validate()
+
+    @classmethod
+    def init(cls, fsid: str, daemon_id: Union[int, str]) -> 'Keepalived':
+        return cls(fsid, daemon_id, get_parm(args.config_json), args.image)
+
+    def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None:
+        """Create files under the container data dir"""
+        if not os.path.isdir(data_dir):
+            raise OSError('data_dir is not a directory: %s' % (data_dir))
+
+        # create additional directories in data dir for keepalived to use
+        if not os.path.isdir(os.path.join(data_dir, 'keepalived')):
+            makedirs(os.path.join(data_dir, 'keepalived'), uid, gid, DATA_DIR_MODE)
+
+        # populate files from the config-json
+        populate_files(data_dir, self.files, uid, gid)
+
+    def validate(self):
+        # type: () -> None
+        if not is_fsid(self.fsid):
+            raise Error('not an fsid: %s' % self.fsid)
+        if not self.daemon_id:
+            raise Error('invalid daemon_id: %s' % self.daemon_id)
+        if not self.image:
+            raise Error('invalid image: %s' % self.image)
+
+        # check for the required files
+        if self.required_files:
+            for fname in self.required_files:
+                if fname not in self.files:
+                    raise Error('required file missing from config-json: %s' % fname)
+
+    def get_daemon_name(self):
+        # type: () -> str
+        return '%s.%s' % (self.daemon_type, self.daemon_id)
+
+    def get_container_name(self, desc=None):
+        # type: (Optional[str]) -> str
+        cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name())
+        if desc:
+            cname = '%s-%s' % (cname, desc)
+        return cname
+
+    @staticmethod
+    def get_container_envs():
+        # type: () -> List[str]
+        envs = [
+            'KEEPALIVED_AUTOCONF=false',
+            'KEEPALIVED_CONF=/etc/keepalived/keepalived.conf',
+            'KEEPALIVED_CMD="/usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf"',
+            'KEEPALIVED_DEBUG=false'
+        ]
+        return envs
+
+    def extract_uid_gid_keepalived(self):
+        # better directory for this?
+        return extract_uid_gid(file_path='/var/lib')
+
+    @staticmethod
+    def get_container_mounts(data_dir: str) -> Dict[str, str]:
+        mounts = dict()
+        mounts[os.path.join(data_dir,'keepalived.conf')] = '/etc/keepalived/keepalived.conf'
+        return mounts
+
+##################################
+
 
 class CustomContainer(object):
     """Defines a custom container"""
@@ -685,6 +838,8 @@ def get_supported_daemons():
     supported_daemons.append(CephIscsi.daemon_type)
     supported_daemons.append(CustomContainer.daemon_type)
     supported_daemons.append(CephadmDaemon.daemon_type)
+    supported_daemons.append(HAproxy.daemon_type)
+    supported_daemons.append(Keepalived.daemon_type)
     assert len(supported_daemons) == len(set(supported_daemons))
     return supported_daemons
 
@@ -1401,6 +1556,10 @@ def default_image(func):
                 type_ = args.name.split('.', 1)[0]
                 if type_ in Monitoring.components:
                     args.image = Monitoring.components[type_]['image']
+                if type_ == 'haproxy':
+                    args.image = HAproxy.default_image
+                if type_ == 'keepalived':
+                    args.image = Keepalived.default_image
             if not args.image:
                 args.image = os.environ.get('CEPHADM_IMAGE')
             if not args.image:
@@ -1749,6 +1908,9 @@ def get_daemon_args(fsid, daemon_type, daemon_id):
     elif daemon_type == NFSGanesha.daemon_type:
         nfs_ganesha = NFSGanesha.init(fsid, daemon_id)
         r += nfs_ganesha.get_daemon_args()
+    elif daemon_type == HAproxy.daemon_type:
+        haproxy = HAproxy.init(fsid, daemon_id)
+        r += haproxy.get_daemon_args()
     elif daemon_type == CustomContainer.daemon_type:
         cc = CustomContainer.init(fsid, daemon_id)
         r.extend(cc.get_daemon_args())
@@ -1818,6 +1980,14 @@ def create_daemon_dirs(fsid, daemon_type, daemon_id, uid, gid,
         ceph_iscsi = CephIscsi.init(fsid, daemon_id)
         ceph_iscsi.create_daemon_dirs(data_dir, uid, gid)
 
+    elif daemon_type == HAproxy.daemon_type:
+        haproxy = HAproxy.init(fsid, daemon_id)
+        haproxy.create_daemon_dirs(data_dir, uid, gid)
+
+    elif daemon_type == Keepalived.daemon_type:
+        keepalived = Keepalived.init(fsid, daemon_id)
+        keepalived.create_daemon_dirs(data_dir, uid, gid)
+
     elif daemon_type == CustomContainer.daemon_type:
         cc = CustomContainer.init(fsid, daemon_id)
         cc.create_daemon_dirs(data_dir, uid, gid)
@@ -1972,12 +2142,22 @@ def get_container_mounts(fsid, daemon_type, daemon_id,
         nfs_ganesha = NFSGanesha.init(fsid, daemon_id)
         mounts.update(nfs_ganesha.get_container_mounts(data_dir))
 
+    if daemon_type == HAproxy.daemon_type:
+        assert daemon_id
+        data_dir = get_data_dir(fsid, daemon_type, daemon_id)
+        mounts.update(HAproxy.get_container_mounts(data_dir))
+
     if daemon_type == CephIscsi.daemon_type:
         assert daemon_id
         data_dir = get_data_dir(fsid, daemon_type, daemon_id)
         log_dir = get_log_dir(fsid)
         mounts.update(CephIscsi.get_container_mounts(data_dir, log_dir))
 
+    if daemon_type == Keepalived.daemon_type:
+        assert daemon_id
+        data_dir = get_data_dir(fsid, daemon_type, daemon_id)
+        mounts.update(Keepalived.get_container_mounts(data_dir))
+
     if daemon_type == CustomContainer.daemon_type:
         assert daemon_id
         cc = CustomContainer.init(fsid, daemon_id)
@@ -2020,6 +2200,12 @@ def get_container(fsid: str, daemon_type: str, daemon_id: Union[int, str],
         entrypoint = NFSGanesha.entrypoint
         name = '%s.%s' % (daemon_type, daemon_id)
         envs.extend(NFSGanesha.get_container_envs())
+    elif daemon_type == HAproxy.daemon_type:
+        name = '%s.%s' % (daemon_type, daemon_id)
+    elif daemon_type == Keepalived.daemon_type:
+        name = '%s.%s' % (daemon_type, daemon_id)
+        envs.extend(Keepalived.get_container_envs())
+        container_args.extend(['--cap-add=NET_ADMIN'])
     elif daemon_type == CephIscsi.daemon_type:
         entrypoint = CephIscsi.entrypoint
         name = '%s.%s' % (daemon_type, daemon_id)
@@ -3514,6 +3700,22 @@ def command_deploy():
                       reconfig=args.reconfig,
                       ports=daemon_ports)
 
+    elif daemon_type == HAproxy.daemon_type:
+        haproxy = HAproxy.init(args.fsid, daemon_id)
+        uid, gid = haproxy.extract_uid_gid_haproxy()
+        c = get_container(args.fsid, daemon_type, daemon_id)
+        deploy_daemon(args.fsid, daemon_type, daemon_id, c, uid, gid,
+                      reconfig=args.reconfig,
+                      ports=daemon_ports)
+
+    elif daemon_type == Keepalived.daemon_type:
+        keepalived = Keepalived.init(args.fsid, daemon_id)
+        uid, gid = keepalived.extract_uid_gid_keepalived()
+        c = get_container(args.fsid, daemon_type, daemon_id)
+        deploy_daemon(args.fsid, daemon_type, daemon_id, c, uid, gid,
+                      reconfig=args.reconfig,
+                      ports=daemon_ports)
+
     elif daemon_type == CustomContainer.daemon_type:
         cc = CustomContainer.init(args.fsid, daemon_id)
         if not args.reconfig and not redeploy:
@@ -3952,6 +4154,22 @@ def list_daemons(detail=True, legacy_dir=None):
                                        err.startswith('%s, version ' % cmd):
                                         version = err.split(' ')[2]
                                         seen_versions[image_id] = version
+                                elif daemon_type == 'haproxy':
+                                    out, err, code = call(
+                                        [container_path, 'exec', container_id,
+                                         'haproxy', '-v'])
+                                    if not code and \
+                                       out.startswith('HA-Proxy version '):
+                                        version = out.split(' ')[2]
+                                        seen_versions[image_id] = version
+                                elif daemon_type == 'keepalived':
+                                    out, err, code = call(
+                                        [container_path, 'exec', container_id,
+                                         'keepalived', '--version'])
+                                    if not code and \
+                                       err.startswith('Keepalived '):
+                                        version = err.split(' ')[1]
+                                        seen_versions[image_id] = version
                                 elif daemon_type == CustomContainer.daemon_type:
                                     # Because a custom container can contain
                                     # everything, we do not know which command
@@ -5646,6 +5864,14 @@ def command_gather_facts():
 
 ##################################
 
+def command_verify_prereqs():
+    if args.daemon_type == 'haproxy' or args.daemon_type == 'keepalived':
+        out, err, code = call(['sysctl', '-n', 'net.ipv4.ip_nonlocal_bind'])
+        if out.strip() != "1":
+            raise Error('net.ipv4.ip_nonlocal_bind not set to 1')
+
+##################################
+
 
 class CephadmCache:
     task_types = ['disks', 'daemons', 'host', 'http_server']
@@ -6948,6 +7174,15 @@ def _get_parser():
         help="Maintenance action - enter maintenance, or exit maintenance")
     parser_maintenance.set_defaults(func=command_maintenance)
 
+    parser_verify_prereqs = subparsers.add_parser(
+        'verify-prereqs',
+        help='verify that system prerequisites for a given daemon type are met on this host')
+    parser_verify_prereqs.set_defaults(func=command_verify_prereqs)
+    parser_verify_prereqs.add_argument(
+        '--daemon-type',
+        required=True,
+        help='daemon type whose prerequisites will be checked')
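+
+    # Hypothetical host-side invocation of the new sub-command (the daemon
+    # type names come from HAproxy.daemon_type and Keepalived.daemon_type):
+    #
+    #   cephadm verify-prereqs --daemon-type haproxy
+    #   cephadm verify-prereqs --daemon-type keepalived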
+
     return parser
 
 
index f2d5ceea9493ba66288734ff905a8dc12f9a7a37..a643eb15341b028a8b290e9be203343c682e3f61 100644 (file)
@@ -417,6 +417,7 @@ class HostCache():
         return r
 
     def get_daemon(self, daemon_name: str) -> orchestrator.DaemonDescription:
+        assert not daemon_name.startswith('ha-rgw.')
         for _, dm in self.daemons.items():
             for _, dd in dm.items():
                 if dd.name() == daemon_name:
@@ -437,6 +438,9 @@ class HostCache():
 
     def get_daemons_by_service(self, service_name):
         # type: (str) -> List[orchestrator.DaemonDescription]
+        assert not service_name.startswith('keepalived.')
+        assert not service_name.startswith('haproxy.')
+
         result = []   # type: List[orchestrator.DaemonDescription]
         for host, dm in self.daemons.items():
             for name, d in dm.items():
@@ -446,6 +450,8 @@ class HostCache():
 
     def get_daemons_by_type(self, service_type):
         # type: (str) -> List[orchestrator.DaemonDescription]
+        assert service_type not in ['keepalived', 'haproxy']
+
         result = []   # type: List[orchestrator.DaemonDescription]
         for host, dm in self.daemons.items():
             for name, d in dm.items():
@@ -578,6 +584,8 @@ class HostCache():
         self.daemons[host][dd.name()] = dd
 
     def rm_daemon(self, host: str, name: str) -> None:
+        assert not name.startswith('ha-rgw.')
+
         if host in self.daemons:
             if name in self.daemons[host]:
                 del self.daemons[host][name]
@@ -594,6 +602,8 @@ class HostCache():
                    for h in self.get_hosts())
 
     def schedule_daemon_action(self, host: str, daemon_name: str, action: str) -> None:
+        assert not daemon_name.startswith('ha-rgw.')
+
         priorities = {
             'start': 1,
             'restart': 2,
@@ -619,6 +629,8 @@ class HostCache():
                 del self.scheduled_daemon_actions[host]
 
     def get_scheduled_daemon_action(self, host: str, daemon: str) -> Optional[str]:
+        assert not daemon.startswith('ha-rgw.')
+
         return self.scheduled_daemon_actions.get(host, {}).get(daemon)
 
 
index 77c8dbba32587433296761f35c85487cb487d0d7..47d8315b5ebedc67ea8c3c0e185ff737cd5ba7f1 100644 (file)
@@ -25,7 +25,7 @@ from ceph.deployment import inventory
 from ceph.deployment.drive_group import DriveGroupSpec
 from ceph.deployment.service_spec import \
     NFSServiceSpec, ServiceSpec, PlacementSpec, assert_valid_host, \
-    CustomContainerSpec, HostPlacementSpec
+    CustomContainerSpec, HostPlacementSpec, HA_RGWSpec
 from ceph.utils import str_to_datetime, datetime_to_str, datetime_now
 from cephadm.serve import CephadmServe
 from cephadm.services.cephadmservice import CephadmDaemonSpec
@@ -37,6 +37,7 @@ import orchestrator
 from orchestrator import OrchestratorError, OrchestratorValidationError, HostSpec, \
     CLICommandMeta, OrchestratorEvent, set_exception_subject, DaemonDescription
 from orchestrator._interface import GenericSpec
+from orchestrator._interface import daemon_type_to_service, service_to_daemon_types
 
 from . import remotes
 from . import utils
@@ -45,6 +46,7 @@ from .services.cephadmservice import MonService, MgrService, MdsService, RgwServ
     RbdMirrorService, CrashService, CephadmService, CephadmExporter, CephadmExporterConfig
 from .services.container import CustomContainerService
 from .services.iscsi import IscsiService
+from .services.ha_rgw import HA_RGWService
 from .services.nfs import NFSService
 from .services.osd import RemoveUtil, OSDQueue, OSDService, OSD, NotFoundError
 from .services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
@@ -217,6 +219,16 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             default='docker.io/prom/node-exporter:v0.18.1',
             desc='Prometheus container image',
         ),
+        Option(
+            'container_image_haproxy',
+            default='haproxy',
+            desc='HAproxy container image',
+        ),
+        Option(
+            'container_image_keepalived',
+            default='arcts/keepalived',
+            desc='Keepalived container image',
+        ),
         Option(
             'warn_on_stray_hosts',
             type='bool',
@@ -337,6 +349,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             self.container_image_grafana = ''
             self.container_image_alertmanager = ''
             self.container_image_node_exporter = ''
+            self.container_image_haproxy = ''
+            self.container_image_keepalived = ''
             self.warn_on_stray_hosts = True
             self.warn_on_stray_daemons = True
             self.warn_on_failed_host_check = True
@@ -417,6 +431,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
         self.node_exporter_service = NodeExporterService(self)
         self.crash_service = CrashService(self)
         self.iscsi_service = IscsiService(self)
+        self.ha_rgw_service = HA_RGWService(self)
         self.container_service = CustomContainerService(self)
         self.cephadm_exporter_service = CephadmExporter(self)
         self.cephadm_services = {
@@ -433,6 +448,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             'node-exporter': self.node_exporter_service,
             'crash': self.crash_service,
             'iscsi': self.iscsi_service,
+            'ha-rgw': self.ha_rgw_service,
             'container': self.container_service,
             'cephadm-exporter': self.cephadm_exporter_service,
         }
@@ -1189,6 +1205,10 @@ To check that the host is reachable:
             image = self.container_image_alertmanager
         elif daemon_type == 'node-exporter':
             image = self.container_image_node_exporter
+        elif daemon_type == 'haproxy':
+            image = self.container_image_haproxy
+        elif daemon_type == 'keepalived':
+            image = self.container_image_keepalived
         elif daemon_type == CustomContainerService.TYPE:
             # The image can't be resolved, the necessary information
             # is only available when a container is deployed (given
@@ -1397,7 +1417,7 @@ To check that the host is reachable:
                 daemon_map[dd.daemon_type].append(dd.daemon_id)
 
         for daemon_type, daemon_ids in daemon_map.items():
-            r = self.cephadm_services[daemon_type].ok_to_stop(daemon_ids)
+            r = self.cephadm_services[daemon_type_to_service(daemon_type)].ok_to_stop(daemon_ids)
             if r.retval:
                 self.log.error(f'It is NOT safe to stop host {hostname}')
                 return r.retval, r.stderr
@@ -1643,6 +1663,9 @@ To check that the host is reachable:
                     sm[n].container_image_id = 'mix'
                 if sm[n].container_image_name != dd.container_image_name:
                     sm[n].container_image_name = 'mix'
+                if dd.daemon_type == 'haproxy' or dd.daemon_type == 'keepalived':
+                    # ha-rgw has 2 daemons running per host
+                    sm[n].size = sm[n].size*2
         for n, spec in self.spec_store.specs.items():
             if n in sm:
                 continue
@@ -1659,6 +1682,9 @@ To check that the host is reachable:
             if service_type == 'nfs':
                 spec = cast(NFSServiceSpec, spec)
                 sm[n].rados_config_location = spec.rados_config_location()
+            if spec.service_type == 'ha-rgw':
+                # ha-rgw has 2 daemons running per host
+                sm[n].size = sm[n].size*2
         return list(sm.values())
 
     @trivial_completion
@@ -2030,7 +2056,17 @@ To check that the host is reachable:
                         self.log.warning(msg)
                         return msg
 
-            cephadm_config, deps = self.cephadm_services[daemon_spec.daemon_type].generate_config(
+            if daemon_spec.daemon_type == 'haproxy':
+                haspec = cast(HA_RGWSpec, daemon_spec.spec)
+                if haspec.haproxy_container_image:
+                    image = haspec.haproxy_container_image
+
+            if daemon_spec.daemon_type == 'keepalived':
+                haspec = cast(HA_RGWSpec, daemon_spec.spec)
+                if haspec.keepalived_container_image:
+                    image = haspec.keepalived_container_image
+
+            cephadm_config, deps = self.cephadm_services[daemon_type_to_service(daemon_spec.daemon_type)].generate_config(
                 daemon_spec)
 
             # TCP port to open in the host firewall
@@ -2123,7 +2159,7 @@ To check that the host is reachable:
 
         with set_exception_subject('service', daemon.service_id(), overwrite=True):
 
-            self.cephadm_services[daemon_type].pre_remove(daemon)
+            self.cephadm_services[daemon_type_to_service(daemon_type)].pre_remove(daemon)
 
             args = ['--name', name, '--force']
             self.log.info('Removing daemon %s from %s' % (name, host))
@@ -2134,7 +2170,7 @@ To check that the host is reachable:
                 self.cache.rm_daemon(host, name)
             self.cache.invalidate_host_daemons(host)
 
-            self.cephadm_services[daemon_type].post_remove(daemon)
+            self.cephadm_services[daemon_type_to_service(daemon_type)].post_remove(daemon)
 
             return "Removed {} from host '{}'".format(name, host)
 
@@ -2189,7 +2225,7 @@ To check that the host is reachable:
                     config_func(spec)
                 did_config = True
 
-            daemon_spec = self.cephadm_services[daemon_type].make_daemon_spec(
+            daemon_spec = self.cephadm_services[daemon_type_to_service(daemon_type)].make_daemon_spec(
                 host, daemon_id, network, spec)
             self.log.debug('Placing %s.%s on host %s' % (
                 daemon_type, daemon_id, host))
@@ -2279,6 +2315,7 @@ To check that the host is reachable:
                 'mgr': PlacementSpec(count=2),
                 'mds': PlacementSpec(count=2),
                 'rgw': PlacementSpec(count=2),
+                'ha-rgw': PlacementSpec(count=2),
                 'iscsi': PlacementSpec(count=1),
                 'rbd-mirror': PlacementSpec(count=2),
                 'nfs': PlacementSpec(count=1),
@@ -2336,6 +2373,10 @@ To check that the host is reachable:
     def apply_rgw(self, spec: ServiceSpec) -> str:
         return self._apply(spec)
 
+    @trivial_completion
+    def apply_ha_rgw(self, spec: ServiceSpec) -> str:
+        return self._apply(spec)
+
     @trivial_completion
     def add_iscsi(self, spec):
         # type: (ServiceSpec) -> List[str]
index e2e3c926ab91d81d559210c6d9178d6b9deec4e7..2018aa3677772ec7ee77ed234feb5566d14317eb 100644 (file)
@@ -127,6 +127,15 @@ class HostAssignment(object):
                 logger.info("deploying %s monitor(s) instead of %s so monitors may achieve consensus" % (
                     len(candidates) - 1, len(candidates)))
                 return candidates[0:len(candidates)-1]
+
+            # do not deploy ha-rgw on hosts that don't support virtual ips
+            if self.spec.service_type == 'ha-rgw' and self.filter_new_host:
+                old = candidates
+                candidates = [h for h in candidates if self.filter_new_host(h.hostname)]
+                for h in list(set(old) - set(candidates)):
+                    logger.info(
+                        f"Filtered out host {h.hostname} for ha-rgw. Could not verify host allowed virtual ips")
+                logger.info('filtered %s down to %s' % (old, candidates))
             return candidates
 
         # if asked to place even number of mons, deploy 1 less
@@ -160,21 +169,29 @@ class HostAssignment(object):
 
         # we don't need any additional hosts
         if need < 0:
-            return self.prefer_hosts_with_active_daemons(hosts_with_daemons, count)
+            final_candidates = self.prefer_hosts_with_active_daemons(hosts_with_daemons, count)
         else:
-            # exclusive to 'mon' daemons. Filter out hosts that don't have a public network assigned
+            # only applies to daemons of the 'mon' and 'ha-rgw' services:
+            # filter out hosts that don't have a public network assigned
+            # or that don't allow virtual IPs, respectively
             if self.filter_new_host:
                 old = others
                 others = [h for h in others if self.filter_new_host(h.hostname)]
-                logger.debug('filtered %s down to %s' % (old, others))
+                for h in list(set(old) - set(others)):
+                    if self.spec.service_type == 'ha-rgw':
+                        logger.info(
+                            f"Filtered out host {h.hostname} for ha-rgw. Could not verify host allowed virtual ips")
+                logger.info('filtered %s down to %s' % (old, others))
 
             # ask the scheduler to return a set of hosts with a up to the value of <count>
             others = self.scheduler.place(others, need)
-            logger.debug('Combine hosts with existing daemons %s + new hosts %s' % (
+            logger.info('Combine hosts with existing daemons %s + new hosts %s' % (
                 hosts_with_daemons, others))
             # if a host already has the anticipated daemon, merge it with the candidates
             # to get a list of HostPlacementSpec that can be deployed on.
-            return list(merge_hostspecs(hosts_with_daemons, others))
+            final_candidates = list(merge_hostspecs(hosts_with_daemons, others))
+
+        return final_candidates
 
     def get_hosts_with_active_daemon(self, hosts: List[HostPlacementSpec]) -> List[HostPlacementSpec]:
         active_hosts: List['HostPlacementSpec'] = []
index 13bcf14c65f215a9aa7d7a6646c59951bef4787a..ffcc69d7197b8abd9d31d35b4d14b4810a63fe8e 100644 (file)
@@ -11,7 +11,7 @@ except ImportError:
 
 from ceph.deployment import inventory
 from ceph.deployment.drive_group import DriveGroupSpec
-from ceph.deployment.service_spec import ServiceSpec, HostPlacementSpec, RGWSpec
+from ceph.deployment.service_spec import ServiceSpec, HostPlacementSpec, RGWSpec, HA_RGWSpec
 from ceph.utils import str_to_datetime, datetime_now
 
 import orchestrator
@@ -19,6 +19,7 @@ from cephadm.schedule import HostAssignment
 from cephadm.upgrade import CEPH_UPGRADE_ORDER
 from cephadm.utils import forall_hosts, cephadmNoImage, is_repo_digest
 from orchestrator import OrchestratorError
+from orchestrator._interface import daemon_type_to_service, service_to_daemon_types
 
 if TYPE_CHECKING:
     from cephadm.module import CephadmOrchestrator, ContainerInspectInfo
@@ -449,7 +450,7 @@ class CephadmServe:
         """
         self.mgr.migration.verify_no_migration()
 
-        daemon_type = spec.service_type
+        service_type = spec.service_type
         service_name = spec.service_name()
         if spec.unmanaged:
             self.log.debug('Skipping unmanaged service %s' % service_name)
@@ -459,9 +460,9 @@ class CephadmServe:
             return False
         self.log.debug('Applying service %s spec' % service_name)
 
-        config_func = self._config_fn(daemon_type)
+        config_func = self._config_fn(service_type)
 
-        if daemon_type == 'osd':
+        if service_type == 'osd':
             self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec))
             # TODO: return True would result in a busy loop
             # can't know if daemon count changed; create_from_spec doesn't
@@ -471,7 +472,7 @@ class CephadmServe:
         daemons = self.mgr.cache.get_daemons_by_service(service_name)
 
         public_network = None
-        if daemon_type == 'mon':
+        if service_type == 'mon':
             ret, out, err = self.mgr.check_mon_command({
                 'prefix': 'config get',
                 'who': 'mon',
@@ -489,20 +490,38 @@ class CephadmServe:
             # host
             return len(self.mgr.cache.networks[host].get(public_network, [])) > 0
 
+        def virtual_ip_allowed(host):
+            # type: (str) -> bool
+            # Verify that it is possible to use Virtual IPs in the host
+            try:
+                if self.mgr.cache.facts[host]['kernel_parameters']['net.ipv4.ip_nonlocal_bind'] == '0':
+                    return False
+            except KeyError:
+                return False
+
+            return True
+
         ha = HostAssignment(
             spec=spec,
             hosts=self.mgr._hosts_with_daemon_inventory(),
             get_daemons_func=self.mgr.cache.get_daemons_by_service,
-            filter_new_host=matches_network if daemon_type == 'mon' else None,
+            filter_new_host=matches_network if service_type == 'mon'
+            else virtual_ip_allowed if service_type == 'ha-rgw' else None,
         )
 
-        hosts: List[HostPlacementSpec] = ha.place()
-        self.log.debug('Usable hosts: %s' % hosts)
+        try:
+            hosts: List[HostPlacementSpec] = ha.place()
+            self.log.debug('Usable hosts: %s' % hosts)
+        except OrchestratorError as e:
+            self.log.error('Failed to apply %s spec %s: %s' % (
+                spec.service_name(), spec, e))
+            self.mgr.events.for_service(spec, 'ERROR', 'Failed to apply: ' + str(e))
+            return False
 
         r = None
 
         # sanity check
-        if daemon_type in ['mon', 'mgr'] and len(hosts) < 1:
+        if service_type in ['mon', 'mgr'] and len(hosts) < 1:
             self.log.debug('cannot scale mon|mgr below 1 (hosts=%s)' % hosts)
             return False
 
@@ -515,50 +534,55 @@ class CephadmServe:
         remove_daemon_hosts: Set[orchestrator.DaemonDescription] = ha.remove_daemon_hosts(hosts)
         self.log.debug('Hosts that will loose daemons: %s' % remove_daemon_hosts)
 
-        for host, network, name in add_daemon_hosts:
-            daemon_id = self.mgr.get_unique_name(daemon_type, host, daemons,
-                                                 prefix=spec.service_id,
-                                                 forcename=name)
-
-            if not did_config and config_func:
-                if daemon_type == 'rgw':
-                    rgw_config_func = cast(Callable[[RGWSpec, str], None], config_func)
-                    rgw_config_func(cast(RGWSpec, spec), daemon_id)
-                else:
-                    config_func(spec)
-                did_config = True
-
-            daemon_spec = self.mgr.cephadm_services[daemon_type].make_daemon_spec(
-                host, daemon_id, network, spec)
-            self.log.debug('Placing %s.%s on host %s' % (
-                daemon_type, daemon_id, host))
+        if service_type == 'ha-rgw':
+            spec = self.update_ha_rgw_definitive_hosts(spec, hosts, add_daemon_hosts)
 
-            try:
-                daemon_spec = self.mgr.cephadm_services[daemon_type].prepare_create(daemon_spec)
-                self.mgr._create_daemon(daemon_spec)
-                r = True
-            except (RuntimeError, OrchestratorError) as e:
-                self.mgr.events.for_service(spec, 'ERROR',
-                                            f"Failed while placing {daemon_type}.{daemon_id}"
-                                            f"on {host}: {e}")
-                # only return "no change" if no one else has already succeeded.
-                # later successes will also change to True
-                if r is None:
-                    r = False
-                continue
+        for host, network, name in add_daemon_hosts:
+            for daemon_type in service_to_daemon_types(service_type):
+                daemon_id = self.mgr.get_unique_name(daemon_type, host, daemons,
+                                                     prefix=spec.service_id,
+                                                     forcename=name)
+
+                if not did_config and config_func:
+                    if daemon_type == 'rgw':
+                        rgw_config_func = cast(Callable[[RGWSpec, str], None], config_func)
+                        rgw_config_func(cast(RGWSpec, spec), daemon_id)
+                    else:
+                        config_func(spec)
+                    did_config = True
+
+                daemon_spec = self.mgr.cephadm_services[service_type].make_daemon_spec(
+                    host, daemon_id, network, spec, daemon_type=daemon_type)
+                self.log.debug('Placing %s.%s on host %s' % (
+                    daemon_type, daemon_id, host))
 
-            # add to daemon list so next name(s) will also be unique
-            sd = orchestrator.DaemonDescription(
-                hostname=host,
-                daemon_type=daemon_type,
-                daemon_id=daemon_id,
-            )
-            daemons.append(sd)
+                try:
+                    daemon_spec = self.mgr.cephadm_services[service_type].prepare_create(
+                        daemon_spec)
+                    self.mgr._create_daemon(daemon_spec)
+                    r = True
+                except (RuntimeError, OrchestratorError) as e:
+                    self.mgr.events.for_service(spec, 'ERROR',
+                                                f"Failed while placing {daemon_type}.{daemon_id}"
+                                                f"on {host}: {e}")
+                    # only return "no change" if no one else has already succeeded.
+                    # later successes will also change to True
+                    if r is None:
+                        r = False
+                    continue
+
+                # add to daemon list so next name(s) will also be unique
+                sd = orchestrator.DaemonDescription(
+                    hostname=host,
+                    daemon_type=daemon_type,
+                    daemon_id=daemon_id,
+                )
+                daemons.append(sd)
 
         # remove any?
         def _ok_to_stop(remove_daemon_hosts: Set[orchestrator.DaemonDescription]) -> bool:
             daemon_ids = [d.daemon_id for d in remove_daemon_hosts]
-            r = self.mgr.cephadm_services[daemon_type].ok_to_stop(daemon_ids)
+            r = self.mgr.cephadm_services[service_type].ok_to_stop(daemon_ids)
             return not r.retval
 
         while remove_daemon_hosts and not _ok_to_stop(remove_daemon_hosts):
@@ -595,7 +619,7 @@ class CephadmServe:
             if dd.daemon_type in ['grafana', 'iscsi', 'prometheus', 'alertmanager', 'nfs']:
                 daemons_post[dd.daemon_type].append(dd)
 
-            if self.mgr.cephadm_services[dd.daemon_type].get_active_daemon(
+            if self.mgr.cephadm_services[daemon_type_to_service(dd.daemon_type)].get_active_daemon(
                self.mgr.cache.get_daemons_by_service(dd.service_name())).daemon_id == dd.daemon_id:
                 dd.is_active = True
             else:
@@ -653,7 +677,8 @@ class CephadmServe:
         for daemon_type, daemon_descs in daemons_post.items():
             if daemon_type in self.mgr.requires_post_actions:
                 self.mgr.requires_post_actions.remove(daemon_type)
-                self.mgr._get_cephadm_service(daemon_type).daemon_check_post(daemon_descs)
+                self.mgr._get_cephadm_service(daemon_type_to_service(
+                    daemon_type)).daemon_check_post(daemon_descs)
 
     def convert_tags_to_repo_digest(self) -> None:
         if not self.mgr.use_repo_digest:
@@ -672,3 +697,19 @@ class CephadmServe:
                 image_info = digests[container_image_ref]
                 if image_info.repo_digest:
                     self.mgr.set_container_image(entity, image_info.repo_digest)
+
+    # ha-rgw needs the definitive host list to create the keepalived config
+    # files. If that list has changed, all ha-rgw daemons must get a new
+    # config, including those that are already on the correct host and are
+    # not going to be redeployed
+    def update_ha_rgw_definitive_hosts(self, spec: ServiceSpec, hosts: List[HostPlacementSpec],
+                                       add_hosts: Set[HostPlacementSpec]) -> HA_RGWSpec:
+        spec = cast(HA_RGWSpec, spec)
+        if not (set(hosts) == set(spec.definitive_host_list)):
+            spec.definitive_host_list = hosts
+            ha_rgw_daemons = self.mgr.cache.get_daemons_by_service(spec.service_name())
+            for daemon in ha_rgw_daemons:
+                if daemon.hostname in [h.hostname for h in hosts] and daemon.hostname not in add_hosts:
+                    self.mgr.cache.schedule_daemon_action(
+                        daemon.hostname, daemon.name(), 'reconfig')
+        return spec
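
The virtual_ip_allowed filter above only trusts hosts whose gathered facts explicitly report net.ipv4.ip_nonlocal_bind as enabled; a missing fact counts as a refusal. A small self-contained sketch of that decision against a hand-written facts dictionary (the facts layout and hostnames are illustrative only):

    # Sketch: decide whether a host may carry the keepalived virtual IP,
    # given a cephadm-style facts cache (layout assumed for illustration).
    from typing import Any, Dict

    def virtual_ip_allowed(host: str, facts: Dict[str, Dict[str, Any]]) -> bool:
        try:
            return facts[host]['kernel_parameters']['net.ipv4.ip_nonlocal_bind'] != '0'
        except KeyError:
            # no facts gathered yet -> treat as not allowed, like the filter above
            return False

    facts = {
        'host1': {'kernel_parameters': {'net.ipv4.ip_nonlocal_bind': '1'}},
        'host2': {'kernel_parameters': {'net.ipv4.ip_nonlocal_bind': '0'}},
    }
    assert virtual_ip_allowed('host1', facts)
    assert not virtual_ip_allowed('host2', facts)
    assert not virtual_ip_allowed('host3', facts)
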
index 2ae4f1591a16f6f1033026b4b733c1f77b9117cc..a2b26bf0690d5bd8cfd1f906ad7c465689f5c523 100644 (file)
@@ -94,12 +94,14 @@ class CephadmService(metaclass=ABCMeta):
     def make_daemon_spec(self, host: str,
                          daemon_id: str,
                          netowrk: str,
-                         spec: ServiceSpecs) -> CephadmDaemonSpec:
+                         spec: ServiceSpecs,
+                         daemon_type: Optional[str] = None,) -> CephadmDaemonSpec:
         return CephadmDaemonSpec(
             host=host,
             daemon_id=daemon_id,
             spec=spec,
-            network=netowrk
+            network=netowrk,
+            daemon_type=daemon_type
         )
 
     def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
@@ -270,7 +272,7 @@ class CephService(CephadmService):
         """
         Map the daemon id to a cephx keyring entity name
         """
-        if self.TYPE in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
+        if self.TYPE in ['rgw', 'rbd-mirror', 'nfs', "iscsi", 'haproxy', 'keepalived']:
             return AuthEntity(f'client.{self.TYPE}.{daemon_id}')
         elif self.TYPE == 'crash':
             if host == "":
diff --git a/src/pybind/mgr/cephadm/services/ha_rgw.py b/src/pybind/mgr/cephadm/services/ha_rgw.py
new file mode 100644 (file)
index 0000000..1635d48
--- /dev/null
@@ -0,0 +1,151 @@
+import json
+import logging
+from typing import List, cast, Tuple, Dict, Any
+
+from ceph.deployment.service_spec import HA_RGWSpec
+
+from orchestrator import DaemonDescription, OrchestratorError
+from .cephadmservice import CephadmDaemonSpec, CephService
+from ..utils import CephadmNoImage, cephadmNoImage, resolve_ip
+
+logger = logging.getLogger(__name__)
+
+
+class HA_RGWService(CephService):
+    TYPE = 'ha-rgw'
+
+    class rgw_server():
+        def __init__(self, hostname: str, address: str):
+            self.name = hostname
+            self.ip = address
+
+    def prepare_create(self, daemon_spec: CephadmDaemonSpec[HA_RGWSpec]) -> CephadmDaemonSpec:
+        assert daemon_spec.daemon_type == 'haproxy' or daemon_spec.daemon_type == 'keepalived'
+        assert daemon_spec.spec
+
+        if daemon_spec.daemon_type == 'haproxy':
+            return self.haproxy_prepare_create(daemon_spec)
+        else:
+            return self.keepalived_prepare_create(daemon_spec)
+
+    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
+        assert daemon_spec.daemon_type == 'haproxy' or daemon_spec.daemon_type == 'keepalived'
+
+        if daemon_spec.daemon_type == 'haproxy':
+            return self.haproxy_generate_config(daemon_spec)
+        else:
+            return self.keepalived_generate_config(daemon_spec)
+
+    def haproxy_prepare_create(self, daemon_spec: CephadmDaemonSpec[HA_RGWSpec]) -> CephadmDaemonSpec:
+        assert daemon_spec.daemon_type == 'haproxy'
+        assert daemon_spec.spec
+
+        daemon_id = daemon_spec.daemon_id
+        host = daemon_spec.host
+        spec = daemon_spec.spec
+
+        logger.info('Create daemon %s on host %s with spec %s' % (
+            daemon_id, host, spec))
+        return daemon_spec
+
+    def keepalived_prepare_create(self, daemon_spec: CephadmDaemonSpec[HA_RGWSpec]) -> CephadmDaemonSpec:
+        assert daemon_spec.daemon_type == 'keepalived'
+        assert daemon_spec.spec
+
+        daemon_id = daemon_spec.daemon_id
+        host = daemon_spec.host
+        spec = daemon_spec.spec
+
+        logger.info('Create daemon %s on host %s with spec %s' % (
+            daemon_id, host, spec))
+        return daemon_spec
+
+    def haproxy_generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
+        daemon_id = daemon_spec.daemon_id
+        host = daemon_spec.host
+
+        service_name: str = "ha-rgw." + daemon_id.split('.')[0]
+        # if no service spec, return empty config
+        if not daemon_spec.spec and service_name not in self.mgr.spec_store.specs:
+            config_files = {'files': {}}  # type: Dict[str, Any]
+            return config_files, []
+        elif daemon_spec.spec:
+            spec = daemon_spec.spec
+        else:
+            # service spec is not attached to daemon spec but is in spec store
+            spec = cast(HA_RGWSpec, self.mgr.spec_store.specs[service_name])
+
+        rgw_daemons = self.mgr.cache.get_daemons_by_type('rgw')
+        rgw_servers = []
+        for daemon in rgw_daemons:
+            rgw_servers.append(self.rgw_server(
+                daemon.name(), resolve_ip(daemon.hostname)))
+
+        # the virtual IP address must not carry its netmask when passed to the haproxy
+        # config, since the port is appended and something like 123.123.123.10/24:8080 is invalid
+        virtual_ip_address = spec.virtual_ip_address
+        if "/" in str(spec.virtual_ip_address):
+            just_ip = str(spec.virtual_ip_address).split('/')[0]
+            virtual_ip_address = just_ip
+
+        ha_context = {'spec': spec, 'rgw_servers': rgw_servers,
+                      'virtual_ip_address': virtual_ip_address}
+
+        haproxy_conf = self.mgr.template.render('services/haproxy/haproxy.cfg.j2', ha_context)
+
+        config_files = {
+            'files': {
+                "haproxy.cfg": haproxy_conf,
+            }
+        }
+        if spec.ha_proxy_frontend_ssl_certificate:
+            ssl_cert = spec.ha_proxy_frontend_ssl_certificate
+            if isinstance(ssl_cert, list):
+                ssl_cert = '\n'.join(ssl_cert)
+            config_files['files']['haproxy.pem'] = ssl_cert
+
+        return config_files, []
+
+    def keepalived_generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
+        daemon_id = daemon_spec.daemon_id
+        host = daemon_spec.host
+
+        service_name: str = "ha-rgw." + daemon_id.split('.')[0]
+        # if no service spec, return empty config
+        if not daemon_spec.spec and service_name not in self.mgr.spec_store.specs:
+            config_file = {'files': {}}  # type: Dict[str, Any]
+            return config_file, []
+        elif daemon_spec.spec:
+            spec = daemon_spec.spec
+        else:
+            # service spec is not attached to daemon spec but is in spec store
+            spec = cast(HA_RGWSpec, self.mgr.spec_store.specs[service_name])
+
+        all_hosts = []
+        for h, network, name in spec.definitive_host_list:
+            all_hosts.append(h)
+
+        # set state: the first host in the placement is MASTER, all others are BACKUP
+        state = 'BACKUP'
+        if all_hosts[0] == host:
+            state = 'MASTER'
+
+        # remove the host this daemon is deployed on from all_hosts; the remaining
+        # hosts become other_ips in the conf file, converted to IP addresses
+        all_hosts.remove(host)
+        other_ips = [resolve_ip(h) for h in all_hosts]
+
+        ka_context = {'spec': spec, 'state': state,
+                      'other_ips': other_ips,
+                      'host_ip': resolve_ip(host)}
+
+        keepalived_conf = self.mgr.template.render(
+            'services/keepalived/keepalived.conf.j2', ka_context)
+
+        config_file = {
+            'files': {
+                "keepalived.conf": keepalived_conf,
+            }
+        }
+
+        return config_file, []
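
keepalived_generate_config derives each daemon's role purely from the definitive host list: the first host becomes the VRRP MASTER, every other host a BACKUP, and the local host is dropped from the list that feeds unicast_peer. A short sketch of that selection with made-up hostnames (resolve_ip left out):

    # Sketch of the MASTER/BACKUP and unicast-peer selection used for keepalived.
    from typing import List, Tuple

    def keepalived_placement(host: str, all_hosts: List[str]) -> Tuple[str, List[str]]:
        state = 'MASTER' if all_hosts[0] == host else 'BACKUP'
        peers = [h for h in all_hosts if h != host]  # resolved to IPs for unicast_peer
        return state, peers

    hosts = ['host1', 'host2', 'host3']
    assert keepalived_placement('host1', hosts) == ('MASTER', ['host2', 'host3'])
    assert keepalived_placement('host2', hosts) == ('BACKUP', ['host1', 'host3'])
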
diff --git a/src/pybind/mgr/cephadm/templates/services/haproxy/haproxy.cfg.j2 b/src/pybind/mgr/cephadm/templates/services/haproxy/haproxy.cfg.j2
new file mode 100644 (file)
index 0000000..4b3b4cf
--- /dev/null
@@ -0,0 +1,66 @@
+# {{ cephadm_managed }}
+global
+    log         127.0.0.1 local2
+    chroot      /var/lib/haproxy
+    pidfile     /var/lib/haproxy/haproxy.pid
+    maxconn     8000
+    daemon
+    stats socket /var/lib/haproxy/stats
+{% if spec.ha_proxy_frontend_ssl_certificate %}
+  {% if spec.ha_proxy_ssl_dh_param %}
+    tune.ssl.default-dh-param {{ spec.ha_proxy_ssl_dh_param }}
+  {% endif %}
+  {% if spec.ha_proxy_ssl_ciphers %}
+    ssl-default-bind-ciphers {{ spec.ha_proxy_ssl_ciphers | join(':') }}
+  {% endif %}
+  {% if spec.ha_proxy_ssl_options %}
+    ssl-default-bind-options {{ spec.ha_proxy_ssl_options | join(' ') }}
+  {% endif %}
+{% endif %}
+
+defaults
+    mode                    http
+    log                     global
+    option                  httplog
+    option                  dontlognull
+    option http-server-close
+    option forwardfor       except 127.0.0.0/8
+    option                  redispatch
+    retries                 3
+    timeout http-request    1s
+    timeout queue           20s
+    timeout connect         5s
+    timeout client          1s
+    timeout server          1s
+    timeout http-keep-alive 5s
+    timeout check           5s
+    maxconn                 8000
+
+frontend stats
+    bind *:{{ spec.ha_proxy_port }}
+{% if spec.ha_proxy_stats_enabled %}
+    stats enable
+{% endif %}
+    stats uri /stats
+    stats refresh 10s
+    stats auth {{ spec.ha_proxy_stats_user }}:{{ spec.ha_proxy_stats_password }}
+{% if spec.ha_proxy_enable_prometheus_exporter %}
+    http-request use-service prometheus-exporter if { path /metrics }
+{% endif %}
+    monitor-uri {{ spec.ha_proxy_monitor_uri }}
+
+frontend rgw-frontend
+{% if spec.ha_proxy_frontend_ssl_certificate %}
+    bind {{ virtual_ip_address }}:{{ spec.ha_proxy_frontend_ssl_port }} ssl crt /var/lib/haproxy/haproxy.pem
+{% else %}
+    bind {{ virtual_ip_address }}:{{ spec.frontend_port }}
+{% endif %}
+    default_backend rgw-backend
+
+backend rgw-backend
+    option forwardfor
+    balance static-rr
+    option httpchk HEAD / HTTP/1.0
+    {% for server in rgw_servers %}
+    server {{ server.name }} {{ server.ip }}:80 check weight 100
+    {% endfor %}
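
To eyeball the haproxy configuration this template produces, it can be rendered outside the mgr with plain Jinja2. A hedged sketch, assuming a ceph.git checkout and illustrative spec values (the mgr's own template environment may use different loader options):

    # Render the haproxy template with a mock spec; all values are illustrative only.
    from types import SimpleNamespace
    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader('src/pybind/mgr/cephadm/templates'),
                      trim_blocks=True, lstrip_blocks=True)
    spec = SimpleNamespace(
        ha_proxy_port=1967, ha_proxy_stats_enabled=True,
        ha_proxy_stats_user='admin', ha_proxy_stats_password='admin',
        ha_proxy_enable_prometheus_exporter=True,
        ha_proxy_monitor_uri='/haproxy_health',
        frontend_port=8080, ha_proxy_frontend_ssl_certificate=None,
    )
    rgw_servers = [SimpleNamespace(name='rgw.a.host1.abc', ip='10.0.0.11'),
                   SimpleNamespace(name='rgw.a.host2.def', ip='10.0.0.12')]
    print(env.get_template('services/haproxy/haproxy.cfg.j2').render(
        cephadm_managed='generated by cephadm (sketch)',
        spec=spec, rgw_servers=rgw_servers,
        virtual_ip_address='192.168.20.1'))
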
diff --git a/src/pybind/mgr/cephadm/templates/services/keepalived/keepalived.conf.j2 b/src/pybind/mgr/cephadm/templates/services/keepalived/keepalived.conf.j2
new file mode 100644 (file)
index 0000000..e7a0662
--- /dev/null
@@ -0,0 +1,32 @@
+# {{ cephadm_managed }}
+vrrp_script check_haproxy {
+    script "curl http://localhost:{{ spec.ha_proxy_port }}/haproxy_test"
+    weight -20
+    interval 2
+    rise 2
+    fall 2
+}
+
+vrrp_instance VI_0 {
+  state {{ state }}
+  priority 100
+  interface {{ spec.virtual_ip_interface }}
+  virtual_router_id 51
+  advert_int 1
+  authentication {
+      auth_type PASS
+      auth_pass {{ spec.keepalived_password }}
+  }
+  unicast_src_ip {{ host_ip }}
+  unicast_peer {
+    {% for ip in other_ips  %}
+    {{ ip }}
+    {% endfor %}
+  }
+  virtual_ipaddress {
+    {{ spec.virtual_ip_address }} dev {{ spec.virtual_ip_interface }}
+  }
+  track_script {
+      check_haproxy
+  }
+}
index f4f24f352cd97c460f25e4e10b72f17e9b3eaa0a..f59174f2a6371d7c5921fbd43ce74359b7074072 100644 (file)
@@ -6,8 +6,11 @@ import json
 
 import pytest
 
+import yaml
+
 from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, RGWSpec, \
-    IscsiServiceSpec, AlertManagerSpec, HostPlacementSpec, CustomContainerSpec
+    IscsiServiceSpec, AlertManagerSpec, HostPlacementSpec, CustomContainerSpec, \
+    HA_RGWSpec
 
 from orchestrator import DaemonDescription, OrchestratorError
 
@@ -137,7 +140,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.725856",
         "created": "2020-04-02T19:23:08.829543",
         "started": "2020-04-03T07:29:16.932838",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -152,7 +155,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.725903",
         "created": "2020-04-02T19:23:11.390694",
         "started": "2020-04-03T07:29:16.910897",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -167,7 +170,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.725950",
         "created": "2020-04-02T19:23:52.025088",
         "started": "2020-04-03T07:29:16.847972",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -182,7 +185,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.725807",
         "created": "2020-04-02T19:22:18.648584",
         "started": "2020-04-03T07:29:16.856153",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -197,7 +200,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.725715",
         "created": "2020-04-02T19:22:13.863300",
         "started": "2020-04-03T07:29:17.206024",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -212,7 +215,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.725996",
         "created": "2020-04-02T19:23:53.880197",
         "started": "2020-04-03T07:29:16.880044",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -227,7 +230,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.726088",
         "created": "2020-04-02T20:35:02.991435",
         "started": "2020-04-03T07:29:19.373956",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -242,7 +245,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.726134",
         "created": "2020-04-02T20:35:17.142272",
         "started": "2020-04-03T07:29:19.374002",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -257,7 +260,7 @@ def test_spec_octopus(spec_json):
         "last_refresh": "2020-04-03T15:31:48.726042",
         "created": "2020-04-02T19:24:10.281163",
         "started": "2020-04-03T07:29:16.926292",
-        "is_active": false 
+        "is_active": false
     },
     {
         "hostname": "ceph-001",
@@ -265,7 +268,7 @@ def test_spec_octopus(spec_json):
         "daemon_type": "rgw",
         "status": 1,
         "status_desc": "starting",
-        "is_active": false 
+        "is_active": false
     }
 ]""")
 )
@@ -657,3 +660,38 @@ def test_custom_container_spec_config_json():
     config_json = spec.config_json()
     for key in ['entrypoint', 'uid', 'gid', 'bind_mounts', 'dirs']:
         assert key not in config_json
+
+def test_HA_RGW_spec():
+    yaml_str ="""service_type: ha-rgw
+service_id: haproxy_for_rgw
+placement:
+  hosts:
+    - host1
+    - host2
+    - host3
+spec:
+  virtual_ip_interface: eth0
+  virtual_ip_address: 192.168.20.1/24
+  frontend_port: 8080
+  ha_proxy_port: 1967
+  ha_proxy_stats_enabled: true
+  ha_proxy_stats_user: admin
+  ha_proxy_stats_password: admin
+  ha_proxy_enable_prometheus_exporter: true
+  ha_proxy_monitor_uri: /haproxy_health
+  keepalived_password: admin
+"""
+    yaml_file = yaml.safe_load(yaml_str)
+    spec = ServiceSpec.from_json(yaml_file)
+    assert spec.service_type == "ha-rgw"
+    assert spec.service_id == "haproxy_for_rgw"
+    assert spec.virtual_ip_interface == "eth0"
+    assert spec.virtual_ip_address == "192.168.20.1/24"
+    assert spec.frontend_port == 8080
+    assert spec.ha_proxy_port == 1967
+    assert spec.ha_proxy_stats_enabled == True
+    assert spec.ha_proxy_stats_user == "admin"
+    assert spec.ha_proxy_stats_password == "admin"
+    assert spec.ha_proxy_enable_prometheus_exporter == True
+    assert spec.ha_proxy_monitor_uri == "/haproxy_health"
+    assert spec.keepalived_password == "admin"
index e05646c6fb3f2a3d1813794d995374267b2a4a15..2542b92398d28e5ed38ce61cc326042011050f69 100644 (file)
@@ -22,7 +22,7 @@ import yaml
 
 from ceph.deployment import inventory
 from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, RGWSpec, \
-    ServiceSpecValidationError, IscsiServiceSpec
+    ServiceSpecValidationError, IscsiServiceSpec, HA_RGWSpec
 from ceph.deployment.drive_group import DriveGroupSpec
 from ceph.deployment.hostspec import HostSpec
 from ceph.utils import datetime_to_str, str_to_datetime
@@ -889,6 +889,7 @@ class Orchestrator(object):
             'prometheus': self.apply_prometheus,
             'rbd-mirror': self.apply_rbd_mirror,
             'rgw': self.apply_rgw,
+            'ha-rgw': self.apply_ha_rgw,
             'host': self.add_host,
             'cephadm-exporter': self.apply_cephadm_exporter,
         }
@@ -1055,6 +1056,10 @@ class Orchestrator(object):
         """Update RGW cluster"""
         raise NotImplementedError()
 
+    def apply_ha_rgw(self, spec: HA_RGWSpec) -> Completion[str]:
+        """Update ha-rgw daemons"""
+        raise NotImplementedError()
+
     def add_rbd_mirror(self, spec: ServiceSpec) -> Completion[List[str]]:
         """Create rbd-mirror daemon(s)"""
         raise NotImplementedError()
@@ -1171,6 +1176,51 @@ def json_to_generic_spec(spec: dict) -> GenericSpec:
         return ServiceSpec.from_json(spec)
 
 
+def daemon_type_to_service(dtype: str) -> str:
+    mapping = {
+        'mon': 'mon',
+        'mgr': 'mgr',
+        'mds': 'mds',
+        'rgw': 'rgw',
+        'osd': 'osd',
+        'haproxy': 'ha-rgw',
+        'keepalived': 'ha-rgw',
+        'iscsi': 'iscsi',
+        'rbd-mirror': 'rbd-mirror',
+        'nfs': 'nfs',
+        'grafana': 'grafana',
+        'alertmanager': 'alertmanager',
+        'prometheus': 'prometheus',
+        'node-exporter': 'node-exporter',
+        'crash': 'crash',
+        'container': 'container',
+        'cephadm-exporter': 'cephadm-exporter',
+    }
+    return mapping[dtype]
+
+
+def service_to_daemon_types(stype: str) -> List[str]:
+    mapping = {
+        'mon': ['mon'],
+        'mgr': ['mgr'],
+        'mds': ['mds'],
+        'rgw': ['rgw'],
+        'osd': ['osd'],
+        'ha-rgw': ['haproxy', 'keepalived'],
+        'iscsi': ['iscsi'],
+        'rbd-mirror': ['rbd-mirror'],
+        'nfs': ['nfs'],
+        'grafana': ['grafana'],
+        'alertmanager': ['alertmanager'],
+        'prometheus': ['prometheus'],
+        'node-exporter': ['node-exporter'],
+        'crash': ['crash'],
+        'container': ['container'],
+        'cephadm-exporter': ['cephadm-exporter'],
+    }
+    return mapping[stype]
+
+
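
The two helpers are intentionally asymmetric: daemon_type_to_service collapses the haproxy and keepalived daemon types onto the single ha-rgw service, while service_to_daemon_types expands ha-rgw back into its two daemon types; every other entry maps to itself. A quick sanity-check sketch (assumes the mgr python modules from a ceph.git checkout, e.g. src/pybind/mgr and src/python-common, are importable):

    from orchestrator._interface import daemon_type_to_service, service_to_daemon_types

    assert daemon_type_to_service('haproxy') == 'ha-rgw'
    assert daemon_type_to_service('keepalived') == 'ha-rgw'
    assert service_to_daemon_types('ha-rgw') == ['haproxy', 'keepalived']
    # every daemon type of a service must map back to that service
    for stype in ('mon', 'rgw', 'ha-rgw'):
        for dtype in service_to_daemon_types(stype):
            assert daemon_type_to_service(dtype) == stype
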
 class UpgradeStatusSpec(object):
     # Orchestrator's report on what's going on with any ongoing upgrade
     def __init__(self):
@@ -1236,6 +1286,8 @@ class DaemonDescription(object):
         # The type of service (osd, mon, mgr, etc.)
         self.daemon_type = daemon_type
 
+        assert daemon_type not in ['HA_RGW', 'ha-rgw']
+
         # The orchestrator will have picked some names for daemons,
         # typically either based on hostnames or on pod names.
         # This is the <foo> in mds.<foo>, the ID that will appear
@@ -1271,7 +1323,7 @@ class DaemonDescription(object):
 
     def matches_service(self, service_name: Optional[str]) -> bool:
         if service_name:
-            return self.name().startswith(service_name + '.')
+            return (daemon_type_to_service(self.daemon_type) + '.' + self.daemon_id).startswith(service_name + '.')
         return False
 
     def service_id(self):
@@ -1318,15 +1370,15 @@ class DaemonDescription(object):
             # daemon_id == "service_id"
             return self.daemon_id
 
-        if self.daemon_type in ServiceSpec.REQUIRES_SERVICE_ID:
+        if daemon_type_to_service(self.daemon_type) in ServiceSpec.REQUIRES_SERVICE_ID:
             return _match()
 
         return self.daemon_id
 
     def service_name(self):
-        if self.daemon_type in ServiceSpec.REQUIRES_SERVICE_ID:
-            return f'{self.daemon_type}.{self.service_id()}'
-        return self.daemon_type
+        if daemon_type_to_service(self.daemon_type) in ServiceSpec.REQUIRES_SERVICE_ID:
+            return f'{daemon_type_to_service(self.daemon_type)}.{self.service_id()}'
+        return daemon_type_to_service(self.daemon_type)
 
     def __repr__(self):
         return "<DaemonDescription>({type}.{id})".format(type=self.daemon_type,
index 4b9bc9c0ac2995043037642cfa03392711dcf925..c2af2f3a2eaa107df16da9ed6041711bc77d7f8a 100644 (file)
@@ -17,7 +17,7 @@ from mgr_module import MgrModule, HandleCommandResult, Option
 
 from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
     raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
-    NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
+    NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, HA_RGWSpec, \
     RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
     ServiceDescription, DaemonDescription, IscsiServiceSpec, json_to_generic_spec, GenericSpec
 
@@ -765,7 +765,7 @@ Examples:
                 self._orchestrator_wait([completion])
                 data = completion.result
                 if format == 'plain':
-                    out = generate_preview_tables(data , True)
+                    out = generate_preview_tables(data, True)
                 else:
                     out = to_format(data, format, many=True, cls=None)
             return HandleCommandResult(stdout=out)
index 240a83ad1f9de888943e02edb06f9c3fcc4eb6b5..6602c10f9dddafd6492c2dc68b115eb9b5ad1ce5 100644 (file)
@@ -381,8 +381,8 @@ class ServiceSpec(object):
     """
     KNOWN_SERVICE_TYPES = 'alertmanager crash grafana iscsi mds mgr mon nfs ' \
                           'node-exporter osd prometheus rbd-mirror rgw ' \
-                          'container cephadm-exporter'.split()
-    REQUIRES_SERVICE_ID = 'iscsi mds nfs osd rgw container'.split()
+                          'container cephadm-exporter ha-rgw'.split()
+    REQUIRES_SERVICE_ID = 'iscsi mds nfs osd rgw container ha-rgw'.split()
 
     @classmethod
     def _cls(cls, service_type):
@@ -394,6 +394,7 @@ class ServiceSpec(object):
             'osd': DriveGroupSpec,
             'iscsi': IscsiServiceSpec,
             'alertmanager': AlertManagerSpec,
+            'ha-rgw': HA_RGWSpec,
             'container': CustomContainerSpec,
         }.get(service_type, cls)
         if ret == ServiceSpec and not service_type:
@@ -780,6 +781,95 @@ class AlertManagerSpec(ServiceSpec):
 yaml.add_representer(AlertManagerSpec, ServiceSpec.yaml_representer)
 
 
+class HA_RGWSpec(ServiceSpec):
+    def __init__(self,
+                 service_type: str = 'ha-rgw',
+                 service_id: Optional[str] = None,
+                 placement: Optional[PlacementSpec] = None,
+                 virtual_ip_interface: Optional[str] = None,
+                 virtual_ip_address: Optional[str] = None,
+                 frontend_port: Optional[int] = None,
+                 ha_proxy_port: Optional[int] = None,
+                 ha_proxy_stats_enabled: Optional[bool] = None,
+                 ha_proxy_stats_user: Optional[str] = None,
+                 ha_proxy_stats_password: Optional[str] = None,
+                 ha_proxy_enable_prometheus_exporter: Optional[bool] = None,
+                 ha_proxy_monitor_uri: Optional[str] = None,
+                 keepalived_password: Optional[str] = None,
+                 ha_proxy_frontend_ssl_certificate: Optional[str] = None,
+                 ha_proxy_frontend_ssl_port: Optional[int] = None,
+                 ha_proxy_ssl_dh_param: Optional[str] = None,
+                 ha_proxy_ssl_ciphers: Optional[List[str]] = None,
+                 ha_proxy_ssl_options: Optional[List[str]] = None,
+                 haproxy_container_image: Optional[str] = None,
+                 keepalived_container_image: Optional[str] = None,
+                 definitive_host_list: Optional[List[HostPlacementSpec]] = None
+                 ):
+        assert service_type == 'ha-rgw'
+        super(HA_RGWSpec, self).__init__('ha-rgw', service_id=service_id, placement=placement)
+
+        self.virtual_ip_interface = virtual_ip_interface
+        self.virtual_ip_address = virtual_ip_address
+        self.frontend_port = frontend_port
+        self.ha_proxy_port = ha_proxy_port
+        self.ha_proxy_stats_enabled = ha_proxy_stats_enabled
+        self.ha_proxy_stats_user = ha_proxy_stats_user
+        self.ha_proxy_stats_password = ha_proxy_stats_password
+        self.ha_proxy_enable_prometheus_exporter = ha_proxy_enable_prometheus_exporter
+        self.ha_proxy_monitor_uri = ha_proxy_monitor_uri
+        self.keepalived_password = keepalived_password
+        self.ha_proxy_frontend_ssl_certificate = ha_proxy_frontend_ssl_certificate
+        self.ha_proxy_frontend_ssl_port = ha_proxy_frontend_ssl_port
+        self.ha_proxy_ssl_dh_param = ha_proxy_ssl_dh_param
+        self.ha_proxy_ssl_ciphers = ha_proxy_ssl_ciphers
+        self.ha_proxy_ssl_options = ha_proxy_ssl_options
+        self.haproxy_container_image = haproxy_container_image
+        self.keepalived_container_image = keepalived_container_image
+        # placeholder variable. Need definitive list of hosts this service will
+        # be placed on in order to generate keepalived config. Will be populated
+        # when applying spec
+        self.definitive_host_list = []  # type: List[HostPlacementSpec]
+
+    def validate(self):
+        super(HA_RGWSpec, self).validate()
+
+        if not self.virtual_ip_interface:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: No Virtual IP Interface specified')
+        if not self.virtual_ip_address:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: No Virtual IP Address specified')
+        if not self.frontend_port and not self.ha_proxy_frontend_ssl_certificate:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: No Frontend Port specified')
+        if not self.ha_proxy_port:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: No HA Proxy Port specified')
+        if not self.ha_proxy_stats_enabled:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: HA Proxy Stats Enabled option not set')
+        if not self.ha_proxy_stats_user:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: No HA Proxy Stats User specified')
+        if not self.ha_proxy_stats_password:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: No HA Proxy Stats Password specified')
+        if not self.ha_proxy_enable_prometheus_exporter:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: HA Proxy Enable Prometheus Exporter option not set')
+        if not self.ha_proxy_monitor_uri:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: No HA Proxy Monitor Uri specified')
+        if not self.keepalived_password:
+            raise ServiceSpecValidationError(
+                'Cannot add ha-rgw: No Keepalived Password specified')
+        if self.ha_proxy_frontend_ssl_certificate:
+            if not self.ha_proxy_frontend_ssl_port:
+                raise ServiceSpecValidationError(
+                    'Cannot add ha-rgw: Specified HA Proxy Frontend SSL ' +
+                    'Certificate but no SSL Port')
+
+
 class CustomContainerSpec(ServiceSpec):
     def __init__(self,
                  service_type: str = 'container',