git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
dashboard: use variables for port value
author    Dimitri Savineau <dsavinea@redhat.com>
Wed, 10 Jul 2019 21:15:45 +0000 (17:15 -0400)
committer Guillaume Abrioux <gabrioux@redhat.com>
Thu, 18 Jul 2019 05:22:13 +0000 (07:22 +0200)
The port values for alertmanager, grafana, node-exporter and prometheus are
currently hardcoded in the roles, so it isn't possible to change the port
binding of those services. This commit introduces the node_exporter_port,
grafana_port, prometheus_port and alertmanager_port variables, defaulting to
the previously hardcoded values (9100, 3000, 9090 and 9093), so the bindings
can be customized.
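
As an illustration only (not part of this change), overriding the new defaults
from an inventory could look like the sketch below; the port numbers shown are
hypothetical examples, the shipped defaults remain 9100/3000/9090/9093:

    # group_vars/all.yml -- hypothetical non-default ports
    node_exporter_port: 9110
    grafana_port: 3100
    prometheus_port: 9190
    alertmanager_port: 9193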

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
12 files changed:
group_vars/all.yml.sample
group_vars/rhcs.yml.sample
roles/ceph-dashboard/tasks/configure_dashboard.yml
roles/ceph-defaults/defaults/main.yml
roles/ceph-grafana/tasks/configure_grafana.yml
roles/ceph-grafana/templates/datasources-ceph-dashboard.yml.j2
roles/ceph-grafana/templates/grafana.ini.j2
roles/ceph-infra/tasks/configure_firewall.yml
roles/ceph-node-exporter/templates/node_exporter.service.j2
roles/ceph-prometheus/templates/alertmanager.service.j2
roles/ceph-prometheus/templates/prometheus.service.j2
roles/ceph-prometheus/templates/prometheus.yml.j2

group_vars/all.yml.sample
index 93ddfa4bc108ecc5b99eb169dbdd0ea517bca202..de218b880f74f502e1592b72eddc3ff85e4bc566 100644 (file)
@@ -717,6 +717,7 @@ dummy:
 #dashboard_rgw_api_admin_resource: ''
 #dashboard_rgw_api_no_ssl_verify: False
 #node_exporter_container_image: prom/node-exporter:latest
+#node_exporter_port: 9100
 #grafana_admin_user: admin
 #grafana_admin_password: admin
 # We only need this for SSL (https) connections
@@ -734,6 +735,7 @@ dummy:
 #  - vonage-status-panel
 #  - grafana-piechart-panel
 #grafana_allow_embedding: True
+#grafana_port: 3000
 #prometheus_container_image: prom/prometheus:latest
 #prometheus_container_cpu_period: 100000
 #prometheus_container_cpu_cores: 2
@@ -742,6 +744,7 @@ dummy:
 #prometheus_data_dir: /var/lib/prometheus
 #prometheus_conf_dir: /etc/prometheus
 #prometheus_user_id: '65534'  # This is the UID used by the prom/prometheus container image
+#prometheus_port: 9090
 #alertmanager_container_image: prom/alertmanager:latest
 #alertmanager_container_cpu_period: 100000
 #alertmanager_container_cpu_cores: 2
@@ -749,6 +752,7 @@ dummy:
 #alertmanager_container_memory: 4
 #alertmanager_data_dir: /var/lib/alertmanager
 #alertmanager_conf_dir: /etc/alertmanager
+#alertmanager_port: 9093
 
 
 ##################################
group_vars/rhcs.yml.sample
index 1fda8a7be9f4d5a68c16ebb8c3cb964acaa8da7a..5836cb725419e4593ae6fd9853902b8a5a53ca6e 100644 (file)
@@ -717,6 +717,7 @@ ceph_docker_registry: "registry.access.redhat.com"
 #dashboard_rgw_api_admin_resource: ''
 #dashboard_rgw_api_no_ssl_verify: False
 #node_exporter_container_image: prom/node-exporter:latest
+#node_exporter_port: 9100
 #grafana_admin_user: admin
 #grafana_admin_password: admin
 # We only need this for SSL (https) connections
@@ -734,6 +735,7 @@ ceph_docker_registry: "registry.access.redhat.com"
 #  - vonage-status-panel
 #  - grafana-piechart-panel
 #grafana_allow_embedding: True
+#grafana_port: 3000
 #prometheus_container_image: prom/prometheus:latest
 #prometheus_container_cpu_period: 100000
 #prometheus_container_cpu_cores: 2
@@ -742,6 +744,7 @@ ceph_docker_registry: "registry.access.redhat.com"
 #prometheus_data_dir: /var/lib/prometheus
 #prometheus_conf_dir: /etc/prometheus
 #prometheus_user_id: '65534'  # This is the UID used by the prom/prometheus container image
+#prometheus_port: 9090
 #alertmanager_container_image: prom/alertmanager:latest
 #alertmanager_container_cpu_period: 100000
 #alertmanager_container_cpu_cores: 2
@@ -749,6 +752,7 @@ ceph_docker_registry: "registry.access.redhat.com"
 #alertmanager_container_memory: 4
 #alertmanager_data_dir: /var/lib/alertmanager
 #alertmanager_conf_dir: /etc/alertmanager
+#alertmanager_port: 9093
 
 
 ##################################
roles/ceph-dashboard/tasks/configure_dashboard.yml
index 3ca192e6c17eb233fef27e69317a79d33598875b..7d5e6d81cb1515f86fa67f96d6d2add22e20e984 100644 (file)
     dashboard_url: "{{ hostvars[(groups[grafana_server_group_name][0] | default(groups[mgr_group_name][0]) | default(groups[mon_group_name][0]))]['ansible_hostname'] }}"
 
 - name: set grafana url
-  command: "{{ container_exec_cmd }} ceph dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_url }}:3000/"
+  command: "{{ container_exec_cmd }} ceph dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_url }}:{{ grafana_port }}/"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
 
 - name: set alertmanager host
-  command: "{{ container_exec_cmd }} ceph dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ dashboard_url }}:9093/"
+  command: "{{ container_exec_cmd }} ceph dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ dashboard_url }}:{{ alertmanager_port }}/"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
 
roles/ceph-defaults/defaults/main.yml
index 70ac28a446def4967f2b99d7968711c0a1ebf61c..4cb38500809bebbf52824a7fd4a9983f50dd9277 100644 (file)
@@ -709,6 +709,7 @@ dashboard_rgw_api_scheme: ''
 dashboard_rgw_api_admin_resource: ''
 dashboard_rgw_api_no_ssl_verify: False
 node_exporter_container_image: prom/node-exporter:latest
+node_exporter_port: 9100
 grafana_admin_user: admin
 grafana_admin_password: admin
 # We only need this for SSL (https) connections
@@ -726,6 +727,7 @@ grafana_plugins:
   - vonage-status-panel
   - grafana-piechart-panel
 grafana_allow_embedding: True
+grafana_port: 3000
 prometheus_container_image: prom/prometheus:latest
 prometheus_container_cpu_period: 100000
 prometheus_container_cpu_cores: 2
@@ -734,6 +736,7 @@ prometheus_container_memory: 4
 prometheus_data_dir: /var/lib/prometheus
 prometheus_conf_dir: /etc/prometheus
 prometheus_user_id: '65534'  # This is the UID used by the prom/prometheus container image
+prometheus_port: 9090
 alertmanager_container_image: prom/alertmanager:latest
 alertmanager_container_cpu_period: 100000
 alertmanager_container_cpu_cores: 2
@@ -741,6 +744,7 @@ alertmanager_container_cpu_cores: 2
 alertmanager_container_memory: 4
 alertmanager_data_dir: /var/lib/alertmanager
 alertmanager_conf_dir: /etc/alertmanager
+alertmanager_port: 9093
 
 
 ##################################
roles/ceph-grafana/tasks/configure_grafana.yml
index d706913e11c49251a3ad57b79f9472d8cd2cc856..ab5b831cefea0db2e991d10bae41ef9fcc503546 100644 (file)
@@ -17,7 +17,7 @@
 
 - name: wait for grafana to be stopped
   wait_for:
-    port: 3000
+    port: '{{ grafana_port }}'
     state: stopped
 
 - name: make sure grafana configuration directories exist
@@ -91,4 +91,4 @@
 
 - name: wait for grafana to start
   wait_for:
-    port: 3000
+    port: '{{ grafana_port }}'
roles/ceph-grafana/templates/datasources-ceph-dashboard.yml.j2
index 94d29530d39934ec4fe06ac93295edea7f506509..4746fb31281da83a7e72084f7689b764f04768f5 100644 (file)
@@ -17,7 +17,7 @@ datasources:
   # <int> org id. will default to orgId 1 if not specified
   orgId: 1
   # <string> url
-  url: 'http://{{ grafana_server_addr | default(_current_monitor_address) }}:9090'
+  url: 'http://{{ grafana_server_addr | default(_current_monitor_address) }}:{{ prometheus_port }}'
   # <bool> enable/disable basic auth
   basicAuth: false
   # <bool> mark as default datasource. Max one per org
roles/ceph-grafana/templates/grafana.ini.j2
index 0afe023c3c5b2f249c49a85bc3359feac8f4e1a1..ff5f694576899a9c17092e93d1820b85920065c6 100644 (file)
@@ -20,6 +20,7 @@ cert_file = /etc/grafana/ceph-dashboard.crt
 cert_key = /etc/grafana/ceph-dashboard.key
 domain = {{ ansible_fqdn }}
 protocol = {{ dashboard_protocol }}
+http_port = {{ grafana_port }}
 
 [security]
 admin_user = {{ grafana_admin_user }}
roles/ceph-infra/tasks/configure_firewall.yml
index f358180d06d7f5ff0576b43af17d01245f46b178..63885cc7d9f89e2df0c614d0aa10c7c3842f3c7b 100644 (file)
 
   - name: open node_exporter port
     firewalld:
-      port: "9100/tcp"
+      port: "{{ node_exporter_port }}/tcp"
       zone: "{{ ceph_dashboard_firewall_zone }}"
       permanent: true
       immediate: true
   - block:
       - name: open grafana port
         firewalld:
-          port: "3000/tcp"
+          port: "{{ grafana_port }}/tcp"
           zone: "{{ ceph_dashboard_firewall_zone }}"
           permanent: true
           immediate: true
 
       - name: open prometheus port
         firewalld:
-          port: "9090/tcp"
+          port: "{{ prometheus_port }}/tcp"
           zone: "{{ ceph_dashboard_firewall_zone }}"
           permanent: true
           immediate: true
 
       - name: open alertmanager port
         firewalld:
-          port: "9093/tcp"
+          port: "{{ alertmanager_port }}/tcp"
           zone: "{{ ceph_dashboard_firewall_zone }}"
           permanent: true
           immediate: true
roles/ceph-node-exporter/templates/node_exporter.service.j2
index cf9e6d629468dc7a3fcb609fc634f8e1f64d4cf0..a92381b2622e5ddc08d3a34d402a1ac5bd8c7faf 100644 (file)
@@ -17,7 +17,8 @@ ExecStart=/usr/bin/{{ container_binary }} run --name=node-exporter \
   {{ node_exporter_container_image }} \
   --path.procfs=/host/proc \
   --path.sysfs=/host/sys \
-  --no-collector.timex
+  --no-collector.timex \
+  --web.listen-address=:{{ node_exporter_port }}
 ExecStop=-/usr/bin/{{ container_binary }} stop node-exporter
 Restart=always
 RestartSec=10s
roles/ceph-prometheus/templates/alertmanager.service.j2
index dc60ec1676128825951a4b6f7806004283d3713b..8cd08a8256bec2f905757431df8ffcfc65ad2f8d 100644 (file)
@@ -22,7 +22,8 @@ ExecStart=/usr/bin/{{ container_binary }} run --name=alertmanager \
   --memory-swap={{ alertmanager_container_memory * 2 }}GB \
   {{ alertmanager_container_image }} \
   --config.file=/etc/alertmanager/alertmanager.yml \
-  --storage.path=/alertmanager
+  --storage.path=/alertmanager \
+  --web.listen-address=:{{ alertmanager_port }}
 ExecStop=/usr/bin/{{ container_binary }} stop alertmanager
 Restart=always
 RestartSec=10s
roles/ceph-prometheus/templates/prometheus.service.j2
index f0cbb9558c953e175fbf6a2c818bc30923b7c527..4f2d314cce76fc955e902f5b8ecf7142288b90d1 100644 (file)
@@ -23,7 +23,8 @@ ExecStart=/usr/bin/{{ container_binary }} run --name=prometheus \
   {{ prometheus_container_image }} \
   --config.file=/etc/prometheus/prometheus.yml \
   --storage.tsdb.path=/prometheus \
-  --web.external-url=http://{{ inventory_hostname }}:9090/
+  --web.external-url=http://{{ inventory_hostname }}:{{ prometheus_port }}/ \
+  --web.listen-address=:{{ prometheus_port }}
 ExecStop=/usr/bin/{{ container_binary }} stop prometheus
 Restart=always
 RestartSec=10s
roles/ceph-prometheus/templates/prometheus.yml.j2
index 5c375f022bbac2411f0ae5033df3dd6bd7e73fd5..1afc9ee816c52d17ff1123927893b6901141b386 100644 (file)
@@ -8,7 +8,7 @@ rule_files:
 scrape_configs:
   - job_name: 'prometheus'
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ['localhost:{{ prometheus_port }}']
   - job_name: 'ceph'
     honor_labels: true
     static_configs:
@@ -21,20 +21,20 @@ scrape_configs:
     static_configs:
 {% if grafana_server_group_name in groups %}
 {% for host in (groups['all'] | difference(groups[grafana_server_group_name])) %}
-      - targets: ['{{ host }}:9100']
+      - targets: ['{{ host }}:{{ node_exporter_port }}']
         labels:
           instance: "{{ hostvars[host]['ansible_nodename'] }}"
 {% endfor %}
   - job_name: 'grafana'
     static_configs:
 {% for host in groups[grafana_server_group_name] %}
-      - targets: ['{{ host }}:9100']
+      - targets: ['{{ host }}:{{ node_exporter_port }}']
         labels:
           instance: "{{ hostvars[host]['ansible_nodename'] }}"
 {% endfor %}
 {% else %}
 {% for host in groups['all'] %}
-      - targets: ['{{ host }}:9100']
+      - targets: ['{{ host }}:{{ node_exporter_port }}']
         labels:
           instance: "{{ hostvars[host]['ansible_nodename'] }}"
 {% endfor %}
@@ -52,4 +52,4 @@ alerting:
   alertmanagers:
   - scheme: http
     static_configs:
-    - targets: ['{{ grafana_server_addr | default(_current_monitor_address) }}:9093']
+    - targets: ['{{ grafana_server_addr | default(_current_monitor_address) }}:{{ alertmanager_port }}']