#dashboard_rgw_api_admin_resource: ''
#dashboard_rgw_api_no_ssl_verify: False
#node_exporter_container_image: prom/node-exporter:latest
+#node_exporter_port: 9100
#grafana_admin_user: admin
#grafana_admin_password: admin
# We only need this for SSL (https) connections
# - vonage-status-panel
# - grafana-piechart-panel
#grafana_allow_embedding: True
+#grafana_port: 3000
#prometheus_container_image: prom/prometheus:latest
#prometheus_container_cpu_period: 100000
#prometheus_container_cpu_cores: 2
#prometheus_data_dir: /var/lib/prometheus
#prometheus_conf_dir: /etc/prometheus
#prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image
+#prometheus_port: 9090
#alertmanager_container_image: prom/alertmanager:latest
#alertmanager_container_cpu_period: 100000
#alertmanager_container_cpu_cores: 2
#alertmanager_container_memory: 4 # in GB
#alertmanager_data_dir: /var/lib/alertmanager
#alertmanager_conf_dir: /etc/alertmanager
+#alertmanager_port: 9093
##################################
#dashboard_rgw_api_admin_resource: ''
#dashboard_rgw_api_no_ssl_verify: False
#node_exporter_container_image: prom/node-exporter:latest
+#node_exporter_port: 9100
#grafana_admin_user: admin
#grafana_admin_password: admin
# We only need this for SSL (https) connections
# - vonage-status-panel
# - grafana-piechart-panel
#grafana_allow_embedding: True
+#grafana_port: 3000
#prometheus_container_image: prom/prometheus:latest
#prometheus_container_cpu_period: 100000
#prometheus_container_cpu_cores: 2
#prometheus_data_dir: /var/lib/prometheus
#prometheus_conf_dir: /etc/prometheus
#prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image
+#prometheus_port: 9090
#alertmanager_container_image: prom/alertmanager:latest
#alertmanager_container_cpu_period: 100000
#alertmanager_container_cpu_cores: 2
#alertmanager_container_memory: 4 # in GB
#alertmanager_data_dir: /var/lib/alertmanager
#alertmanager_conf_dir: /etc/alertmanager
+#alertmanager_port: 9093
##################################
dashboard_url: "{{ hostvars[(groups[grafana_server_group_name][0] | default(groups[mgr_group_name][0]) | default(groups[mon_group_name][0]))]['ansible_hostname'] }}"
- name: set grafana url
- command: "{{ container_exec_cmd }} ceph dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_url }}:3000/"
+ command: "{{ container_exec_cmd }} ceph dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_url }}:{{ grafana_port }}/"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
- name: set alertmanager host
- command: "{{ container_exec_cmd }} ceph dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ dashboard_url }}:9093/"
+ command: "{{ container_exec_cmd }} ceph dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ dashboard_url }}:{{ alertmanager_port }}/"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
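The two tasks above push the rendered Grafana and Alertmanager URLs into the Ceph dashboard. A quick way to confirm the variable-driven ports actually landed is to read the values back with the matching get-grafana-api-url / get-alertmanager-api-host dashboard commands; a minimal verification sketch (not part of the role), reusing the same container_exec_cmd and delegation as above:

- name: read back dashboard grafana/alertmanager urls (illustrative only)
  command: "{{ container_exec_cmd }} ceph dashboard {{ item }}"
  loop:
    - get-grafana-api-url
    - get-alertmanager-api-host
  register: dashboard_api_urls
  delegate_to: "{{ groups[mon_group_name][0] }}"
  changed_when: false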
dashboard_rgw_api_admin_resource: ''
dashboard_rgw_api_no_ssl_verify: False
node_exporter_container_image: prom/node-exporter:latest
+node_exporter_port: 9100
grafana_admin_user: admin
grafana_admin_password: admin
# We only need this for SSL (https) connections
- vonage-status-panel
- grafana-piechart-panel
grafana_allow_embedding: True
+grafana_port: 3000
prometheus_container_image: prom/prometheus:latest
prometheus_container_cpu_period: 100000
prometheus_container_cpu_cores: 2
prometheus_data_dir: /var/lib/prometheus
prometheus_conf_dir: /etc/prometheus
prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image
+prometheus_port: 9090
alertmanager_container_image: prom/alertmanager:latest
alertmanager_container_cpu_period: 100000
alertmanager_container_cpu_cores: 2
alertmanager_container_memory: 4 # in GB
alertmanager_data_dir: /var/lib/alertmanager
alertmanager_conf_dir: /etc/alertmanager
+alertmanager_port: 9093
##################################
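With the port variables declared in the defaults above, a deployment that cannot use the stock ports only has to override them in its own group_vars; a minimal example (the values shown are arbitrary illustrations, not recommendations):

# group_vars/all.yml -- example override of the new port variables
grafana_port: 3001
prometheus_port: 9091
alertmanager_port: 9094
node_exporter_port: 9101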
- name: wait for grafana to be stopped
wait_for:
- port: 3000
+ port: '{{ grafana_port }}'
state: stopped
- name: make sure grafana configuration directories exist
- name: wait for grafana to start
wait_for:
- port: 3000
+ port: '{{ grafana_port }}'
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> url
- url: 'http://{{ grafana_server_addr | default(_current_monitor_address) }}:9090'
+ url: 'http://{{ grafana_server_addr | default(_current_monitor_address) }}:{{ prometheus_port }}'
# <bool> enable/disable basic auth
basicAuth: false
# <bool> mark as default datasource. Max one per org
cert_key = /etc/grafana/ceph-dashboard.key
domain = {{ ansible_fqdn }}
protocol = {{ dashboard_protocol }}
+http_port = {{ grafana_port }}
[security]
admin_user = {{ grafana_admin_user }}
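Since grafana.ini now renders http_port from grafana_port, an end-to-end check can hit Grafana's /api/health endpoint on that port. A sketch using the uri module (illustrative only, assuming /api/health is reachable without authentication and the dashboard certificate may be self-signed):

- name: check grafana health on the configured port (illustrative only)
  uri:
    url: "{{ dashboard_protocol }}://{{ ansible_fqdn }}:{{ grafana_port }}/api/health"
    validate_certs: false
  register: grafana_health
  until: grafana_health.status == 200
  retries: 5
  delay: 10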
- name: open node_exporter port
firewalld:
- port: "9100/tcp"
+ port: "{{ node_exporter_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
- block:
- name: open grafana port
firewalld:
- port: "3000/tcp"
+ port: "{{ grafana_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
- name: open prometheus port
firewalld:
- port: "9090/tcp"
+ port: "{{ prometheus_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
- name: open alertmanager port
firewalld:
- port: "9093/tcp"
+ port: "{{ alertmanager_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
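To confirm the firewalld rules above picked up the variable-driven ports, the dashboard zone can be listed after the play; a minimal sketch assuming firewall-cmd is available on the target hosts (it is pulled in by firewalld, which the tasks above already require):

- name: list ports opened in the dashboard firewall zone (illustrative only)
  command: "firewall-cmd --zone={{ ceph_dashboard_firewall_zone }} --list-ports"
  register: dashboard_zone_ports
  changed_when: false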
{{ node_exporter_container_image }} \
--path.procfs=/host/proc \
--path.sysfs=/host/sys \
- --no-collector.timex
+ --no-collector.timex \
+ --web.listen-address=:{{ node_exporter_port }}
ExecStop=-/usr/bin/{{ container_binary }} stop node-exporter
Restart=always
RestartSec=10s
--memory-swap={{ alertmanager_container_memory * 2 }}GB \
{{ alertmanager_container_image }} \
--config.file=/etc/alertmanager/alertmanager.yml \
- --storage.path=/alertmanager
+ --storage.path=/alertmanager \
+ --web.listen-address=:{{ alertmanager_port }}
ExecStop=/usr/bin/{{ container_binary }} stop alertmanager
Restart=always
RestartSec=10s
{{ prometheus_container_image }} \
--config.file=/etc/prometheus/prometheus.yml \
--storage.tsdb.path=/prometheus \
- --web.external-url=http://{{ inventory_hostname }}:9090/
+ --web.external-url=http://{{ inventory_hostname }}:{{ prometheus_port }}/ \
+ --web.listen-address=:{{ prometheus_port }}
ExecStop=/usr/bin/{{ container_binary }} stop prometheus
Restart=always
RestartSec=10s
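Because node_exporter, Alertmanager and Prometheus above now bind via --web.listen-address, a post-deployment readiness probe has to use the same variables. A sketch with the uri module, assuming Prometheus and Alertmanager expose their standard /-/ready endpoints and node_exporter serves /metrics:

- name: probe monitoring services on their configured ports (illustrative only)
  uri:
    url: "http://{{ inventory_hostname }}:{{ item.port }}{{ item.path }}"
  loop:
    - { port: "{{ prometheus_port }}", path: "/-/ready" }
    - { port: "{{ alertmanager_port }}", path: "/-/ready" }
    - { port: "{{ node_exporter_port }}", path: "/metrics" }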
scrape_configs:
- job_name: 'prometheus'
static_configs:
- - targets: ['localhost:9090']
+ - targets: ['localhost:{{ prometheus_port }}']
- job_name: 'ceph'
honor_labels: true
static_configs:
static_configs:
{% if grafana_server_group_name in groups %}
{% for host in (groups['all'] | difference(groups[grafana_server_group_name])) %}
- - targets: ['{{ host }}:9100']
+ - targets: ['{{ host }}:{{ node_exporter_port }}']
labels:
instance: "{{ hostvars[host]['ansible_nodename'] }}"
{% endfor %}
- job_name: 'grafana'
static_configs:
{% for host in groups[grafana_server_group_name] %}
- - targets: ['{{ host }}:9100']
+ - targets: ['{{ host }}:{{ node_exporter_port }}']
labels:
instance: "{{ hostvars[host]['ansible_nodename'] }}"
{% endfor %}
{% else %}
{% for host in groups['all'] %}
- - targets: ['{{ host }}:9100']
+ - targets: ['{{ host }}:{{ node_exporter_port }}']
labels:
instance: "{{ hostvars[host]['ansible_nodename'] }}"
{% endfor %}
alertmanagers:
- scheme: http
static_configs:
- - targets: ['{{ grafana_server_addr | default(_current_monitor_address) }}:9093']
+ - targets: ['{{ grafana_server_addr | default(_current_monitor_address) }}:{{ alertmanager_port }}']
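Once the template renders with the variable-driven ports, the resulting prometheus.yml can be validated before restarting the service; a sketch assuming promtool ships inside the running prometheus container started by the unit above:

- name: validate the rendered prometheus configuration (illustrative only)
  command: "{{ container_binary }} exec prometheus promtool check config /etc/prometheus/prometheus.yml"
  changed_when: false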