int main(int argc, char** argv) {
- // TODO: daemonize
- std::cout << "inside exporter" << std::endl;
-
- std::cout << "Starting http server thread..." << std::endl;
boost::thread server_thread(http_server_thread_entrypoint);
- std::cout << "Starting collector..." << std::endl;
DaemonMetricCollector &collector = collector_instance();
collector.main();
server_thread.join();
FuncT = TypeVar('FuncT', bound=Callable)
# Default container images -----------------------------------------------------
# NOTE(review): DEFAULT_IMAGE must stay on the ceph-ci build image. The
# docker.io/rhcsdashboard/ceph-exporter image is a developer test image and
# must not ship as the default.
DEFAULT_IMAGE = 'quay.ceph.io/ceph-ci/ceph:main'
DEFAULT_IMAGE_IS_MAIN = True
DEFAULT_IMAGE_RELEASE = 'quincy'
DEFAULT_PROMETHEUS_IMAGE = 'quay.io/prometheus/prometheus:v2.33.4'
##################################
class CephExporter(object):
    """Defines a Ceph-exporter container.

    Wraps the metadata cephadm needs to deploy the stand-alone
    ceph-exporter daemon: entrypoint, default port, and the
    /var/run/ceph bind mount it scans for admin sockets.
    """

    daemon_type = 'exporter'
    entrypoint = '/usr/bin/ceph-exporter'

    # Default port the exporter's HTTP endpoint listens on.
    port_map = {
        'exporter': 9085,
    }

    def __init__(self,
                 ctx,
                 fsid,
                 daemon_id,
                 image=DEFAULT_IMAGE):
        # type: (CephadmContext, str, Union[int, str], str) -> None
        self.ctx = ctx
        self.fsid = fsid
        self.daemon_id = daemon_id
        self.image = image

    @classmethod
    def init(cls, ctx, fsid, daemon_id):
        # type: (CephadmContext, str, Union[int, str]) -> CephExporter
        # BUG FIX: the previous version passed an extra positional arg
        # (get_parm(ctx.config_json)) that __init__ does not accept —
        # a leftover from the CephIscsi.init this was copied from.
        return cls(ctx, fsid, daemon_id, ctx.image)

    @staticmethod
    def get_container_mounts():
        # type: () -> Dict[str, str]
        # Bind-mount the host's admin-socket directory so the exporter
        # can discover daemon .asok files (':z' relabels for SELinux).
        mounts = dict()
        mounts['/var/run/ceph'] = '/var/run/ceph:z'
        return mounts

    def get_daemon_name(self):
        # type: () -> str
        return '%s.%s' % (self.daemon_type, self.daemon_id)

    def get_container_name(self, desc=None):
        # type: (Optional[str]) -> str
        cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name())
        if desc:
            cname = '%s-%s' % (cname, desc)
        return cname
+
+##################################
+
+
class HAproxy(object):
"""Defines an HAproxy container"""
daemon_type = 'haproxy'
supported_daemons.extend(Monitoring.components)
supported_daemons.append(NFSGanesha.daemon_type)
supported_daemons.append(CephIscsi.daemon_type)
+ supported_daemons.append(CephExporter.daemon_type)
supported_daemons.append(CustomContainer.daemon_type)
supported_daemons.append(HAproxy.daemon_type)
supported_daemons.append(Keepalived.daemon_type)
log_dir = get_log_dir(fsid, ctx.log_dir)
mounts.update(CephIscsi.get_container_mounts(data_dir, log_dir))
+ if daemon_type == CephExporter.daemon_type:
+ assert daemon_id
+ mounts.update(CephExporter.get_container_mounts())
+
if daemon_type == Keepalived.daemon_type:
assert daemon_id
data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
entrypoint = NFSGanesha.entrypoint
name = '%s.%s' % (daemon_type, daemon_id)
envs.extend(NFSGanesha.get_container_envs())
+ elif daemon_type == CephExporter.daemon_type:
+ entrypoint = CephExporter.entrypoint
+ name = '%s.%s' % (daemon_type, daemon_id)
elif daemon_type == HAproxy.daemon_type:
name = '%s.%s' % (daemon_type, daemon_id)
container_args.extend(['--user=root']) # haproxy 2.4 defaults to a different user
config=config, keyring=keyring,
reconfig=ctx.reconfig,
ports=daemon_ports)
+
+ elif daemon_type == CephExporter.daemon_type:
+ if not ctx.reconfig and not redeploy and not daemon_ports:
+ daemon_ports = list(CephExporter.port_map.values())
+
+ uid = 0
+ gid = 0
+ c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id)
+ deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid,
+ reconfig=ctx.reconfig,
+ ports=daemon_ports)
elif daemon_type == CephIscsi.daemon_type:
config, keyring = get_config_and_keyring(ctx)
timer.async_wait([&](const boost::system::error_code& e) {
std::cerr << e << std::endl;
update_sockets();
- std::cout << "updating metrics" << std::endl;
send_requests();
timer.expires_from_now(boost::posix_time::seconds(stats_period));
request_loop(timer);
} else {
add_double_or_int_metric(ss, perf_values, name, description, mtype, labels);
}
- result += ss.str() + "\n";
+ result += ss.str();
}
}
}
void DaemonMetricCollector::update_sockets() {
std::string path = "/var/run/ceph/";
- for (const auto & entry : std::filesystem::directory_iterator(path)) {
+ for (const auto & entry : std::filesystem::recursive_directory_iterator(path)) {
if (entry.path().extension() == ".asok") {
std::string daemon_socket_name = entry.path().filename().string();
- std::cout << "Got socket: " << daemon_socket_name << std::endl;
// remove .asok
std::string daemon_name = daemon_socket_name.substr(0, daemon_socket_name.size() - 5);
if (clients.find(daemon_name) == clients.end()) {
// Construct a response message based on the program state.
void create_response()
{
- std::cout << "Got request on " << request_.target() << std::endl;
if(request_.target() == "/metrics")
{
response_.set(http::field::content_type, "text/plain");
DaemonMetricCollector &collector = collector_instance();
std::string metrics = collector.get_metrics();
- beast::ostream(response_.body()) << "Perf Counters\n" << metrics << std::endl;
+ beast::ostream(response_.body()) << metrics << std::endl;
}
else
{
acceptor.async_accept(socket,
[&](beast::error_code ec)
{
- std::cout << "async accept" << std::endl;
if(!ec)
std::make_shared<http_connection>(std::move(socket))->start();
http_server(acceptor, socket);
from . import ssh
from .migrations import Migrations
from .services.cephadmservice import MonService, MgrService, MdsService, RgwService, \
- RbdMirrorService, CrashService, CephadmService, CephfsMirrorService, CephadmAgent
+ RbdMirrorService, CrashService, CephadmService, CephfsMirrorService, CephadmAgent, \
+ CephExporterService
from .services.ingress import IngressService
from .services.container import CustomContainerService
from .services.iscsi import IscsiService
# Default container images -----------------------------------------------------
# NOTE(review): keep the upstream defaults. The ceph-exporter feature must not
# repoint DEFAULT_IMAGE at a personal test repo (docker.io/rhcsdashboard/...)
# nor downgrade the monitoring-stack images (prometheus v2.33.4, node-exporter
# v1.3.1 are the supported versions).
DEFAULT_IMAGE = 'quay.io/ceph/ceph'
DEFAULT_PROMETHEUS_IMAGE = 'quay.io/prometheus/prometheus:v2.33.4'
DEFAULT_NODE_EXPORTER_IMAGE = 'quay.io/prometheus/node-exporter:v1.3.1'
DEFAULT_LOKI_IMAGE = 'docker.io/grafana/loki:2.4.0'
DEFAULT_PROMTAIL_IMAGE = 'docker.io/grafana/promtail:2.4.0'
DEFAULT_ALERT_MANAGER_IMAGE = 'quay.io/prometheus/alertmanager:v0.23.0'
RgwService, RbdMirrorService, GrafanaService, AlertmanagerService,
PrometheusService, NodeExporterService, LokiService, PromtailService, CrashService, IscsiService,
IngressService, CustomContainerService, CephfsMirrorService,
- CephadmAgent, SNMPGatewayService
+ CephadmAgent, SNMPGatewayService, CephExporterService
]
# https://github.com/python/mypy/issues/8993
image = self.container_image_haproxy
elif daemon_type == 'keepalived':
image = self.container_image_keepalived
- elif daemon_type == CustomContainerService.TYPE:
+ elif daemon_type == CustomContainerService.TYPE or daemon_type == 'exporter':
# The image can't be resolved, the necessary information
# is only available when a container is deployed (given
# via spec).
'alertmanager': PlacementSpec(count=1),
'prometheus': PlacementSpec(count=1),
'node-exporter': PlacementSpec(host_pattern='*'),
+ 'exporter': PlacementSpec(host_pattern='*'),
'loki': PlacementSpec(count=1),
'promtail': PlacementSpec(host_pattern='*'),
'crash': PlacementSpec(host_pattern='*'),
def apply_node_exporter(self, spec: ServiceSpec) -> str:
return self._apply(spec)
+    @handle_orch_error
+    def apply_exporter(self, spec: ServiceSpec) -> str:
+        """Apply a ceph-exporter service spec via the generic _apply path."""
+        return self._apply(spec)
+
@handle_orch_error
def apply_crash(self, spec: ServiceSpec) -> str:
return self._apply(spec)
return daemon_spec
class CephExporterService(CephService):
    """cephadm service handler for the stand-alone ceph-exporter daemon."""

    TYPE = 'exporter'

    def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec:
        # Guard against being handed a spec for some other daemon type.
        assert daemon_spec.daemon_type == self.TYPE
        daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
        return daemon_spec

    def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]:
        # The exporter ships no extra config files and has no daemon deps.
        assert daemon_spec.daemon_type == self.TYPE
        return {}, []
+
class CephfsMirrorService(CephService):
TYPE = 'cephfs-mirror'
'mon': self.apply_mon,
'nfs': self.apply_nfs,
'node-exporter': self.apply_node_exporter,
+ 'exporter': self.apply_exporter,
'osd': lambda dg: self.apply_drivegroups([dg]), # type: ignore
'prometheus': self.apply_prometheus,
'loki': self.apply_loki,
"""Update existing a Node-Exporter daemon(s)"""
raise NotImplementedError()
+    def apply_exporter(self, spec: ServiceSpec) -> OrchResult[str]:
+        """Update existing exporter daemon(s)"""
+        raise NotImplementedError()
+
def apply_loki(self, spec: ServiceSpec) -> OrchResult[str]:
"""Update existing a Loki daemon(s)"""
raise NotImplementedError()
'alertmanager': 'alertmanager',
'prometheus': 'prometheus',
'node-exporter': 'node-exporter',
+ 'exporter': 'exporter',
'loki': 'loki',
'promtail': 'promtail',
'crash': 'crash',
'loki': ['loki'],
'promtail': ['promtail'],
'node-exporter': ['node-exporter'],
+ 'exporter': ['exporter'],
'crash': ['crash'],
'container': ['container'],
'agent': ['agent'],
alertmanager = 'alertmanager'
grafana = 'grafana'
node_exporter = 'node-exporter'
+ exporter = 'exporter'
prometheus = 'prometheus'
loki = 'loki'
promtail = 'promtail'
start the services.
"""
KNOWN_SERVICE_TYPES = 'alertmanager crash grafana iscsi loki promtail mds mgr mon nfs ' \
- 'node-exporter osd prometheus rbd-mirror rgw agent ' \
+ 'node-exporter exporter osd prometheus rbd-mirror rgw agent ' \
'container ingress cephfs-mirror snmp-gateway'.split()
REQUIRES_SERVICE_ID = 'iscsi mds nfs rgw container ingress '.split()
MANAGED_CONFIG_OPTIONS = [