# Mirror of https://github.com/poseidon/typhoon (commit 50215e373b).
#
# Kubernetes Ingress resources annotated `prometheus.io/probe: "true"` are probed via
# the Blackbox Exporter (if present). Probing Services requires the Blackbox Exporter
# to be deployed in the `monitoring` namespace, named `blackbox-exporter`, and to
# listen on port 8080.
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: monitoring
data:
  prometheus.yaml: |-
    # Global config
    global:
      scrape_interval: 15s

    # AlertManager
    alerting:
      alertmanagers:
      - static_configs:
        - targets:
          - alertmanager:9093

    # Scrape configs for running Prometheus on a Kubernetes cluster.
    # This uses separate scrape configs for cluster components (i.e. API server, node)
    # and services to allow each to use different authentication configs.
    #
    # Kubernetes labels will be added as Prometheus labels on metrics via the
    # `labelmap` relabeling action.
    scrape_configs:

    # Scrape config for API servers.
    #
    # Kubernetes exposes API servers as endpoints of the default/kubernetes
    # service, so this uses the `endpoints` role and relabelling to keep only
    # the endpoints associated with the default/kubernetes service using the
    # default named port `https`. This works for single API server deployments as
    # well as HA API server deployments.
    - job_name: 'kubernetes-apiservers'
      kubernetes_sd_configs:
      - role: endpoints

      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        # Using endpoints to discover kube-apiserver targets finds the pod IP
        # (host IP, since the apiserver uses host networking), which is not in
        # the server certificate.
        insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      # Keep only the default/kubernetes service endpoints for the https port. This
      # adds a target for each API server for which Kubernetes adds an endpoint to
      # the default/kubernetes service.
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https
      - replacement: apiserver
        action: replace
        target_label: job

      metric_relabel_configs:
      - source_labels: [__name__]
        action: drop
        regex: etcd_(debugging|disk|request|server).*
      - source_labels: [__name__]
        action: drop
        regex: apiserver_admission_controller_admission_latencies_seconds_.*
      - source_labels: [__name__]
        action: drop
        regex: apiserver_admission_step_admission_latencies_seconds_.*
      - source_labels: [__name__, group]
        regex: apiserver_request_duration_seconds_bucket;.+
        action: drop
      - source_labels: [__name__, group]
        regex: apiserver_request_duration_seconds_count;.+
        action: drop

    # Scrape config for kube-controller-manager endpoints.
    #
    # kube-controller-manager service endpoints can be discovered by using the
    # `endpoints` role and relabelling to keep only endpoints associated with
    # kube-system/kube-controller-manager and the `metrics` port.
    - job_name: 'kube-controller-manager'
      kubernetes_sd_configs:
      - role: endpoints
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: kube-system;kube-controller-manager;metrics
      - replacement: kube-controller-manager
        action: replace
        target_label: job

    # Scrape config for kube-scheduler endpoints.
    #
    # kube-scheduler service endpoints can be discovered by using the `endpoints`
    # role and relabelling to keep only endpoints associated with
    # kube-system/kube-scheduler and the `metrics` port.
    - job_name: 'kube-scheduler'
      kubernetes_sd_configs:
      - role: endpoints
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: kube-system;kube-scheduler;metrics
      - replacement: kube-scheduler
        action: replace
        target_label: job

    # Scrape config for node (i.e. kubelet) /metrics (e.g. 'kubelet_'). Explore
    # metrics from a node by scraping the kubelet (127.0.0.1:10250/metrics).
    - job_name: 'kubelet'
      kubernetes_sd_configs:
      - role: node

      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        # Kubelet certs don't have any fixed IP SANs
        insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

    # Scrape config for Kubelet cAdvisor. Explore metrics from a node by
    # scraping the kubelet (127.0.0.1:10250/metrics/cadvisor).
    - job_name: 'kubernetes-cadvisor'
      kubernetes_sd_configs:
      - role: node

      scheme: https
      metrics_path: /metrics/cadvisor
      tls_config:
        # Kubelet certs don't have any fixed IP SANs
        insecure_skip_verify: true
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      metric_relabel_configs:
      - source_labels: [__name__, image]
        action: drop
        regex: container_([a-z_]+);
      - source_labels: [__name__]
        action: drop
        regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)

    # Scrape etcd metrics from controllers via listen-metrics-urls
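    #
    # For this job to discover targets, controller nodes are expected to carry the
    # `node.kubernetes.io/controller="true"` label (kept by the relabel rule below)
    # and etcd is expected to serve plain-HTTP metrics on port 2381, e.g. via an
    # illustrative flag such as:
    #
    #   etcd --listen-metrics-urls=http://0.0.0.0:2381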
    - job_name: 'etcd'
      kubernetes_sd_configs:
      - role: node
      scheme: http
      relabel_configs:
      - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_controller]
        action: keep
        regex: 'true'
      - source_labels: [__meta_kubernetes_node_address_InternalIP]
        action: replace
        target_label: __address__
        replacement: '${1}:2381'

    # Scrape config for service endpoints.
    #
    # The relabeling allows the actual service scrape endpoint to be configured
    # via the following annotations:
    #
    # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
    # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
    #   to set this to `https` & most likely set the `tls_config` of the scrape config.
    # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
    # * `prometheus.io/port`: If the metrics are exposed on a different port to the
    #   service then set this appropriately.
    # * `prometheus.io/param`: Custom metrics query parameter, like "format=prometheus".
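    #
    # For illustration only (not applied by this ConfigMap), a Service opting in to
    # scraping might be annotated like the following; the name, port, and path are
    # hypothetical:
    #
    #   apiVersion: v1
    #   kind: Service
    #   metadata:
    #     name: example-app
    #     annotations:
    #       prometheus.io/scrape: "true"
    #       prometheus.io/port: "8080"
    #       prometheus.io/path: "/metrics"
    #   spec:
    #     selector:
    #       app: example-app
    #     ports:
    #     - name: metrics
    #       port: 8080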
    - job_name: 'kubernetes-service-endpoints'
      kubernetes_sd_configs:
      - role: endpoints

      honor_labels: true
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_param]
        action: replace
        target_label: __param_$1
        regex: ([^=]+)=(.*)
        replacement: $2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: pod
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: job

      metric_relabel_configs:
      - source_labels: [__name__]
        action: drop
        regex: etcd_(debugging|disk|request|server).*

    # Example scrape config for pods
    #
    # The relabeling allows the actual pod scrape endpoint to be configured via the
    # following annotations:
    #
    # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
    # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
    # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the
    #   pod's declared ports (default is a port-free target if none are declared).
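    #
    # For illustration only, a Pod opting in to scraping might carry annotations like
    # the following (name, image, and port are hypothetical):
    #
    #   apiVersion: v1
    #   kind: Pod
    #   metadata:
    #     name: example-pod
    #     annotations:
    #       prometheus.io/scrape: "true"
    #       prometheus.io/port: "8080"
    #   spec:
    #     containers:
    #     - name: app
    #       image: example/app:latest
    #       ports:
    #       - containerPort: 8080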
    - job_name: 'kubernetes-pods'
      kubernetes_sd_configs:
      - role: pod

      relabel_configs:
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: kubernetes_pod_name

    # Example scrape config for probing Services via the Blackbox Exporter.
    #
    # Relabeling allows service probing to be configured via annotations:
    # * `prometheus.io/probe`: Only probe services that have a value of `true`
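    #
    # Probes are sent through the Blackbox Exporter at blackbox-exporter:8080 in the
    # `monitoring` namespace (see the `__address__` replacement below). For
    # illustration only, a Service opting in to probing might look like this; the
    # name and port are hypothetical:
    #
    #   apiVersion: v1
    #   kind: Service
    #   metadata:
    #     name: example-web
    #     annotations:
    #       prometheus.io/probe: "true"
    #   spec:
    #     selector:
    #       app: example-web
    #     ports:
    #     - port: 80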
    - job_name: 'kubernetes-services'
      metrics_path: /probe
      params:
        module: [http_2xx]

      kubernetes_sd_configs:
      - role: service

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
        action: keep
        regex: true
      - source_labels: [__address__]
        target_label: __param_target
      - target_label: __address__
        replacement: blackbox-exporter:8080
      - source_labels: [__param_target]
        target_label: instance
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        target_label: namespace
      - source_labels: [__meta_kubernetes_service_name]
        target_label: job

    # Example scrape config for probing Ingresses via the Blackbox Exporter.
    #
    # Relabeling allows ingress probing to be configured via annotations:
    # * `prometheus.io/probe`: Only probe ingresses that have a value of `true`
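    #
    # For illustration only, an Ingress opting in to probing might look like this;
    # the name and host are hypothetical:
    #
    #   apiVersion: networking.k8s.io/v1
    #   kind: Ingress
    #   metadata:
    #     name: example-web
    #     annotations:
    #       prometheus.io/probe: "true"
    #   spec:
    #     rules:
    #     - host: app.example.com
    #       http:
    #         paths:
    #         - path: /
    #           pathType: Prefix
    #           backend:
    #             service:
    #               name: example-web
    #               port:
    #                 number: 80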
    - job_name: 'kubernetes-ingresses'
      metrics_path: /probe
      params:
        module: [http_2xx]

      kubernetes_sd_configs:
      - role: ingress

      relabel_configs:
      - source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_ingress_scheme, __address__, __meta_kubernetes_ingress_path]
        regex: (.+);(.+);(.+)
        replacement: ${1}://${2}${3}
        target_label: __param_target
      - target_label: __address__
        replacement: blackbox-exporter:8080
      - source_labels: [__param_target]
        target_label: instance
      - action: labelmap
        regex: __meta_kubernetes_ingress_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        target_label: namespace
      - source_labels: [__meta_kubernetes_ingress_name]
        target_label: job

    # Rule files
    rule_files:
      - "/etc/prometheus/rules/*.rules"
      - "/etc/prometheus/rules/*.yaml"
      - "/etc/prometheus/rules/*.yml"