Source:
http://www.javashuo.com/article/p-xoxedfio-kh.html
https://github.com/redhatxl/k8s-prometheus-grafana
git clone https://github.com/redhatxl/k8s-prometheus-grafana.git
Pull the images needed for monitoring on the node (here they were pulled on all nodes):
docker pull prom/node-exporter
docker pull prom/prometheus:v2.0.0
docker pull grafana/grafana:4.2.0
Deploy the node-exporter component as a DaemonSet:
kubectl create -f node-exporter.yaml
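To confirm the DaemonSet came up, the checks below can be used; NodePort 31672 comes from the node-exporter Service defined later in this post, and <node-ip> is a placeholder for any node's address:

kubectl get daemonset node-exporter -n kube-system
kubectl get pods -n kube-system -l k8s-app=node-exporter -o wide
# Fetch raw metrics through the NodePort (31672 per the Service below);
# replace <node-ip> with one of your node addresses.
curl -s http://<node-ip>:31672/metrics | head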
Deploy the Prometheus components.
RBAC manifest:
kubectl create -f k8s-prometheus-grafana/prometheus/rbac-setup.yaml
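A quick sanity check of the ClusterRoleBinding is to impersonate the prometheus ServiceAccount with kubectl auth can-i (this only verifies RBAC, not the deployment itself):

# Both should print "yes" once rbac-setup.yaml has been applied
kubectl auth can-i list nodes --as=system:serviceaccount:kube-system:prometheus
kubectl auth can-i watch endpoints --as=system:serviceaccount:kube-system:prometheus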
Manage the Prometheus component's configuration file as a ConfigMap:
kubectl create -f k8s-prometheus-grafana/prometheus/configmap.yaml
Prometheus Deployment manifest:
kubectl create -f k8s-prometheus-grafana/prometheus/prometheus.deploy.yml
Prometheus Service manifest:
kubectl create -f k8s-prometheus-grafana/prometheus/prometheus.svc.yml
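If the NodePorts are not reachable (e.g. a firewall in front of the nodes), port-forwarding is a handy alternative while testing; this is optional and only assumes kubectl access to the cluster:

# Forward the in-cluster Prometheus service to localhost:9090
kubectl port-forward -n kube-system svc/prometheus 9090:9090
# Then browse http://localhost:9090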
Grafana Deployment manifest:
kubectl create -f k8s-prometheus-grafana/grafana/grafana-deploy.yaml
Grafana Service manifest:
kubectl create -f k8s-prometheus-grafana/grafana/grafana-svc.yaml
Grafana Ingress manifest:
kubectl create -f k8s-prometheus-grafana/grafana/grafana-ing.yaml
kubectl get po -n kube-system -o wide
NAME                                 READY   STATUS    RESTARTS   AGE     IP                NODE         NOMINATED NODE   READINESS GATES
coredns-5c98db65d4-j2925             1/1     Running   4          2d23h   10.244.0.13       k8s-master   <none>           <none>
coredns-5c98db65d4-k2rcj             1/1     Running   4          2d23h   10.244.0.12       k8s-master   <none>           <none>
etcd-k8s-master                      1/1     Running   4          2d23h   192.168.190.140   k8s-master   <none>           <none>
grafana-core-6ff599bfdc-pbdzf        1/1     Running   0          99m     10.244.1.66       k8s-node1    <none>           <none>
kube-apiserver-k8s-master            1/1     Running   4          2d23h   192.168.190.140   k8s-master   <none>           <none>
kube-controller-manager-k8s-master   1/1     Running   4          2d23h   192.168.190.140   k8s-master   <none>           <none>
kube-flannel-ds-amd64-6rq62          1/1     Running   5          2d23h   192.168.190.140   k8s-master   <none>           <none>
kube-flannel-ds-amd64-ctmdz          1/1     Running   4          2d23h   192.168.190.141   k8s-node1    <none>           <none>
kube-proxy-kmgc5                     1/1     Running   4          2d23h   192.168.190.140   k8s-master   <none>           <none>
kube-proxy-ss8jr                     1/1     Running   4          2d23h   192.168.190.141   k8s-node1    <none>           <none>
kube-scheduler-k8s-master            1/1     Running   4          2d23h   192.168.190.140   k8s-master   <none>           <none>
node-exporter-fsfkb                  1/1     Running   0          102m    10.244.1.64       k8s-node1    <none>           <none>
prometheus-68545d4fd8-hnltb          1/1     Running   0          101m    10.244.1.65       k8s-node1    <none>           <none>

kubectl get svc -n kube-system
NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
grafana         NodePort    10.106.70.200    <none>        3000:30592/TCP           98m
kube-dns        ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP   2d23h
node-exporter   NodePort    10.102.245.168   <none>        9100:31672/TCP           101m
prometheus      NodePort    10.101.205.6     <none>        9090:30003/TCP           99m
Access Prometheus:
any node's physical address plus the exposed NodePort (9090:30003/TCP, i.e. port 30003)
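Once the Prometheus UI loads, a quick query confirms that node-exporter data is flowing. The metric name below assumes a recent prom/node-exporter image (releases before 0.16 named it node_cpu instead of node_cpu_seconds_total):

# Per-instance CPU busy percentage over the last 5 minutes
100 - avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100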
Access Grafana:
any node's physical address plus the exposed NodePort (3000:30592/TCP, i.e. port 30592)
Log in with the default credentials:
admin/admin
Add a data source
Type: prometheus
URL: http://prometheus:9090
Save & Test
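The bare http://prometheus:9090 URL resolves because Grafana and the prometheus Service share the kube-system namespace. To exercise the same path from inside the cluster by hand, a throwaway pod works (a sketch; busybox's wget is assumed available):

# Hit the Prometheus health endpoint from a temporary in-cluster pod
kubectl run -it --rm debug --image=busybox --restart=Never -n kube-system \
  -- wget -qO- http://prometheus:9090/-/healthy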
Import a dashboard template
Template download: https://grafana.com/dashboards/315, or import online by ID: 315 / 1621
cat node-exporter.yaml

---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: kube-system
  labels:
    k8s-app: node-exporter
spec:
  template:
    metadata:
      labels:
        k8s-app: node-exporter
    spec:
      containers:
      - image: prom/node-exporter
        name: node-exporter
        ports:
        - containerPort: 9100
          protocol: TCP
          name: http
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: node-exporter
  name: node-exporter
  namespace: kube-system
spec:
  ports:
  - name: http
    port: 9100
    nodePort: 31672
    protocol: TCP
  type: NodePort
  selector:
    k8s-app: node-exporter
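Note that the extensions/v1beta1 DaemonSet API was removed in Kubernetes 1.16. On newer clusters the same workload can be written against apps/v1, which additionally requires an explicit selector (a sketch, not part of the original repo):

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: kube-system
  labels:
    k8s-app: node-exporter
spec:
  selector:               # required by apps/v1
    matchLabels:
      k8s-app: node-exporter
  template:
    metadata:
      labels:
        k8s-app: node-exporter
    spec:
      containers:
      - image: prom/node-exporter
        name: node-exporter
        ports:
        - containerPort: 9100
          protocol: TCP
          name: http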
cat rbac-setup.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/proxy
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: kube-system
cat configmap.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: kube-system
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
      evaluation_interval: 15s
    scrape_configs:

    - job_name: 'kubernetes-apiservers'
      kubernetes_sd_configs:
      - role: endpoints
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https

    - job_name: 'kubernetes-nodes'
      kubernetes_sd_configs:
      - role: node
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics

    - job_name: 'kubernetes-cadvisor'
      kubernetes_sd_configs:
      - role: node
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

    - job_name: 'kubernetes-service-endpoints'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name

    - job_name: 'kubernetes-services'
      kubernetes_sd_configs:
      - role: service
      metrics_path: /probe
      params:
        module: [http_2xx]
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
        action: keep
        regex: true
      - source_labels: [__address__]
        target_label: __param_target
      - target_label: __address__
        replacement: blackbox-exporter.example.com:9115
      - source_labels: [__param_target]
        target_label: instance
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        target_label: kubernetes_name

    - job_name: 'kubernetes-ingresses'
      kubernetes_sd_configs:
      - role: ingress
      relabel_configs:
      - source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
        regex: (.+);(.+);(.+)
        replacement: ${1}://${2}${3}
        target_label: __param_target
      - target_label: __address__
        replacement: blackbox-exporter.example.com:9115
      - source_labels: [__param_target]
        target_label: instance
      - action: labelmap
        regex: __meta_kubernetes_ingress_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_ingress_name]
        target_label: kubernetes_name

    - job_name: 'kubernetes-pods'
      kubernetes_sd_configs:
      - role: pod
      relabel_configs:
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: kubernetes_pod_name
Configuration notes (from the official documentation):
https://prometheus.io/docs/prometheus/latest/configuration/configuration/
Kubernetes SD configurations allow retrieving scrape targets from the Kubernetes REST API and always stay synchronized with the cluster state.
One of the following role types can be configured to discover targets:
1. node

The node role discovers one target per cluster node, with the address defaulting to the Kubelet's HTTP port. The target address defaults to the first existing address of the Kubernetes node object, in the address type order NodeInternalIP, NodeExternalIP, NodeLegacyHostIP, NodeHostName.

Available meta labels:

__meta_kubernetes_node_name: The name of the node object.
__meta_kubernetes_node_label_<labelname>: Each label from the node object.
__meta_kubernetes_node_labelpresent_<labelname>: true for each label from the node object.
__meta_kubernetes_node_annotation_<annotationname>: Each annotation from the node object.
__meta_kubernetes_node_annotationpresent_<annotationname>: true for each annotation from the node object.
__meta_kubernetes_node_address_<address_type>: The first address for each node address type, if it exists.

In addition, the instance label for the node will be set to the node name as retrieved from the API server.
2. service

The service role discovers a target for each service port of every service. This is generally useful for black-box monitoring of a service. The address is set to the Kubernetes DNS name of the service and the respective service port (see the annotated Service sketch after the label list below).

Available meta labels:

__meta_kubernetes_namespace: The namespace of the service object.
__meta_kubernetes_service_annotation_<annotationname>: Each annotation from the service object.
__meta_kubernetes_service_annotationpresent_<annotationname>: true for each annotation of the service object.
__meta_kubernetes_service_cluster_ip: The cluster IP address of the service. (Does not apply to services of type ExternalName.)
__meta_kubernetes_service_external_name: The DNS name of the service. (Applies to services of type ExternalName.)
__meta_kubernetes_service_label_<labelname>: Each label from the service object.
__meta_kubernetes_service_labelpresent_<labelname>: true for each label of the service object.
__meta_kubernetes_service_name: The name of the service object.
__meta_kubernetes_service_port_name: Name of the service port for the target.
__meta_kubernetes_service_port_protocol: Protocol of the service port for the target.
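As a concrete tie-in to the configmap above: its kubernetes-service-endpoints job only keeps targets whose Service carries the prometheus.io/scrape annotation. A hypothetical annotated Service might look like this (name and port are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: my-app                     # hypothetical service name
  namespace: default
  annotations:
    prometheus.io/scrape: "true"   # picked up by the keep rule
    prometheus.io/port: "8080"     # overrides the scrape port
    prometheus.io/path: "/metrics" # overrides the metrics path
spec:
  selector:
    app: my-app
  ports:
  - port: 8080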
3. pod

The pod role discovers all pods and exposes their containers as targets. For each declared port of a container, a single target is generated. If a container has no specified ports, a port-free target per container is created, so that a port can be added manually via relabeling (see the annotated-pod sketch after the label list below).

Available meta labels:

__meta_kubernetes_namespace: The namespace of the pod object.
__meta_kubernetes_pod_name: The name of the pod object.
__meta_kubernetes_pod_ip: The pod IP of the pod object.
__meta_kubernetes_pod_label_<labelname>: Each label from the pod object.
__meta_kubernetes_pod_labelpresent_<labelname>: true for each label from the pod object.
__meta_kubernetes_pod_annotation_<annotationname>: Each annotation from the pod object.
__meta_kubernetes_pod_annotationpresent_<annotationname>: true for each annotation from the pod object.
__meta_kubernetes_pod_container_init: true if the container is an InitContainer.
__meta_kubernetes_pod_container_name: Name of the container the target address points to.
__meta_kubernetes_pod_container_port_name: Name of the container port.
__meta_kubernetes_pod_container_port_number: Number of the container port.
__meta_kubernetes_pod_container_port_protocol: Protocol of the container port.
__meta_kubernetes_pod_ready: Set to true or false for the pod's ready state.
__meta_kubernetes_pod_phase: Set to Pending, Running, Succeeded, Failed or Unknown in the lifecycle.
__meta_kubernetes_pod_node_name: The name of the node the pod is scheduled onto.
__meta_kubernetes_pod_host_ip: The current host IP of the pod object.
__meta_kubernetes_pod_uid: The UID of the pod object.
__meta_kubernetes_pod_controller_kind: Object kind of the pod controller.
__meta_kubernetes_pod_controller_name: Name of the pod controller.
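Analogously to services, the kubernetes-pods job in the configmap keeps only pods annotated with prometheus.io/scrape. A hypothetical pod spec fragment (all names illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: my-app-pod                 # hypothetical pod name
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9102"     # sets __address__ via relabeling
    prometheus.io/path: "/metrics"
spec:
  containers:
  - name: my-app
    image: my-app:latest           # hypothetical image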
4. endpoints

The endpoints role discovers targets from the listed endpoints of a service. For each endpoint address, one target is discovered per port. If the endpoint is backed by a pod, all additional container ports of the pod that are not bound to an endpoint port are discovered as targets as well.

Available meta labels:

__meta_kubernetes_namespace: The namespace of the endpoints object.
__meta_kubernetes_endpoints_name: The name of the endpoints object.

For all targets discovered directly from the endpoints list (those not additionally inferred from underlying pods), the following labels are attached:

__meta_kubernetes_endpoint_hostname: Hostname of the endpoint.
__meta_kubernetes_endpoint_node_name: Name of the node hosting the endpoint.
__meta_kubernetes_endpoint_ready: Set to true or false for the endpoint's ready state.
__meta_kubernetes_endpoint_port_name: Name of the endpoint port.
__meta_kubernetes_endpoint_port_protocol: Protocol of the endpoint port.
__meta_kubernetes_endpoint_address_target_kind: Kind of the endpoint address target.
__meta_kubernetes_endpoint_address_target_name: Name of the endpoint address target.

If the endpoints belong to a service, all labels of the role: service discovery are attached.
For all targets backed by a pod, all labels of the role: pod discovery are attached.
5. ingress

The ingress role discovers a target for each path of each ingress. This is generally useful for black-box monitoring of an ingress. The address is set to the host specified in the ingress spec (see the annotated Ingress sketch after the label list below).

Available meta labels:

__meta_kubernetes_namespace: The namespace of the ingress object.
__meta_kubernetes_ingress_name: The name of the ingress object.
__meta_kubernetes_ingress_label_<labelname>: Each label from the ingress object.
__meta_kubernetes_ingress_labelpresent_<labelname>: true for each label from the ingress object.
__meta_kubernetes_ingress_annotation_<annotationname>: Each annotation from the ingress object.
__meta_kubernetes_ingress_annotationpresent_<annotationname>: true for each annotation from the ingress object.
__meta_kubernetes_ingress_scheme: Protocol scheme of the ingress; https if TLS config is set. Defaults to http.
__meta_kubernetes_ingress_path: Path from the ingress spec. Defaults to /.
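The kubernetes-ingresses job above likewise keys off a prometheus.io/probe annotation and routes the probe through a Blackbox Exporter (the blackbox-exporter.example.com:9115 address in the configmap is itself a placeholder). A hypothetical opted-in Ingress:

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: my-app                    # hypothetical ingress name
  annotations:
    prometheus.io/probe: "true"   # picked up by the keep rule
spec:
  rules:
  - host: my-app.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: my-app
          servicePort: 80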
Configuration options for Kubernetes discovery:
# The information to access the Kubernetes API.

# The API server addresses. If left empty, Prometheus is assumed to run inside
# of the cluster and will discover API servers automatically and use the pod's
# CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
[ api_server: <host> ]

# The Kubernetes role of entities that should be discovered.
role: <role>

# Optional authentication information used to authenticate to the API server.
# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are
# mutually exclusive.
# password and password_file are mutually exclusive.

# Optional HTTP basic authentication information.
basic_auth:
  [ username: <string> ]
  [ password: <secret> ]
  [ password_file: <string> ]

# Optional bearer token authentication information.
[ bearer_token: <secret> ]

# Optional bearer token file authentication information.
[ bearer_token_file: <filename> ]

# Optional proxy URL.
[ proxy_url: <string> ]

# TLS configuration.
tls_config:
  [ <tls_config> ]

# Optional namespace discovery. If omitted, all namespaces are used.
namespaces:
  names:
    [ - <string> ]
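For instance, a minimal sketch of an out-of-cluster scrape job using these options might look like the following; the API server address, credential paths, and namespace are all assumptions for illustration:

scrape_configs:
- job_name: 'out-of-cluster-pods'
  kubernetes_sd_configs:
  - api_server: https://192.0.2.10:6443            # hypothetical API server
    role: pod
    bearer_token_file: /etc/prometheus/k8s-token   # hypothetical token file
    tls_config:
      ca_file: /etc/prometheus/k8s-ca.crt          # hypothetical CA bundle
    namespaces:
      names:
      - monitoring                                 # restrict discovery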
prometheus-kubernetes.yml:
Source: https://github.com/prometheus/prometheus/blob/release-2.14/documentation/examples/prometheus-kubernetes.yml
# A scrape configuration for running Prometheus on a Kubernetes cluster.
# This uses separate scrape configs for cluster components (i.e. API server, node)
# and services to allow each to use different authentication configs.
#
# Kubernetes labels will be added as Prometheus labels on metrics via the
# `labelmap` relabeling action.
#
# If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
# for the kubernetes-cadvisor job; you will need to edit or remove this job.

# Scrape config for API servers.
#
# Kubernetes exposes API servers as endpoints to the default/kubernetes
# service so this uses `endpoints` role and uses relabelling to only keep
# the endpoints associated with the default/kubernetes service using the
# default named port `https`. This works for single API server deployments as
# well as HA API server deployments.
scrape_configs:
- job_name: 'kubernetes-apiservers'

  kubernetes_sd_configs:
  - role: endpoints

  # Default to scraping over https. If required, just disable this or change to
  # `http`.
  scheme: https

  # This TLS & bearer token file config is used to connect to the actual scrape
  # endpoints for cluster components. This is separate to discovery auth
  # configuration because discovery & scraping are two separate concerns in
  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
  # the cluster. Otherwise, more config options have to be provided within the
  # <kubernetes_sd_config>.
  tls_config:
    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
    # If your node certificates are self-signed or use a different CA to the
    # master CA, then disable certificate verification below. Note that
    # certificate verification is an integral part of a secure infrastructure
    # so this should only be disabled in a controlled environment. You can
    # disable certificate verification by uncommenting the line below.
    #
    # insecure_skip_verify: true
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

  # Keep only the default/kubernetes service endpoints for the https port. This
  # will add targets for each API server which Kubernetes adds an endpoint to
  # the default/kubernetes service.
  relabel_configs:
  - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
    action: keep
    regex: default;kubernetes;https

# Scrape config for nodes (kubelet).
#
# Rather than connecting directly to the node, the scrape is proxied though the
# Kubernetes apiserver. This means it will work if Prometheus is running out of
# cluster, or can't connect to nodes for some other reason (e.g. because of
# firewalling).
- job_name: 'kubernetes-nodes'

  # Default to scraping over https. If required, just disable this or change to
  # `http`.
  scheme: https

  # This TLS & bearer token file config is used to connect to the actual scrape
  # endpoints for cluster components. This is separate to discovery auth
  # configuration because discovery & scraping are two separate concerns in
  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
  # the cluster. Otherwise, more config options have to be provided within the
  # <kubernetes_sd_config>.
  tls_config:
    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

  kubernetes_sd_configs:
  - role: node

  relabel_configs:
  - action: labelmap
    regex: __meta_kubernetes_node_label_(.+)
  - target_label: __address__
    replacement: kubernetes.default.svc:443
  - source_labels: [__meta_kubernetes_node_name]
    regex: (.+)
    target_label: __metrics_path__
    replacement: /api/v1/nodes/${1}/proxy/metrics

# Scrape config for Kubelet cAdvisor.
#
# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
# (those whose names begin with 'container_') have been removed from the
# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
# retrieve those metrics.
#
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
# the --cadvisor-port=0 Kubelet flag).
#
# This job is not necessary and should be removed in Kubernetes 1.6 and
# earlier versions, or it will cause the metrics to be scraped twice.
- job_name: 'kubernetes-cadvisor'

  # Default to scraping over https. If required, just disable this or change to
  # `http`.
  scheme: https

  # This TLS & bearer token file config is used to connect to the actual scrape
  # endpoints for cluster components. This is separate to discovery auth
  # configuration because discovery & scraping are two separate concerns in
  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
  # the cluster. Otherwise, more config options have to be provided within the
  # <kubernetes_sd_config>.
  tls_config:
    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

  kubernetes_sd_configs:
  - role: node

  relabel_configs:
  - action: labelmap
    regex: __meta_kubernetes_node_label_(.+)
  - target_label: __address__
    replacement: kubernetes.default.svc:443
  - source_labels: [__meta_kubernetes_node_name]
    regex: (.+)
    target_label: __metrics_path__
    replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

# Example scrape config for service endpoints.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some endpoints.
- job_name: 'kubernetes-service-endpoints'

  kubernetes_sd_configs:
  - role: endpoints

  relabel_configs:
  # Example relabel to scrape only endpoints that have
  # "example.io/should_be_scraped = true" annotation.
  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped]
  #    action: keep
  #    regex: true
  #
  # Example relabel to customize metric path based on endpoints
  # "example.io/metric_path = <metric path>" annotation.
  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path]
  #    action: replace
  #    target_label: __metrics_path__
  #    regex: (.+)
  #
  # Example relabel to scrape only single, desired port for the service based
  # on endpoints "example.io/scrape_port = <port>" annotation.
  #  - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port]
  #    action: replace
  #    regex: ([^:]+)(?::\d+)?;(\d+)
  #    replacement: $1:$2
  #    target_label: __address__
  #
  # Example relabel to configure scrape scheme for all service scrape targets
  # based on endpoints "example.io/scrape_scheme = <scheme>" annotation.
  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme]
  #    action: replace
  #    target_label: __scheme__
  #    regex: (https?)
  - action: labelmap
    regex: __meta_kubernetes_service_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_service_name]
    action: replace
    target_label: kubernetes_name

# Example scrape config for probing services via the Blackbox Exporter.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some services.
- job_name: 'kubernetes-services'

  metrics_path: /probe
  params:
    module: [http_2xx]

  kubernetes_sd_configs:
  - role: service

  relabel_configs:
  # Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation
  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed]
  #    action: keep
  #    regex: true
  - source_labels: [__address__]
    target_label: __param_target
  - target_label: __address__
    replacement: blackbox-exporter.example.com:9115
  - source_labels: [__param_target]
    target_label: instance
  - action: labelmap
    regex: __meta_kubernetes_service_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_service_name]
    target_label: kubernetes_name

# Example scrape config for probing ingresses via the Blackbox Exporter.
#
# The relabeling allows the actual ingress scrape endpoint to be configured
# for all or only some services.
- job_name: 'kubernetes-ingresses'

  metrics_path: /probe
  params:
    module: [http_2xx]

  kubernetes_sd_configs:
  - role: ingress

  relabel_configs:
  # Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation
  #  - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed]
  #    action: keep
  #    regex: true
  - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
    regex: (.+);(.+);(.+)
    replacement: ${1}://${2}${3}
    target_label: __param_target
  - target_label: __address__
    replacement: blackbox-exporter.example.com:9115
  - source_labels: [__param_target]
    target_label: instance
  - action: labelmap
    regex: __meta_kubernetes_ingress_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_ingress_name]
    target_label: kubernetes_name

# Example scrape config for pods
#
# The relabeling allows the actual pod scrape to be configured
# for all the declared ports (or port-free target if none is declared)
# or only some ports.
- job_name: 'kubernetes-pods'

  kubernetes_sd_configs:
  - role: pod

  relabel_configs:
  # Example relabel to scrape only pods that have
  # "example.io/should_be_scraped = true" annotation.
  #  - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped]
  #    action: keep
  #    regex: true
  #
  # Example relabel to customize metric path based on pod
  # "example.io/metric_path = <metric path>" annotation.
  #  - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path]
  #    action: replace
  #    target_label: __metrics_path__
  #    regex: (.+)
  #
  # Example relabel to scrape only single, desired port for the pod
  # based on pod "example.io/scrape_port = <port>" annotation.
  # Note that __address__ is modified here, so if pod containers' ports
  # are declared, they all will be ignored.
  #  - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port]
  #    action: replace
  #    regex: ([^:]+)(?::\d+)?;(\d+)
  #    replacement: $1:$2
  #    target_label: __address__
  - action: labelmap
    regex: __meta_kubernetes_pod_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_pod_name]
    action: replace
    target_label: kubernetes_pod_name
cat prometheus.deploy.yml

---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    name: prometheus-deployment
  name: prometheus
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      containers:
      - image: prom/prometheus:v2.0.0
        name: prometheus
        command:
        - "/bin/prometheus"
        args:
        - "--config.file=/etc/prometheus/prometheus.yml"
        - "--storage.tsdb.path=/prometheus"
        - "--storage.tsdb.retention=24h"
        ports:
        - containerPort: 9090
          protocol: TCP
        volumeMounts:
        - mountPath: "/prometheus"
          name: data
        - mountPath: "/etc/prometheus"
          name: config-volume
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 500m
            memory: 2500Mi
      serviceAccountName: prometheus
      volumes:
      - name: data
        emptyDir: {}
      - name: config-volume
        configMap:
          name: prometheus-config
cat prometheus.svc.yml

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: prometheus
  name: prometheus
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 9090
    targetPort: 9090
    nodePort: 30003
  selector:
    app: prometheus
cat grafana-deploy.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: grafana-core
  namespace: kube-system
  labels:
    app: grafana
    component: core
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: grafana
        component: core
    spec:
      containers:
      - image: grafana/grafana:4.2.0
        name: grafana-core
        imagePullPolicy: IfNotPresent
        # env:
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
          # The following env variables set up basic auth with the default admin user and admin password.
          - name: GF_AUTH_BASIC_ENABLED
            value: "true"
          - name: GF_AUTH_ANONYMOUS_ENABLED
            value: "false"
          # - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          #   value: Admin
          # does not really work, because of template variables in exported dashboards:
          # - name: GF_DASHBOARDS_JSON_ENABLED
          #   value: "true"
        readinessProbe:
          httpGet:
            path: /login
            port: 3000
          # initialDelaySeconds: 30
          # timeoutSeconds: 1
        volumeMounts:
        - name: grafana-persistent-storage
          mountPath: /var
      volumes:
      - name: grafana-persistent-storage
        emptyDir: {}
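One caveat worth flagging: the emptyDir volume above means dashboards and users are lost whenever the pod is rescheduled. A possible sketch for persistence, assuming the cluster has a default StorageClass (the claim name and size are illustrative):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-pvc              # hypothetical claim name
  namespace: kube-system
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi               # illustrative size
# ...then swap the emptyDir volume in grafana-deploy.yaml for:
#      volumes:
#      - name: grafana-persistent-storage
#        persistentVolumeClaim:
#          claimName: grafana-pvc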
cat grafana-svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: kube-system
  labels:
    app: grafana
    component: core
spec:
  type: NodePort
  ports:
  - port: 3000
  selector:
    app: grafana
    component: core
cat grafana-ing.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: grafana
  namespace: kube-system
spec:
  rules:
  - host: k8s.grafana
    http:
      paths:
      - path: /
        backend:
          serviceName: grafana
          servicePort: 3000
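Since k8s.grafana is not a resolvable DNS name, testing this Ingress needs either an /etc/hosts entry or an explicit Host header. A sketch, assuming an ingress controller is already installed and reachable at <ingress-ip> (placeholder):

# Expect the Grafana login page (or a redirect to /login)
curl -H "Host: k8s.grafana" http://<ingress-ip>/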