k8s使用kube-router網絡插件並監控流量狀態

簡介

kube-router是一個新的k8s的網絡插件,使用lvs作服務的代理及負載均衡,使用iptables來作網絡的隔離策略。部署簡單,只須要在每一個節點部署一個daemonset便可,高性能,易維護。支持pod間通訊,以及服務的代理。

安裝

# NOTE(review): this experiment was done on a freshly created cluster;
# reusing a cluster that had previously run other CNI plugins did not work,
# presumably due to leftover network state — keep that in mind when testing.

# Create a working directory and download the kube-router manifests
mkdir kube-router && cd kube-router
wget https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml
wget https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter-all-features.yaml

# Choose ONE of the two deployment options below

# 1. Enable only pod networking and network policy (kube-proxy is kept)
kubectl apply -f kubeadm-kuberouter.yaml

# 2. Enable all features: pod networking, network policy AND service proxy.
#    kube-router replaces kube-proxy here, so delete kube-proxy and the
#    proxy rules it previously installed.
kubectl apply -f kubeadm-kuberouter-all-features.yaml
kubectl -n kube-system delete ds kube-proxy

# Run on EVERY node to flush the proxy rules kube-proxy left behind
docker run --privileged --net=host registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.10.2 kube-proxy --cleanup

# Verify that pods and services are up
kubectl get pods --namespace kube-system
kubectl get svc --namespace kube-system
複製代碼

測試

# Start a deployment used for testing
kubectl run nginx --replicas=2 --image=nginx:alpine --port=80
kubectl expose deployment nginx --type=NodePort --name=example-service-nodeport
kubectl expose deployment nginx --name=example-service

# DNS and connectivity tests (nslookup/curl run inside the curl pod's shell)
kubectl run curl --image=radial/busyboxplus:curl -i --tty
nslookup kubernetes
nslookup example-service
curl example-service

# Clean up the test resources
kubectl delete svc example-service example-service-nodeport
kubectl delete deploy nginx curl
複製代碼

監控相關數據並可視化

從新部署kube-router

# Back up the manifest, then edit it to expose kube-router metrics
cp kubeadm-kuberouter-all-features.yaml kubeadm-kuberouter-all-features.yaml.ori
vim kubeadm-kuberouter-all-features.yaml
...
spec:
  template:
    metadata:
      labels:
        k8s-app: kube-router
        tier: node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        # Added: let Prometheus discover and scrape this pod
        prometheus.io/scrape: "true"
        prometheus.io/path: "/metrics"
        prometheus.io/port: "8080"
    spec:
      serviceAccountName: kube-router
      serviceAccount: kube-router
      containers:
      - name: kube-router
        image: cloudnativelabs/kube-router
        imagePullPolicy: Always
        args:
        # Added: the two flags below enable the /metrics endpoint on port 8080
        - --metrics-path=/metrics
        - --metrics-port=8080
        - --run-router=true
        - --run-firewall=true
        - --run-service-proxy=true
        - --kubeconfig=/var/lib/kube-router/kubeconfig
...

# Redeploy kube-router with the new metrics flags
kubectl delete ds kube-router -n kube-system
kubectl apply -f kubeadm-kuberouter-all-features.yaml

# Verify the metrics endpoint (run this on a cluster node)
curl http://127.0.0.1:8080/metrics
複製代碼

部署prometheus

複製以下內容到prometheus.yml文件

---
# Prometheus scrape configuration, mounted into the Prometheus pod
# as /etc/prometheus/prometheus.yml via the config-volume ConfigMap mount.
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus
  namespace: kube-system
data:
  prometheus.yml: |-
    global:
      scrape_interval: 15s
    scrape_configs:

    # scrape config for API servers
    - job_name: 'kubernetes-apiservers'
      kubernetes_sd_configs:
      - role: endpoints
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https

    # scrape config for nodes (kubelet)
    - job_name: 'kubernetes-nodes'
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics

    # Scrape config for Kubelet cAdvisor.
    #
    # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
    # (those whose names begin with 'container_') have been removed from the
    # Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
    # retrieve those metrics.
    #
    # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
    # HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
    # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
    # the --cadvisor-port=0 Kubelet flag).
    #
    # This job is not necessary and should be removed in Kubernetes 1.6 and
    # earlier versions, or it will cause the metrics to be scraped twice.
    - job_name: 'kubernetes-cadvisor'
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

    # scrape config for service endpoints; only services annotated with
    # prometheus.io/scrape: "true" are kept.
    - job_name: 'kubernetes-service-endpoints'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name

    # scrape config for pods annotated with prometheus.io/scrape: "true"
    # (this is what picks up the kube-router pods)
    - job_name: 'kubernetes-pods'
      kubernetes_sd_configs:
      - role: pod
      relabel_configs:
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: pod_name
---
# NodePort service exposing the Prometheus web UI (port 9090)
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/scrape: 'true'
  labels:
    name: prometheus
  name: prometheus
  namespace: kube-system
spec:
  selector:
    app: prometheus
  type: NodePort
  ports:
  - name: prometheus
    protocol: TCP
    port: 9090
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: prometheus
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      name: prometheus
      labels:
        app: prometheus
      annotations:
        # keep the istio sidecar out of the monitoring pod
        sidecar.istio.io/inject: "false"
    spec:
      serviceAccountName: prometheus
      containers:
      - name: prometheus
        image: docker.io/prom/prometheus:v2.2.1
        imagePullPolicy: IfNotPresent
        args:
        - '--storage.tsdb.retention=6h'
        - '--config.file=/etc/prometheus/prometheus.yml'
        ports:
        - name: web
          containerPort: 9090
        volumeMounts:
        # mount the scrape config from the "prometheus" ConfigMap
        - name: config-volume
          mountPath: /etc/prometheus
      volumes:
      - name: config-volume
        configMap:
          name: prometheus
---
# identity the Prometheus pod runs as; bound to the ClusterRole below
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: kube-system
---
# read-only access Prometheus needs for Kubernetes service discovery
# and for scraping node/cAdvisor metrics through the API-server proxy
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - services
  - endpoints
  - pods
  - nodes/proxy
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["get"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: kube-system
---
複製代碼

部署測試

# Deploy
kubectl apply -f prometheus.yml

# Check that the pod and service are up
kubectl get pods --namespace kube-system
kubectl get svc --namespace kube-system

# Access the Prometheus UI and type "kube_router" in the query box —
# metric-name suggestions appearing confirms kube-router is being scraped.
# Read the NodePort and a node address via jsonpath instead of the fragile
# grep/awk/cut/ping text-parsing pipeline (robust against column changes).
prometheusNodePort=$(kubectl get svc prometheus -n kube-system -o jsonpath='{.spec.ports[0].nodePort}')
nodeIP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
echo "http://${nodeIP}:${prometheusNodePort}"
複製代碼

部署grafana

複製以下內容到grafana.yml文件

---
# NodePort service exposing the Grafana web UI (port 3000)
apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 3000
    protocol: TCP
    name: http
  selector:
    app: grafana
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: grafana
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: grafana
    spec:
      serviceAccountName: grafana
      containers:
      - name: grafana
        image: grafana/grafana
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 3000
        volumeMounts:
        - mountPath: /var/lib/grafana
          name: grafana-data
      volumes:
      # NOTE(review): emptyDir means dashboards/settings are lost when the
      # pod is rescheduled — fine for this experiment, use a PV in production
      - name: grafana-data
        emptyDir: {}
---
# identity the Grafana pod runs as
apiVersion: v1
kind: ServiceAccount
metadata:
  name: grafana
  namespace: kube-system
---
複製代碼

部署測試

# Deploy
kubectl apply -f grafana.yml

# Check that the pod and service are up
kubectl get pods --namespace kube-system
kubectl get svc --namespace kube-system


# Access Grafana.
# Read the NodePort and a node address via jsonpath instead of the fragile
# grep/awk/cut/ping text-parsing pipeline (robust against column changes).
grafanaNodePort=$(kubectl get svc grafana -n kube-system -o jsonpath='{.spec.ports[0].nodePort}')
nodeIP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
echo "http://${nodeIP}:${grafanaNodePort}"

# Default username/password
admin/admin
複製代碼

導入並查看dashboard

# Download the official kube-router dashboard JSON definition
wget https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/dashboard/kube-router.json
複製代碼

建立名爲Prometheus類型也爲Prometheus的數據源,鏈接地址爲http://prometheus:9090/

選擇剛剛下載的json文件導入dashboard

查看dashboard

相關文章
相關標籤/搜索