這是K8S集羣中的兩個節點,node21和node22,集羣中交付了一個coredns服務,調度在node22上了,pod地址是10.64.22.5,同時映射了一個service,地址是172.64.0.2。按道理,全部的節點和pod,都是以172.64.0.2作爲域名解析地址的,可是如今有個很奇怪的現象:
1. 在物理節點node21和node22上,直接使用10.64.22.5作解析,是能解析到的。
2. 在物理節點node22上,使用service地址172.64.0.2作解析,也是能解析到的。
3. 在節點上的任意pod裏,使用172.64.0.2作解析,也是能解析到的。
4. 惟獨在node21上,使用service地址172.64.0.2作解析,一直反饋是超時。
雖說不影響整個集羣運行,可是一直不知道是爲啥緣由,已經從新部署過屢次也不能解決此問題,還請大神們指教一二。謝謝了。
service.yaml
---
# CoreDNS Service: exposes the coredns pods at the fixed cluster DNS IP
# 172.64.0.2 on port 53 (UDP + TCP) plus the Prometheus metrics port.
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  annotations:
    # Scrape target for Prometheus service discovery.
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  # Fixed cluster DNS address; must lie inside the cluster service CIDR
  # and match the DNS server configured in every kubelet.
  clusterIP: 172.64.0.2
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
    - name: metrics
      port: 9153
      protocol: TCP
dp.yaml
---
# CoreDNS Deployment: runs the DNS server pods selected by the Service above.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      tolerations:
        # Allow scheduling on master nodes and mark the addon as critical.
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
        - name: coredns
          image: harbor.cmcc.com/public/coredns:v1.6.1
          imagePullPolicy: IfNotPresent
          resources:
            requests:
              cpu: 100m
              memory: 70Mi
          args: ["-conf", "/etc/coredns/Corefile"]
          volumeMounts:
            # Corefile is supplied by the coredns ConfigMap.
            - name: config-volume
              mountPath: /etc/coredns
              readOnly: true
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - NET_BIND_SERVICE
              # NOTE(review): upstream CoreDNS manifests spell this "ALL";
              # kept as-is from the original — confirm your runtime accepts
              # the lowercase form.
              drop:
                - all
            readOnlyRootFilesystem: true
      # Default: inherit the node's resolv.conf so CoreDNS itself does not
      # resolve through the cluster DNS (avoids a resolution loop).
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
              - key: Corefile
                path: Corefile
config.yaml
---
# CoreDNS ConfigMap: holds the Corefile mounted into the coredns pods.
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local 172.64.0.0/16
        prometheus :9153
        forward . 192.168.64.11
        cache 30
        loop
        reload
        loadbalance
    }