[root@cc-k8s01 work]# wget https://storage.googleapis.com/kubernetes-helm/helm-v2.14.0-rc.2-linux-amd64.tar.gz
[root@cc-k8s01 work]# tar zxf helm-v2.14.0-rc.2-linux-amd64.tar.gz
[root@cc-k8s01 work]# cd linux-amd64/
[root@cc-k8s01 linux-amd64]# mv helm /opt/k8s/bin
[root@cc-k8s01 linux-amd64]# helm
The Kubernetes package manager

To begin working with Helm, run the 'helm init' command:

    $ helm init
1. Create the RBAC service account and grant it permissions
kubectl create serviceaccount tiller --namespace kube-system
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
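To confirm the binding took effect, you can ask the API server whether the tiller service account may perform an admin-level action. This is an extra sanity check, not part of the original steps:

## cluster-admin should allow any verb on any resource
kubectl auth can-i create deployments --namespace kube-system \
  --as=system:serviceaccount:kube-system:tiller
## Expected output: yes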
2. Create the certificates for TLS communication
## Create the tiller server certificate CSR
cd /opt/k8s/work
cat > tiller-server-csr.json <<EOF
{
  "CN": "tiller-server",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

## Generate the certificate, reusing the Kubernetes CA
cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes tiller-server-csr.json | cfssljson -bare tiller-server
ls tiller-server*

## Create the CSR for the helm client to talk to the tiller server
cd /opt/k8s/work
cat > helm-csr.json <<EOF
{
  "CN": "tiller",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

## Generate the certificate, reusing the Kubernetes CA
cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes helm-csr.json | cfssljson -bare helm
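Before wiring these certificates into helm init, it is worth confirming that both were signed by the same CA. A quick check, not in the original steps:

## Inspect subject, issuer and validity of the generated certificates
openssl x509 -in tiller-server.pem -noout -subject -issuer -dates
openssl x509 -in helm.pem -noout -subject -issuer -dates
## Verify both chain back to the Kubernetes CA
openssl verify -CAfile /opt/k8s/work/ca.pem tiller-server.pem helm.pem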
3. Initialize tiller
# The tiller image is hosted on gcr.io, which is unreachable from here, so pull it from a mirror registry first
[root@cc-k8s02 ~]# docker pull gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2
v2.14.0-rc.2: Pulling from kubernetes-helm/tiller
bdf0201b3a05: Pull complete
879b5272666d: Pull complete
3dd50e1bb957: Pull complete
ba6c28ffc2cd: Pull complete
Digest: sha256:5d19651f555dfcd9aafa52ee569413aa38e60ddab19e54c6dc33ad60da2d46ed
Status: Downloaded newer image for gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2
[root@cc-k8s02 ~]# docker tag gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2 gcr.io/kubernetes-helm/tiller:v2.14.0-rc.2
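The pull-and-retag has to happen on every node the tiller pod might be scheduled to. A minimal loop sketch, assuming passwordless SSH and a hypothetical node list (adjust to your cluster):

## Hypothetical node names; cc-k8s01/02 appear above, cc-k8s03 is assumed
for node in cc-k8s01 cc-k8s02 cc-k8s03; do
  ssh root@${node} "docker pull gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2 && \
    docker tag gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2 gcr.io/kubernetes-helm/tiller:v2.14.0-rc.2"
done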
[root@cc-k8s01 work]# helm init --tiller-tls --tiller-tls-cert ./tiller-server.pem --tiller-tls-key ./tiller-server-key.pem --tiller-tls-verify --tls-ca-cert ca.pem
$HELM_HOME has been configured at /root/.helm.
[root@cc-k8s01 work]# kubectl get pods -n kube-system
NAME                                    READY   STATUS    RESTARTS   AGE
coredns-5b969f4c88-n8kht                1/1     Running   4          9d
elasticsearch-logging-0                 1/1     Running   0          9d
elasticsearch-logging-1                 1/1     Running   0          9d
fluentd-es-v2.4.0-5n9q2                 1/1     Running   0          9d
fluentd-es-v2.4.0-8flsx                 1/1     Running   0          9d
fluentd-es-v2.4.0-qlhb5                 1/1     Running   0          9d
kibana-logging-f4d99b69f-mw28c          1/1     Running   0          9d
kube-state-metrics-699fdf75f8-2tskt     1/1     Running   0          2d
kubernetes-dashboard-7848d45466-mg4w5   1/1     Running   0          9d
metrics-server-6f97f5879-qg8vx          1/1     Running   0          9d
tiller-deploy-78fc7f6db4-jq5zs          1/1     Running   0          14s

## Copy the helm certificate, private key, and CA certificate to /root/.helm, renaming them key.pem and cert.pem
cp ca.pem helm.pem helm-key.pem /root/.helm
cd /root/.helm
mv helm-key.pem key.pem
mv helm.pem cert.pem

## helm can now talk to tiller over TLS by adding the --tls flag
[root@cc-k8s01 work]# helm ls --tls

This produced the following error:

[root@cc-k8s01 work]# helm ls --tls
E0703 13:32:38.232485   22034 portforward.go:400] an error occurred forwarding 42494 -> 44134: error forwarding port 44134 to pod c656f2021e1cdea7ac0710bf13d9e55d27a7713f4c4c77f80b92379be5d544bf, uid : unable to do port forwarding: socat not found.
E0703 13:32:39.236986   22034 portforward.go:400] an error occurred forwarding 42494 -> 44134: error forwarding port 44134 to pod c656f2021e1cdea7ac0710bf13d9e55d27a7713f4c4c77f80b92379be5d544bf, uid : unable to do port forwarding: socat not found.
E0703 13:32:40.688934   22034 portforward.go:400] an error occurred forwarding 42494 -> 44134: error forwarding port 44134 to pod c656f2021e1cdea7ac0710bf13d9e55d27a7713f4c4c77f80b92379be5d544bf, uid : unable to do port forwarding: socat not found.
E0703 13:33:13.681938   22034 portforward.go:340] error creating error stream for port 42494 -> 44134: Timeout occured
[root@cc-k8s01 work]# helm version
Client: &version.Version{SemVer:"v2.14.0-rc.2", GitCommit:"012cb0ac1a1b2f888144ef5a67b8dab6c2d45be6", GitTreeState:"clean"}
E0711 10:09:50.160064   10916 portforward.go:332] an error occurred forwarding 33491 -> 44134: error forwarding port 44134 to pod tiller-deploy-542252878-15h67_kube-system, uid : unable to do port forwarding: socat not found.
Error: cannot connect to Tiller

!! Fix: install socat on every node: yum install -y socat

[root@cc-k8s01 work]# helm ls --tls
Error: context deadline exceeded
[root@cc-k8s01 nginx-ingress]# helm reset
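To avoid typing --tls on every command, Helm v2 can also pick up TLS settings from environment variables. A sketch based on the variable names in the Helm v2 TLS documentation; verify against your helm version:

## Assumed from the Helm v2 docs ("Using SSL Between Helm and Tiller")
export HELM_TLS_ENABLE=true
helm ls   # behaves like 'helm ls --tls', using cert.pem/key.pem/ca.pem under $HELM_HOME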
## Re-apply the service account patch and initialize again
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
[root@cc-k8s01 work]# helm init --tiller-tls --tiller-tls-cert ./tiller-server.pem --tiller-tls-key ./tiller-server-key.pem --tiller-tls-verify --tls-ca-cert ca.pem
$HELM_HOME has been configured at /root/.helm.
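A quick way to confirm the deployment is now running under the tiller service account (an extra check, not in the original):

kubectl -n kube-system get deploy tiller-deploy \
  -o jsonpath='{.spec.template.spec.serviceAccountName}'
## Expected output: tiller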
Because helm's default charts repository, https://kubernetes-charts.storage.googleapis.com/, is unreachable, install charts offline by first cloning them from github.com:
[root@cc-k8s01 work]# git clone https://github.com/helm/charts.git
Cloning into 'charts'...
remote: Enumerating objects: 11, done.
remote: Counting objects: 100% (11/11), done.
remote: Compressing objects: 100% (7/7), done.
remote: Total 69700 (delta 4), reused 8 (delta 4), pack-reused 69689
Receiving objects: 100% (69700/69700), 19.88 MiB | 4.27 MiB/s, done.
Resolving deltas: 100% (49888/49888), done.
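Before installing, it helps to review the chart's tunable values. helm inspect values prints the chart's default values.yaml, and works locally against the clone without contacting tiller:

helm inspect values charts/stable/nginx-ingress | head -20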
[root@cc-k8s01 work]# cd charts/stable/nginx-ingress/
[root@cc-k8s01 nginx-ingress]# ls -l
total 48
-rw-r--r-- 1 root root 569 Jun 4 17:21 Chart.yaml
drwxr-xr-x 2 root root 4096 Jun 4 17:21 ci
-rw-r--r-- 1 root root 110 Jun 4 17:21 OWNERS
-rw-r--r-- 1 root root 17923 Jun 4 17:21 README.md
drwxr-xr-x 2 root root 4096 Jun 4 17:26 templates
-rw-r--r-- 1 root root 10045 Jun 4 17:21 values.yaml
The nginx-ingress controller image is pulled from quay.io, which is reachable. The defaultbackend image, however, is pulled from k8s.gcr.io, which is not. Pull it from Docker Hub first and retag it, otherwise the deployment will fail with ErrImagePull:
[root@cc-k8s02 ~]# docker pull googlecontainer/defaultbackend-amd64:1.5
1.5: Pulling from googlecontainer/defaultbackend-amd64
65f4220de95d: Pull complete
Digest: sha256:4dc5e07c8ca4e23bddb3153737d7b8c556e5fb2f29c4558b7cd6e6df99c512c7
Status: Downloaded newer image for googlecontainer/defaultbackend-amd64:1.5
[root@cc-k8s02 ~]# docker tag googlecontainer/defaultbackend-amd64:1.5 k8s.gcr.io/defaultbackend-amd64:1.5
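An alternative to retagging on every node is to point the chart directly at the mirror image. A sketch, assuming the defaultBackend.image.repository/tag value keys used by this version of the stable/nginx-ingress chart (check values.yaml in your copy):

helm install --tls --namespace kube-system --name nginx-ingress \
  --set defaultBackend.image.repository=googlecontainer/defaultbackend-amd64 \
  --set defaultBackend.image.tag=1.5 \
  ./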
The commonly used chart parameters are passed with --set flags, as shown in the install command below.

Install:
[root@cc-k8s01 nginx-ingress]# helm install --tls --namespace kube-system --name nginx-ingress \
    --set controller.kind=DaemonSet \
    --set rbac.create=true \
    --set controller.stats.enabled=true \
    --set controller.metrics.enabled=true \
    --set controller.hostNetwork=true \
    --set controller.dnsPolicy=ClusterFirstWithHostNet ./
NAME:   nginx-ingress
LAST DEPLOYED: Wed Jul  3 14:25:54 2019
NAMESPACE: kube-system
STATUS: DEPLOYED

RESOURCES:
==> v1/Pod(related)
NAME                                            READY  STATUS             RESTARTS  AGE
nginx-ingress-controller-2rlpx                  0/1    ContainerCreating  0         0s
nginx-ingress-controller-8lpbs                  0/1    ContainerCreating  0         0s
nginx-ingress-controller-zh4z9                  0/1    ContainerCreating  0         0s
nginx-ingress-default-backend-7474b6b4cd-d6d9d  0/1    ContainerCreating  0         0s

==> v1/Service
NAME                              TYPE          CLUSTER-IP      EXTERNAL-IP  PORT(S)                     AGE
nginx-ingress-controller          LoadBalancer  10.254.51.167   <pending>    80:31220/TCP,443:30851/TCP  0s
nginx-ingress-controller-metrics  ClusterIP     10.254.70.26    <none>       9913/TCP                    0s
nginx-ingress-controller-stats    ClusterIP     10.254.250.132  <none>       18080/TCP                   0s
nginx-ingress-default-backend     ClusterIP     10.254.142.218  <none>       80/TCP                      0s

==> v1/ServiceAccount
NAME           SECRETS  AGE
nginx-ingress  1        0s

==> v1beta1/ClusterRole
NAME           AGE
nginx-ingress  0s

==> v1beta1/ClusterRoleBinding
NAME           AGE
nginx-ingress  0s

==> v1beta1/DaemonSet
NAME                      DESIRED  CURRENT  READY  UP-TO-DATE  AVAILABLE  NODE SELECTOR  AGE
nginx-ingress-controller  3        3        0      3           0          <none>         0s

==> v1beta1/Deployment
NAME                           READY  UP-TO-DATE  AVAILABLE  AGE
nginx-ingress-default-backend  0/1    1           0          0s

==> v1beta1/Role
NAME           AGE
nginx-ingress  0s

==> v1beta1/RoleBinding
NAME           AGE
nginx-ingress  0s

NOTES:
The nginx-ingress controller has been installed.
It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl --namespace kube-system get services -o wide -w nginx-ingress-controller'

An example Ingress that makes use of the controller:

  apiVersion: extensions/v1beta1
  kind: Ingress
  metadata:
    annotations:
      kubernetes.io/ingress.class: nginx
    name: example
    namespace: foo
  spec:
    rules:
      - host: www.example.com
        http:
          paths:
            - backend:
                serviceName: exampleService
                servicePort: 80
              path: /
    # This section is only required if TLS is to be enabled for the Ingress
    tls:
      - hosts:
          - www.example.com
        secretName: example-tls

If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:

  apiVersion: v1
  kind: Secret
  metadata:
    name: example-tls
    namespace: foo
  data:
    tls.crt: <base64 encoded cert>
    tls.key: <base64 encoded key>
  type: kubernetes.io/tls
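As a concrete smoke test of the NOTES above, an Ingress can be created for an existing Service. The sketch below assumes a hypothetical Service named myapp listening on port 80 in the default namespace and a hypothetical hostname:

cat <<EOF | kubectl apply -f -
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: myapp                       # hypothetical name
  namespace: default
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
    - host: myapp.example.com       # hypothetical hostname
      http:
        paths:
          - path: /
            backend:
              serviceName: myapp    # hypothetical existing Service
              servicePort: 80
EOF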
The install above used hostNetwork, so nginx-ingress now listens on ports 80/443 on every node:
[root@cc-k8s02 ~]# netstat -nltp|grep -E "(:80|:443)"
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 13676/nginx: master
tcp 0 0 0.0.0.0:443 0.0.0.0:* LISTEN 13676/nginx: master
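Requests for a hostname with no matching Ingress rule should be answered by the default backend. A quick check from any node (the response shown is the standard defaultbackend 404 body, assumed here):

[root@cc-k8s02 ~]# curl -H "Host: no-such-host.example.com" http://localhost/
default backend - 404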
The controller.stats.enabled=true parameter enables the nginx_status page, so curl can be used to verify that nginx-ingress is reachable:
[root@cc-k8s02 ~]# curl localhost/nginx_status
Active connections: 1
server accepts handled requests
1395 1395 1392
Reading: 0 Writing: 1 Waiting: 0
Without --purge, a release named nginx-ingress would remain with status DELETED, and a later deployment reusing the name would fail; --purge removes it completely:
[root@cc-k8s01 ~]# helm delete --tls nginx-ingress --purge
release "nginx-ingress" deleted