下載最新版二進制文件:https://github.com/helm/helm/releases
本文下載 helm-v2.11.0 版本 (linux-amd64)
# Download the Helm v2.11.0 client/server release tarball for linux-amd64.
wget https://storage.googleapis.com/kubernetes-helm/helm-v2.11.0-linux-amd64.tar.gz
# Unpack the release tarball into ./linux-amd64/.
tar zxf helm-v2.11.0-linux-amd64.tar.gz
# Install both the helm client and the tiller server binary onto PATH.
cp linux-amd64/helm linux-amd64/tiller /usr/local/bin/
# socat is required for the port-forward tunnel helm uses to reach tiller.
yum -y install socat
# Pre-pull a mirrored tiller image (the default gcr.io image may be unreachable).
docker pull xiaoqshuo/tiller:v2.11.0
[root@k8s-master01 ~]# helm init --tiller-image xiaoqshuo/tiller:v2.11.0 Creating /root/.helm Creating /root/.helm/repository Creating /root/.helm/repository/cache Creating /root/.helm/repository/local Creating /root/.helm/plugins Creating /root/.helm/starters Creating /root/.helm/cache/archive Creating /root/.helm/repository/repositories.yaml Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com Adding local repo with URL: http://127.0.0.1:8879/charts $HELM_HOME has been configured at /root/.helm. Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster. Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy. To prevent this, run `helm init` with the --tiller-tls-verify flag. For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation Happy Helming!
[root@k8s-master01 opt]# helm version Client: &version.Version{SemVer:"v2.11.0", GitCommit:"2e55dbe1fdb5fdb96b75ff144a339489417b146b", GitTreeState:"clean"} Server: &version.Version{SemVer:"v2.11.0", GitCommit:"2e55dbe1fdb5fdb96b75ff144a339489417b146b", GitTreeState:"clean"}
[root@k8s-master01 opt]# kubectl get pod -n kube-system | grep tiller tiller-deploy-84f64bdb87-w69rw 1/1 Running 0 88s
[root@k8s-master01 opt]# kubectl get pod,svc -n kube-system | grep tiller pod/tiller-deploy-84f64bdb87-w69rw 1/1 Running 0 94s service/tiller-deploy ClusterIP 10.108.21.50 <none> 44134/TCP 95s
# helm list Error: configmaps is forbidden: User "system:serviceaccount:kube-system:default" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
# Fix the "configmaps is forbidden" error: Tiller needs its own ServiceAccount
# with cluster-admin rights instead of the default kube-system account.
kubectl create serviceaccount --namespace kube-system tiller
# Bind the new ServiceAccount to the cluster-admin ClusterRole.
kubectl create clusterrolebinding tiller-cluster-rule \
  --clusterrole=cluster-admin \
  --serviceaccount=kube-system:tiller
# Point the existing tiller-deploy Deployment at the new ServiceAccount.
kubectl patch deploy --namespace kube-system tiller-deploy \
  -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
[root@k8s-master01 ~]# helm repo list NAME URL stable https://kubernetes-charts.storage.googleapis.com local http://127.0.0.1:8879/charts
[root@k8s-master01 ~]# helm repo remove stable "stable" has been removed from your repositories [root@k8s-master01 ~]# helm repo remove local "local" has been removed from your repositories
[root@k8s-master01 ~]# helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts "aliyun" has been added to your repositories [root@k8s-master01 ~]# helm repo list NAME URL aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
# Fetch the official Harbor Helm chart sources.
git clone https://github.com/goharbor/harbor-helm.git
[root@k8s-master01 harbor-helm]# cat requirements.yaml dependencies: - name: redis version: 1.1.15 repository: https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts # repository: https://kubernetes-charts.storage.googleapis.com
[root@k8s-master01 harbor-helm]# helm dependency update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "aliyun" chart repository Update Complete. ⎈Happy Helming!⎈ Saving 1 charts Downloading redis from repo https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts Deleting outdated charts
# Pre-pull every image the Harbor v1.6.0 chart (and its Redis dependency)
# needs, so the deployment does not stall on slow image downloads.
# Note: the original list pulled goharbor/chartmuseum-photon twice; the
# duplicate has been removed.
docker pull goharbor/chartmuseum-photon:v0.7.1-v1.6.0
docker pull goharbor/harbor-adminserver:v1.6.0
docker pull goharbor/harbor-jobservice:v1.6.0
docker pull goharbor/harbor-ui:v1.6.0
docker pull goharbor/harbor-db:v1.6.0
docker pull goharbor/registry-photon:v2.6.2-v1.6.0
docker pull goharbor/clair-photon:v2.0.5-v1.6.0
docker pull goharbor/notary-server-photon:v0.5.1-v1.6.0
docker pull goharbor/notary-signer-photon:v0.5.1-v1.6.0
docker pull bitnami/redis:4.0.8-r2
# Enable dynamic provisioning: uncomment every 'storageClass: "-"' entry in
# values.yaml and point it at the "gluster-heketi" StorageClass.
sed -i 's@# storageClass: "-"@storageClass: "gluster-heketi"@g' values.yaml
volumes: data: storageClass: "gluster-heketi" accessMode: ReadWriteOnce size: 1Gi
redis: # if external Redis is used, set "external.enabled" to "true" # and fill the connection informations in "external" section. # or the internal Redis will be used usePassword: false password: "changeit" cluster: enabled: false master: port: "6379" persistence: enabled: *persistence_enabled storageClass: "gluster-heketi" accessMode: ReadWriteOnce size: 1Gi
# Rename the Redis chart's Service to "<fullname>-master" so it matches the
# Redis host name the Harbor components are configured to connect to.
sed -i 's#name: {{ template "redis.fullname" . }}#name: {{ template "redis.fullname" . }}-master#g' redis/templates/svc.yaml
[root@k8s-master01 charts]# more !$ more redis/templates/svc.yaml apiVersion: v1 kind: Service metadata: name: {{ template "redis.fullname" . }}-master labels: app: {{ template "redis.fullname" . }} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" annotations: {{- if .Values.service.annotations }} {{ toYaml .Values.service.annotations | indent 4 }} {{- end }} {{- if .Values.metrics.enabled }} {{ toYaml .Values.metrics.annotations | indent 4 }} {{- end }} spec: type: {{ .Values.serviceType }} {{ if eq .Values.serviceType "LoadBalancer" -}} {{ if .Values.service.loadBalancerIP -}} loadBalancerIP: {{ .Values.service.loadBalancerIP }} {{ end -}} {{- end -}} ports: - name: redis port: 6379 targetPort: redis {{- if .Values.metrics.enabled }} - name: metrics port: 9121 targetPort: metrics {{- end }} selector: app: {{ template "redis.fullname" . }}
[root@k8s-master01 charts]# tar zcf redis-1.1.15.tgz redis/
# Install the chart in the current directory as release "harbor-v1" into the
# "harbor" namespace, waiting up to 1500s for all resources to become ready.
helm install --name harbor-v1 . --wait --timeout 1500 --debug --namespace harbor
[root@k8s-master01 harbor-helm]# helm install --name harbor-v1 . --wait --timeout 1500 --debug --namespace harbor [debug] Created tunnel using local port: '42156' [debug] SERVER: "127.0.0.1:42156" [debug] Original chart version: "" [debug] CHART PATH: /opt/k8s-cluster/harbor-helm Error: error unpacking redis-1.1.15.tgz.bak in harbor: chart metadata (Chart.yaml) missing
# Grant Tiller a dedicated ServiceAccount with cluster-admin permissions,
# then switch the tiller-deploy Deployment over to it.
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule \
  --clusterrole=cluster-admin \
  --serviceaccount=kube-system:tiller
kubectl patch deploy --namespace kube-system tiller-deploy \
  -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
[root@k8s-master01 harbor-helm]# kubectl create serviceaccount --namespace kube-system tiller serviceaccount/tiller created [root@k8s-master01 harbor-helm]# kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller clusterrolebinding.rbac.authorization.k8s.io/tiller-cluster-rule created [root@k8s-master01 harbor-helm]# kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' deployment.extensions/tiller-deploy patched
[root@k8s-master01 harbor-helm]# helm install --name harbor-v1 . --wait --timeout 1500 --debug --namespace harbor [debug] Created tunnel using local port: '45170' [debug] SERVER: "127.0.0.1:45170" [debug] Original chart version: "" [debug] CHART PATH: /opt/k8s-cluster/harbor-helm ... 中間爲配置文件 ... LAST DEPLOYED: Mon Dec 17 15:55:15 2018 NAMESPACE: harbor STATUS: DEPLOYED RESOURCES: ==> v1beta1/Ingress NAME AGE harbor-v1-harbor-ingress 1m ==> v1/Pod(related) NAME READY STATUS RESTARTS AGE harbor-v1-redis-b46754c6-bqqpg 1/1 Running 0 1m harbor-v1-harbor-adminserver-55d6846ccd-hcsw2 1/1 Running 0 1m harbor-v1-harbor-chartmuseum-86766b666f-84h5z 1/1 Running 0 1m harbor-v1-harbor-clair-558485cdff-nv8pl 1/1 Running 0 1m harbor-v1-harbor-jobservice-667fd5c856-4kkgl 1/1 Running 0 1m harbor-v1-harbor-notary-server-74f7c7c78d-qpbxd 1/1 Running 0 1m harbor-v1-harbor-notary-signer-58d56f6f85-b5p46 1/1 Running 0 1m harbor-v1-harbor-registry-5dfb58f55-7k9kc 1/1 Running 0 1m harbor-v1-harbor-ui-6644789c84-tmmdp 1/1 Running 1 1m harbor-v1-harbor-database-0 1/1 Running 0 1m ==> v1/Secret NAME AGE harbor-v1-harbor-adminserver 1m harbor-v1-harbor-chartmuseum 1m harbor-v1-harbor-database 1m harbor-v1-harbor-ingress 1m harbor-v1-harbor-jobservice 1m harbor-v1-harbor-registry 1m harbor-v1-harbor-ui 1m ==> v1/ConfigMap harbor-v1-harbor-adminserver 1m harbor-v1-harbor-chartmuseum 1m harbor-v1-harbor-clair 1m harbor-v1-harbor-jobservice 1m harbor-v1-harbor-notary 1m harbor-v1-harbor-registry 1m harbor-v1-harbor-ui 1m ==> v1/PersistentVolumeClaim harbor-v1-redis 1m harbor-v1-harbor-chartmuseum 1m harbor-v1-harbor-registry 1m ==> v1/Service harbor-v1-redis-master 1m harbor-v1-harbor-adminserver 1m harbor-v1-harbor-chartmuseum 1m harbor-v1-harbor-clair 1m harbor-v1-harbor-database 1m harbor-v1-harbor-jobservice 1m harbor-v1-harbor-notary-server 1m harbor-v1-harbor-notary-signer 1m harbor-v1-harbor-registry 1m harbor-v1-harbor-ui 1m ==> v1beta1/Deployment harbor-v1-redis 1m 
harbor-v1-harbor-adminserver 1m harbor-v1-harbor-chartmuseum 1m harbor-v1-harbor-clair 1m harbor-v1-harbor-jobservice 1m harbor-v1-harbor-notary-server 1m harbor-v1-harbor-notary-signer 1m harbor-v1-harbor-registry 1m harbor-v1-harbor-ui 1m ==> v1beta2/StatefulSet harbor-v1-harbor-database 1m NOTES: Please wait for several minutes for Harbor deployment to complete. Then you should be able to visit the UI portal at https://core.harbor.domain. For more details, please visit https://github.com/goharbor/harbor.
[root@k8s-master01 harbor-helm]# kubectl get pod -n harbor | grep harbor harbor-v1-harbor-adminserver-55d6846ccd-hcsw2 1/1 Running 6 8m36s harbor-v1-harbor-chartmuseum-86766b666f-84h5z 1/1 Running 0 8m36s harbor-v1-harbor-clair-558485cdff-nv8pl 1/1 Running 5 8m36s harbor-v1-harbor-database-0 1/1 Running 0 8m34s harbor-v1-harbor-jobservice-667fd5c856-4kkgl 1/1 Running 3 8m36s harbor-v1-harbor-notary-server-74f7c7c78d-qpbxd 1/1 Running 4 8m36s harbor-v1-harbor-notary-signer-58d56f6f85-b5p46 1/1 Running 4 8m35s harbor-v1-harbor-registry-5dfb58f55-7k9kc 1/1 Running 0 8m35s harbor-v1-harbor-ui-6644789c84-tmmdp 1/1 Running 5 8m35s harbor-v1-redis-b46754c6-bqqpg 1/1 Running 0 8m36s
[root@k8s-master01 harbor-helm]# kubectl get svc -n harbor NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE glusterfs-dynamic-database-data-harbor-v1-harbor-database-0 ClusterIP 10.96.254.80 <none> 1/TCP 119s glusterfs-dynamic-harbor-v1-harbor-chartmuseum ClusterIP 10.107.60.205 <none> 1/TCP 2m6s glusterfs-dynamic-harbor-v1-harbor-registry ClusterIP 10.106.114.23 <none> 1/TCP 2m36s glusterfs-dynamic-harbor-v1-redis ClusterIP 10.97.112.255 <none> 1/TCP 2m29s harbor-v1-harbor-adminserver ClusterIP 10.109.165.178 <none> 80/TCP 3m9s harbor-v1-harbor-chartmuseum ClusterIP 10.111.121.23 <none> 80/TCP 3m9s harbor-v1-harbor-clair ClusterIP 10.108.133.202 <none> 6060/TCP 3m8s harbor-v1-harbor-database ClusterIP 10.104.27.211 <none> 5432/TCP 3m8s harbor-v1-harbor-jobservice ClusterIP 10.102.60.45 <none> 80/TCP 3m7s harbor-v1-harbor-notary-server ClusterIP 10.107.43.156 <none> 4443/TCP 3m7s harbor-v1-harbor-notary-signer ClusterIP 10.98.180.61 <none> 7899/TCP 3m7s harbor-v1-harbor-registry ClusterIP 10.104.125.52 <none> 5000/TCP 3m6s harbor-v1-harbor-ui ClusterIP 10.101.63.66 <none> 80/TCP 3m6s harbor-v1-redis-master ClusterIP 10.106.63.183 <none> 6379/TCP 3m9s
[root@k8s-master01 harbor-helm]# kubectl get pv,pvc -n harbor | grep harbor persistentvolume/pvc-18da32d1-01d1-11e9-b859-000c2927a0d0 8Gi RWO Delete Bound harbor/harbor-v1-redisgluster-heketi 3m46s persistentvolume/pvc-18e270d6-01d1-11e9-b859-000c2927a0d0 5Gi RWO Delete Bound harbor/harbor-v1-harbor-chartmuseumgluster-heketi 3m23s persistentvolume/pvc-18e6b03e-01d1-11e9-b859-000c2927a0d0 5Gi RWO Delete Bound harbor/harbor-v1-harbor-registrygluster-heketi 3m53s persistentvolume/pvc-1d02d407-01d1-11e9-b859-000c2927a0d0 1Gi RWO Delete Bound harbor/database-data-harbor-v1-harbor-database-0gluster-heketi 3m16s persistentvolumeclaim/database-data-harbor-v1-harbor-database-0 Bound pvc-1d02d407-01d1-11e9-b859-000c2927a0d0 1Gi RWO gluster-heketi 4m20s persistentvolumeclaim/harbor-v1-harbor-chartmuseum Bound pvc-18e270d6-01d1-11e9-b859-000c2927a0d0 5Gi RWO gluster-heketi 4m27s persistentvolumeclaim/harbor-v1-harbor-registry Bound pvc-18e6b03e-01d1-11e9-b859-000c2927a0d0 5Gi RWO gluster-heketi 4m27s persistentvolumeclaim/harbor-v1-redis Bound pvc-18da32d1-01d1-11e9-b859-000c2927a0d0 8Gi RWO gluster-heketi 4m27s
[root@k8s-master01 harbor-helm]# kubectl get ingress -n harbor NAME HOSTS ADDRESS PORTS AGE harbor-v1-harbor-ingress core.harbor.domain,notary.harbor.domain 80, 443 3m27s
[root@k8s-master01 harbor-helm]# kubectl get secrets/harbor-v1-harbor-ingress -n harbor -o jsonpath="{.data.ca\.crt}" | base64 --decode -----BEGIN CERTIFICATE----- MIIC9TCCAd2gAwIBAgIRANwxR0iCGk5tbLIuMaoDBPgwDQYJKoZIhvcNAQELBQAw FDESMBAGA1UEAxMJaGFyYm9yLWNhMB4XDTE4MTIxNzA3NTUxNloXDTI4MTIxNDA3 NTUxNlowFDESMBAGA1UEAxMJaGFyYm9yLWNhMIIBIjANBgkqhkiG9w0BAQEFAAOC AQ8AMIIBCgKCAQEAu11h4ofcz31Dhv1Ll4ljbD9MbSSYzpXE5SdPDYxK2/GYCbbP wTQ5Lm0wyd45yUqIxoCDl8b+v4FqAjXLsm6HbP6SKVTVStFTJIn2gog2ypmObXqK pp8dtSlgYlSoldZC4i73Oh8P72B3y/dUysyxrAYrsaLRr9YI0EYO0XQGBX9veENm d4cJtcNuXU4WCoNZlvBT59Z2Vjbk2rXnb441Zk9K6aD8h2e+ktFAeJb9JFLqvfCz u0puOIpYcLVLiTrMzarn9TFpJkyKcKp1bE6mbTCTtZNV/kFJiJNuPOG1N7Mb+ZzD 8XiKUYB8/mWTY5If9cGKMh7xnzALEdPdalZJJQIDAQABo0IwQDAOBgNVHQ8BAf8E BAMCAqQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQF MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAKC9HZJDAS4Cx6KJcgsALOUzOhktP39B cw9/PSi8X9kuTsPYxP1Rdogei38W2TRvgPrbPgKwCk48OnLR0myGnUaytjlbHXKz HrZGtRDzoyjw7XCDwXesqSMpJ+yz8j3DSuyLwApkQKIle2Z+nz3eINkxvkdA7ejY 1kN21CptEKxBXN7ZT40zPkBnJylADaeMFOV+AcgAKkbzfczBNHMOok349a+OiapO FjZbwgcx4rNxj0+v4Pzvb7qyNpfp7kEXpsQu1rjwLWZwjUvT5bdYhKoNKaEnwTGL 9B6dJBSNJ+5oS/4WoMt7pzuwKxoVpSJmNo2wSkG+R5sB8stfefZxKyg= -----END CERTIFICATE-----
[root@k8s-master01 harbor-helm]# mkdir -p /etc/docker/certs.d/core.harbor.domain/ [root@k8s-master01 harbor-helm]# cat <<EOF > /etc/docker/certs.d/core.harbor.domain/ca.crt -----BEGIN CERTIFICATE----- MIIC9TCCAd2gAwIBAgIRANwxR0iCGk5tbLIuMaoDBPgwDQYJKoZIhvcNAQELBQAw FDESMBAGA1UEAxMJaGFyYm9yLWNhMB4XDTE4MTIxNzA3NTUxNloXDTI4MTIxNDA3 NTUxNlowFDESMBAGA1UEAxMJaGFyYm9yLWNhMIIBIjANBgkqhkiG9w0BAQEFAAOC AQ8AMIIBCgKCAQEAu11h4ofcz31Dhv1Ll4ljbD9MbSSYzpXE5SdPDYxK2/GYCbbP wTQ5Lm0wyd45yUqIxoCDl8b+v4FqAjXLsm6HbP6SKVTVStFTJIn2gog2ypmObXqK pp8dtSlgYlSoldZC4i73Oh8P72B3y/dUysyxrAYrsaLRr9YI0EYO0XQGBX9veENm d4cJtcNuXU4WCoNZlvBT59Z2Vjbk2rXnb441Zk9K6aD8h2e+ktFAeJb9JFLqvfCz u0puOIpYcLVLiTrMzarn9TFpJkyKcKp1bE6mbTCTtZNV/kFJiJNuPOG1N7Mb+ZzD 8XiKUYB8/mWTY5If9cGKMh7xnzALEdPdalZJJQIDAQABo0IwQDAOBgNVHQ8BAf8E BAMCAqQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQF MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAKC9HZJDAS4Cx6KJcgsALOUzOhktP39B cw9/PSi8X9kuTsPYxP1Rdogei38W2TRvgPrbPgKwCk48OnLR0myGnUaytjlbHXKz HrZGtRDzoyjw7XCDwXesqSMpJ+yz8j3DSuyLwApkQKIle2Z+nz3eINkxvkdA7ejY 1kN21CptEKxBXN7ZT40zPkBnJylADaeMFOV+AcgAKkbzfczBNHMOok349a+OiapO FjZbwgcx4rNxj0+v4Pzvb7qyNpfp7kEXpsQu1rjwLWZwjUvT5bdYhKoNKaEnwTGL 9B6dJBSNJ+5oS/4WoMt7pzuwKxoVpSJmNo2wSkG+R5sB8stfefZxKyg= -----END CERTIFICATE----- EOF
[root@k8s-master01 harbor-helm]# systemctl restart docker
[root@k8s-master01 harbor-helm]# docker login core.harbor.domain Username: admin Password: Login Succeeded
# Trust the Harbor CA system-wide so tools besides docker (curl, helm, etc.)
# accept the registry's certificate.
# The extracted CA bundle ships read-only; make it writable first.
chmod 644 /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
# APPEND the Harbor CA to the bundle. /etc/pki/tls/certs/ca-bundle.crt is a
# symlink to tls-ca-bundle.pem: overwriting it with a single cert (as a plain
# `cp` would) wipes out trust for every public CA on the host.
# NOTE: the cleaner RHEL/CentOS approach is to copy the cert into
# /etc/pki/ca-trust/source/anchors/ and run `update-ca-trust extract`.
cat /etc/docker/certs.d/core.harbor.domain/ca.crt >> /etc/pki/tls/certs/ca-bundle.crt
# Restore the bundle's read-only permissions.
chmod 444 /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
[root@k8s-master01 harbor-helm]# docker images | grep busybox busybox 1.27 6ad733544a63 13 months ago 1.13 MB busybox 1.25.0 2b8fd9751c4c 2 years ago 1.09 MB
[root@k8s-master01 harbor-helm]# docker tag busybox:1.27 core.harbor.domain/develop/busybox:1.27
[root@k8s-master01 harbor-helm]# docker push core.harbor.domain/develop/busybox:1.27 The push refers to a repository [core.harbor.domain/develop/busybox] 0271b8eebde3: Pushed 1.27: digest: sha256:179cf024c8a22f1621ea012bfc84b0df7e393cb80bf3638ac80e30d23e69147f size: 527