flanneld -etcd-endpoints=http://192.168.x.x:2379 -iface=eth0 -etcd-prefix=/kubernetes/network

# the advertise URL here has to be a concrete IP
etcd --advertise-client-urls=http://192.168.x.x:2379 --listen-client-urls=http://0.0.0.0:2379 --debug

cd /kubernetes/network/config
etcdctl set /kubernetes/network/config < flannel-config.json

kube-apiserver \
  --service-cluster-ip-range=10.254.0.0/16 \
  --etcd-servers=http://127.0.0.1:2379 \
  --insecure-bind-address=0.0.0.0 \
  --admission-control=ServiceAccount \
  --service-account-key-file=/root/ssl/ca.key \
  --client-ca-file=/root/ssl/ca.crt \
  --tls-cert-file=/root/ssl/server.crt \
  --tls-private-key-file=/root/ssl/server.key \
  --allow-privileged=true \
  --storage-backend=etcd2 \
  --v=2

kube-controller-manager \
  --master=http://127.0.0.1:8080 \
  --service-account-private-key-file=/root/ssl/ca.key \
  --cluster-signing-cert-file=/root/ssl/ca.crt \
  --cluster-signing-key-file=/root/ssl/ca.key \
  --root-ca-file=/root/ssl/ca.crt \
  --v=2

kube-scheduler --master=http://127.0.0.1:8080 --v=2

kubelet --api-servers=http://192.168.x.x:8080 --allow-privileged=true --cluster-dns=10.254.0.2 --cluster-domain=cluster.local. --v=2

kube-proxy --master=http://192.168.x.x:8080 --v=2

ps -ef|egrep "kube-apiserver|kube-controller-manager|kube-scheduler"
ps -ef|egrep --color "kubelet|kube-proxy"

If the apiserver does not also have --allow-privileged=true (only the kubelet had it), starting a DaemonSet-type pod fails with the error quoted below.
Run it in the background (mind the port and the etcd IP):

docker run --name etcd-browser -p 0.0.0.0:8000:8000 --env ETCD_HOST=192.168.2.11 --env ETCD_PORT=2379 --env AUTH_PASS=doe -itd buddho/etcd-browser
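Before pointing the browser at it, a quick sanity check that etcd itself is reachable (assuming a recent etcdctl speaking the v2 API; adjust the IP to your etcd host):

curl http://192.168.2.11:2379/health
etcdctl --endpoints=http://192.168.2.11:2379 cluster-health
etcdctl --endpoints=http://192.168.2.11:2379 ls /kubernetes/network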
Reference: https://jimmysong.io/kubernetes-handbook/appendix/issues.html
The DaemonSet "skydive-agent" is invalid: spec.template.spec.containers[0].securityContext.privileged: Forbidden: disallowed by cluster policy

Fix: add --allow-privileged=true to the api-server as well. Since I was starting things in debug mode, at first I had only added the flag to the kubelet.
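For context, the part of the pod spec that trips this check looks roughly like the fragment below (illustrative only, not the actual skydive-agent manifest); any container asking for privileged mode needs --allow-privileged=true on both the kubelet and the apiserver:

spec:
  template:
    spec:
      containers:
      - name: agent              # illustrative name
        securityContext:
          privileged: true       # the field the cluster policy rejects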
kube-proxy error:
kube-proxy[2241]: E0502 15:55:13.889842 2241 conntrack.go:42] conntrack returned error: error looking for path of conntrack: exec: "conntrack": executable file not found in $PATH

Fix: yum install conntrack-tools
In /etc/kubernetes/kubelet, change --cluster-domain=cluster.local. to --cluster-domain=cluster.local, i.e. drop the trailing dot.
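A rough sketch of the relevant line in /etc/kubernetes/kubelet after the change (assuming the stock CentOS layout where extra flags go into KUBELET_ARGS; the other flags are the ones used elsewhere in these notes):

KUBELET_ARGS="--cluster-dns=10.254.0.2 --cluster-domain=cluster.local --allow-privileged=true"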
Fix: yum install socat -y
A machine reboot changed flannel's subnet, but docker was still using the old subnet, so containers on different nodes could not reach each other.
http://www.cnblogs.com/iiiiher/p/7889295.html
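One way to re-sync docker after the subnet changes is to read the new values from /run/flannel/subnet.env and put the matching --bip/--mtu back into docker's options before restarting it; a minimal sketch (assuming flanneld is already running and docker's flags live in its systemd unit, as elsewhere in these notes):

cat /run/flannel/subnet.env          # e.g. FLANNEL_SUBNET=10.2.20.1/24, FLANNEL_MTU=1450
source /run/flannel/subnet.env
echo "--bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} --ipmasq=true"   # paste this into the docker ExecStart line
systemctl daemon-reload
systemctl restart docker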
The kubelet going down meant the apiserver could not reach the kubelet's monitoring port 10250.
Reference:
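Quick checks when the apiserver cannot reach the kubelet on 10250 (generic troubleshooting commands, not taken from the referenced post; assumes the kubelet runs as a systemd unit):

systemctl status kubelet
ss -tlnp | grep 10250        # confirm the kubelet is actually listening
systemctl restart kubelet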
Add --runtime-config=batch/v2alpha1=true to the kube-apiserver startup flags to enable the CronJob objects used later.
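After restarting the apiserver with that flag, a throwaway CronJob confirms the API group is on (test-cron and the busybox image are just placeholders):

cat > test-cron.yaml << EOF
apiVersion: batch/v2alpha1
kind: CronJob
metadata:
  name: test-cron
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args: ["/bin/sh", "-c", "date; echo hello"]
          restartPolicy: OnFailure
EOF
kubectl create -f test-cron.yaml
kubectl get cronjob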
Reference:
Pods inside the k8s cluster putting heavy load on the apiserver's port 8080 (the externally exposed api address).
Reference: http://www.cnblogs.com/iiiiher/p/7880493.html#t9
At first I did not want to use certificates either inside or outside the cluster, to keep things simple. Later I found that many pods access the api and their yaml defaults go through the 443 api-svc route, and some pods need a secret to start, so I looked into in-cluster certificates. Currently pods inside the cluster mostly reach the api over 443, while access from outside the cluster always uses 8080.
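The 443 path the pods take is the kubernetes service in the default namespace; from inside a pod the call looks roughly like this (standard serviceaccount mount paths, assuming the ServiceAccount admission controller is enabled so the token secret gets mounted, and that cluster DNS resolves kubernetes.default.svc):

kubectl get svc kubernetes            # ClusterIP in 10.254.0.0/16, port 443
# inside a pod:
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
curl -k -H "Authorization: Bearer ${TOKEN}" https://kubernetes.default.svc/api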
Token issues and certificate issues
Note: the certificates must be added on both the controller-manager and the apiserver at the same time. The apiserver additionally needs the extra public/private key flags appended.
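For reference, the /root/ssl files used by the flags above can be generated with a plain self-signed CA; a minimal sketch (assumption: bare CN-only certs — a real apiserver serving cert usually also needs the cluster service IP, e.g. 10.254.0.1, and the hostnames as subjectAltNames):

mkdir -p /root/ssl && cd /root/ssl
openssl genrsa -out ca.key 2048
openssl req -x509 -new -nodes -key ca.key -subj "/CN=kubernetes-ca" -days 3650 -out ca.crt
openssl genrsa -out server.key 2048
openssl req -new -key server.key -subj "/CN=kube-apiserver" -out server.csr
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 3650 -out server.crt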
Check whether the ServiceAccount specified in the yml actually exists.
spec:
  serviceAccountName: flannel
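To check and, if missing, create it (assuming the flannel DaemonSet is deployed into kube-system; adjust the namespace to whatever the yml uses):

kubectl get serviceaccount flannel --namespace=kube-system
kubectl create serviceaccount flannel --namespace=kube-system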
# /etc/hosts
192.168.14.134 m1.ma.com m1
192.168.14.132 n1.ma.com n1
192.168.14.133 n2.ma.com n2

########################################################################
systemctl stop firewalld && systemctl disable firewalld
sysctl -w net.ipv6.conf.all.disable_ipv6=1
sysctl -w net.ipv6.conf.default.disable_ipv6=1
sysctl -w net.ipv4.ip_forward=1
echo 'iptables -P FORWARD ACCEPT' >> /etc/rc.local
iptables -P FORWARD ACCEPT
sysctl -p

########################################################################
mkdir -p /kubernetes/network/config/
cat > /kubernetes/network/config/flannel-config.json << EOF
{
  "Network": "10.2.0.0/16",
  "SubnetLen": 24,
  "Backend": {
    "Type": "host-gw"
  }
}
EOF

etcd --advertise-client-urls=http://0.0.0.0:2379 --listen-client-urls=http://0.0.0.0:2379 --debug

cd /kubernetes/network/config
etcdctl set /kubernetes/network/config < flannel-config.json

flanneld -etcd-endpoints=http://192.168.14.134:2379 -iface=eth0 -etcd-prefix=/kubernetes/network

cat /run/flannel/subnet.env
vim /usr/lib/systemd/system/docker.service
# append the subnet.env values to the docker ExecStart line, e.g.:
#   --bip=10.2.20.1/24 --mtu=1500 --ipmasq=true
#   --bip=10.2.98.1/24 --mtu=1450
systemctl daemon-reload
systemctl restart docker
ps -ef|grep docker

########################################################################
kube-apiserver \
  --service-cluster-ip-range=10.254.0.0/16 \
  --etcd-servers=http://127.0.0.1:2379 \
  --insecure-bind-address=0.0.0.0 \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --v=2

kube-controller-manager \
  --master=http://127.0.0.1:8080 \
  --v=2

kube-scheduler \
  --master=http://127.0.0.1:8080 \
  --v=2

pkill kube-apiserver
pkill kube-controller-manager
pkill kube-scheduler
ps -ef|egrep "kube-apiserver|kube-controller-manager|kube-scheduler"

########################################################################
kubelet \
  --api-servers=http://192.168.14.134:8080 \
  --allow-privileged=true \
  --v=2

kubelet \
  --api-servers=http://192.168.14.134:8080 \
  --allow-privileged=true \
  --cluster-dns=10.254.0.2 \
  --cluster-domain=cluster.local. \
  --v=2

kube-proxy \
  --master=http://192.168.14.134:8080 \
  --v=2

pkill kubelet
pkill kube-proxy
ps -ef|egrep "kubelet|kube-proxy"

########################################################################
yum install bash-com* -y
source <(kubectl completion bash)
alias kk='kubectl get pod --all-namespaces -o wide --show-labels'
alias ks='kubectl get svc --all-namespaces -o wide --show-labels'
alias wk='watch kubectl get pod --all-namespaces -o wide --show-labels'
alias klog-dns='kubectl logs -f --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c kubedns'
alias klog-dnsmasq='kubectl logs -f --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c dnsmasq'
alias klog-sidecar='kubectl logs -f --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c sidecar'
yum install -y conntrack-tools socat  # the kubelet log showed these are needed
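Once everything is up, a quick smoke test of the control plane and cross-node networking (test-nginx and the nginx image are just placeholders for the check):

kubectl get componentstatuses
kubectl get nodes
kubectl run test-nginx --image=nginx --replicas=2
kubectl get pod -o wide        # note the pod IPs, ideally landing on different nodes
# from one node, ping/curl a pod IP that lives on the other node to verify the flannel host-gw routes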
net.ipv4.tcp_mem = 16777216 16777216 16777216
net.ipv4.tcp_rmem = 1024 4096 16777216
net.ipv4.tcp_wmem = 1024 4096 16777216
net.core.rmem_default=262144
net.core.wmem_default=262144
net.core.rmem_max=16777216
net.core.wmem_max=16777216
net.core.optmem_max=16777216
fs.file-max=2097152
fs.nr_open=2097152
net.nf_conntrack_max=1000000
net.netfilter.nf_conntrack_max=1000000
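To make these settings persistent, drop them into a sysctl config file and reload (the filename /etc/sysctl.d/k8s.conf is just a conventional choice):

vim /etc/sysctl.d/k8s.conf     # paste the values above
sysctl --system                # or: sysctl -p /etc/sysctl.d/k8s.conf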
journalctl -u kube-apiserver -f