ClusterIP的方式只能在集羣內部訪問
NodePort方式的話,測試環境使用還行,當有幾十上百的服務在集羣中運行時,NodePort的端口管理是災難。
LoadBalancer方式受限於雲平臺,且一般在雲平臺部署ELB還須要額外的費用。
所幸k8s還提供了一種集羣維度暴露服務的方式,也就是ingress。ingress能夠簡單理解爲service的service,它經過獨立的ingress對象來制定請求轉發的規則,把請求路由到一個或多個service中。這樣就把服務與請求規則解耦了,能夠從業務維度統一考慮業務的暴露,而不用爲每一個service單獨考慮,下面是一個簡單的ingress應用圖,實現了簡單的請求轉發
nginx
ingress對象:
指的是k8s中的一個api對象,通常用yaml配置。做用是定義請求如何轉發到service的規則,能夠理解爲配置模板。
ingress-controller:
具體實現反向代理及負載均衡的程序,對ingress定義的規則進行解析,根據配置的規則來實現請求轉發。
簡單來講,ingress-controller纔是負責具體轉發的組件,經過各類方式將它暴露在集羣入口,外部對集羣的請求流量會先到ingress-controller,而ingress對象是用來告訴ingress-controller該如何轉發請求,好比哪些域名哪些path要轉發到哪些服務等等。
ingress-controller並非k8s自帶的組件,實際上ingress-controller只是一個統稱,用戶能夠選擇不一樣的ingress-controller實現,目前,由k8s維護的ingress-controller只有google雲的GCE與ingress-nginx兩個,其餘還有不少第三方維護的ingress-controller,具體能夠參考 官方文檔。可是無論哪種ingress-controller,實現的機制都大同小異,只是在具體配置上有差別。通常來講,ingress-controller的形式都是一個pod,裏面跑着daemon程序和反向代理程序。daemon負責不斷監控集羣的變化,根據ingress對象生成配置並應用新配置到反向代理,好比nginx-ingress就是動態生成nginx配置, 動態更新upstream,並在須要的時候reload程序應用新配置。爲了方便,後面的例子都以k8s官方維護的nginx-ingress爲例。
2 咱們的ingress是支持https的,因此須要爲你的域名配置對應的證書,咱們在配置文件裏添加
3 自動爲ingress-controller裏的配置文件添加nginx配置項,而後自動reload它,讓它生效
當有新的ingress服務註冊以後,配置文件會發生變化
4 你的服務對應的nginx是在本身服務的yaml裏進行配置的,通常來講,微服務的網關層都應該創建一個ingress-nginx來對外提供服務!
# Build the reverse proxy: an Ingress object that routes all traffic for
# www.abc.com to the `hello-world` Service on port 9001 in the `saas`
# namespace, terminating TLS with the certificate from the `saas-tls` Secret.
#
# NOTE(review): the original listing had its YAML indentation stripped by the
# article scraper; the canonical k8s manifest structure is restored here.
kind: Ingress
# NOTE(review): extensions/v1beta1 is deprecated (removed in Kubernetes 1.22).
# On newer clusters migrate to networking.k8s.io/v1, which replaces
# serviceName/servicePort with backend.service.name / backend.service.port.
apiVersion: extensions/v1beta1
metadata:
  name: hello-world-ingress
  namespace: saas
  annotations:
    # Have the nginx ingress-controller (not GCE or another controller)
    # handle this Ingress.
    kubernetes.io/ingress.class: "nginx"
    # Allow regular expressions in `path` values of this Ingress.
    nginx.ingress.kubernetes.io/use-regex: "true"
spec:
  tls:
    - hosts:
        - www.abc.com
      # TLS certificate/key pair stored as a Secret in the same namespace.
      secretName: saas-tls
  rules:
    - host: www.abc.com
      http:
        paths:
          # No `path` given: every request for this host goes to the backend.
          - backend:
              serviceName: hello-world
              servicePort: 9001
## start server www.abc.com
# nginx `server` block generated by ingress-nginx from the
# hello-world-ingress Ingress object shown above — one server block per host
# rule. Generated output: do not edit by hand; the controller rewrites it on
# every Ingress change. (Indentation was flattened by the article scraper.)
server {
server_name www.abc.com ;
# Listen on both plain HTTP and HTTPS (IPv4 and IPv6); HTTP/2 on 443.
listen 80 ;
listen [::]:80 ;
listen 443 ssl http2 ;
listen [::]:443 ssl http2 ;
set $proxy_upstream_name "-";
# PEM sha: c24ba9e405ed77662c0fd7546a908ef45ca76066
# The statically configured certificate is the controller's self-signed
# placeholder; the real per-host certificate is served dynamically by the
# ssl_certificate_by_lua_block below.
ssl_certificate /etc/ingress-controller/ssl/default-fake-certificate.pem;
ssl_certificate_key /etc/ingress-controller/ssl/default-fake-certificate.pem;
ssl_certificate_by_lua_block {
certificate.call()
}
# Case-insensitive regex location matching every URI (use-regex: "true").
location ~* "^/" {
# Metadata about the Ingress/Service this location was generated from;
# consumed by the controller's Lua code (balancing, metrics, logs).
set $namespace "saas";
set $ingress_name "hello-world-ingress";
set $service_name "hello-world";
set $service_port "{0 9001 }";
set $location_path "/";
# Lua rewrite phase: enforce HTTPS redirect and pick the upstream endpoint.
rewrite_by_lua_block {
lua_ingress.rewrite({
force_ssl_redirect = true,
use_port_in_redirects = false,
})
balancer.rewrite()
plugins.run()
}
header_filter_by_lua_block {
plugins.run()
}
body_filter_by_lua_block {
}
# Lua log phase: feed load-balancer statistics and monitoring.
log_by_lua_block {
balancer.log()
monitor.call()
plugins.run()
}
# HSTS header on HTTPS responses (max-age ≈ 6 months).
if ($scheme = https) {
more_set_headers "Strict-Transport-Security: max-age=15724800; includeSubDomains";
}
port_in_redirect off;
set $balancer_ewma_score -1;
# Logical upstream name "<namespace>-<service>-<port>" resolved by the Lua
# balancer rather than a static nginx upstream{} block.
set $proxy_upstream_name "saas-hello-world-9001";
set $proxy_host $proxy_upstream_name;
set $pass_access_scheme $scheme;
set $pass_server_port $server_port;
set $best_http_host $http_host;
set $pass_port $pass_server_port;
set $proxy_alternative_upstream_name "";
client_max_body_size 1m;
# Forwarding headers so the backend sees the original client/host/scheme.
proxy_set_header Host $best_http_host;
# Pass the extracted client certificate to the backend
# Allow websocket connections
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header X-Request-ID $req_id;
proxy_set_header X-Real-IP $the_real_ip;
proxy_set_header X-Forwarded-For $the_real_ip;
proxy_set_header X-Forwarded-Host $best_http_host;
proxy_set_header X-Forwarded-Port $pass_port;
proxy_set_header X-Forwarded-Proto $pass_access_scheme;
proxy_set_header X-Original-URI $request_uri;
proxy_set_header X-Scheme $pass_access_scheme;
# Pass the original X-Forwarded-For
proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
# mitigate HTTPoxy Vulnerability
# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
proxy_set_header Proxy "";
# Custom headers to proxied server
# Timeouts and buffering for the proxied connection; response buffering is
# off so bodies stream straight through, request buffering stays on.
proxy_connect_timeout 5s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 4k;
proxy_request_buffering on;
# HTTP/1.1 to the backend — required for the websocket Upgrade above.
proxy_http_version 1.1;
proxy_cookie_domain off;
proxy_cookie_path off;
# In case of errors try the next upstream server before returning an error
proxy_next_upstream error timeout;
proxy_next_upstream_timeout 0;
proxy_next_upstream_tries 3;
# `upstream_balancer` endpoints are chosen at runtime by balancer.rewrite()
# in the Lua rewrite phase above.
proxy_pass http://upstream_balancer;
proxy_redirect off;
}
}
## end server www.abc.com