Notes from building our team's Kubernetes environment (the initial design; it has since been improved in many ways, e.g. Prometheus monitoring).
hostname | OS | purpose | ip |
ub2-citst001.abc.com | ubuntu16.04 | docker registry | 10.239.220.38 |
centos-k8s001.abc.com | centos7.3 | haproxy+keepalived+etcd(leader) | 10.239.219.154 |
centos-k8s002.abc.com | centos7.3 | haproxy+keepalived+etcd | 10.239.219.153 |
centos-k8s003.abc.com | centos7.3 | etcd+nginx+ELK(elasticsearch,logstash,kibana) | 10.239.219.206 |
centos-k8s004.abc.com | centos7.3 | k8s master (kube-apiserver、kube-controller-manager、kube-scheduler) | 10.239.219.207 |
centos-k8s005.abc.com | centos7.3 | k8s slave(kubeproxy,kubelet,docker,flanneld)+OSS service+ELK(elasticsearch+filebeat) | 10.239.219.208 |
centos-k8s006.abc.com | centos7.3 | k8s slave(kubeproxy,kubelet,docker,flanneld)+mysql master+OSS service+ELK(elasticsearch+filebeat) | 10.239.219.210 |
centos-k8s007.abc.com | centos7.3 | k8s slave(kubeproxy,kubelet,docker,flanneld)+mysql slave+OSS service+ELK(elasticsearch+filebeat) | 10.239.219.209 |
vi /etc/profile
export http_proxy=http://<proxy ip or domain>:<port>
export https_proxy=http://<proxy ip or domain>:<port>
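To apply the change without logging in again, reload the profile and confirm the variables are set (standard shell steps, not specific to this setup):
source /etc/profile
env | grep -i proxy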
sudo cp /etc/apt/sources.list /etc/apt/sources_init.list
Back up the existing sources list first, in case it is needed later.
sudo vi /etc/apt/sources.list
deb http://mirrors.163.com/ubuntu/ xenial main restricted universe multiverse
deb http://mirrors.163.com/ubuntu/ xenial-security main restricted universe multiverse
deb http://mirrors.163.com/ubuntu/ xenial-updates main restricted universe multiverse
deb http://mirrors.163.com/ubuntu/ xenial-proposed main restricted universe multiverse
deb http://mirrors.163.com/ubuntu/ xenial-backports main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ xenial main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ xenial-security main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ xenial-updates main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ xenial-proposed main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ xenial-backports main restricted universe multiverse
Update the package index:
sudo apt-get update
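Next, point Docker at a registry mirror. Assuming the standard daemon config path, create or edit /etc/docker/daemon.json with the content below:
sudo mkdir -p /etc/docker
sudo vim /etc/docker/daemon.json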
{
"registry-mirrors": ["http://hub-mirror.c.163.com"]
}
systemctl restart docker
docker pull hello-world
sudo mkdir -p /etc/systemd/system/docker.service.d
vim /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=xxx.xxx.xxx.xxx:port"
Environment="HTTPS_PROXY=xxx.xxx.xxx.xxx:port"
Using default tag: latest
latest: Pulling from library/hello-world
d1725b59e92d: Pull complete
Digest: sha256:0add3ace90ecb4adbf7777e9aacf18357296e799f81cabc9fde470971e499788
Status: Downloaded newer image for hello-world:latest
docker images
docker pull index.tenxcloud.com/docker_library/registry
docker tag index.tenxcloud.com/docker_library/registry:latest registry
docker images
mkdir -p /docker/registry/
docker run -d -p 5000:5000 --name registry --restart=always --privileged=true -v /docker/registry:/var/lib/registry registry
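A quick way to confirm the registry is serving (the v2 catalog endpoint is part of the standard Docker registry API):
curl http://localhost:5000/v2/_catalog
# {"repositories":[]}   <- expected on a fresh, empty registry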
{
"registry-mirrors": ["http://hub-mirror.c.163.com"],"insecure-registries":["宿主機的ip或域名:5000"]
}
systemctl restart docker
{
"registry-mirrors": ["http://hub-mirror.c.163.com"],"insecure-registries":["私有倉庫ip或者域名 :5000"]
}
systemctl daemon-reload
service docker restart
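With both sides configured, pushing an image to the private registry follows the usual tag-and-push pattern (shown here with the hello-world image pulled earlier; any image works):
docker tag hello-world ub2-citst001.abc.com:5000/hello-world
docker push ub2-citst001.abc.com:5000/hello-world
docker pull ub2-citst001.abc.com:5000/hello-world    # from any machine that trusts the registry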
About etcd
etcd is a highly available, distributed key-value store. It is written in Go and uses the Raft protocol internally for consensus.
etcd is a service discovery system with the following characteristics:
Simple: easy to install and configure, with an HTTP API that is easy to work with
Secure: supports SSL certificate verification
Fast: according to the official benchmarks, a single instance handles 2k+ reads per second
Reliable: uses the Raft algorithm to keep distributed data available and consistent
etcd use cases
Service discovery is one of the most common problems in distributed systems: processes or services inside the same cluster need a way to find each other and connect. Solving it requires three properties:
- A strongly consistent, highly available store for service registrations.
etcd, built on the Raft algorithm, is exactly such a store.
- A mechanism for registering services and monitoring their health.
- A mechanism for clients to look up services and connect to them.
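As a rough illustration of the pattern (hypothetical key names; these are the etcd v2 etcdctl commands used elsewhere in this document), a service instance registers itself under a key with a TTL and keeps refreshing it, while consumers simply read the prefix:
etcdctl set /services/oss/centos-k8s005 "centos-k8s005:8000" --ttl 30    # register with a 30s lease
etcdctl ls /services/oss                                                 # discover all instances
etcdctl get /services/oss/centos-k8s005                                  # resolve one instance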
etcd installation
Install etcd on centos-k8s001, centos-k8s002 and centos-k8s003 to form the etcd cluster. It can be installed either directly on the hosts or as Docker containers.
Host installation:
1. Run on all three machines:
yum install etcd -y
2. The etcd installed by yum reads its configuration from /etc/etcd/etcd.conf; edit this file on each node.
centos-k8s001:
# [member]
# node name
ETCD_NAME=centos-k8s001
# data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
#ETCD_SNAPSHOT_COUNT="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
# URLs for listening to the other etcd members (peer traffic)
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
# URLs for listening to clients
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
# peer URLs advertised to the other etcd members
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://centos-k8s001:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
# initial list of cluster members
ETCD_INITIAL_CLUSTER="centos-k8s001=http://centos-k8s001:2380,centos-k8s002=http://centos-k8s002:2380,centos-k8s003=http://centos-k8s003:2380"
# initial cluster state; new means bootstrapping a new cluster
ETCD_INITIAL_CLUSTER_STATE="new"
# initial cluster token
ETCD_INITIAL_CLUSTER_TOKEN="mritd-etcd-cluster"
# client URLs advertised to clients
ETCD_ADVERTISE_CLIENT_URLS="http://centos-k8s001:2379,http://centos-k8s001:4001"
centos-k8s002:
# [member]
ETCD_NAME=centos-k8s002
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
#ETCD_SNAPSHOT_COUNT="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://centos-k8s002:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="centos-k8s001=http://centos-k8s001:2380,centos-k8s002=http://centos-k8s002:2380,centos-k8s003=http://centos-k8s003:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="mritd-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://centos-k8s002:2379,http://centos-k8s002:4001"
centos-k8s003:
# [member]
ETCD_NAME=centos-k8s003
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
#ETCD_SNAPSHOT_COUNT="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://centos-k8s003:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="centos-k8s001=http://centos-k8s001:2380,centos-k8s002=http://centos-k8s002:2380,centos-k8s003=http://centos-k8s003:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="mritd-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://centos-k8s003:2379,http://centos-k8s003:4001"
3. Restart the etcd service on each node:
systemctl restart etcd
4. Check that the cluster formed successfully:
etcdctl member list
4cb07e7292111d83: name=centos-k8s001 peerURLs=http://centos-k8s001:2380 clientURLs=http://centos-k8s001:2379,http://centos-k8s001:4001 isLeader=true
713da186acaefc5b: name=centos-k8s002 peerURLs=http://centos-k8s002:2380 clientURLs=http://centos-k8s002:2379,http://centos-k8s002:4001 isLeader=false
fabaedd18a2da8a7: name=centos-k8s003 peerURLs=http://centos-k8s003:2380 clientURLs=http://centos-k8s003:2379,http://centos-k8s003:4001 isLeader=false
Docker-based installation:
# on ub2-citst001 (the registry host), pull the etcd image
docker pull quay.io/coreos/etcd
# tag it and push it to the private registry
docker tag quay.io/coreos/etcd ub2-citst001.abc.com:5000/quay.io/coreos/etcd
docker push ub2-citst001.abc.com:5000/quay.io/coreos/etcd
# on centos-k8s001/002/003, pull the image from the private registry
docker pull ub2-citst001.abc.com:5000/quay.io/coreos/etcd
# start on centos-k8s001
docker run -d --name etcd -p 2379:2379 -p 2380:2380 -p 4001:4001 --restart=always --volume=etcd-data:/etcd-data ub2-citst001.abc.com:5000/quay.io/coreos/etcd /usr/local/bin/etcd --data-dir=/etcd-data --name etcd1 --initial-advertise-peer-urls http://centos-k8s001:2380 --listen-peer-urls http://0.0.0.0:2380 --advertise-client-urls http://centos-k8s001:2379,http://centos-k8s001:4001 --listen-client-urls http://0.0.0.0:2379 --initial-cluster-state new --initial-cluster-token docker-etcd --initial-cluster etcd1=http://centos-k8s001:2380,etcd2=http://centos-k8s002:2380,etcd3=http://centos-k8s003:2380
# start on centos-k8s002
docker run -d --name etcd -p 2379:2379 -p 2380:2380 -p 4001:4001 --restart=always --volume=etcd-data:/etcd-data ub2-citst001.abc.com:5000/quay.io/coreos/etcd /usr/local/bin/etcd --data-dir=/etcd-data --name etcd2 --initial-advertise-peer-urls http://centos-k8s002:2380 --listen-peer-urls http://0.0.0.0:2380 --advertise-client-urls http://centos-k8s002:2379,http://centos-k8s002:4001 --listen-client-urls http://0.0.0.0:2379 --initial-cluster-state new --initial-cluster-token docker-etcd --initial-cluster etcd1=http://centos-k8s001:2380,etcd2=http://centos-k8s002:2380,etcd3=http://centos-k8s003:2380
# start on centos-k8s003
docker run -d --name etcd -p 2379:2379 -p 2380:2380 -p 4001:4001 --restart=always --volume=etcd-data:/etcd-data ub2-citst001.abc.com:5000/quay.io/coreos/etcd /usr/local/bin/etcd --data-dir=/etcd-data --name etcd3 --initial-advertise-peer-urls http://centos-k8s003:2380 --listen-peer-urls http://0.0.0.0:2380 --advertise-client-urls http://centos-k8s003:2379,http://centos-k8s003:4001 --listen-client-urls http://0.0.0.0:2379 --initial-cluster-state new --initial-cluster-token docker-etcd --initial-cluster etcd1=http://centos-k8s001:2380,etcd2=http://centos-k8s002:2380,etcd3=http://centos-k8s003:2380
# enter any of the etcd containers:
docker exec -it <container name or id> /bin/sh
# now inside the container, run:
etcdctl member list
# output like the following means the etcd container cluster is up:
4cb07e7292111d83: name=etcd1 peerURLs=http://centos-k8s001:2380 clientURLs=http://centos-k8s001:2379,http://centos-k8s001:4001 isLeader=true
713da186acaefc5b: name=etcd2 peerURLs=http://centos-k8s002:2380 clientURLs=http://centos-k8s002:2379,http://centos-k8s002:4001 isLeader=false
fabaedd18a2da8a7: name=etcd3 peerURLs=http://centos-k8s003:2380 clientURLs=http://centos-k8s003:2379,http://centos-k8s003:4001 isLeader=false
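Besides member list, etcdctl can also report per-member health, which is a handy check for either deployment style (etcd v2 command):
etcdctl cluster-health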
• Prerequisites
Perform the following on the four Kubernetes machines (centos-k8s004 to centos-k8s007).
1. Make sure the epel-release repository is installed:
# yum -y install epel-release
2. Stop the firewall service and disable SELinux, to avoid conflicts with the iptables rules used by Docker containers:
# systemctl stop firewalld
# systemctl disable firewalld
# setenforce 0
• Install and configure the Kubernetes master
Perform the following on the master (centos-k8s004).
1. Install kubernetes-master with yum:
# yum -y install kubernetes-master
2. Edit /etc/kubernetes/apiserver:
# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# The port on the local server to listen on.
#KUBE_API_PORT="--port=8080"
# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://centos-k8s001:2379,http://centos-k8s002:2379,http://centos-k8s003:2379"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
# default admission control policies
#KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
# Add your own!
KUBE_API_ARGS=""
3. Start the etcd, kube-apiserver, kube-controller-manager and kube-scheduler services, and enable them at boot:
for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler; do systemctl restart $SERVICES;systemctl enable $SERVICES;systemctl status $SERVICES ; done
# check a service's status with systemctl status, e.g.:
systemctl status kube-apiserver
4. Define the flannel network in etcd on centos-k8s001
# if etcd was deployed as a container, first find and enter it:
docker ps                                 # find the etcd container id
docker exec -it <container id> /bin/sh    # enter the container, then run the command below
# if etcd was installed directly on the host, just run:
etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'    # define the flannel network in etcd
# check the flannel configuration from another etcd node,
# e.g. on centos-k8s002:
etcdctl get /atomic.io/network/config
# correct output:
{"Network":"172.17.0.0/16"}    # configuration succeeded
• Install and configure the Kubernetes nodes
Perform the following on centos-k8s005, centos-k8s006 and centos-k8s007.
1. Install flannel and kubernetes-node with yum:
# yum -y install flannel kubernetes-node
2. Point flannel at the etcd cluster by editing /etc/sysconfig/flanneld:
FLANNEL_ETCD="http://centos-k8s001:2379"
FLANNEL_ETCD_KEY="/atomic.io/network"
3. Edit /etc/kubernetes/config:
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://centos-k8s004:8080"
4. Edit /etc/kubernetes/kubelet on each node as follows.
centos-k8s005:
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=centos-k8s005"    # change to the corresponding node's hostname
KUBELET_API_SERVER="--api-servers=http://centos-k8s004:8080"    # the master's API server
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
centos-k8s006:
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=centos-k8s006"
KUBELET_API_SERVER="--api-servers=http://centos-k8s004:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
centos-k8s007:
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=centos-k8s007"
KUBELET_API_SERVER="--api-servers=http://centos-k8s004:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
5. On all node machines, start the kube-proxy, kubelet, docker and flanneld services and enable them at boot:
# for SERVICES in kube-proxy kubelet docker flanneld;do systemctl restart $SERVICES;systemctl enable $SERVICES;systemctl status $SERVICES; done
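If flanneld came up correctly, each node is assigned its own subnet; a quick sanity check (the interface name may be flannel0 or flannel.1 depending on the flannel backend, and the subnet file path is the flanneld default):
ip addr | grep -A2 flannel
cat /run/flannel/subnet.env    # shows the subnet leased to this node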
• Verify the cluster installation
Run the following on the master, centos-k8s004:
[root@centos-k8s004 ~]# kubectl get node
NAME            STATUS    AGE
centos-k8s005   Ready     38d
centos-k8s006   Ready     38d
centos-k8s007   Ready     37d
# directory layout for building the MySQL master and slave images:
`-- mysql-build_file
|-- master
| |-- docker-entrypoint.sh
| |-- Dockerfile
| `-- my.cnf
`-- slave
|-- docker-entrypoint.sh
|-- Dockerfile
`-- my.cnf
# master/docker-entrypoint.sh: create the replication account when the master container initializes
MYSQL="mysql -uroot -proot"
sql="CREATE USER '$MYSQL_REPLICATION_USER'@'%' IDENTIFIED BY '$MYSQL_REPLICATION_PASSWORD'"
$MYSQL -e "$sql"
sql="GRANT REPLICATION SLAVE ON *.* TO '$MYSQL_REPLICATION_USER'@'%' IDENTIFIED BY '$MYSQL_REPLICATION_PASSWORD'"
$MYSQL -e "$sql"
sql="FLUSH PRIVILEGES"
$MYSQL -e "$sql"
# master/my.cnf :
[mysqld]
log-bin = mysql-bin
server-id = 1
character_set_server=utf8
log_bin_trust_function_creators=1
skip-name-resolve
binlog_format = mixed
relay-log = relay-bin
relay-log-index = slave-relay-bin.index
auto-increment-increment = 2
auto-increment-offset = 1
# master/Dockerfile:
FROM mysql:5.6
ENV http_proxy http://child-prc.abc.com:913
ENV https_proxy https://child-prc.abc.com:913
COPY my.cnf /etc/mysql/mysql.cnf
COPY docker-entrypoint.sh /docker-entrypoint-initdb.d/
#slave/docker-entrypoint.sh: read the master's current binlog position and start replication
MYSQL="mysql -uroot -proot"
MYSQL_MASTER="mysql -uroot -proot -h$MYSQL_MASTER_SERVICE_HOST -P$MASTER_PORT"
sql="stop slave"
$MYSQL -e "$sql"
sql="SHOW MASTER STATUS"
result="$($MYSQL_MASTER -e "$sql")"
dump_data=/master-condition.log
echo -e "$result" > $dump_data
var=$(cat /master-condition.log | grep mysql-bin)
MASTER_LOG_FILE=$(echo $var | awk '{split($0,arr," ");print arr[1]}')
MASTER_LOG_POS=$(echo $var | awk '{split($0,arr," ");print arr[2]}')
sql="reset slave"
$MYSQL -e "$sql"
sql="CHANGE MASTER TO master_host='$MYSQL_MASTER_SERVICE_HOST', master_user='$MYSQL_REPLICATION_USER', master_password='$MYSQL_REPLICATION_PASSWORD', master_log_file='$MASTER_LOG_FILE', master_log_pos=$MASTER_LOG_POS, master_port=$MASTER_PORT"
$MYSQL -e "$sql"
sql="start slave"
$MYSQL -e "$sql"
#slave/my.cnf:
[mysqld]
log-bin = mysql-bin
#server-id must be unique and must not clash with other mysql instances/images
server-id = 2
character_set_server=utf8
log_bin_trust_function_creators=1
#slave/Dockerfile:
FROM mysql:5.6
ENV http_proxy http://child-prc.abc.com:913
ENV https_proxy https://child-prc.abc.com:913
COPY my.cnf /etc/mysql/mysql.cnf
COPY docker-entrypoint.sh /docker-entrypoint-initdb.d/
RUN touch /master-condition.log && chown -R mysql:mysql /master-condition.log
cd /mysql-build_file/master
docker build -t ub2-citst001.abc.com:5000/mysql-master .
# output like the following indicates a successful build:
#Sending build context to Docker daemon 4.096kB
#Step 1/5 : FROM mysql:5.6
# ---> a46c2a2722b9
#Step 2/5 : ENV http_proxy http://child-prc.abc.com:913
# ---> Using cache
# ---> 873859820af7
#Step 3/5 : ENV https_proxy https://child-prc.abc.com:913
# ---> Using cache
# ---> b5391bed1bda
#Step 4/5 : COPY my.cnf /etc/mysql/mysql.cnf
# ---> Using cache
# ---> ccbdced047a3
#Step 5/5 : COPY docker-entrypoint.sh /docker-entrypoint-initdb.d/
# ---> Using cache
# ---> 81cfad9f0268
#Successfully built 81cfad9f0268
#Successfully tagged ub2-citst001.sh.abc.com:5000/mysql-master
cd /mysql-build_file/slave
docker build -t ub2-citst001.abc.com:5000/mysql-slave .
# list the images
docker images
# push the images to the private registry:
docker push ub2-citst001.abc.com:5000/mysql-slave
docker push ub2-citst001.abc.com:5000/mysql-master
# on centos-k8s004 (the k8s master):
# create a folder under / to hold the yaml files
mkdir -p /file_for_k8s/MyCat
cd /file_for_k8s/MyCat/
mkdir master slave
cd master
# create the mysql-master.yaml file
touch mysql-master.yaml
# with the following content:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: mysql-master
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql-master
      release: stabel
  template:
    metadata:
      labels:
        name: mysql-master
        app: mysql-master
        release: stabel
    spec:
      containers:
      - name: mysql-master
        image: ub2-citst001.abc.com:5000/mysql-master
        volumeMounts:
        - name: mysql-config
          mountPath: /usr/data
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "root"
        - name: MYSQL_REPLICATION_USER
          value: "slave"
        - name: MYSQL_REPLICATION_PASSWORD
          value: "slave"
        ports:
        - containerPort: 3306
          # hostPort: 4000
          name: mysql-master
      volumes:
      - name: mysql-config
        hostPath:
          path: /localdisk/NFS/mysqlData/master/
      nodeSelector:
        kubernetes.io/hostname: centos-k8s006
----------------------------------------------------------end-----------------------------------------------------------------
#create mysql-master-service.yaml
touch mysql-master-service.yaml
#with the following content:
apiVersion: v1
kind: Service
metadata:
  name: mysql-master
  namespace: default
spec:
  type: NodePort
  selector:
    app: mysql-master
    release: stabel
  ports:
  - name: http
    port: 3306
    nodePort: 31306
    targetPort: 3306
--------------------------------------------------------------end---------------------------------------------------------------------
cd ../slave/
#create the mysql-slave.yaml file
touch mysql-slave.yaml
#with the following content:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: mysql-slave
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql-slave
      release: stabel
  template:
    metadata:
      labels:
        app: mysql-slave
        name: mysql-slave
        release: stabel
    spec:
      containers:
      - name: mysql-slave
        image: ub2-citst001.abc.com:5000/mysql-slave
        volumeMounts:
        - name: mysql-config
          mountPath: /usr/data
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "root"
        - name: MYSQL_REPLICATION_USER
          value: "slave"
        - name: MYSQL_REPLICATION_PASSWORD
          value: "slave"
        - name: MYSQL_MASTER_SERVICE_HOST
          value: "mysql-master"
        - name: MASTER_PORT
          value: "3306"
        ports:
        - containerPort: 3306
          name: mysql-slave
      volumes:
      - name: mysql-config
        hostPath:
          path: /localdisk/NFS/mysqlData/slave/
      nodeSelector:
        kubernetes.io/hostname: centos-k8s007
-----------------------------------------------------------------------end-------------------------------------------------------------------
#create mysql-slave-service.yaml
touch mysql-slave-service.yaml
#with the following content:
apiVersion: v1
kind: Service
metadata:
  name: mysql-slave
  namespace: default
spec:
  type: NodePort
  selector:
    app: mysql-slave
    release: stabel
  ports:
  - name: http
    port: 3306
    nodePort: 31307
    targetPort: 3306
-----------------------------------------------------------------------end-----------------------------------------------------------------
#create the mysql master/slave instances:
cd ../master/
kubectl create -f mysql-master.yaml
kubectl create -f mysql-master-service.yaml
cd ../slave/
kubectl create -f mysql-slave.yaml
kubectl create -f mysql-slave-service.yaml
# check the deployments on centos-k8s004:
root@centos-k8s004 slave # kubectl get deployment
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
mysql-master 1 1 1 1 39d
mysql-slave 1 1 1 1 39d
# check the services:
root@centos-k8s004 slave # kubectl get svc
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes 192.168.0.1 <none> 443/TCP 5d
mysql-master 192.168.45.209 <nodes> 3306:31306/TCP 5h
mysql-slave 192.168.230.192 <nodes> 3306:31307/TCP 5h
# AVAILABLE may take 2~15s to catch up with DESIRED while the pods start. If it stays at 0 or out of sync for a long time, inspect the pods in detail with:
root@centos-k8s004 master # kubectl describe pod
Name:           mysql-master-4291032429-0fzsr
Namespace:      default
Node:           centos-k8s006/10.239.219.210
Start Time:     Wed, 31 Oct 2018 18:56:06 +0800
Labels:         name=mysql-master
                pod-template-hash=4291032429
Status:         Running
IP:             172.17.44.2
Controllers:    ReplicaSet/mysql-master-4291032429
Containers:
  master:
    Container ID:       docker://674de0971fe2aa16c7926f345d8e8b2386278b14dedd826653e7347559737e28
    Image:              ub2-citst001.abc.com:5000/mysql-master
    Image ID:           docker-pullable://ub2-citst001.abc.com:5000/mysql-master@sha256:bc286c1374a3a5f18ae56bd785a771ffe0fad15567d56f8f67a615c606fb4e0d
    Port:               3306/TCP
    State:              Running
      Started:          Wed, 31 Oct 2018 18:56:07 +0800
    Ready:              True
    Restart Count:      0
    Volume Mounts:
      /usr/data from mysql-config (rw)
    Environment Variables:
      MYSQL_ROOT_PASSWORD:        root
      MYSQL_REPLICATION_USER:     slave
      MYSQL_REPLICATION_PASSWORD: slave
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
Volumes:
  mysql-config:
    Type:       HostPath (bare host directory volume)
    Path:       /localdisk/NFS/mysqlData/master/
QoS Class:      BestEffort
Tolerations:    <none>
No events.
Name:           mysql-slave-3654103728-0sxsm
Namespace:      default
Node:           centos-k8s007/10.239.219.209
Start Time:     Wed, 31 Oct 2018 18:56:19 +0800
Labels:         name=mysql-slave
                pod-template-hash=3654103728
Status:         Running
IP:             172.17.16.2
Controllers:    ReplicaSet/mysql-slave-3654103728
Containers:
  slave:
    Container ID:       docker://d52f4f1e57d6fa6a7c04f1a9ba63fa3f0af778df69a3190c4f35f755f225fb50
    Image:              ub2-citst001.abc.com:5000/mysql-slave
    Image ID:           docker-pullable://ub2-citst001.abc.com:5000/mysql-slave@sha256:6a1c7cbb27184b966d2557bf53860daa439b7afda3d4aa5498844d4e66f38f47
    Port:               3306/TCP
    State:              Running
      Started:          Fri, 02 Nov 2018 13:49:48 +0800
    Last State:         Terminated
      Reason:           Completed
      Exit Code:        0
      Started:          Wed, 31 Oct 2018 18:56:20 +0800
      Finished:         Fri, 02 Nov 2018 13:49:47 +0800
    Ready:              True
    Restart Count:      1
    Volume Mounts:
      /usr/data from mysql-config (rw)
    Environment Variables:
      MYSQL_ROOT_PASSWORD:        root
      MYSQL_REPLICATION_USER:     slave
      MYSQL_REPLICATION_PASSWORD: slave
      MYSQL_MASTER_SERVICE_HOST:  centos-k8s006
      MASTER_PORT:                4000
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
Volumes:
  mysql-config:
    Type:       HostPath (bare host directory volume)
    Path:       /localdisk/NFS/mysqlData/slave/
QoS Class:      BestEffort
Tolerations:    <none>
No events.
# If an error like the following appears, fix it as described below:
Error syncing pod, skipping: failed to "StartContainer" for "POD" with ErrImagePull: "image pull failed for registry.access.redhat.com/rhel7/pod-infrastructure:latest, this may be because there are no credentials on this request. details: (open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory)"
#Fix
The problem is obvious: /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt is missing. ls -l shows it is a symlink to /etc/rhsm/ca/redhat-uep.pem, which does not exist either. Search for the providing package with yum search *rhsm* and install python-rhsm-certificates:
root@centos-k8s004 master # yum install python-rhsm-certificates -y
Here another problem appears:
python-rhsm-certificates <= 1.20.3-1 is obsoleted by (installed) subscription-manager-rhsm-certificates-1.20.11-1.el7.centos.x86_64
So what to do? Simply remove the subscription-manager-rhsm-certificates package with yum remove subscription-manager-rhsm-certificates -y, then download the python-rhsm-certificates package directly:
# wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
and install the rpm manually:
# rpm -ivh python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
Now /etc/rhsm/ca/redhat-uep.pem exists.
On each node run:
yum install *rhsm* -y
Then delete /etc/docker/seccomp.json, restart docker again, and pull the pod infrastructure image:
#docker pull registry.access.redhat.com/rhel7/pod-infrastructure:latest
Finally delete the previously created rc, svc and pods and recreate them; after a short while the pods start successfully.
# On centos-k8s006, check the docker containers: the mysql-master instance was created successfully (check mysql-slave the same way on centos-k8s007)
root@centos-k8s006 master # docker ps
674de0971fe2 ub2-citst001.abc.com:5000/mysql-master "docker-entrypoint..." 5 weeks ago Up 5 weeks k8s_master.1fa78e47_mysql-master-4291032429-0fzsr_default_914f7535-dcfb-11e8-9eb8-005056a654f2_3462901b
220e4d37915d registry.access.redhat.com/rhel7/pod-infrastructure:latest "/usr/bin/pod" 5 weeks ago Up 5 weeks 0.0.0.0:4000->3306/tcp k8s_POD.62220e6f_mysql-master-4291032429-0fzsr_default_914f7535-dcfb-11e8-9eb8-005056a654f2_d0d62756
# containers can now be managed through kubectl
root@centos-k8s006 master # kubectl exec -it mysql-master-4291032429-0fzsr /bin/bash
root@mysql-master-4291032429-0fzsr:/#
# create a database on the master and check whether it is replicated to the slave
root@mysql-master-4291032429-0fzsr:/# mysql -uroot -proot
mysql>create database test_database charset='utf8';
mysql>show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| test_database |
| mysql |
| performance_schema |
+--------------------+
# detach from the container:
Ctrl + p && Ctrl + q
# enter mysql-slave and list the databases:
root@centos-k8s006 master # kubectl exec -it mysql-slave-3654103728-0sxsm /bin/bash
root@mysql-slave-3654103728-0sxsm:/#mysql -uroot -proot
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| mysql |
| performance_schema |
| test_database |
+--------------------+
# Replication works: the database created on the master shows up on the slave.
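Replication health can also be confirmed directly on the slave; both replication threads should report Yes (standard MySQL command, output trimmed):
mysql> show slave status\G
...
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
...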
uWSGI
~~~~~~~~~~~~~~~~
# pip.conf (copied into the image at /root/.pip/pip.conf by the Dockerfile below):
[global]
timeout = 60
index-url = http://pypi.douban.com/simple
trusted-host = pypi.douban.com
#!/bin/bash
# start_script.sh (the image entrypoint): create the log-rotate touch file, start uwsgi, keep the container in the foreground
touch /root/www2/oss2/log/touchforlogrotate
uwsgi --ini /root/uwsgi.ini
tail -f /dev/null
[uwsgi]
socket=0.0.0.0:8001
chdir=/root/www2/oss2/
master=true
processes=4
threads=2
module=oss2.wsgi
touch-logreopen = /root/www2/oss2/log/touchforlogrotate
daemonize = /root/www2/oss2/log/log.log
wsgi-file =/root/www2/oss2/website/wsgi.py
py-autoreload=1
FROM centos
MAINTAINER by miaohenx
ENV http_proxy http://child-prc.abc.com:913/
ENV https_proxy https://child-prc.abc.com:913/
RUN mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup && curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo && yum makecache && yum -y install epel-release zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel tk-devel gcc make openldap-devel && curl -O https://www.python.org/ftp/python/3.6.6/Python-3.6.6.tar.xz && tar -xvJf Python-3.6.6.tar.xz && cd Python-3.6.6 && ./configure prefix=/usr/local/python3 && make && make install && ln -s /usr/local/python3/bin/python3 /usr/bin/python3.6 && ln -s /usr/local/python3/bin/python3 /usr/bin/python3 && cd .. && rm -rf Python-3.6.*
RUN yum -y install python36-devel python36-setuptools && easy_install-3.6 pip && mkdir /root/.pip
COPY 1.txt /root/
COPY uwsgi.ini /root/
COPY www2 /root/www2/
COPY start_script.sh /root/
COPY pip.conf /root/.pip/
RUN pip3 install -r /root/1.txt && chmod +x /root/start_script.sh
EXPOSE 8000
ENTRYPOINT ["/root/start_script.sh"]
docker build -t oss-server .    # note the trailing dot
docker images
1. Advantages of Nginx:
1) It works at layer 7, so it can make routing decisions for HTTP applications, for example by domain name or URL path. Its regular-expression rules are more powerful and flexible than HAProxy's, which is one of the main reasons it is so widely used; this alone gives Nginx far more applicable scenarios than LVS.
2) Nginx depends very little on network stability; in theory, if it can ping a backend it can load-balance to it. LVS, by contrast, is much more sensitive to network stability.
3) Nginx is easy to install, configure and test, and its logs make most errors easy to find. Configuring and testing LVS takes considerably longer, and LVS depends more heavily on the network.
4) It handles high load reliably; on reasonable hardware it can generally sustain tens of thousands of concurrent connections, though its load capacity is somewhat lower than LVS.
5) Nginx can detect backend failures through the port it forwards to, for example from response status codes or timeouts, and it resubmits failed requests to another node. Its drawback is that it cannot health-check by URL.
For example, if a user is uploading a file and the node handling the upload fails mid-transfer, Nginx switches the upload to another server and re-processes it, whereas LVS simply drops the connection; with a large or important
file, the user may be unhappy about that.
6) Nginx is not only an excellent load balancer / reverse proxy, it is also a powerful web application server. LNMP has been a very popular web stack in recent years and is stable under heavy traffic.
7) Nginx has matured as a reverse-proxy cache and is faster than the traditional Squid server, so it is worth considering as a caching reverse-proxy accelerator.
8) As a mid-tier reverse proxy Nginx has essentially no rival; the only comparable option is lighttpd, which does not yet match Nginx's full feature set, has less readable configuration,
and has a far less active community.
9) Nginx also excels as a static web and image server, with unmatched performance in that role. Its community is very active and there are plenty of third-party modules.
Disadvantages of Nginx:
1) Nginx only supports HTTP, HTTPS and Email protocols, which narrows its scope of use.
2) Backend health checks only work by port, not by URL. It does not support direct session persistence, though ip_hash can work around this.
2. LVS: uses Linux kernel clustering to build a high-performance, highly available load balancer with good scalability, reliability and manageability.
Advantages of LVS:
1) Very high load capacity. It works at layer 4 and only distributes traffic, generating no traffic of its own, which makes it the best-performing load-balancing software with low memory and CPU consumption.
2) Very little to configure, which is both a drawback and an advantage: there is not much to touch, so the chance of human error is greatly reduced.
3) Stable operation, thanks to its strong load tolerance and complete active/standby solutions such as LVS+Keepalived; in practice LVS/DR+Keepalived is the most common deployment.
4) No traffic passes through it on the return path: LVS only distributes requests, so the balancer's IO performance is not affected by high traffic volumes.
5) Broad applicability: because it works at layer 4, it can load-balance almost any application, including HTTP, databases, chat services and so on.
Disadvantages of LVS:
1) The software itself cannot do regular-expression handling or split static and dynamic content, which many sites now need; this is where Nginx/HAProxy+Keepalived shine.
2) For large web deployments, LVS/DR+Keepalived is relatively complex to implement, configure and maintain, especially when Windows Server machines are involved; by comparison,
Nginx/HAProxy+Keepalived is much simpler.
3. Characteristics of HAProxy:
1) HAProxy also supports virtual hosts.
2) HAProxy covers some of Nginx's gaps, such as session persistence and cookie-based routing, and it can health-check backends by requesting a specified URL.
3) Like LVS, HAProxy is purely a load balancer; in raw efficiency it outperforms Nginx in balancing speed and in concurrent request handling.
4) HAProxy can load-balance TCP, so it can balance MySQL reads and health-check the backend MySQL nodes; LVS+Keepalived can likewise be used to balance a MySQL master/slave pair.
5) HAProxy offers many balancing strategies; it currently has the following 8 algorithms:
1> roundrobin: simple round-robin, the baseline every load balancer provides;
2> static-rr: weighted round-robin, worth noting;
3> leastconn: least connections first, worth noting;
4> source: by request source IP, similar to Nginx's ip_hash, usable as one way to solve session stickiness, worth noting;
5> uri: by request URI;
6> url_param: by request URL parameter ('balance url_param' requires a URL parameter name);
7> hdr(name): pin each HTTP request according to the given HTTP request header;
8> rdp-cookie(name): pin and hash each TCP request according to the given cookie(name).
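For example, switching a backend from round-robin to least-connections is a one-line change in the backend definition (a sketch against the dynamic backend defined later in the config):
backend dynamic
    balance leastconn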
yum install -y haproxy keepalived          # install haproxy and keepalived
vim /etc/keepalived/keepalived.conf        # configure keepalived; replace the content with the following
# keepalived.conf on centos-k8s001:
! Configuration File for keepalived
global_defs {
   notification_email {                     # notification recipients
        root@localhost
   }
   notification_email_from root@localhost   # sender address
   smtp_server 127.0.0.1                    # mail server address
   smtp_connect_timeout 30                  # mail server connection timeout, in seconds
   router_id LVS_DEVEL                      # identifier of the machine running keepalived
}
vrrp_instance VI_1 {             # define a VRRP instance; the instance name is arbitrary
    state MASTER                 # role of this node: MASTER (primary) or BACKUP
    interface ens160             # interface used for HA monitoring
    virtual_router_id 51         # virtual router id; must be identical on master and backup within the same VRRP instance
    priority 100                 # node priority; higher wins, and the master must be higher than the backup
    advert_int 1                 # advertisement interval between master and backup, in seconds
    authentication {             # authentication type and password shared by master and backup
auth_type PASS
auth_pass a23c7f32dfb519d6a5dc67a4b2ff8f5e
}
virtual_ipaddress {
        10.239.219.157           # virtual IP address
}
}
vrrp_instance VI_2 {
state BACKUP
interface ens160
virtual_router_id 52
priority 99
advert_int 1
authentication {
auth_type PASS
auth_pass 56f7663077966379d4106e8ee30eb1a5
}
virtual_ipaddress {
10.239.219.156
}
}
# keepalived.conf on centos-k8s002:
! Configuration File for keepalived
global_defs {
   notification_email {                     # notification recipients
        root@localhost
   }
   notification_email_from root@localhost   # sender address
   smtp_server 127.0.0.1                    # mail server address
   smtp_connect_timeout 30                  # mail server connection timeout, in seconds
   router_id LVS_DEVEL                      # identifier of the machine running keepalived
}
vrrp_instance VI_1 {             # define a VRRP instance; the instance name is arbitrary
    state BACKUP                 # role of this node: MASTER (primary) or BACKUP
    interface ens160             # interface used for HA monitoring
    virtual_router_id 51         # virtual router id; must be identical on master and backup within the same VRRP instance
    priority 99                  # node priority; higher wins, and the master must be higher than the backup
    advert_int 1                 # advertisement interval between master and backup, in seconds
    authentication {             # authentication type and password shared by master and backup
auth_type PASS
auth_pass a23c7f32dfb519d6a5dc67a4b2ff8f5e
}
virtual_ipaddress {
        10.239.219.157           # virtual IP address
}
}
vrrp_instance VI_2 {
state MASTER
interface ens160
virtual_router_id 52
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 56f7663077966379d4106e8ee30eb1a5
}
virtual_ipaddress {
10.239.219.156
}
}
# /etc/haproxy/haproxy.cfg (on both haproxy nodes):
global    # global configuration section
# to have these messages end up in /var/log/haproxy.log you will
# need to:
#
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
#
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
    log         127.0.0.1 local2       # ship logs through rsyslog as facility local2: add 'local2.*  /var/log/haproxy.log' to /etc/rsyslog.conf, enable $ModLoad imudp, $UDPServerRun 514, $ModLoad imtcp, $InputTCPServerRun 514, then restart rsyslog
    chroot      /var/lib/haproxy       # working directory of the haproxy process
    pidfile     /var/run/haproxy.pid   # pid file
    maxconn     4000                   # maximum concurrent connections
    user        haproxy                # user haproxy runs as
    group       haproxy                # group haproxy runs as
    daemon                             # run in the background as a daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults                                # defaults section
    mode                    http        # working mode (source builds default to tcp)
    log                     global      # use the global log settings
    option                  httplog     # verbose HTTP logging
    option                  dontlognull # do not log health-check probes
    option http-server-close            # enable server-side active connection close
    option forwardfor       except 127.0.0.0/8    # pass the client IP through to the backend real servers
    option                  redispatch  # with cookie-based session persistence, redispatch the session to another server if the server holding it fails
    retries                 3           # number of request retries
    timeout http-request    10s         # timeout before dropping an idle client request
    timeout queue           1m          # how long a request may wait in the queue
    timeout connect         10s         # timeout for haproxy connecting to an upstream server
    timeout client          1m          # client inactivity timeout
    timeout server          1m          # server inactivity timeout
    timeout http-keep-alive 10s         # keep-alive timeout
    timeout check           10s         # health-check timeout
    maxconn                 3000        # maximum connections
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend webserver *:8000    # OSS server
    acl url_static       path_beg       -i /static /images /javascript /stylesheets    # match paths beginning with /static, /images, ... (case-insensitive)
    acl url_static       path_end       -i .jpg .gif .png .css .js .html
    acl url_static       hdr_beg(host)  -i img. video. download. ftp. imgs. image.
    acl url_dynamic      path_end       .php .jsp
    use_backend static   if url_static       # requests matching the url_static acl go to the backend group named static
    use_backend dynamic  if url_dynamic
    default_backend      dynamic             # anything matching none of the acls goes to the dynamic group
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static                          # backend real-server group named static
    balance     roundrobin              # supports dynamic weight changes and slow start
    server      static_1 centos-k8s005:8000 check inter 3000 fall 3 rise 1 maxconn 30000
    server      static_2 centos-k8s006:8000 check inter 3000 fall 3 rise 1 maxconn 30000
    server      static_3 centos-k8s007:8000 check inter 3000 fall 3 rise 1 maxconn 30000
    # server    static_Error :8080 backup check    # only used when every other server in this group is down
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend dynamic
    cookie cookie_name insert nocache   # cookie-based session persistence, without caching
    balance roundrobin
    server  dynamic1 centos-k8s005:8000 check inter 3000 fall 3 rise 1 maxconn 1000 cookie dynamic1
    server  dynamic2 centos-k8s006:8000 check inter 3000 fall 3 rise 1 maxconn 1000 cookie dynamic2
    server  dynamic3 centos-k8s007:8000 check inter 3000 fall 3 rise 1 maxconn 1000 cookie dynamic3    # server named dynamic3 in the dynamic group: health-checked every 3000ms, removed after 3 consecutive failures, brought back after 1 success, at most 1000 concurrent connections, cookie value dynamic3
frontend kibana *:5602    # Kibana
    acl url_static       path_beg       -i /static /images /javascript /stylesheets    # match paths beginning with /static, /images, ... (case-insensitive)
    acl url_static       path_end       -i .jpg .gif .png .css .js .html
    acl url_static       hdr_beg(host)  -i img. video. download. ftp. imgs. image.
    acl url_dynamic      path_end       .php .jsp
    use_backend static2  if url_static       # requests matching url_static go to the static2 group
    use_backend dynamic2 if url_dynamic
    default_backend      dynamic2            # everything else goes to the dynamic2 group
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static2                         # backend real-server group named static2
    balance     roundrobin              # supports dynamic weight changes and slow start
    server      static_1 centos-k8s003:5601 check inter 3000 fall 3 rise 1 maxconn 30000
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend dynamic2
    cookie cookie_name insert nocache   # cookie-based session persistence, without caching
    balance roundrobin
    server  dynamic1 centos-k8s003:5601 check inter 3000 fall 3 rise 1 maxconn 1000 cookie dynamic1
frontend dashboard *:8080    # kubernetes-dashboard
    acl url_static       path_beg       -i /static /images /javascript /stylesheets    # match paths beginning with /static, /images, ... (case-insensitive)
    acl url_static       path_end       -i .jpg .gif .png .css .js .html
    acl url_static       hdr_beg(host)  -i img. video. download. ftp. imgs. image.
    acl url_dynamic      path_end       .php .jsp
    use_backend static3  if url_static       # requests matching url_static go to the static3 group
    use_backend dynamic3 if url_dynamic
    default_backend      dynamic3            # everything else goes to the dynamic3 group
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static3                         # backend real-server group named static3
    balance     roundrobin              # supports dynamic weight changes and slow start
    server      static_1 centos-k8s004:8080 check inter 3000 fall 3 rise 1 maxconn 30000
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend dynamic3
    cookie cookie_name insert nocache   # cookie-based session persistence, without caching
    balance roundrobin
    server  dynamic1 centos-k8s004:8080 check inter 3000 fall 3 rise 1 maxconn 1000 cookie dynamic1
listen state                            # standalone haproxy statistics page; needs no frontend
    bind *:8001                         # listen address
    mode http                           # layer-7 HTTP mode: inspects application-layer data, enabling layer-7 filtering, processing and rewriting
    stats enable                        # enable the statistics page
    stats hide-version                  # hide the version on the status page
    stats uri /haproxyadmin?stats       # path of the status page
    stats auth admin:root               # username:password authentication
    stats admin if TRUE                 # allow admin actions after successful login
    acl num1 src 10.239.0.0/16          # acl named num1 matching the 10.239.0.0/16 source network
    tcp-request content accept if num1  # allow access when the acl matches
    tcp-request content reject          # reject all other access
systemctl enable haproxy keepalived     # enable haproxy and keepalived at boot
systemctl restart haproxy keepalived    # restart haproxy and keepalived
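A quick way to confirm keepalived is holding the virtual IPs (assuming the ens160 interface and the VIPs from the config above; the active node should show 10.239.219.157 and/or 10.239.219.156):
ip addr show ens160 | grep 10.239.219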
#!/bin/bash
# mysql backup directory
folder=/localdisk/mysql_backup
cd $folder
day=`date +%Y%m%d`
rm -rf $day
mkdir $day
cd $day
# address of the database to back up
host=xxxxxx.abc.com
# database port
port=3306
# user name
user=xxxxxxxx
# password
password=xxxxxxxxxx
# database to back up
db=oss2_base_test
# number of days to keep backups
days=7
# backup command
mysqldump -h$host -P$port -u$user -p$password $db>backup.sql
# compress the dump to save disk space; the archive is only about 5% of the original size
zip backup.sql.zip backup.sql
rm backup.sql
cd ..
day=`date -d "$days days ago" +%Y%m%d`
# delete backups older than the retention period
rm -rf $day
00 02 * * * sh /root/mysqlbackup.sh    # run the backup script every day at 02:00
* * * * * command
minute  hour  day  month  weekday  command
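A minimal way to install the job without opening the crontab editor (assuming the script was saved as /root/mysqlbackup.sh, as above):
(crontab -l 2>/dev/null; echo "00 02 * * * sh /root/mysqlbackup.sh") | crontab -
crontab -l    # verify the entry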
System environment
# check the java version
java -version
#output: openjdk version "1.8.0_161" OpenJDK Runtime Environment (build 1.8.0_161-b14) OpenJDK 64-Bit Server VM (build 25.161-b14, mixed mode)
# check javac
javac -version
#output
javac 1.8.0_161
yum -y localinstall jdk-8u73-linux-x64.rpm
mkdir /elk/
tar -zxvf elasticsearch-6.4.3.tar.gz
mv elasticsearch-6.4.3 /elk/
cd /elk/elasticsearch-6.4.3/config
vim elasticsearch.yml
# modify the following parameters:
cluster.name: oss-application     # custom name; must be identical on every node of the cluster
node.name: centos-k8s003          # custom; using the machine name makes nodes easy to tell apart
network.host: centos-k8s003       # this machine's hostname
http.port: 9200
node.master: true                 # whether this node may be elected master; a node can also decline master duty and only store data
node.data: true                   # whether this node stores data; it can also store no data and act as master only
discovery.zen.ping.unicast.hosts: [centos-k8s003, centos-k8s005, centos-k8s006, centos-k8s007]    # the other nodes of the elasticsearch cluster
Adjust system parameters so the host has enough resources to start ES. Kernel parameters:
vi /etc/sysctl.conf
# add the following parameter
vm.max_map_count=655360
# run the following so the setting takes effect:
sysctl -p
Resource limits:
vi /etc/security/limits.conf
# add or modify:
* soft nofile 65536
* hard nofile 131072
* soft nproc 65536
* hard nproc 131072
Add a dedicated user and set permissions. ElasticSearch 5 and later refuses to run as root, so create a user to start it:
groupadd elk                # create the elk group
useradd elk -g elk          # create the elk user and add it to the group
chown -R elk:elk /elk/
su elk
cd /elk/elasticsearch-6.4.3/bin/
./elasticsearch -d          # start in the background
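Once the nodes are started, the cluster state can be checked from any machine (a quick sanity check; the host below matches the network.host set above, and status should become green or yellow):
curl http://centos-k8s003:9200/_cluster/health?pretty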
unzip -d /elk/ logstash-6.4.3.zip
# config/logstash-simple.conf (referenced when starting logstash below):
input {
beats {
type => "oss-server-log"
port => 5044
}
}
output {
elasticsearch {
    hosts => ["http://centos-k8s003:9200","http://centos-k8s005:9200","http://centos-k8s006:9200","http://centos-k8s007:9200"]    # elasticsearch nodes
    index => "%{[fields][source]}-%{[fields][alilogtype]}-%{+YYYY.MM.dd}"    # index name pattern
  }
}
}
cd bin/
nohup ./logstash -f config/logstash-simple.conf &
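The beats input above expects shippers on port 5044 that tag events with fields.source and fields.alilogtype, which logstash uses to build the index name. A minimal filebeat.yml sketch for an OSS node; the log path and field values here are illustrative and must be adapted to the actual application logs:
filebeat.inputs:
- type: log
  paths:
    - /root/www2/oss2/log/*.log
  fields:
    source: oss-server
    alilogtype: applog
output.logstash:
  hosts: ["centos-k8s003:5044"]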
tar -zxvf kibana-6.4.3-linux-x86_64.tar.gz
mv kibana-6.4.3-linux-x86_64 /elk/
# edit config/kibana.yml:
server.port: 5601                                   # default port 5601
server.host: centos-k8s003                          # address the site binds to
elasticsearch.url: "http://centos-k8s003:9200"      # address of the elasticsearch service
cd /elk/kibana-6.4.3-linux-x86_64/bin/
nohup ./kibana &
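A quick check that Kibana is serving (it listens on the server.port configured above; through the haproxy frontend set up earlier it is also exposed on port 5602):
curl -I http://centos-k8s003:5601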