1: Install Docker via yum
yum -y install gcc
yum -y install gcc-c++
yum remove docker \
           docker-client \
           docker-client-latest \
           docker-common \
           docker-latest \
           docker-latest-logrotate \
           docker-logrotate \
           docker-engine
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates | sort -r
yum -y install docker-ce-18.06.3.ce
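After the packages are installed, it is worth enabling the service to start on boot and confirming the installed version; a minimal check:
systemctl enable docker
docker -v    # should print 18.06.3-ce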
2: Configure a registry mirror for Docker
vim /etc/docker/daemon.json
{ "registry-mirrors": ["https://r5yuqwmu.mirror.aliyuncs.com"], "data-root": "/home/data/docker", "log-driver": "json-file", "log-opts": {"max-size":"50m", "max-file":"1"} }
Start Docker: systemctl start docker
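daemon.json is only read when the daemon starts, so if it was edited while Docker was already running, restart the service and confirm the mirror and data root were picked up:
systemctl restart docker
docker info | grep -A 2 "Registry Mirrors"
docker info | grep "Docker Root Dir"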
Official documentation: https://docs.docker.com/engine/
Two ways to obtain a Docker image (using MySQL as the example):
1: docker pull mysql:5.7
Pull a publicly available image from Docker Hub; you can search for the one you want at https://hub.docker.com/search?q=&type=image&category=database
2: docker build -t mysql_self .
Write a custom Dockerfile to build your own image on top of a public one (the install_db.sh it copies in is sketched after the Dockerfile).
# Base image: mysql:5.7
FROM mysql:5.7
# Author
MAINTAINER author
ENV WORK_PATH /usr/local/work
# Directory whose contents the container executes automatically on first start
ENV AUTO_RUN_DIR /docker-entrypoint-initdb.d
# Name of the SQL file holding the initial data
ENV FILE_0 myself.sql
ENV LANG en_GB.utf8
# Copy the MySQL configuration file into the image
COPY my.cnf /etc
RUN yes | cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Name of the shell script that imports the data
ENV INSTALL_DB_SHELL install_db.sh
# Create the working directory
RUN mkdir -p $WORK_PATH
# Copy the database initialisation SQL into the working directory
COPY ./$FILE_0 $WORK_PATH/
# Put the shell script under /docker-entrypoint-initdb.d/ so the container runs it automatically
COPY ./$INSTALL_DB_SHELL $AUTO_RUN_DIR/
WORKDIR $WORK_PATH
# Make the script executable
RUN chmod a+x $AUTO_RUN_DIR/$INSTALL_DB_SHELL
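The original does not show install_db.sh itself. A minimal, hypothetical sketch that imports myself.sql during the image's first-start init phase (the database name esen_approval is only an assumption taken from the canal filter regex used later):
#!/bin/bash
# install_db.sh (hypothetical sketch): executed by the mysql:5.7 entrypoint while
# a temporary server is reachable over the local socket.
mysql -uroot -p"$MYSQL_ROOT_PASSWORD" <<EOF
CREATE DATABASE IF NOT EXISTS esen_approval DEFAULT CHARACTER SET utf8mb4;
USE esen_approval;
SOURCE $WORK_PATH/$FILE_0;
EOF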
Once the images have been downloaded, you can create and run the containers. Below, MySQL, canal, Kafka and ZooKeeper are deployed as a worked example of this basic infrastructure.
# Create a custom network
docker network create --subnet=172.18.0.0/16 mynetwork
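The fixed subnet is what lets the later run commands pin addresses such as 172.18.0.36 (MySQL) and 172.18.0.37 (ZooKeeper). To confirm the network exists with the expected subnet:
docker network inspect mynetwork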
For the available docker run options, see: https://docs.docker.com/engine/reference/commandline/run/
docker run -p 3306:3306 --name mysql_self --net mynetwork --ip 172.18.0.36 -v /home/data/docker/mysql/logs:/logs -v /home/data/docker/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=Abcd12345 -d --restart=always mysql_self:latest
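Once the container is up, a quick connectivity check using the root password from the run command above:
docker exec -it mysql_self mysql -uroot -pAbcd12345 -e "SHOW DATABASES;"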
docker run -d --name canal -p 11111:11111 --net mynetwork --ip 172.18.0.41 -e canal.destinations=self -e canal.instance.mysql.slaveId=12 -e canal.auto.scan=false -e canal.instance.master.address=172.18.0.36:3306 -e canal.instance.dbUsername=canal -e canal.instance.dbPassword=canal -e canal.instance.filter.regex=esen_approval.apt_approval --restart=always canal/canal-server:v1.1.4
For canal to work with MySQL, a few points need attention:
1: canal.instance.master.address must point to the IP and port of the MySQL container.
2: canal.instance.mysql.slaveId must not be the same as the MySQL server's server_id, and MySQL's binlog must be enabled; check it with: show variables like 'log_bin'
3: canal.instance.dbUsername and canal.instance.dbPassword must already exist in MySQL before the canal container starts (see the sketch after this list).
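Points 2 and 3 are MySQL-side preparation: the binlog must be on in ROW format, and a canal account with replication privileges has to exist before canal starts. A minimal sketch of that setup, assuming the my.cnf baked into the image above (its exact contents are not shown in the original) and the mysql_self container from the earlier run command:
# assumed my.cnf entries under [mysqld]:
#   log-bin=mysql-bin
#   binlog-format=ROW
#   server_id=1          # must differ from canal.instance.mysql.slaveId=12

docker exec -it mysql_self mysql -uroot -pAbcd12345 -e "
  CREATE USER 'canal'@'%' IDENTIFIED BY 'canal';
  GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
  FLUSH PRIVILEGES;
  SHOW VARIABLES LIKE 'log_bin';"
If the canal instance still cannot connect, docker logs canal (or the instance log under /home/admin/canal-server/logs/ inside the container) usually shows the reason.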
docker run -d --name zookeeper -p 2181:2181 --net mynetwork --ip 172.18.0.37 -v /home/data/docker/zookeeper/logs:/datalog -v /home/data/docker/zookeeper/data:/data --restart=always -t zookeeper:3.4.14
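A quick health check for ZooKeeper (assuming zkServer.sh is on the PATH inside the official image; the nc variant needs netcat on the host, since port 2181 is published):
docker exec -it zookeeper zkServer.sh status
echo stat | nc 127.0.0.1 2181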
docker run -d --name kafka -p 9092:9092 -p 19092:19092 --net mynetwork --ip 172.18.0.38 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=172.18.0.37:2181 -e KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL -e KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT -e KAFKA_ADVERTISED_LISTENERS=INTERNAL://172.18.0.38:19092,EXTERNAL://<public-IP>:9092 -e KAFKA_LISTENERS=INTERNAL://0.0.0.0:19092,EXTERNAL://0.0.0.0:9092 --restart=always -t wurstmeister/kafka
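With these listeners, containers on mynetwork connect via 172.18.0.38:19092 while clients outside the host use <public-IP>:9092. A quick smoke test against the internal listener from inside the container (the Kafka CLI scripts are on the PATH in the wurstmeister image; --bootstrap-server assumes a Kafka 2.2+ build):
docker exec -it kafka kafka-topics.sh --bootstrap-server 172.18.0.38:19092 --create --topic smoke_test --partitions 1 --replication-factor 1
docker exec -it kafka kafka-topics.sh --bootstrap-server 172.18.0.38:19092 --list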
Install docker-compose (the two curl commands are alternatives; the DaoCloud mirror and the GitHub release provide the same binary):
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
# or, from GitHub:
sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
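After downloading, confirm the binary is executable and on the PATH:
docker-compose --version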
For the compatibility matrix between Compose file versions and Docker Engine versions, see: https://docs.docker.com/compose/compose-file/
version: '3.7'
services:
  mysql_hermes:
    build: /root/data/docker/mysql/mysql_hermes
    container_name: mysql_hermes
    environment:
      - "MYSQL_ROOT_PASSWORD=Abcd12345"
    ports:
      - "3306:3306"
    restart: always
    volumes:
      - "/root/data/docker/mysql/logs:/logs"
      - "/root/data/docker/mysql/data:/var/lib/mysql"
  zookeeper:
    image: zookeeper:3.4.14
    container_name: zookeeper
    restart: always
    hostname: zookeeper
    ports:
      - 2181:2181
    volumes:
      - /root/data/docker/zookeeper/logs:/datalog
      - /root/data/docker/zookeeper/data:/data
    networks:
      default:
        ipv4_address: 172.18.0.37
  kafka:
    image: wurstmeister/kafka:latest
    container_name: kafka
    restart: always
    ports:
      - 9092:9092
      - 19092:19092
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 172.18.0.37:2181
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://172.18.0.38:19092,EXTERNAL://47.99.72.114:9092
      KAFKA_LISTENERS: INTERNAL://0.0.0.0:19092,EXTERNAL://0.0.0.0:9092
    volumes:
      - /root/data/docker/kafka/data:/kafka
    depends_on:
      - zookeeper
    networks:
      default:
        ipv4_address: 172.18.0.38
  canal:
    image: canal/canal-server:v1.1.4
    restart: always
    container_name: canal
    depends_on:
      - mysql_hermes
    ports:
      - 11111:11111
    environment:
      - canal.instance.mysql.slaveId=12
      - canal.auto.scan=false
      - canal.destinations=hermes
      - canal.instance.master.address=mysql_hermes:3306
      - canal.instance.dbUsername=canal
      - canal.instance.dbPassword=canal
      - canal.instance.filter.regex=esen_approval.apt_approval
networks:
  default:
    ipam:
      config:
        - subnet: 172.18.0.0/16
Start: docker-compose -f xx.yml up -d
Stop: docker-compose -f xx.yml down
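Once the stack is up, these commands help verify it (same xx.yml placeholder as above):
docker-compose -f xx.yml config          # validate the compose file
docker-compose -f xx.yml ps              # list the services and their state
docker-compose -f xx.yml logs -f canal   # follow a single service's logs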