docker network create --driver bridge --subnet 10.0.0.0/24 --gateway 10.0.0.1 monitor
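To confirm the bridge came up with the intended address range, it can be inspected right away (a quick check, not part of the original setup):

# verify the subnet and gateway of the new network
docker network inspect monitor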
Grafana
# pull the Grafana image
docker pull grafana/grafana
/etc/grafana/        # Grafana configuration directory
/var/lib/grafana     # Grafana data directory
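Both paths live inside the Grafana container. To look at the default configuration file, docker cp can copy it out once the grafana container started below is running (an illustrative check, not part of the original steps):

docker cp grafana:/etc/grafana/grafana.ini ./grafana.ini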
# key environment variables
GF_SERVER_ROOT_URL=http://grafana.server.name                             # Grafana access URL
GF_SECURITY_ADMIN_PASSWORD=secret                                         # Grafana admin login password
GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource     # plugins to install at startup

# create a data-volume container to hold Grafana's storage
docker run \
  -d \
  -v /var/lib/grafana \
  --name grafana-storage \
  busybox:latest
# start the Grafana container
docker run \
  -d \
  -p 3000:3000 \
  --name grafana \
  --volumes-from grafana-storage \
  -e "GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource" \
  -e "GF_SERVER_ROOT_URL=http://10.0.0.10:3000" \
  -e "GF_SECURITY_ADMIN_PASSWORD=marion" \
  --network monitor \
  --ip 10.0.0.10 \
  --restart always \
  grafana/grafana

# check where the config file and data directory are mounted
docker inspect grafana
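To confirm Grafana is up before wiring in data sources, query its health endpoint; the address matches the --ip assigned above:

curl http://10.0.0.10:3000/api/health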
# start cAdvisor to collect container metrics
sudo docker run \
  --volume=/:/rootfs:ro \
  --volume=/var/run:/var/run:rw \
  --volume=/sys:/sys:ro \
  --volume=/var/lib/docker/:/var/lib/docker:ro \
  --volume=/dev/disk/:/dev/disk:ro \
  --detach=true \
  --name=cadvisor \
  --network monitor \
  --ip 10.0.0.11 \
  google/cadvisor:latest
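cAdvisor exposes its Prometheus metrics on port 8080; a quick spot check from the Docker host (which can reach the monitor bridge directly) shows whether it is collecting data:

curl -s http://10.0.0.11:8080/metrics | head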
global:
  scrape_interval: 60s
  evaluation_interval: 60s

scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:9090']
        labels:
          instance: prometheus

  - job_name: 'cAdvisor'
    static_configs:
      - targets: ['10.0.0.11:8080']
        labels:
          instance: db1
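The file can be validated before starting the server with promtool, which ships inside the prom/prometheus image; this sketch assumes a recent 2.x image, where the subcommand is check config, and reuses the host path mounted in the next step:

docker run --rm \
  -v /root/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml \
  --entrypoint promtool \
  prom/prometheus check config /etc/prometheus/prometheus.yml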
docker run \
  -dit \
  -v /root/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml \
  --network monitor \
  --ip 10.0.0.12 \
  prom/prometheus
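Once Prometheus is running, its targets API reports whether the cAdvisor endpoint is being scraped; the address matches the --ip assigned above:

curl -s http://10.0.0.12:9090/api/v1/targets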
prometheus:
  image: prom/prometheus:latest
  container_name: monitoring_prometheus
  restart: unless-stopped
  volumes:
    - ./data/prometheus/config:/etc/prometheus/
    - ./data/prometheus/data:/prometheus
  command:
    - '-config.file=/etc/prometheus/prometheus.yml'
    - '-storage.local.path=/prometheus'
    - '-alertmanager.url=http://alertmanager:9093'
  expose:
    - 9090
  ports:
    - 9090:9090
  links:
    - cadvisor:cadvisor
    - node-exporter:node-exporter

node-exporter:
  image: prom/node-exporter:latest
  container_name: monitoring_node_exporter
  restart: unless-stopped
  expose:
    - 9100

cadvisor:
  image: google/cadvisor:latest
  container_name: monitoring_cadvisor
  restart: unless-stopped
  volumes:
    - /:/rootfs:ro
    - /var/run:/var/run:rw
    - /sys:/sys:ro
    - /var/lib/docker/:/var/lib/docker:ro
  expose:
    - 8080

grafana:
  image: grafana/grafana:latest
  container_name: monitoring_grafana
  restart: unless-stopped
  links:
    - prometheus:prometheus
  volumes:
    - ./data/grafana:/var/lib/grafana
  environment:
    - GF_SECURITY_ADMIN_PASSWORD=MYPASSWORT
    - GF_USERS_ALLOW_SIGN_UP=false
    - GF_SERVER_DOMAIN=myrul.com
    - GF_SMTP_ENABLED=true
    - GF_SMTP_HOST=smtp.gmail.com:587
    - GF_SMTP_USER=myadrress@gmail.com
    - GF_SMTP_PASSWORD=mypassword
    - GF_SMTP_FROM_ADDRESS=myaddress@gmail.com
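The prometheus.yml below also lists an nginx-exporter:9113 target that has no matching service in the compose file above. A minimal definition might look like the following; the nginx/nginx-prometheus-exporter image, the scrape URI, and the backing nginx service are assumptions, not part of the original stack:

nginx-exporter:
  image: nginx/nginx-prometheus-exporter:latest      # assumed exporter image
  container_name: monitoring_nginx_exporter
  restart: unless-stopped
  command:
    - '--nginx.scrape-uri=http://nginx:8080/stub_status'   # assumes an nginx service exposing stub_status
  expose:
    - 9113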
# my global config
global:
  scrape_interval: 120s     # By default, scrape targets every 15 seconds.
  evaluation_interval: 120s # By default, scrape targets every 15 seconds.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'my-project'

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
  # - "alert.rules"
  # - "first.rules"
  # - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 120s

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090','cadvisor:8080','node-exporter:9100', 'nginx-exporter:9113']
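A quick way to confirm that container metrics are actually flowing is to hit the Prometheus query API once the stack is up; the metric name is the standard one exported by cAdvisor, and localhost:9090 assumes the port mapping from the compose file:

# query per-container CPU usage as a 5-minute rate
curl -s -G 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=rate(container_cpu_usage_seconds_total[5m])'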
docker-compose up -d
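After bringing the stack up, check the container states and, if a target stays down, the Prometheus logs (standard docker-compose subcommands):

docker-compose ps
docker-compose logs -f prometheus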