E: elasticsearch 存儲數據 java L: logstash 收集,過濾,轉發,匹配 java K: kibana 過濾,分析,圖形展現 java F: filebeat 收集日誌,過濾 go
1.找出訪問網站頻次最高的 IP 排名前十 2.找出訪問網站排名前十的 URL 3.找出中午 10 點到 2 點之間 www 網站訪問頻次最高的 IP 4.對比昨天這個時間段和今天這個時間段訪問頻次有什麼變化 5.對比上週這個時間和今天這個時間的區別 6.找出特定的頁面被訪問了多少次 7.找出有問題的 IP 地址,並告訴我這個 IP 地址都訪問了什麼頁面,在對比前幾天他來過嗎?他從什麼時間段開 始訪問的,什麼時間段走了 8.找出來訪問最慢的前十個頁面並統計平均響應時間,對比昨天這也頁面訪問也這麼慢嗎? 9.找出搜索引擎今天各抓取了多少次?抓取了哪些頁面?響應時間如何? 10.找出僞形成搜索引擎的 IP 地址 11.5 分鐘以內告訴我結果
代理層: nginx haproxy web層: nginx tomcat java php db層: mysql mongo redis es 系統層: message secure
# Configure single-node Elasticsearch, wipe old ES/Kibana data, restart both.
cat >/etc/elasticsearch/elasticsearch.yml <<EOF
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: true
network.host: 10.0.0.51,127.0.0.1
http.port: 9200
EOF

systemctl stop elasticsearch
systemctl stop kibana
rm -rf /var/lib/elasticsearch/*   # drop all existing indices
rm -rf /var/lib/kibana/*
systemctl start elasticsearch
systemctl start kibana
netstat -lntup|grep 9200          # ES REST port up?
netstat -lntup|grep 5601          # Kibana port up?

# Sync clocks so log timestamps agree across nodes.
ntpdate time1.aliyun.com
[root@db-01 ~]# cat /etc/yum.repos.d/nginx.repo [nginx-stable] name=nginx stable repo baseurl=http://nginx.org/packages/centos/$releasever/$basearch/ gpgcheck=0 enabled=1 gpgkey=https://nginx.org/keys/nginx_signing.key [nginx-mainline] name=nginx mainline repo baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/ gpgcheck=0 enabled=0 gpgkey=https://nginx.org/keys/nginx_signing.key yum makecache fast yum install nginx -y systemctl start nginx
# Install filebeat 6.6.0 from a local rpm; list its config files.
rpm -ivh filebeat-6.6.0-x86_64.rpm
rpm -qc filebeat

# Keep a backup of the stock config, then ship the nginx access log
# directly to Elasticsearch.
cp /etc/filebeat/filebeat.yml /opt/
cat >/etc/filebeat/filebeat.yml<<EOF
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
output.elasticsearch:
  hosts: ["10.0.0.51:9200"]
EOF

systemctl start filebeat
tail -f /var/log/filebeat/filebeat   # watch for startup errors
es-head查看
Management >> Index Patterns >> filebeat-6.6.0-2019.11.15 >>@timestamp >>create >> discover
全部日誌都存儲在message的value裏,不能拆分單獨顯示
能夠把日誌全部字段拆分出來 { $remote_addr : 192.168.12.254 - : - $remote_user : - [$time_local]: [10/Sep/2019:10:52:08 +0800] $request: GET /jhdgsjfgjhshj HTTP/1.0 $status : 404 $body_bytes_sent : 153 $http_referer : - $http_user_agent :ApacheBench/2.3 $http_x_forwarded_for:- }
如何使nginx日誌格式轉換成咱們想要的json格式
log_format json '{ "time_local": "$time_local", ' '"remote_addr": "$remote_addr", ' '"referer": "$http_referer", ' '"request": "$request", ' '"status": $status, ' '"bytes": $body_bytes_sent, ' '"agent": "$http_user_agent", ' '"x_forwarded": "$http_x_forwarded_for", ' '"up_addr": "$upstream_addr",' '"up_host": "$upstream_http_host",' '"upstream_time": "$upstream_response_time",' '"request_time": "$request_time"' ' }'; access_log /var/log/nginx/access.log json;
清除舊日誌
> /var/log/nginx/access.log
檢查並重啓 nginx
nginx -t systemctl restart nginx
經過查看發現,雖然nginx日誌變成了json,可是es裏仍是存儲在message裏仍然不能拆分
如何在ES裏展現的是json格式
cat >/etc/filebeat/filebeat.yml<<EOF filebeat.inputs: - type: log enabled: true paths: - /var/log/nginx/access.log json.keys_under_root: true json.overwrite_keys: true output.elasticsearch: hosts: ["10.0.0.51:9200"] EOF
es-head >> filebeat-6.6.0-2019.11.15 >> 動做 >>刪除
systemctl restart filebeat
nginx-6.6.0-2019.11.15
cat >/etc/filebeat/filebeat.yml<<EOF filebeat.inputs: - type: log enabled: true paths: - /var/log/nginx/access.log json.keys_under_root: true json.overwrite_keys: true output.elasticsearch: hosts: ["10.0.0.51:9200"] index: "nginx-%{[beat.version]}-%{+yyyy.MM}" setup.template.name: "nginx" setup.template.pattern: "nginx-*" setup.template.enabled: false setup.template.overwrite: true EOF
cat >/etc/filebeat/filebeat.yml<<EOF filebeat.inputs: - type: log enabled: true paths: - /var/log/nginx/access.log json.keys_under_root: true json.overwrite_keys: true - type: log enabled: true paths: - /var/log/nginx/error.log output.elasticsearch: hosts: ["10.0.0.51:9200"] indices: - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}" when.contains: source: "/var/log/nginx/access.log" - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}" when.contains: source: "/var/log/nginx/error.log" setup.template.name: "nginx" setup.template.pattern: "nginx-*" setup.template.enabled: false setup.template.overwrite: true EOF
cat >/etc/filebeat/filebeat.yml<<EOF filebeat.inputs: - type: log enabled: true paths: - /var/log/nginx/access.log json.keys_under_root: true json.overwrite_keys: true tags: ["access"] - type: log enabled: true paths: - /var/log/nginx/error.log tags: ["error"] output.elasticsearch: hosts: ["10.0.0.51:9200"] indices: - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}" when.contains: tags: "access" - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}" when.contains: tags: "error" setup.template.name: "nginx" setup.template.pattern: "nginx-*" setup.template.enabled: false setup.template.overwrite: true EOF
# Stop tomcat, switch the access-log valve pattern (server.xml line 162)
# to emit JSON, then start tomcat again.
/opt/tomcat/bin/shutdown.sh
sed -n '162p' /opt/tomcat/conf/server.xml
# expected line 162 content after editing:
# pattern="{&quot;clientip&quot;:&quot;%h&quot;,&quot;ClientUser&quot;:&quot;%l&quot;,&quot;authenticated&quot;:&quot;%u&quot;,&quot;AccessTime&quot;:&quot;%t&quot;,&quot;method&quot;:&quot;%r&quot;,&quot;status&quot;:&quot;%s&quot;,&quot;SendBytes&quot;:&quot;%b&quot;,&quot;Query?string&quot;:&quot;%q&quot;,&quot;partner&quot;:&quot;%{Referer}i&quot;,&quot;AgentVersion&quot;:&quot;%{User-Agent}i&quot;}"/>
/opt/tomcat/bin/startup.sh
# Ship the (now JSON) tomcat access logs to a dedicated monthly index.
cat >/etc/filebeat/filebeat.yml <<EOF
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /opt/tomcat/logs/localhost_access_log.*.txt
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["tomcat"]
output.elasticsearch:
  hosts: ["10.0.0.51:9200"]
  index: "tomcat_access-%{[beat.version]}-%{+yyyy.MM}"
setup.template.name: "tomcat"
setup.template.pattern: "tomcat_*"
setup.template.enabled: false
setup.template.overwrite: true
EOF

systemctl restart filebeat
[root@db01 ~]# cat /etc/filebeat/filebeat.yml filebeat.inputs: - type: log enabled: true paths: - /var/log/elasticsearch/elasticsearch.log multiline.pattern: '^\[' multiline.negate: true multiline.match: after output.elasticsearch: hosts: ["10.0.0.51:9200"] index: "es-%{[beat.version]}-%{+yyyy.MM}" setup.template.name: "es" setup.template.pattern: "es-*" setup.template.enabled: false setup.template.overwrite: true
systemctl stop nginx rm -rf /var/log/nginx/* 本身修改日誌格式爲main的普通格式 systemctl start nginx
cd /usr/share/elasticsearch/ ./bin/elasticsearch-plugin install file:///root/ingest-geoip-6.6.0.zip ./bin/elasticsearch-plugin install file:///root/ingest-user-agent-6.6.0.zip systemctl restart elasticsearch
filebeat.config.modules: path: ${path.config}/modules.d/*.yml reload.enabled: true reload.period: 10s
filebeat modules --list filebeat enable nginx
[root@web01 ~]# cat /etc/filebeat/modules.d/nginx.yml - module: nginx access: enabled: true var.paths: ["/var/log/nginx/*.log"] error: enabled: true var.paths: ["/var/log/nginx/error.log"]
# Route module output to per-vhost indices by source file path.
# NOTE: the heredoc delimiter MUST be quoted ('EOF'): otherwise bash tries
# to expand ${path.config} and aborts with "bad substitution".
cat >/etc/filebeat/filebeat.yml <<'EOF'
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: true
  reload.period: 10s
output.elasticsearch:
  hosts: ["10.0.0.51:9200"]
  indices:
    - index: "nginx-www-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        source: "/var/log/nginx/www.log"
    - index: "nginx-blog-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        source: "/var/log/nginx/blog.log"
    - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        source: "/var/log/nginx/error.log"
setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
EOF

systemctl restart filebeat
# Enable the slow query log in my.cnf ([mysqld] section):
#   slow_query_log=ON
#   slow_query_log_file=/var/log/mariadb/slow.log
#   long_query_time=1
# The log path above is MariaDB's; on CentOS 7 the unit is "mariadb" —
# "systemctl restart mysql" fails because no mysql.service exists.
systemctl restart mariadb

# Produce a slow query: sleep(2) runs per row, well over long_query_time.
mysql -uroot -poldboy123 -e "select sleep(2) user,host from mysql.user;"

# Confirm the slow-log settings took effect.
mysql -uroot -poldboy123 -e "show variables like '%slow_query_log%'"
# Enable the mysql module — the subcommand is "modules" (plural);
# "filebeat module enable" is not a valid command.
filebeat modules enable mysql

# /etc/filebeat/modules.d/mysql.yml — the file is a YAML *list* of modules,
# so the leading "- " before "module: mysql" is required.
cat >/etc/filebeat/modules.d/mysql.yml <<'EOF'
- module: mysql
  error:
    enabled: true
    var.paths: ["/var/log/mariadb/mariadb.log"]
  slowlog:
    enabled: true
    var.paths: ["/var/log/mariadb/slow.log"]
EOF
filebeat.config.modules: path: ${path.config}/modules.d/*.yml reload.enabled: true reload.period: 10s output.elasticsearch: hosts: ["10.0.0.51:9200"] indices: - index: "mysql_slowlog-%{[beat.version]}-%{+yyyy.MM}" when.contains: fileset.module: "mysql" fileset.name: "slowlog" - index: "mysql_error-%{[beat.version]}-%{+yyyy.MM}" when.contains: fileset.module: "mysql" fileset.name: "error" setup.template.name: "mysql" setup.template.pattern: "mysql_*" setup.template.enabled: false setup.template.overwrite: true
systemctl restart filebeat
# Install docker-ce from a domestic mirror.
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
sed -i 's#download.docker.com#mirrors.tuna.tsinghua.edu.cn/docker-ce#g' /etc/yum.repos.d/docker-ce.repo
yum install docker-ce -y
systemctl start docker

# Free ports 80/8080, then run two nginx containers on them.
systemctl stop nginx
pkill java
docker run -d -p 80:80 nginx
docker run -d -p 8080:80 nginx

# Follow one container's stdout/stderr log stream.
docker logs -f ce22c2583da5
# Collect logs from every container via the docker input.
# NOTE: overwrite with ">" — the original ">>" *appended* a second config
# to the existing filebeat.yml, producing duplicate top-level keys
# (filebeat.inputs, output.elasticsearch) and an invalid configuration.
cat >/etc/filebeat/filebeat.yml<<EOF
filebeat.inputs:
- type: docker
  containers.ids:
    - '*'
output.elasticsearch:
  hosts: ["10.0.0.51:9200"]
  index: "docker-%{[beat.version]}-%{+yyyy.MM}"
setup.template.name: "docker"
setup.template.pattern: "docker-*"
setup.template.enabled: false
setup.template.overwrite: true
EOF

systemctl restart filebeat

# Generate 404 traffic on both containers to produce log events.
curl 127.0.0.1/11111111111111111111
curl 127.0.0.1:8080/22222222222222222222
docker-mysql-xxxx docker-nginx-xxxx
{ "log": "10.0.0.1 - - [18/Nov/2019:02:16:44 +0000] \"GET /web01 HTTP/1.1\" 404 555 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36\" \"-\"\n", "stream": "stdout", "time": "2019-11-18T02:16:44.010910131Z", "service": "nginx" }
# docker-compose comes from EPEL on CentOS.
yum install docker-compose -y

# Two nginx containers labelled "nginx" and "db"; the logging options make
# the "service" label appear in each json-file log record (attrs.service),
# which filebeat can route on.
cat >docker-compose.yml<<EOF
version: '3'
services:
  nginx:
    image: nginx:latest
    labels:
      service: nginx
    logging:
      options:
        labels: "service"
    ports:
      - "80:80"
  db:
    image: nginx:latest
    labels:
      service: db
    logging:
      options:
        labels: "service"
    ports:
      - "8080:80"
EOF

# Replace the hand-started containers with the compose stack.
docker stop $(docker ps -q)
docker rm $(docker ps -qa)
docker-compose up -d
# Read the raw json-file logs and route by the attrs.service label.
# FIX: the last setup line was truncated ("setup.template.overwrite"
# without a value) — it must be "setup.template.overwrite: true".
cat >/etc/filebeat/filebeat.yml<<EOF
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/lib/docker/containers/*/*-json.log
  json.keys_under_root: true
  json.overwrite_keys: true
output.elasticsearch:
  hosts: ["10.0.0.51:9200"]
  indices:
    - index: "docker-nginx-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        attrs.service: "nginx"
    - index: "docker-db-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        attrs.service: "db"
setup.template.name: "docker"
setup.template.pattern: "docker-*"
setup.template.enabled: false
setup.template.overwrite: true
EOF

systemctl restart filebeat

# Generate traffic on both services.
curl 127.0.0.1/nginxxxxxxxx
curl 127.0.0.1:8080/dbbbbbbbbbbbbb
錯誤日誌字段: stream:stderr 正常日誌字段: stream:stdout
# Split further by stream: stdout → access index, stderr → error index.
# FIX: the original used "<EOF" (input redirection from a file literally
# named EOF) instead of the heredoc operator "<<EOF", so the cat failed.
cat >/etc/filebeat/filebeat.yml<<EOF
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/lib/docker/containers/*/*-json.log
  json.keys_under_root: true
  json.overwrite_keys: true
output.elasticsearch:
  hosts: ["10.0.0.51:9200"]
  indices:
    - index: "docker-nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        stream: "stdout"
        attrs.service: "nginx"
    - index: "docker-nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        stream: "stderr"
        attrs.service: "nginx"
    - index: "docker-db-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        stream: "stdout"
        attrs.service: "db"
    - index: "docker-db-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        stream: "stderr"
        attrs.service: "db"
setup.template.name: "docker"
setup.template.pattern: "docker-*"
setup.template.enabled: false
setup.template.overwrite: true
EOF

systemctl restart filebeat

# Generate traffic on both services.
curl 127.0.0.1/nginxxxxxxxx
curl 127.0.0.1:8080/dbbbbbbbbbbbbb
# Host directories that will receive the containers' log files.
mkdir /opt/{nginx,mysql}

# Bake an nginx image that logs in JSON, then run two containers with
# /var/log/nginx bind-mounted to the host so filebeat can tail the files.
docker ps
docker cp 容器ID:/etc/nginx/nginx.conf .
# edit nginx.conf: switch access_log to the json log_format
docker cp /etc/nginx/nginx.conf 容器ID:/etc/nginx/nginx.conf
# NOTE(review): the file was copied out to ./ but pushed back from
# /etc/nginx/ — confirm which copy was actually edited.
docker commit 容器ID nginx:v2
docker-compose stop
docker rm -f $(docker ps -a -q)
docker run -d -p 80:80 -v /opt/nginx:/var/log/nginx nginx:v2
docker run -d -p 8080:80 -v /opt/mysql:/var/log/nginx nginx:v2
# Tail the bind-mounted container logs; route the four streams by tag.
cat >/etc/filebeat/filebeat.yml<<EOF
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /opt/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["nginx_access"]
- type: log
  enabled: true
  paths:
    - /opt/nginx/error.log
  tags: ["nginx_error"]
- type: log
  enabled: true
  paths:
    - /opt/mysql/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["mysql_access"]
- type: log
  enabled: true
  paths:
    - /opt/mysql/error.log
  tags: ["mysql_error"]
output.elasticsearch:
  hosts: ["10.0.0.51:9200"]
  indices:
    - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "nginx_access"
    - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "nginx_error"
    - index: "mysql-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "mysql_access"
    - index: "mysql-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "mysql_error"
setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
EOF

systemctl restart filebeat

# Generate traffic on both containers.
curl 127.0.0.1/nginxxxxxxxx
curl 127.0.0.1:8080/dbbbbbbbbbbbbb
# Install redis and bind it to loopback plus the LAN address.
yum install redis
sed -i 's#^bind 127.0.0.1#bind 127.0.0.1 10.0.0.51#' /etc/redis.conf
systemctl start redis
netstat -lntup|grep redis
redis-cli -h 10.0.0.51   # connectivity smoke test

# Stop docker so its containers stop producing logs during the redis demo.
systemctl stop docker.service
cat >/etc/filebeat/filebeat.yml<<EOF filebeat.inputs: - type: log enabled: true paths: - /var/log/nginx/www.log json.keys_under_root: true json.overwrite_keys: true tags: ["access"] - type: log enabled: true paths: - /var/log/nginx/error.log tags: ["error"] output.redis: hosts: ["10.0.0.51"] keys: - key: "nginx_access" when.contains: tags: "access" - key: "nginx_error" when.contains: tags: "error" setup.template.name: "nginx" setup.template.pattern: "nginx_*" setup.template.enabled: false setup.template.overwrite: true EOF
# Create a www vhost that logs JSON to its own file, verify it serves,
# then confirm events are queuing up in redis.
systemctl stop nginx
> /var/log/nginx/www.log
cat >/etc/nginx/conf.d/www.conf <<EOF
server {
    listen 80;
    server_name www.mysun.com;
    access_log /var/log/nginx/www.log json;
    location / {
        root /code/www;
        index index.html index.htm;
    }
}
EOF
mkdir -p /code/www/
echo "web01 www" > /code/www/index.html
nginx -t
systemctl start nginx
echo "10.0.0.51 www.mysun.com" >> /etc/hosts
curl www.mysun.com/www
tail -f /var/log/nginx/www.log

systemctl restart filebeat

# Inspect the buffered events in the redis list.
redis-cli LRANGE nginx_access 0 -1
# Logstash needs a JVM.
yum install java -y

# Consume both redis lists, cast timing fields to float (so kibana can
# aggregate them), and write access/error to separate monthly indices.
cat >/etc/logstash/conf.d/redis.conf <<EOF
input {
  redis {
    host      => "10.0.0.51"
    port      => "6379"
    db        => "0"
    key       => "nginx_access"
    data_type => "list"
  }
  redis {
    host      => "10.0.0.51"
    port      => "6379"
    db        => "0"
    key       => "nginx_error"
    data_type => "list"
  }
}
filter {
  mutate {
    convert => ["upstream_time", "float"]
    convert => ["request_time", "float"]
  }
}
output {
  stdout {}
  if "access" in [tags] {
    elasticsearch {
      hosts           => "http://10.0.0.51:9200"
      manage_template => false
      index           => "nginx_access-%{+yyyy.MM}"
    }
  }
  if "error" in [tags] {
    elasticsearch {
      hosts           => "http://10.0.0.51:9200"
      manage_template => false
      index           => "nginx_error-%{+yyyy.MM}"
    }
  }
}
EOF

# Delete the stale ES indices first (via es-head), then test-run logstash
# in the foreground to watch events flow.
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis.conf

# Load-test: 2000 requests, 100 concurrent; watch the redis queue drain.
yum install httpd-tools -y
ab -c 100 -n 2000 www.mysun.com/www
redis-cli LLEN nginx_access

# Once verified, run logstash as a service.
systemctl start logstash

# Cluster prep: hostnames + passwordless ssh from db01 to db02/db03.
cat >/etc/hosts<<EOF
10.0.0.51 db01
10.0.0.52 db02
10.0.0.53 db03
EOF
ssh-keygen
ssh-copy-id 10.0.0.52
ssh-copy-id 10.0.0.53
db01 操作
cd /data/soft tar zxf zookeeper-3.4.11.tar.gz -C /opt/ ln -s /opt/zookeeper-3.4.11/ /opt/zookeeper mkdir -p /data/zookeeper cp /opt/zookeeper/conf/zoo_sample.cfg /opt/zookeeper/conf/zoo.cfg cat >/opt/zookeeper/conf/zoo.cfg<<EOF tickTime=2000 initLimit=10 syncLimit=5 dataDir=/data/zookeeper clientPort=2181 server.1=10.0.0.51:2888:3888 server.2=10.0.0.52:2888:3888 server.3=10.0.0.53:2888:3888 EOF echo "1" > /data/zookeeper/myid cat /data/zookeeper/myid rsync -avz /opt/zookeeper* 10.0.0.52:/opt/ rsync -avz /opt/zookeeper* 10.0.0.53:/opt/
db02 操作
mkdir -p /data/zookeeper echo "2" > /data/zookeeper/myid cat /data/zookeeper/myid
db03 操作
# db03: same data dir, node id 3.
mkdir -p /data/zookeeper
echo "3" > /data/zookeeper/myid
cat /data/zookeeper/myid

# Start zookeeper on every node, then check who is leader/follower.
/opt/zookeeper/bin/zkServer.sh start
/opt/zookeeper/bin/zkServer.sh status
在一個節點上執行,建立一個節點並寫入測試數據
/opt/zookeeper/bin/zkCli.sh -server 10.0.0.51:2181 create /test "hello"
在其他節點上看能否接收到
/opt/zookeeper/bin/zkCli.sh -server 10.0.0.52:2181 get /test
cd /data/soft/ tar zxf kafka_2.11-1.0.0.tgz -C /opt/ ln -s /opt/kafka_2.11-1.0.0/ /opt/kafka mkdir /opt/kafka/logs cat >/opt/kafka/config/server.properties<<EOF broker.id=1 listeners=PLAINTEXT://10.0.0.51:9092 num.network.threads=3 num.io.threads=8 socket.send.buffer.bytes=102400 socket.receive.buffer.bytes=102400 socket.request.max.bytes=104857600 log.dirs=/opt/kafka/logs num.partitions=1 num.recovery.threads.per.data.dir=1 offsets.topic.replication.factor=1 transaction.state.log.replication.factor=1 transaction.state.log.min.isr=1 log.retention.hours=24 log.segment.bytes=1073741824 log.retention.check.interval.ms=300000 zookeeper.connect=10.0.0.51:2181,10.0.0.52:2181,10.0.0.53:2181 zookeeper.connection.timeout.ms=6000 group.initial.rebalance.delay.ms=0 EOF rsync -avz /opt/kafka* 10.0.0.52:/opt/ rsync -avz /opt/kafka* 10.0.0.53:/opt/
### db02 操作
# db02: listener address .52, broker.id=2.
sed -i "s#10.0.0.51:9092#10.0.0.52:9092#g" /opt/kafka/config/server.properties
sed -i "s#broker.id=1#broker.id=2#g" /opt/kafka/config/server.properties

# db03: listener address .53, broker.id=3.
sed -i "s#10.0.0.51:9092#10.0.0.53:9092#g" /opt/kafka/config/server.properties
sed -i "s#broker.id=1#broker.id=3#g" /opt/kafka/config/server.properties
# First start in the foreground (every node) to watch for errors.
/opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/server.properties
jps   # the Kafka JVM should be listed

# Create a test topic: 3 partitions, replicated to all 3 brokers.
/opt/kafka/bin/kafka-topics.sh --create --zookeeper 10.0.0.51:2181,10.0.0.52:2181,10.0.0.53:2181 --partitions 3 --replication-factor 3 --topic kafkatest

# Inspect partition leaders/replicas, then drop the test topic.
/opt/kafka/bin/kafka-topics.sh --describe --zookeeper 10.0.0.51:2181,10.0.0.52:2181,10.0.0.53:2181 --topic kafkatest
/opt/kafka/bin/kafka-topics.sh --delete --zookeeper 10.0.0.51:2181,10.0.0.52:2181,10.0.0.53:2181 --topic kafkatest

# End-to-end message test: producer on one node, consumer on another.
/opt/kafka/bin/kafka-topics.sh --create --zookeeper 10.0.0.51:2181,10.0.0.52:2181,10.0.0.53:2181 --partitions 3 --replication-factor 3 --topic messagetest
/opt/kafka/bin/kafka-console-producer.sh --broker-list 10.0.0.51:9092,10.0.0.52:9092,10.0.0.53:9092 --topic messagetest
/opt/kafka/bin/kafka-console-consumer.sh --zookeeper 10.0.0.51:2181,10.0.0.52:2181,10.0.0.53:2181 --topic messagetest --from-beginning
/opt/kafka/bin/kafka-topics.sh --list --zookeeper 10.0.0.51:2181,10.0.0.52:2181,10.0.0.53:2181

# Once verified, restart kafka detached on every node.
/opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
cat >/etc/filebeat/filebeat.yml <<EOF filebeat.inputs: - type: log enabled: true paths: - /var/log/nginx/access.log json.keys_under_root: true json.overwrite_keys: true tags: ["access"] - type: log enabled: true paths: - /var/log/nginx/error.log tags: ["error"] output.kafka: hosts: ["10.0.0.51:9092", "10.0.0.52:9092", "10.0.0.53:9092"] topic: 'filebeat' setup.template.name: "nginx" setup.template.pattern: "nginx_*" setup.template.enabled: false setup.template.overwrite: true EOF
# Logstash consumes the kafka topic (json codec re-parses the filebeat
# payload) and routes access/error to separate monthly ES indices.
cat >/etc/logstash/conf.d/kafka.conf <<EOF
input {
  kafka {
    bootstrap_servers => ["10.0.0.51:9092,10.0.0.52:9092,10.0.0.53:9092"]
    topics            => ["filebeat"]
    #group_id=>"logstash"
    codec             => "json"
  }
}
filter {
  mutate {
    convert => ["upstream_time", "float"]
    convert => ["request_time", "float"]
  }
}
output {
  stdout {}
  if "access" in [tags] {
    elasticsearch {
      hosts           => "http://10.0.0.51:9200"
      manage_template => false
      index           => "nginx_access-%{+yyyy.MM}"
    }
  }
  if "error" in [tags] {
    elasticsearch {
      hosts           => "http://10.0.0.51:9200"
      manage_template => false
      index           => "nginx_error-%{+yyyy.MM}"
    }
  }
}
EOF

# Foreground test run first, then as a service.
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/kafka.conf
systemctl start logstash
在 filter 區塊裏添加 remove_field 字段即可
filter { mutate { convert => ["upstream_time", "float"] convert => ["request_time", "float"] remove_field => [ "beat" ] } }