Deploying Logstash and Kafka with docker-compose
docker-compose.yml:

version: '3.3'
services:
  pod-logstash:
    image: logstash:7.13.4
    volumes:
      - /data/xtalpi/config/logstash-podlog.conf:/usr/share/logstash/config/logstash-podlog.conf
      - /data/xtalpi/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /data:/data
      - "/etc/localtime:/etc/localtime:ro"
      - "/etc/timezone:/etc/timezone:ro"
    restart: always
    command: ["/usr/share/logstash/bin/logstash","-f","/usr/share/logstash/config/logstash-podlog.conf"]
    # command: ["sleep","1000"]
    user: root
    links:
      - kafka:kafka
    depends_on:
      - kafka
    environment:
      TZ: Asia/Shanghai
  xpipline-logstash:
    image: logstash:7.13.4
    volumes:
      - /data/xtalpi/config/logstash-podlog.conf.xpipline:/usr/share/logstash/config/logstash-x-podlog.conf
      - /data/xtalpi/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /data:/data
      - "/etc/localtime:/etc/localtime:ro"
      - "/etc/timezone:/etc/timezone:ro"
    restart: always
    command: ["/usr/share/logstash/bin/logstash","-f","/usr/share/logstash/config/logstash-x-podlog.conf"]
    # command: ["sleep","1000"]
    user: root
    links:
      - kafka:kafka
    depends_on:
      - kafka
      - pod-logstash
    environment:
      TZ: Asia/Shanghai
  system-logstash:
    image: logstash:7.13.4
    volumes:
      - /data/xtalpi/config/logstash-syslog.conf:/usr/share/logstash/config/logstash-syslog.conf
      - /data/xtalpi/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /data:/data
      - "/etc/localtime:/etc/localtime:ro"
      - "/etc/timezone:/etc/timezone:ro"
    restart: always
    command: ["/usr/share/logstash/bin/logstash","-f","/usr/share/logstash/config/logstash-syslog.conf"]
    user: root
    depends_on:
      - kafka
    environment:
      TZ: Asia/Shanghai
  nginx:
    image: nginx:1.21.1
    volumes:
      - /data/xtalpi/config/nginx.conf:/etc/nginx/nginx.conf
      - /data/xtalpi/config/mime.types:/etc/nginx/mime.types
      - /data/log/nginx:/data/nginx/logs
      - /data:/data
      - "/etc/localtime:/etc/localtime:ro"
      - "/etc/timezone:/etc/timezone:ro"
    restart: always
    # command: ["sleep","1000000"]
    links:
      - getLog:getLog
    ports:
      - 8888:8888
    environment:
      TZ: Asia/Shanghai
  getLog:
    image: centos:centos7
    volumes:
      - /data/xtalpi/getLog:/data/xtalpi/getLog
      - "/etc/localtime:/etc/localtime:ro"
      - "/etc/timezone:/etc/timezone:ro"
      - /data:/data
    restart: always
    working_dir: "/data/xtalpi/getLog/"
    command: ["/data/xtalpi/getLog/getLog","-listen-addr","0.0.0.0:10000"]
    ports:
      - 10000:10000
    environment:
      TZ: Asia/Shanghai
  zookeeper:
    image: wurstmeister/zookeeper:3.4.6
    volumes:
      - "/etc/localtime:/etc/localtime:ro"
      - "/etc/timezone:/etc/timezone:ro"
    ports:
      - "2181:2181"
    environment:
      TZ: Asia/Shanghai
  kafka:
    image: wurstmeister/kafka:2.12-2.5.0
    volumes:
      - /data/log/kafka:/kafka
      - "/etc/localtime:/etc/localtime:ro"
      - "/etc/timezone:/etc/timezone:ro"
    user: root
    ports:
      - 9092:9092
    links:
      - zookeeper:zk
    environment:
      KAFKA_ADVERTISED_HOST_NAME: "10.41.16.11"
      KAFKA_ADVERTISED_PORT: "9092"
      KAFKA_ZOOKEEPER_CONNECT: "zk:2181"
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.41.16.11:9092
      TZ: Asia/Shanghai
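Each Logstash service also mounts /data/xtalpi/config/logstash.yml, which is not reproduced in this post. A minimal sketch of what it could contain, assuming only the HTTP bind address, worker count, and config reload behaviour need overriding (these exact settings are an assumption, not taken from the original file):

# Hypothetical logstash.yml for the containers above (assumed settings)
http.host: "0.0.0.0"            # expose the Logstash monitoring API inside the container
pipeline.workers: 2             # assumed worker count, tune to the host's CPU
config.reload.automatic: true   # pick up edits to the mounted pipeline file without restarting

With the config files in place, the stack is started the usual way, e.g. docker-compose up -d, and docker-compose logs -f pod-logstash can be used to confirm the pipelines come up.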
logstash-podlog.conf.xpipline (mounted as logstash-x-podlog.conf for the xpipline-logstash service):

input {
  file {
    type => "xpipline-log"
    path => [ "/data/podlog/*/production/xpipeline*.log" ]
  }
}

filter {
  grok {
    # Parse the timestamp, client IP, response code, request and timings out of the raw line
    match => {
      "message" => " (?<fullTime>\S{10}:\S{8}) %{IPORHOST} \[%{IP:ip}\] - %{NUMBER:response_code:int} %{WORD:request_type} %{DATA:request_uri} %{NUMBER:response_time:int}us %{NUMBER:bytes:int}"
    }
    overwrite => ["message"]
  }

  # Drop events whose grok match failed
  if "_grokparsefailure" in [tags] { drop {} }

  # date: match => [ "field", "time format" ]; target writes the parsed value into "@timestamp"
  date {
    match => [ "fullTime", "yyyy-MM-dd':'HH:mm:ss" ]
    target => "@timestamp"
  }
}

output {
  elasticsearch {
    hosts => ["10.42.0.5:9201"]
    index => "newdrug-xpipline-%{+YYYY.MM}"
  }
}
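This pipeline writes the xpipeline logs straight to Elasticsearch. The pod-logstash and system-logstash services mount their own pipeline files (logstash-podlog.conf and logstash-syslog.conf), which are not shown here; since the compose stack also runs Kafka, one plausible layout is for those pipelines to buffer events through a Kafka topic first. A hedged sketch of such a Kafka-backed pipeline fragment (the path pattern, topic name, and codec are assumptions, not taken from the original configs):

# Hypothetical logstash-podlog.conf fragment: ship pod logs into Kafka
input {
  file {
    type => "pod-log"
    path => [ "/data/podlog/*/production/*.log" ]   # assumed path pattern
  }
}
output {
  kafka {
    bootstrap_servers => "kafka:9092"   # "kafka" is the service name from the compose file
    topic_id => "podlog"                # assumed topic name
    codec => json
  }
}

A downstream pipeline would then read the same topic with a kafka input block and index it into Elasticsearch, keeping Kafka as the buffer between collection and indexing.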
