Building an Elasticsearch + Kafka + Logstash + Filebeat Log Analysis System with Docker
1. Pull the Docker images
docker pull elasticsearch:8.17.4
docker pull kibana:8.17.4
docker pull logstash:8.17.4
docker pull docker.elastic.co/beats/filebeat:8.17.4
docker pull zookeeper:latest
docker pull confluentinc/cp-kafka:latest
2. Install Elasticsearch
mkdir -p /data/elasticsearch/{data,logs}
chmod 777 /data/elasticsearch/data
chmod 777 /data/elasticsearch/logs
cat >> /etc/sysctl.conf << EOF
vm.max_map_count=262144
EOF
sysctl -p
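To confirm the kernel setting is active before starting the container, a quick check (both commands should print the value written above):
# Verify vm.max_map_count took effect; both should print 262144
sysctl vm.max_map_count
cat /proc/sys/vm/max_map_count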
Start Elasticsearch:
docker run -d \
--restart unless-stopped \
--name elasticsearch \
--hostname elasticsearch \
-e "ES_JAVA_OPTS=-Xms1024m -Xmx1024m" \
-e "discovery.type=single-node" \
-p 9200:9200 \
-v /data/elasticsearch/logs:/usr/share/elasticsearch/logs \
-v /data/elasticsearch/data:/usr/share/elasticsearch/data \
-v /etc/localtime:/etc/localtime \
elasticsearch:8.17.4
Set the user passwords (the password used throughout this guide is GuL0ZfYeMX7fAMqK):
docker exec -it elasticsearch bash
/usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic -i
/usr/share/elasticsearch/bin/elasticsearch-reset-password -u kibana_system -i
/usr/share/elasticsearch/bin/elasticsearch-reset-password -u logstash_system -i
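With the passwords set, it is worth confirming that authentication works from outside the container. A minimal sketch with curl (-k skips verification of the self-signed certificate; host and password are the ones used throughout this guide):
# Should return node and version info as JSON
curl -k -u elastic:GuL0ZfYeMX7fAMqK https://172.16.52.115:9200/
# Cluster health; a single node should report green or yellow
curl -k -u elastic:GuL0ZfYeMX7fAMqK "https://172.16.52.115:9200/_cluster/health?pretty"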
Elasticsearch login information
Accounts
| Service | Username | Password |
|---|---|---|
| Elasticsearch | elastic | GuL0ZfYeMX7fAMqK |
| Kibana | kibana_system | GuL0ZfYeMX7fAMqK |
Access URLs
| Service | URL |
|---|---|
| Elasticsearch | https://172.16.52.115:9200/ |
| Kibana | https://172.16.52.115:5601/ |
Notes
- elastic is the Elasticsearch superuser account
- kibana_system is the system account Kibana uses to communicate with Elasticsearch
- Both accounts use the same password in this setup
- Access requires HTTPS
3. Install Kibana
mkdir -p /data/kibana/{conf,logs}
chmod 777 /data/kibana/conf
chmod 777 /data/kibana/logs
cat > /data/kibana/conf/kibana.yml << EOF
server.port: 5601
server.host: "0.0.0.0"
server.publicBaseUrl: "http://172.16.52.115:5601"
elasticsearch.hosts: ["https://172.16.52.115:9200"]
elasticsearch.username: "kibana_system"
elasticsearch.password: "GuL0ZfYeMX7fAMqK"
elasticsearch.ssl.verificationMode: "none"
xpack.reporting.csv.maxSizeBytes: 20971520
xpack.security.encryptionKey: "something_at_least_32_characters"
logging:
  appenders:
    file:
      type: file
      fileName: /usr/share/kibana/logs/kibana.log
      layout:
        type: json
  root:
    appenders:
      - default
      - file
i18n.locale: "zh-CN"
EOF
Start Kibana:
docker run -d \
--restart unless-stopped \
--name kibana \
--hostname kibana \
-p 5601:5601 \
-v /etc/localtime:/etc/localtime \
-v /data/kibana/logs:/usr/share/kibana/logs \
-v /data/kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml \
kibana:8.17.4
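Kibana can take a minute to come up. A quick sketch to see when it starts answering (any HTTP status code means the server is responding; details are in the container log):
# Prints an HTTP status code once Kibana is listening
curl -s -o /dev/null -w "%{http_code}\n" http://172.16.52.115:5601/api/status
docker logs -f kibana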
4. Install ZooKeeper
mkdir -p /data/zookeeper/{conf,logs,data}
docker run -d \
--restart unless-stopped \
--name zookeeper \
-p 2181:2181 \
-v /data/zookeeper/data:/data \
-v /data/zookeeper/logs:/datalog \
-v /etc/localtime:/etc/localtime \
zookeeper:latest
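A quick liveness check, assuming zkServer.sh is on the PATH of the official zookeeper image:
# Should print "Mode: standalone" for a healthy single node
docker exec -it zookeeper zkServer.sh status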
5. Install Kafka
mkdir -p /data/kafka/data
chmod 777 /data/kafka/data
docker run -d \
--restart unless-stopped \
--name kafka \
-p 9092:9092 \
-e KAFKA_ZOOKEEPER_CONNECT=172.16.52.115:2181 \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://172.16.52.115:9092 \
-e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
-v /etc/localtime:/etc/localtime \
-v /data/kafka/data:/var/lib/kafka/data \
confluentinc/cp-kafka:latest
# Kafka commands (run inside the container):
docker exec -it kafka bash
kafka-topics --list --bootstrap-server 172.16.52.115:9092 # list all topics
kafka-topics --create --bootstrap-server 172.16.52.115:9092 --replication-factor 1 --partitions 6 --topic nginx # create a topic
kafka-console-producer --bootstrap-server localhost:9092 --topic nginx # start a console producer
# Example message to send:
{"datas":[{"channel":"","metric":"temperature","producer":"ijinus","sn":"IJA0101-00002245","time":"1543208156100","value":"80"}],"ver":"1.0"}
kafka-topics --bootstrap-server 172.16.52.115:9092 --delete --topic nginx # delete a Kafka topic
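After creating a topic, its partition layout can be inspected; a minimal sketch using the broker address from this guide:
# Shows partition count, leader, and replica assignment
kafka-topics --describe --bootstrap-server 172.16.52.115:9092 --topic nginx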
6. Install Logstash
mkdir -p /data/logstash/{conf,logs,data}
chmod 777 /data/logstash/data
chmod 777 /data/logstash/logs
cat > /data/logstash/conf/logstash.yml << EOF
node.name: node-1
http.host: 0.0.0.0
http.port: 9600
path.data: /usr/share/logstash/data
path.logs: /usr/share/logstash/logs
pipeline.ordered: false
EOF
cat > /data/logstash/conf/logstash.conf << EOF
input {
  kafka {
    bootstrap_servers => "172.16.52.115:9092"
    topics => ["nginx-access", "syslog"]
    consumer_threads => 5
    decorate_events => true
    auto_offset_reset => "latest"
    codec => "json"
  }
}
filter {
  json {
    source => "message"
  }
  mutate {
    remove_field => ["host","prospector","fields","input","log"]
  }
  grok {
    match => { "message" => "%{IPORHOST:client_ip} (%{USER:ident}|-) (%{USER:auth}|-) \\[%{HTTPDATE:timestamp}\\] \"(?:%{WORD:verb} (%{NOTSPACE:request}|-)(?: HTTP/%{NUMBER:http_version})?|-)\" %{NUMBER:status} (?:%{NUMBER:bytes}|-) \"(?:%{URI:referrer}|-)\" \"%{GREEDYDATA:agent}\"" }
  }
}
output {
  elasticsearch {
    hosts => "https://172.16.52.115:9200"
    ssl => true
    ssl_certificate_verification => false
    index => "%{[@metadata][kafka][topic]}-%{+YYYY.MM.dd}"
    user => "elastic"
    password => "GuL0ZfYeMX7fAMqK"
  }
}
EOF
Start Logstash:
docker run -d \
--restart unless-stopped \
--name logstash \
-v /etc/localtime:/etc/localtime \
-v /data/logstash/data:/usr/share/logstash/data \
-v /data/logstash/logs:/usr/share/logstash/logs \
-v /data/logstash/conf/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
-v /data/logstash/conf/logstash.yml:/usr/share/logstash/config/logstash.yml \
logstash:8.17.4
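The pipeline can be verified once the container is running. Since the run command above does not publish port 9600, the monitoring API is queried from inside the container; a sketch, assuming curl is available in the logstash image:
# Confirm the pipeline loaded and watch the event counters grow
docker exec logstash curl -s http://localhost:9600/_node/stats/pipelines?pretty
# Watch the startup log for configuration errors
docker logs -f logstash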
7. Deploy Filebeat
7.1 Install Nginx
yum install -y yum-utils epel-release
# Install nginx via yum
yum install -y nginx
rm -rf /etc/nginx/conf.d/default.conf
systemctl enable nginx
# Edit the main nginx configuration file
vi /etc/nginx/nginx.conf
# nginx.conf contents:
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    sendfile on;
    server_tokens off;
    tcp_nopush on;
    tcp_nodelay on;

    # nginx log format
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;
    keepalive_timeout 1d;

    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header x-client-ip $remote_addr;

    include /etc/nginx/conf.d/*.conf;
}
# Configure a virtual host
vi /etc/nginx/conf.d/test.conf
# test.conf contents:
server {
    listen 80;
    server_name localhost;
    access_log /var/log/nginx/access.log main;

    location / {
        root /var/www/html;
        index index.html index.htm;
    }
}
# Start nginx
systemctl start nginx
systemctl status nginx
# Test nginx
mkdir /var/www/html -p
echo "welcome to my site..." > /var/www/html/index.html
7.2 Install Filebeat
Filebeat's job here is to collect the nginx logs and ship them to Kafka.
1. Pull the image
docker pull docker.elastic.co/beats/filebeat:8.17.4
2. Create the log directory
mkdir -p /data/filebeat/log
3. Create the filebeat.yml file with the following content:
vi /data/filebeat/filebeat.yml

filebeat.inputs:
- type: log
  enabled: true
  ignore_older: 10h
  paths:
    - /var/log/nginx/access.log
  fields:
    filetype: nginx-access
  fields_under_root: true
  scan_frequency: 10s

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1

name: "172.16.52.115"

setup.kibana:

output.kafka:
  hosts: ["172.16.52.115:9092"]
  topic: nginx-access
  enabled: true
  worker: 6
  compression_level: 3
  loadbalance: true

processors:
- drop_fields:
    fields: ["input", "host", "agent.type", "agent.ephemeral_id", "agent.id", "agent.version", "ecs"]

logging.level: info
4. Start Filebeat:
docker run -d --name filebeat \
-v /data/filebeat/log:/var/log/filebeat \
-v /data/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml \
-v /var/log/nginx:/var/log/nginx \
-v /etc/localtime:/etc/localtime \
docker.elastic.co/beats/filebeat:8.17.4
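Filebeat ships test subcommands that validate the configuration and the connection to the Kafka output; a quick sketch run inside the container:
# Validate filebeat.yml syntax
docker exec filebeat filebeat test config
# Confirm the Kafka broker is reachable from the container
docker exec filebeat filebeat test output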
8. Verify
1. Run a consumer on the Kafka side; the same data should be visible:
docker exec -it kafka bash
kafka-console-consumer --bootstrap-server 172.16.52.115:9092 --topic nginx-access --from-beginning
2. Check the indices in Elasticsearch:
https://172.16.52.115:9200/_cat/indices?v
3. Clean up an ES index:
curl -XDELETE "https://172.16.52.115:9200/nginx-access-2025.01.11" -u elastic:GuL0ZfYeMX7fAMqK -k
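Before deleting, the indexed documents can be sampled to confirm the grok fields arrived. A sketch; the index pattern matches the topic-YYYY.MM.dd names produced by the Logstash output above:
# Fetch one parsed document from the nginx-access indices
curl -k -u elastic:GuL0ZfYeMX7fAMqK "https://172.16.52.115:9200/nginx-access-*/_search?size=1&pretty"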
docker-compose deployment
mkdir /data/docker/log_elk/elasticsearch2/{config,data} -p
mkdir /data/docker/log_elk/logstash2/{data,config,pipeline} -p
mkdir /data/docker/log_elk/kibana2/config -p
mkdir /data/docker/log_elk/filebeat -p
chmod -R 777 /data/docker/log_elk
4. Create the configuration files
4.1 Elasticsearch configuration
vim /data/docker/log_elk/elasticsearch2/config/elasticsearch.yml
cluster.name: "docker-cluster"
network.host: 0.0.0.0
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
4.2 Logstash configuration
vim /data/docker/log_elk/logstash2/config/logstash.yml
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: ["http://172.16.52.115:9200"]
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: "elastic"
xpack.monitoring.elasticsearch.password: "GuL0ZfYeMX7fAMqK"
vim /data/docker/log_elk/logstash2/pipeline/logstash.conf
input {
  kafka {
    bootstrap_servers => "172.16.52.115:9092"
    topics => ["nginx", "syslog"]
    consumer_threads => 5
    decorate_events => true
    auto_offset_reset => "latest"
    codec => "json"
  }
}
filter {
  json {
    source => "message"
  }
  mutate {
    remove_field => ["host","prospector","fields","input","log"]
  }
  grok {
    match => { "message" => "%{IPORHOST:client_ip} (%{USER:ident}|-) (%{USER:auth}|-) \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:verb} (%{NOTSPACE:request}|-)(?: HTTP/%{NUMBER:http_version})?|-)\" %{NUMBER:status} (?:%{NUMBER:bytes}|-) \"(?:%{URI:referrer}|-)\" \"%{GREEDYDATA:agent}\"" }
  }
}
output {
  elasticsearch {
    # Elasticsearch serves plain HTTP in this compose stack, so no SSL options are needed
    hosts => "http://172.16.52.115:9200"
    index => "%{[@metadata][kafka][topic]}-%{+YYYY.MM.dd}"
    user => "elastic"
    password => "GuL0ZfYeMX7fAMqK"
  }
}
4.3 Kibana configuration
vim /data/docker/log_elk/kibana2/config/kibana.yml
server.port: 5601
server.name: kibana
server.host: 0.0.0.0
elasticsearch.hosts: ["http://172.16.52.115:9200"]
elasticsearch.username: "elastic"
elasticsearch.password: "GuL0ZfYeMX7fAMqK"
4.4 Filebeat configuration
vim /data/docker/log_elk/filebeat/filebeat.yml

filebeat.inputs:
- type: log
  paths:
    - /var/log/nginx/access.log
  tags: ["nginx-accesslog"]
  fields:
    document_type: nginxaccess
    topic: nginx
- type: log
  paths:
    - /opt/var/log/messages
  tags: ["syslog-accesslog"]
  fields:
    document_type: syslogaccess
    topic: syslog

output.kafka:
  hosts: ["172.16.52.115:9092"]
  topic: '%{[fields.topic]}'
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1

logging.to_files: true
5. Configure docker-compose.yaml
vim /data/docker/log_elk/docker-compose.yaml
version: '3.3'
services:
  elasticsearch:
    image: elasticsearch:7.17.4
    hostname: elasticsearch
    restart: always
    container_name: elk-es
    deploy:
      mode: replicated
      replicas: 1
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - /data/docker/log_elk/elasticsearch2/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /data/docker/log_elk/elasticsearch2/data:/usr/share/elasticsearch/data
    environment:
      TZ: Asia/Shanghai
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: "GuL0ZfYeMX7fAMqK"
      # Use single-node discovery in order to disable production mode and avoid bootstrap checks.
      # See: https://www.elastic.co/guide/en/elasticsearch/reference/7.11/bootstrap-checks.html
      discovery.type: single-node
      # Force publishing on the 'elk' overlay.
      network.publish_host: _eth0_
    networks:
      - elk
  logstash:
    image: logstash:7.17.4
    command: "/usr/local/bin/docker-entrypoint"
    restart: always
    container_name: elk-logs
    hostname: logstash
    ports:
      - "5044:5044"
      - "5000:5000"
      - "9600:9600"
    volumes:
      - /data/docker/log_elk/logstash2/data:/usr/share/logstash/data
      - /data/docker/log_elk/logstash2/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /data/docker/log_elk/logstash2/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    environment:
      TZ: Asia/Shanghai
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1
  filebeat:
    image: docker.elastic.co/beats/filebeat:7.17.4
    container_name: elk-filebeat
    restart: always
    volumes:
      - /data/docker/log_elk/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml
      - /var/log/nginx:/var/log/nginx:ro
      - /var/log/messages:/opt/var/log/messages:ro
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1
  kibana:
    image: kibana:7.17.4
    restart: always
    container_name: elk-kibana
    environment:
      TZ: Asia/Shanghai
    ports:
      - "5601:5601"
    volumes:
      - /data/docker/log_elk/kibana2/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1
  zookeeper:
    image: zookeeper:latest
    container_name: elk-zk
    restart: always
    environment:
      TZ: Asia/Shanghai
    ports:
      - 2181:2181
  kafka:
    image: confluentinc/cp-kafka:latest
    container_name: elk-kaf
    restart: always
    environment:
      TZ: Asia/Shanghai
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.16.52.115:9092
      KAFKA_ZOOKEEPER_CONNECT: 172.16.52.115:2181
      KAFKA_CREATE_TOPICS: "logdev:1:1"
    ports:
      - 9092:9092
networks:
  elk:
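Before starting the stack, the file can be syntax-checked; a minimal sketch:
# Renders the parsed configuration; exits non-zero on YAML or schema errors
docker-compose -f /data/docker/log_elk/docker-compose.yaml config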
6. Start the ELK stack
cd /data/docker/log_elk
docker-compose up -d
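A quick status pass after startup (run from /data/docker/log_elk):
# All services should show "Up"
docker-compose ps
# Follow a single service's log, e.g. Elasticsearch
docker-compose logs -f elasticsearch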
7. Verify the stack
7.1 Kafka verification
1. Check the Kafka topics:
docker exec -it elk-kaf kafka-topics --list --bootstrap-server 172.16.52.115:9092
2. Check that Kafka can produce and consume messages:
docker exec -it elk-kaf bash
kafka-console-producer --topic test01 --bootstrap-server kafka:9092
kafka-console-consumer --topic test01 --from-beginning --bootstrap-server kafka:9092 --group group01
List topics:
kafka-topics --bootstrap-server kafka:9092 --list
List consumer groups:
kafka-consumer-groups --bootstrap-server localhost:9092 --list
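To see whether consumers are keeping up with the topics, describe a consumer group; a sketch using the test group from above (Logstash's kafka input uses the group_id configured in its pipeline, logstash by default):
# Shows current offset, log-end offset, and LAG per partition
kafka-consumer-groups --bootstrap-server localhost:9092 --describe --group group01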

