ELK: Installation with Docker
Test environment: Ubuntu 20.04
Environment preparation
grep -q vm.max_map_count /etc/sysctl.conf || echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p | grep vm.max_map_count
# Initialize data directories (owned by uid 1000, the user the official Elasticsearch/Logstash images run as)
install -o 1000 -g root -d /data/elasticsearch/{data,logs}
install -o 1000 -g root -d /data/logstash/pipeline
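A quick sanity check that the kernel setting is active and the directories ended up owned by uid 1000 (paths as created above):
# Verify kernel setting and directory ownership (numeric uid/gid)
sysctl vm.max_map_count
ls -ldn /data/elasticsearch/data /data/elasticsearch/logs /data/logstash/pipeline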
Logstash configuration
Pipelines configuration
cat > /data/logstash/pipelines.yml << 'EOF'
- pipeline.id: client-01
  pipeline.workers: 2
  pipeline.batch.size: 3000
  pipeline.batch.delay: 200
  path.config: /usr/share/logstash/pipeline/client-01.conf
- pipeline.id: client-02
  pipeline.workers: 2
  pipeline.batch.size: 3000
  pipeline.batch.delay: 200
  path.config: /usr/share/logstash/pipeline/client-02.conf
EOF
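Logstash only reads pipelines.yml when it is started without -f/-e, which is how the docker-compose service below runs it. Once the stack is up, a rough way to confirm that both pipelines were loaded is the Logstash monitoring API (port 9600 is not published in the compose file, so go through docker exec; this assumes curl is available inside the image):
# List the pipelines loaded by the running Logstash instance
docker exec logstash curl -s 'http://localhost:9600/_node/pipelines?pretty'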
client-01 configuration
cat > /data/logstash/pipeline/client-01.conf << 'EOF'
# Client-side pipeline configuration
input {
  beats {
    port => 5026
  }
}
filter {
  ......
}
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    manage_template => false
    index => "%{project}-%{[fields][document_type]}-%{+YYYY-MM}"
    user => "elastic"
    password => "123456"
  }
  #stdout { codec => rubydebug }
}
EOF
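The filter block above is deliberately left empty. As a sketch only (not the original filter), a json filter is one plausible way to parse the JSON nginx lines used for testing later in this post; it can be prototyped interactively in a throwaway container before being written into client-01.conf:
# Start a throwaway container, then (inside it) run Logstash with an inline test pipeline
docker run --rm -it docker.elastic.co/logstash/logstash:7.15.0 bash
# Paste a JSON log line on stdin, Ctrl-D to exit
logstash -e 'input { stdin {} } filter { json { source => "message" } } output { stdout { codec => rubydebug } }'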
client-02 configuration
cat > /data/logstash/pipeline/client-02.conf << 'EOF'
# Note: this pipeline is intended for server-side log ingestion from all projects (multiple projects sharing the same servers)
# When Logstash has to collect several types of logs, separate pipelines are recommended
input {
  beats {
    port => 5027
  }
}
filter {
  # Process application/message logs
  ......
}
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    manage_template => false
    index => "%{project}-server-%{+YYYY.MM.dd}"
    user => "elastic"
    password => "123456"
  }
  #stdout {
  #  codec => rubydebug
  #}
}
EOF
Validate the configuration files
# Start a throwaway test container
docker run --rm --net elk -it -v /data/logstash:/data/logstash docker.elastic.co/logstash/logstash:7.15.0 bash
# Test the pipeline configurations
logstash -f /data/logstash/pipeline/client-01.conf -t
logstash -f /data/logstash/pipeline/client-02.conf -t
docker-compose preparation
Environment variables
Optional; the compose file below provides defaults
# Environment variables for docker-compose.yml
cat > .env << EOF
ELASTIC_VERSION=7.15.0
ELASTIC_SECURITY=true
ELASTIC_PASSWORD=123456
EOF
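To confirm docker-compose picks up the values from .env, render the effective configuration from the directory that holds docker-compose.yml and .env:
# The image tags and ELASTIC_* values should reflect .env
docker-compose config | grep -E 'image:|ELASTIC'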
Configuration file
docker-compose.yml
version: '3.7'
services:
  elasticsearch:
    container_name: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION:-7.15.0}
    environment:
      - discovery.type=single-node
      - action.destructive_requires_name=true
      - bootstrap.system_call_filter=false
      - xpack.security.enabled=${ELASTIC_SECURITY:-true}
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - ELASTIC_SECURITY=${ELASTIC_SECURITY:-true}
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD:-123456}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/elasticsearch/logs:/usr/share/elasticsearch/logs
    ports:
      - 9200:9200
      - 9300:9300
    networks:
      - elk
  kibana:
    container_name: kibana
    image: docker.elastic.co/kibana/kibana:${ELASTIC_VERSION:-7.15.0}
    environment:
      - I18N_LOCALE=zh-CN
      - SERVER_PUBLICBASEURL=http://0.0.0.0:8080
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=${ELASTIC_PASSWORD:-123456}
    ports:
      - 5601:5601
    links:
      - elasticsearch
    depends_on:
      - elasticsearch
    networks:
      - elk
  logstash:
    container_name: logstash
    image: docker.elastic.co/logstash/logstash:${ELASTIC_VERSION:-7.15.0}
    environment:
      - XPACK_MONITORING_ENABLED=true
      - XPACK_MONITORING_ELASTICSEARCH_USERNAME=elastic
      - XPACK_MONITORING_ELASTICSEARCH_PASSWORD=${ELASTIC_PASSWORD:-123456}
      # Centralized pipeline management requires a paid license:
      # "Configuration Management is not available: basic is not a valid license for this feature."
      # - XPACK_MANAGEMENT_ENABLED=true
      # - XPACK_MANAGEMENT_LOGSTASH_POLL_INTERVAL=5s
      # - XPACK_MANAGEMENT_PIPELINE_ID=["client-*", "server-*"]
      # - XPACK_MANAGEMENT_ELASTICSEARCH_HOSTS=http://elasticsearch:9200/
      # - XPACK_MANAGEMENT_ELASTICSEARCH_USERNAME=elastic
      # - XPACK_MANAGEMENT_ELASTICSEARCH_PASSWORD=${ELASTIC_PASSWORD:-123456}
    volumes:
      - /data/logstash/pipelines.yml:/usr/share/logstash/config/pipelines.yml
      - /data/logstash/pipeline:/usr/share/logstash/pipeline
    ports:
      - 5026:5026
      - 5027:5027
    links:
      - elasticsearch
    depends_on:
      - elasticsearch
    networks:
      - elk
networks:
  elk:
    driver: bridge
Start
# Start in the foreground first and watch for errors
# docker-compose up
# Start a single service, e.g. logstash
# docker-compose start logstash
# Start everything (the containers must already have been created by docker-compose up)
docker-compose start
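Once the containers are running, a couple of checks confirm that authentication works (password as set in .env; replace 127.0.0.1 if you query from another host):
# Elasticsearch should return cluster info for the elastic user
curl -u elastic:123456 http://127.0.0.1:9200
# Kibana status API (or open http://<host>:5601 in a browser)
curl -su elastic:123456 http://127.0.0.1:5601/api/status | head -c 300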
Filebeat
Client
# Configuration
cat > filebeat.yml << 'EOF'
#=========================== Filebeat inputs ===================================
filebeat.inputs:
- type: filestream
  enabled: true
  paths:
    - /var/log/nginx/*.json.log
  exclude_lines: ['"request_method":"HEAD"']
  fields:
    # This value is used in the index name; mind the naming rules
    document_type: client
#----------------------------- Logstash output ---------------------------------
output.logstash:
  hosts: ["127.0.0.1:5026"]
EOF
# Test run in the foreground
./filebeat -e -c filebeat.yml
# Append a test log line
_timestamp=$(date +"%Y-%m-%dT%H:%M:%S%:z")
a='{"@timestamp":"'$_timestamp'","host":"2.2.2.2","clientip":"1.1.1.1","size":5,"responsetime":0.000,"upstreamtime":"0.000","upstreamhost":"127.0.0.1:9000","http_host":"aaa.zaza.com","url":"/index.php","request_uri":"/","request_method":"POST","request_body":"os=1&channel=1&name=zaza","xff":"1.1.1.1","referer":"","agent":"","status":"200"}'
mkdir -pv /var/log/nginx
echo "$a" >> /var/log/nginx/test.zaza.com.access.json.log
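Assuming the client-01 filter populates the project field used in the index pattern, a new index should appear shortly after the test line is shipped; listing the indices is a quick end-to-end check:
# A <project>-client-YYYY-MM index should show up once events arrive
curl -u elastic:123456 'http://127.0.0.1:9200/_cat/indices?v'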
Server-side logs
cat > filebeat.yml << 'EOF'
#=========================== Filebeat inputs ===================================
filebeat.inputs:
- type: log
  paths:
    - /var/log/*.log
  multiline.pattern: '^[[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2}'
  multiline.negate: true
  multiline.match: after
  fields:
    # This value is used in the index name; mind the naming rules
    document_type: server
#----------------------------- Logstash output ---------------------------------
output.logstash:
  hosts: ["127.0.0.1:5027"]
EOF
# Test run in the foreground
./filebeat -e -c filebeat.yml
# Sample test log line (matches the multiline pattern above)
2021-03-30 12:37:26[00000080] aaaaaa bbbbbb
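To exercise the multiline settings, append an entry whose second line does not start with a date, so Filebeat folds it into the preceding event (the file name below is just an example matching the /var/log/*.log glob):
# The continuation line should be merged into the first event
printf '%s\n' '2021-03-30 12:37:26[00000080] aaaaaa bbbbbb' '    at some.stack.trace.line' >> /var/log/app-test.log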
Deleting a leftover file from a container that fails to start
elasticsearch | Exception in thread "main" java.nio.file.FileAlreadyExistsException: /usr/share/elasticsearch/config/elasticsearch.keystore.tmp
The container won't start -> you can't exec into it -> you can't delete the offending file -> the container still won't start -> ...
# The Docker data root on this host is /data/docker (the default is /var/lib/docker)
cd /data/docker/overlay2
find ./ -name elasticsearch.keystore.tmp
# Delete the elasticsearch.keystore.tmp found above
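As a sketch, once find has located the file inside the failed container's overlay layer, remove it and start the service again (double-check the path before deleting anything):
# Remove the leftover temp keystore, then retry the container
find ./ -name elasticsearch.keystore.tmp -exec rm -v {} +
docker-compose start elasticsearch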