Setting Up an ELK Logging Platform
Overview
ELK stands for Elasticsearch, Logstash, and Kibana; combined, they can be used to build a production logging system:
- Elasticsearch: stores the collected log data.
- Logstash: collects the logs. Once a Spring Boot application is integrated with Logstash, it ships its logs to Logstash, which forwards them to Elasticsearch.
- Kibana: provides a web UI for viewing the logs.
Setup
Base images and environment
First, pull the ELK images:
```bash
docker pull elasticsearch:7.14.0
docker pull logstash:7.14.0
docker pull kibana:7.14.0
```
Configuration to fix Elasticsearch startup problems:
The kernel parameter below must be set, otherwise Elasticsearch will refuse to start because the vm.max_map_count limit is too low.
```bash
sysctl -w vm.max_map_count=262144
sysctl -p
```
Grant write permission on the Elasticsearch data directory:
```bash
chmod 777 /opt/docker_app/elasticsearch/data
```
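`sysctl -w` only changes the value for the running system. If the setting should survive a reboot, one common approach (not covered in the original post) is to persist it in /etc/sysctl.conf:
```bash
# persist the setting so it survives reboots (run as root)
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p
```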
Write the docker-compose.yml file:
```yaml
version: '3'
services:
  elasticsearch:
    image: elasticsearch:7.14.0
    container_name: elasticsearch_7.14.0
    restart: always
    environment:
      - "cluster.name=elasticsearch" # set the cluster name to elasticsearch
      - "discovery.type=single-node" # start in single-node mode
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" # JVM heap size
      - TZ=Asia/Shanghai
    volumes:
      - /opt/docker_app/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml # mount the config file
      - /opt/docker_app/elasticsearch/plugins:/usr/share/elasticsearch/plugins # mount the plugins directory
      - /opt/docker_app/elasticsearch/data:/usr/share/elasticsearch/data # mount the data directory
    ports:
      - 9200:9200
      - 9300:9300
  kibana:
    image: kibana:7.14.0
    container_name: kibana_7.14.0
    restart: always
    links:
      - elasticsearch:es # the elasticsearch service can be reached via the hostname "es"
    depends_on:
      - elasticsearch # start Kibana after Elasticsearch
    environment:
      - "elasticsearch.hosts=http://es:9200" # address used to reach Elasticsearch
      - TZ=Asia/Shanghai
    volumes:
      - /opt/docker_app/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - 5601:5601
  logstash:
    image: logstash:7.14.0
    container_name: logstash_7.14.0
    restart: always
    environment:
      - TZ=Asia/Shanghai
    volumes:
      - /opt/docker_app/logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf # mount the Logstash pipeline config
    depends_on:
      - elasticsearch # start Logstash after Elasticsearch
    links:
      - elasticsearch:es # the elasticsearch service can be reached via the hostname "es"
    ports:
      - 4560:4560
      - 4561:4561
      - 4562:4562
      - 4563:4563
```
Write logstash.conf:
```conf
input {
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 4560
    codec => json_lines
    type => "debug"
  }
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 4561
    codec => json_lines
    type => "error"
  }
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 4562
    codec => json_lines
    type => "business"
  }
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 4563
    codec => json_lines
    type => "record"
  }
}
filter {
  if [type] == "record" {
    mutate {
      remove_field => "port"
      remove_field => "host"
      remove_field => "@version"
    }
    json {
      source => "message"
      remove_field => ["message"]
    }
  }
}
output {
  elasticsearch {
    hosts => ["es:9200"]
    action => "index"
    codec => json
    user => elastic
    password => "123456"
    index => "admintamplate-%{type}-%{+YYYY.MM.dd}" # Elasticsearch index names must be lowercase
    template_name => "AdminTamplate"
  }
}
```
Run the docker-compose file:
```bash
docker-compose up -d
```
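Note that the compose file bind-mounts elasticsearch.yml and kibana.yml from /opt/docker_app/... on the host. The original post does not show their contents, and if those files do not exist Docker will create empty directories at the paths instead, which breaks the containers. The snippets below are an assumed minimal starting point, not the author's actual files:
```yaml
# /opt/docker_app/elasticsearch/config/elasticsearch.yml (assumed minimal content)
cluster.name: elasticsearch
network.host: 0.0.0.0
```
```yaml
# /opt/docker_app/kibana/config/kibana.yml (assumed minimal content)
server.name: kibana
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://es:9200"]
```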
Logstash needs the json_lines codec plugin installed:
```bash
logstash-plugin install logstash-codec-json_lines
```
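Because Logstash runs in a container in this setup, the plugin is typically installed inside the running container, which is then restarted. A sketch using the container name from the compose file above:
```bash
# install the json_lines codec inside the Logstash container, then restart it
docker exec -it logstash_7.14.0 /usr/share/logstash/bin/logstash-plugin install logstash-codec-json_lines
docker restart logstash_7.14.0
```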
Integrating Logstash into the Spring Boot application
Add the dependency:
```xml
<!--Logstash integration-->
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>7.0.1</version>
</dependency>
```
Configure the log output in logback-spring.xml:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE configuration>
<configuration>
<!--Include Spring Boot's default logback settings-->
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<!--Use the default console appender-->
<include resource="org/springframework/boot/logging/logback/console-appender.xml"/>
<!--Application name-->
<springProperty scope="context" name="APP_NAME" source="spring.application.name" defaultValue="springBoot"/>
<!--Log file path-->
<property name="LOG_FILE_PATH" value="${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-/tmp}}}/logs}"/>
<!--Logstash host-->
<springProperty name="LOG_STASH_HOST" scope="context" source="logstash.host" defaultValue="192.168.227.130"/>
<!--DEBUG-and-above logs to a rolling file-->
<appender name="FILE_DEBUG"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--Accept DEBUG level and above-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>DEBUG</level>
</filter>
<encoder>
<!--Use the default file log pattern-->
<pattern>${FILE_LOG_PATTERN}</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--File name pattern for rolled-over files-->
<fileNamePattern>${LOG_FILE_PATH}/debug/${APP_NAME}-%d{yyyy-MM-dd}-%i.log</fileNamePattern>
<!--Roll over once a file exceeds this size (default 10MB)-->
<maxFileSize>${LOG_FILE_MAX_SIZE:-10MB}</maxFileSize>
<!--Days of history to keep (default 30)-->
<maxHistory>${LOG_FILE_MAX_HISTORY:-30}</maxHistory>
</rollingPolicy>
</appender>
<!--ERROR logs to a rolling file-->
<appender name="FILE_ERROR"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--Accept only ERROR level-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<encoder>
<!--Use the default file log pattern-->
<pattern>${FILE_LOG_PATTERN}</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--File name pattern for rolled-over files-->
<fileNamePattern>${LOG_FILE_PATH}/error/${APP_NAME}-%d{yyyy-MM-dd}-%i.log</fileNamePattern>
<!--Roll over once a file exceeds this size-->
<maxFileSize>${LOG_FILE_MAX_SIZE:-50MB}</maxFileSize>
<!--Days of history to keep (default 30)-->
<maxHistory>${LOG_FILE_MAX_HISTORY:-30}</maxHistory>
</rollingPolicy>
</appender>
<!--DEBUG-and-above logs to Logstash (port 4560)-->
<appender name="LOG_STASH_DEBUG" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>DEBUG</level>
</filter>
<destination>${LOG_STASH_HOST}:4560</destination>
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>Asia/Shanghai</timeZone>
</timestamp>
<!--Custom JSON log format-->
<pattern>
<pattern>
{
"project": "AdminTemplate",
"level": "%level",
"service": "${APP_NAME:-}",
"pid": "${PID:-}",
"thread": "%thread",
"class": "%logger",
"message": "%message",
"stack_trace": "%exception{20}"
}
</pattern>
</pattern>
</providers>
</encoder>
<!--Round-robin across destinations when multiple Logstash instances are configured-->
<connectionStrategy>
<roundRobin>
<connectionTTL>5 minutes</connectionTTL>
</roundRobin>
</connectionStrategy>
</appender>
<!--ERROR logs to Logstash (port 4561)-->
<appender name="LOG_STASH_ERROR" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<destination>${LOG_STASH_HOST}:4561</destination>
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>Asia/Shanghai</timeZone>
</timestamp>
<!--Custom JSON log format-->
<pattern>
<pattern>
{
"project": "AdminTemplate",
"level": "%level",
"service": "${APP_NAME:-}",
"pid": "${PID:-}",
"thread": "%thread",
"class": "%logger",
"message": "%message",
"stack_trace": "%exception{20}"
}
</pattern>
</pattern>
</providers>
</encoder>
<!--Round-robin across destinations when multiple Logstash instances are configured-->
<connectionStrategy>
<roundRobin>
<connectionTTL>5 minutes</connectionTTL>
</roundRobin>
</connectionStrategy>
</appender>
<!--Business logs to Logstash (port 4562)-->
<appender name="LOG_STASH_BUSINESS" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<destination>${LOG_STASH_HOST}:4562</destination>
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>Asia/Shanghai</timeZone>
</timestamp>
<!--Custom JSON log format-->
<pattern>
<pattern>
{
"project": "AdminTemplate",
"level": "%level",
"service": "${APP_NAME:-}",
"pid": "${PID:-}",
"thread": "%thread",
"class": "%logger",
"message": "%message",
"stack_trace": "%exception{20}"
}
</pattern>
</pattern>
</providers>
</encoder>
<!--Round-robin across destinations when multiple Logstash instances are configured-->
<connectionStrategy>
<roundRobin>
<connectionTTL>5 minutes</connectionTTL>
</roundRobin>
</connectionStrategy>
</appender>
<!--API access record logs to Logstash (port 4563)-->
<appender name="LOG_STASH_RECORD" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<destination>${LOG_STASH_HOST}:4563</destination>
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>Asia/Shanghai</timeZone>
</timestamp>
<!--Custom JSON log format-->
<pattern>
<pattern>
{
"project": "AdminTemplate",
"level": "%level",
"service": "${APP_NAME:-}",
"class": "%logger",
"message": "%message"
}
</pattern>
</pattern>
</providers>
</encoder>
<!--Round-robin across destinations when multiple Logstash instances are configured-->
<connectionStrategy>
<roundRobin>
<connectionTTL>5 minutes</connectionTTL>
</roundRobin>
</connectionStrategy>
</appender>
<!--Limit framework log output-->
<logger name="org.slf4j" level="INFO"/>
<logger name="springfox" level="INFO"/>
<logger name="io.swagger" level="INFO"/>
<logger name="org.springframework" level="INFO"/>
<root level="DEBUG">
<appender-ref ref="CONSOLE"/>
<!--<appender-ref ref="FILE_DEBUG"/>-->
<!--<appender-ref ref="FILE_ERROR"/>-->
<appender-ref ref="LOG_STASH_DEBUG"/>
<appender-ref ref="LOG_STASH_ERROR"/>
</root>
<logger name="com.wht.config.LogAspect" level="DEBUG">
<appender-ref ref="LOG_STASH_RECORD"/>
</logger>
<logger name="com.wht" level="DEBUG">
<appender-ref ref="LOG_STASH_BUSINESS"/>
</logger>
</configuration>
```
Start the application and wait for it to produce some log output.
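The `LOG_STASH_HOST` property above is resolved from the Spring configuration key `logstash.host` (falling back to 192.168.227.130), so the Logstash address can be set per environment. A minimal sketch of the corresponding application.yml entry (the file name is an assumption; the key comes from the `springProperty` above):
```yaml
logstash:
  host: 192.168.227.130
```
With the logger configuration above, anything logged under the `com.wht` package goes to the business appender (and, via the root logger, to the debug/error appenders), while only `com.wht.config.LogAspect` feeds the record appender. For example, in a hypothetical service class:
```java
package com.wht.service;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical class for illustration: any logger under com.wht is routed to
// LOG_STASH_BUSINESS (port 4562) and also bubbles up to the root appenders.
public class DemoService {
    private static final Logger log = LoggerFactory.getLogger(DemoService.class);

    public void doWork() {
        log.info("business event: order created");
    }
}
```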
View the logs in Kibana
Create an index pattern
Then view the logs in the monitoring panel
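If nothing shows up, one way to confirm that log documents are actually reaching Elasticsearch before creating the index pattern is to list the indices (assuming Elasticsearch runs on the same host that the application uses for Logstash):
```bash
curl http://192.168.227.130:9200/_cat/indices?v
```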