1. Install Elasticsearch
sysctl -w vm.max_map_count=262144
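Note that sysctl -w only lasts until the next reboot. To make the setting persistent, something along these lines (assuming a stock /etc/sysctl.conf) can be added as well:
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p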
mkdir -p /opt/elk/elastic
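The official Elasticsearch image runs as uid 1000, so if Docker creates the bind-mounted data, logs and plugins directories as root the container may fail with permission errors. A precautionary step (paths assumed to match the volumes used below):
mkdir -p /opt/elk/elastic/{plugins,data,logs}
chown -R 1000:1000 /opt/elk/elastic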
docker run --name=elasticsearch \
--env=TZ=Asia/Shanghai \
--env='ES_JAVA_OPTS=-Xms1024m -Xmx1024m' \
--volume=/opt/elk/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
--volume=/opt/elk/elastic/plugins:/usr/share/elasticsearch/plugins \
--volume=/opt/elk/elastic/data:/usr/share/elasticsearch/data \
--volume=/opt/elk/elastic/logs:/usr/share/elasticsearch/logs \
--privileged \
--workdir=/usr/share/elasticsearch \
-p 9200:9200 \
-p 9300:9300 \
--restart=always \
--runtime=runc \
--detach=true \
-t \
elasticsearch:7.17.17 \
eswrapper
Contents of elasticsearch.yml:
cluster.name: "docker-cluster"
network.host: 0.0.0.0
# Bind address restriction; 0.0.0.0 means unrestricted. In production, set a fixed IP.
transport.host: 0.0.0.0
# Elasticsearch node name
node.name: node-1
# Initial master-eligible nodes used to bootstrap the cluster
cluster.initial_master_nodes: ["node-1"]
# The settings below allow cross-origin requests (CORS) from any origin
http.cors.enabled: true
http.cors.allow-origin: "*"
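Once the container is up, a quick check from the Docker host (ports as mapped above) confirms that Elasticsearch is responding:
curl http://localhost:9200
curl http://localhost:9200/_cat/health?v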
2. Install Kibana
mkdir -p /opt/elk/kibana
docker run --name=kibana \
--user=kibana \
--volume=/opt/elk/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml \
--workdir=/usr/share/kibana \
-p 5601:5601 \
--restart=no \
--runtime=runc \
--detach=true \
-t \
kibana:7.17.17 \
/usr/local/bin/kibana-docker
Contents of kibana.yml:
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://192.168.3.9:9200"]
# UI language setting
i18n.locale: "zh-CN"
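Kibana may take a minute or two to start. Assuming it runs on the same host as Elasticsearch (192.168.3.9), it should then be reachable in a browser at http://192.168.3.9:5601, or checked from the command line via its status API:
curl http://192.168.3.9:5601/api/status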
3. Install Redis
mkdir -p /opt/elk/redis
docker run --name=redis \
--volume=/opt/elk/redis/redis.conf:/etc/redis/redis.conf:rw \
--volume=/etc/localtime:/etc/localtime:ro \
--volume=/opt/elk/redis/data:/data:rw \
--workdir=/data \
-p 6379:6379 \
--restart=unless-stopped \
--runtime=runc \
--detach=true \
-t \
redis:7.2.4 \
redis-server /etc/redis/redis.conf
Contents of redis.conf:
port 6379
requirepass Ptl3BKCbMZ
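A quick connectivity check with redis-cli inside the container, using the password from redis.conf above (expected reply: PONG):
docker exec -it redis redis-cli -a Ptl3BKCbMZ ping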
4. Install Logstash
mkdir -p /opt/elk/logstash/{logs,pathdata1,pathdata2}
Download Logstash (version 7.17.17):
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.17.17-linux-x86_64.tar.gz
Extract the archive under /opt/elk/logstash and add the bin directory to the PATH:
PATH=/opt/elk/logstash/logstash-7.17.17/bin:$PATH
export PATH
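Verify that the binary is on the PATH:
logstash --version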
Create /opt/elk/logstash/to-redis.conf (Logstash receives the application logs over TCP and forwards them to Redis):
input {
tcp {
port => 9092
type => "log-ruoyi-gateway"
}
tcp {
port => 9093
type => "log-ruoyi-auth"
}
tcp {
port => 9094
type => "log-ruoyi-system"
}
tcp {
port => 9095
type => "log-ruoyi-file"
}
tcp {
port => 9096
type => "log-ruoyi-job"
}
tcp {
port => 9097
type => "log-ruoyi-gen"
}
tcp {
port => 9098
type => "log-ruoyi-business"
}
}
filter {
json {
source => "message"
}
}
output {
if [type] == "log-ruoyi-gateway" {
redis {
host => "192.168.3.9"
port => "6379"
key => "log-ruoyi-gateway"
data_type => "list"
codec => "json"
password => "Ptl3BKCbMZ"
db => "0"
}
}
if [type] == "log-ruoyi-auth" {
redis {
host => "192.168.3.9"
port => "6379"
key => "log-ruoyi-auth"
data_type => "list"
codec => "json"
password => "Ptl3BKCbMZ"
db => "0"
}
}
if [type] == "log-ruoyi-system" {
redis {
host => "192.168.3.9"
port => "6379"
key => "log-ruoyi-system"
data_type => "list"
codec => "json"
password => "Ptl3BKCbMZ"
db => "0"
}
}
if [type] == "log-ruoyi-file" {
redis {
host => "192.168.3.9"
port => "6379"
key => "log-ruoyi-file"
data_type => "list"
codec => "json"
password => "Ptl3BKCbMZ"
db => "0"
}
}
if [type] == "log-ruoyi-job" {
redis {
host => "192.168.3.9"
port => "6379"
key => "log-ruoyi-job"
data_type => "list"
codec => "json"
password => "Ptl3BKCbMZ"
db => "0"
}
}
if [type] == "log-ruoyi-gen" {
redis {
host => "192.168.3.9"
port => "6379"
key => "log-ruoyi-gen"
data_type => "list"
codec => "json"
password => "Ptl3BKCbMZ"
db => "0"
}
}
if [type] == "log-ruoyi-business" {
redis {
host => "192.168.3.9"
port => "6379"
key => "log-ruoyi-business"
data_type => "list"
codec => "json"
password => "Ptl3BKCbMZ"
db => "0"
}
}
}
Create /opt/elk/logstash/redis-to.conf (Logstash reads the logs from Redis and writes them into Elasticsearch. Redis acts as a message queue here: once a message is consumed it is removed automatically. Buffering through Redis prevents message loss if Elasticsearch goes down and smooths out the write load on Elasticsearch):
input{
redis{
data_type=>"list"
host=>"192.168.3.9"
port=>"6379"
db=>"0"
key=>"log-ruoyi-gateway"
codec=>"json"
password=>"Ptl3BKCbMZ"
type=>"log-ruoyi-gateway"
}
redis{
data_type=>"list"
host=>"192.168.3.9"
port=>"6379"
db=>"0"
key=>"log-ruoyi-auth"
codec=>"json"
password=>"Ptl3BKCbMZ"
type=>"log-ruoyi-auth"
}
redis{
data_type=>"list"
host=>"192.168.3.9"
port=>"6379"
db=>"0"
key=>"log-ruoyi-system"
codec=>"json"
password=>"Ptl3BKCbMZ"
type=>"log-ruoyi-system"
}
redis{
data_type=>"list"
host=>"192.168.3.9"
port=>"6379"
db=>"0"
key=>"log-ruoyi-file"
codec=>"json"
password=>"Ptl3BKCbMZ"
type=>"log-ruoyi-file"
}
redis{
data_type=>"list"
host=>"192.168.3.9"
port=>"6379"
db=>"0"
key=>"log-ruoyi-job"
codec=>"json"
password=>"Ptl3BKCbMZ"
type=>"log-ruoyi-job"
}
redis{
data_type=>"list"
host=>"192.168.3.9"
port=>"6379"
db=>"0"
key=>"log-ruoyi-gen"
codec=>"json"
password=>"Ptl3BKCbMZ"
type=>"log-ruoyi-gen"
}
redis{
data_type=>"list"
host=>"192.168.3.9"
port=>"6379"
db=>"0"
key=>"log-ruoyi-business"
codec=>"json"
password=>"Ptl3BKCbMZ"
type=>"log-ruoyi-business"
}
}
output{
if [type] == "log-ruoyi-gateway" {
elasticsearch{
hosts=>["192.168.3.9:9200"]
index=>"nk-gateway-%{+YYYY.MM.dd}"
}
}
if [type] == "log-ruoyi-auth" {
elasticsearch{
hosts=>["192.168.3.9:9200"]
index=>"nk-auth-%{+YYYY.MM.dd}"
}
}
if [type] == "log-ruoyi-system" {
elasticsearch{
hosts=>["192.168.3.9:9200"]
index=>"nk-system-%{+YYYY.MM.dd}"
}
}
if [type] == "log-ruoyi-file" {
elasticsearch{
hosts=>["192.168.3.9:9200"]
index=>"nk-file-%{+YYYY.MM.dd}"
}
}
if [type] == "log-ruoyi-job" {
elasticsearch{
hosts=>["192.168.3.9:9200"]
index=>"nk-job-%{+YYYY.MM.dd}"
}
}
if [type] == "log-ruoyi-gen" {
elasticsearch{
hosts=>["192.168.3.9:9200"]
index=>"nk-gen-%{+YYYY.MM.dd}"
}
}
if [type] == "log-ruoyi-business" {
elasticsearch{
hosts=>["192.168.3.9:9200"]
index=>"nk-business-%{+YYYY.MM.dd}"
}
}
}
Check the syntax of the Logstash configuration files:
logstash -f ./to-redis.conf -t
logstash -f ./redis-to.conf -t
Start Logstash (the two pipelines run as separate processes, so each needs its own --path.data):
nohup logstash -f /opt/elk/logstash/to-redis.conf --path.data=/opt/elk/logstash/pathdata1 &>/opt/elk/logstash/logs/to_redis.log &
nohup logstash -f /opt/elk/logstash/redis-to.conf --path.data=/opt/elk/logstash/pathdata2 &>/opt/elk/logstash/logs/redis-to.log &
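A rough check of the first pipeline, assuming Logstash runs locally and your netcat variant sends on EOF (the test message and the -w flag here are illustrative): push a JSON line into the gateway TCP input on port 9092, then look at the Redis list. Because the second pipeline consumes entries almost immediately, a length of 0 does not necessarily mean the event was lost.
echo '{"msg":"elk pipeline test"}' | nc -w 1 127.0.0.1 9092
docker exec -it redis redis-cli -a Ptl3BKCbMZ llen log-ruoyi-gateway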
5. Configure logback
Add the dependency
Spring Boot already bundles the logback framework, so there is no need to add logback itself; the dependency below is only required for logback to communicate with Logstash:
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>6.3</version>
</dependency>
Configuration file
Add the following to the application's yml configuration file:
logging:
  config: classpath:log/logback-spring.xml
Then add the log/logback-spring.xml configuration file under resources:

<?xml version="1.0" encoding="UTF-8"?>
<!-- Log levels from low to high: TRACE < DEBUG < INFO < WARN < ERROR < FATAL. If set to WARN, anything below WARN is not output -->
<!-- scan: when set to true, the configuration file is reloaded if it changes. Default is true -->
<!-- scanPeriod: interval for checking whether the configuration file has changed. If no unit is given, the default is milliseconds. Only effective when scan is true. Default interval is 1 minute. -->
<!-- debug: when set to true, logback prints its internal status messages so you can watch logback at work. Default is false. -->
<configuration scan="true" scanPeriod="10 seconds">
<!--<include resource="org/springframework/boot/logging/logback/base.xml"
/> -->
<contextName>Logback For Boss</contextName>
<!-- The name attribute is the variable name and value is its value. The variable is inserted into the logger context and can then be referenced with "${}". -->
<property name="log.path" value="./logs/hlwserver/path" />
<!-- Log file output location -->
<property name="logDir" value="./logs/hlwserver/" />
<!-- Maximum log history: 30 days -->
<property name="maxHistory" value="30" />
<!-- Remote Logstash appender configuration -->
<appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<destination>192.168.5.188:9092</destination>
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder"/>
</appender>
<!-- Console output -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%cyan(%d{yyyy-MM-dd HH:mm:ss.SSS}) [%thread] %highlight(%-5level) %logger{36} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- Separate file for ERROR-level logs -->
<appender name="ERROR"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<rollingPolicy
class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${logDir}/%d{yyyyMMdd}/error.log</fileNamePattern>
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
<prudent>false</prudent>
</appender>
<!-- File output for all log levels -->
<appender name="ALL_LOGS"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<rollingPolicy
class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${logDir}/%d{yyyyMMdd}/info.log</fileNamePattern>
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<append>false</append>
<prudent>false</prudent>
</appender>
<!-- File log, rolled over daily -->
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Log file name pattern -->
<FileNamePattern>${logDir}/%d{yyyyMMdd}/boss.%d{yyyy-MM-dd}.log</FileNamePattern>
<!-- Number of days to keep log files -->
<MaxHistory>30</MaxHistory>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<!-- Output format: %d is the date, %thread the thread name, %-5level the level left-padded to 5 characters, %msg the log message, %n a newline -->
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
</encoder>
<!-- Maximum size of a single log file -->
<triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
<MaxFileSize>10MB</MaxFileSize>
</triggeringPolicy>
</appender>
<!-- Asynchronous output -->
<appender name="dayLogAsyncAppender" class="ch.qos.logback.classic.AsyncAppender">
<includeCallerData>true</includeCallerData>
<!-- Do not drop logs. By default, once the queue is 80% full, TRACE, DEBUG and INFO events are discarded -->
<discardingThreshold>0</discardingThreshold>
<!-- Change the default queue depth; this value affects performance. Default is 256 -->
<queueSize>512</queueSize>
<appender-ref ref="FILE"/>
</appender>
<!-- Spring-specific logger -->
<logger name="org.springframework" level="info"/>
<!-- Show parameters for Hibernate SQL; Hibernate-specific loggers -->
<logger name="org.hibernate.type.descriptor.sql.BasicBinder" level="TRACE" />
<logger name="org.hibernate.type.descriptor.sql.BasicExtractor" level="DEBUG" />
<logger name="org.hibernate.SQL" level="DEBUG" />
<logger name="org.hibernate.engine.QueryParameters" level="DEBUG" />
<logger name="org.hibernate.engine.query.HQLQueryPlan" level="DEBUG" />
<!-- Root logger, level INFO -->
<root level="INFO">
<!-- Console output -->
<appender-ref ref="STDOUT" />
<!-- File output -->
<appender-ref ref="ERROR" />
<appender-ref ref="ALL_LOGS" />
<appender-ref ref="logstash" />
</root>
</configuration>
The key setting is the <destination> of the logstash appender: it must point to the host and TCP port of the matching Logstash input (port 9092 corresponds to log-ruoyi-gateway in to-redis.conf above).
Start the application and it will begin producing logs; then check in Kibana whether the corresponding indices have been created.
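Index creation can also be confirmed from the command line (the index names follow the nk-* patterns defined in redis-to.conf):
curl 'http://192.168.3.9:9200/_cat/indices/nk-*?v'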