<!-- Example for logging into the build folder of your project -->
<!-- <property name="LOG_FILE" value="/Users/yinjihuan/Downloads/sleuth-user-service.log"/> -->
<property name="LOG_FILE" value="D:/IdeaProjects/spring-cloud-study/springcloud-hystrix/sleuth-user-service.log"/>

<!-- You can override this to have a custom pattern -->
<property name="CONSOLE_LOG_PATTERN"
          value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>

<!-- Appender to log to console -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
  <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
    <!-- Minimum logging level to be presented in the console logs -->
    <level>INFO</level>
  </filter>
  <encoder>
    <pattern>${CONSOLE_LOG_PATTERN}</pattern>
    <charset>utf8</charset>
  </encoder>
</appender>

<!-- Appender to log to file -->
<appender name="flatfile" class="ch.qos.logback.core.rolling.RollingFileAppender">
  <file>${LOG_FILE}</file>
  <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
    <!-- Roll the plain-text log daily, keeping 7 days of history -->
    <fileNamePattern>${LOG_FILE}.%d{yyyy-MM-dd}</fileNamePattern>
    <maxHistory>7</maxHistory>
  </rollingPolicy>
  <encoder>
    <pattern>${CONSOLE_LOG_PATTERN}</pattern>
    <charset>utf8</charset>
  </encoder>
</appender>

<!-- Appender to log to file in a JSON format -->
<appender name="logstash" class="ch.qos.logback.core.rolling.RollingFileAppender">
  <file>${LOG_FILE}.json</file>
  <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
    <!-- Roll the JSON log daily (gzipped), keeping 7 days of history -->
    <fileNamePattern>${LOG_FILE}.json.%d{yyyy-MM-dd}.gz</fileNamePattern>
    <maxHistory>7</maxHistory>
  </rollingPolicy>
  <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
    <providers>
      <timestamp>
        <timeZone>UTC</timeZone>
      </timestamp>
      <!-- One JSON object per event; %X{...} pulls Sleuth trace IDs from the MDC -->
      <pattern>
        <pattern>
          {
            "severity": "%level",
            "service": "${springAppName:-}",
            "trace": "%X{X-B3-TraceId:-}",
            "span": "%X{X-B3-SpanId:-}",
            "parent": "%X{X-B3-ParentSpanId:-}",
            "exportable": "%X{X-Span-Export:-}",
            "pid": "${PID:-}",
            "thread": "%thread",
            "class": "%logger{40}",
            "rest": "%message"
          }
        </pattern>
      </pattern>
    </providers>
  </encoder>
</appender>

<root level="INFO">
  <appender-ref ref="console"/>
  <!-- uncomment this to have also JSON logs -->
  <appender-ref ref="logstash"/>
  <appender-ref ref="flatfile"/>
</root>
</configuration>
双击 kibana-7.6.2-windows-x86_64 下 bin 目录下的 kibana.bat
如果成功会有类似提示:
1 2
log [09:58:01.360] [info][listening] Server running at http://localhost:5601 log [09:58:01.529] [info][server][Kibana][http] http server running at http://localhost:5601
# 查看 rabbitmq 状态 D:\Program Files\rabbitmq_server-3.8.3\sbin>rabbitmqctl status Error: unable to perform an operation on node 'rabbit@DESKTOP-SAUA6M1'. Please see diagnostics information and suggestions below.
# enable rabbitmq_management ### D:\Program Files\rabbitmq_server-3.8.3\sbin>rabbitmq-plugins.bat enable rabbitmq_management Enabling plugins on node rabbit@DESKTOP-SAUA6M1: rabbitmq_management The following plugins have been configured: rabbitmq_management rabbitmq_management_agent rabbitmq_web_dispatch Applying plugin configuration to rabbit@DESKTOP-SAUA6M1... The following plugins have been enabled: rabbitmq_management rabbitmq_management_agent rabbitmq_web_dispatch
set 3 plugins. Offline change; changes will take effect at broker restart.
2020-05-14 15:51:16.549 INFO 416 --- [ main] z.s.ZipkinServer : Starting ZipkinServer on DESKTOP-SAUA6M1 with PID 416 (D:\Zipkin\zipkin-server-2.21.1-exec.jar started by wuzhiyong in D:\Zipkin) 2020-05-14 15:51:16.552 INFO 416 --- [ main] z.s.ZipkinServer : The following profiles are active: shared 2020-05-14 15:51:17.400 INFO 416 --- [ main] c.l.a.c.u.SystemInfo : Hostname: desktop-saua6m1 (from 'hostname' command) 2020-05-14 15:51:18.100 INFO 416 --- [oss-http-*:9411] c.l.a.s.Server : Serving HTTP at /0:0:0:0:0:0:0:0:9411 - http://127.0.0.1:9411/ 2020-05-14 15:51:18.100 INFO 416 --- [ main] c.l.a.s.ArmeriaAutoConfiguration : Armeria server started at ports: {/0:0:0:0:0:0:0:0:9411=ServerPort(/0:0:0:0:0:0:0:0:9411, [http])} 2020-05-14 15:51:18.115 INFO 416 --- [ main] z.s.ZipkinServer : Started ZipkinServer in 2.126 seconds (JVM running for 3.133)
查看
rabbitMq
请求接口后再从 zipkin 中查看某个调用链:
用 Elasticsearch 存储调用链数据
目前收集的数据都是存在 Zipkin 服务的内存中,服务一重启这些数据就没了,我们需要将这些数据持久化。实际使用中数据量可能会比较大,所以一般数据库并不是很好的选择,可以选择 ES 来存储数据,ES 在搜索方面有先天的优势