新增:作业提交项目提交;mdc日志追踪提效组件提交

This commit is contained in:
yangyang01000846
2025-11-12 13:41:29 +08:00
parent a1a329f6e3
commit 9eef32d9ea
39 changed files with 1939 additions and 9 deletions

View File

@@ -0,0 +1,19 @@
package com.sdm.submit;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
import org.springframework.cloud.openfeign.EnableFeignClients;
import org.springframework.scheduling.annotation.EnableScheduling;
/**
 * Entry point for the job-submission (submit) service.
 *
 * <p>Scans this module's packages plus the shared {@code com.sdm.common} packages,
 * registers with service discovery, enables scheduled tasks, and wires Feign
 * clients from the shared feign package.
 */
@SpringBootApplication(scanBasePackages = {"com.sdm.submit", "com.sdm.common"})
@EnableDiscoveryClient
@EnableScheduling
@EnableFeignClients(basePackages = "com.sdm.common.feign")
public class SubmitApplication {
    public static void main(String[] args) {
        SpringApplication application = new SpringApplication(SubmitApplication.class);
        application.run(args);
    }
}

View File

@@ -0,0 +1,49 @@
package com.sdm.submit.config;
import com.sdm.common.utils.AESUtil;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.env.EnvironmentPostProcessor;
import org.springframework.core.env.*;
import org.springframework.stereotype.Component;
import java.util.Properties;
/**
 * Decrypts {@code ENC(...)}-wrapped property values at application startup.
 *
 * <p>Registered via {@code META-INF/spring.factories}. EnvironmentPostProcessors run
 * before component scanning, so the previous {@code @Component} annotation was
 * redundant (it only created a second, unused bean instance) and has been removed.
 */
public class DecryptEnvironmentPostProcessor implements EnvironmentPostProcessor {
    @Override
    public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
        // Collects decrypted values; registered as one high-priority source at the end.
        Properties props = new Properties();
        // Encrypted values are wrapped in the marker prefix "ENC(" and suffix ")".
        MutablePropertySources propertySources = environment.getPropertySources();
        for (PropertySource<?> propertySource : propertySources) {
            if (propertySource instanceof EnumerablePropertySource) {
                EnumerablePropertySource<?> enumerablePropertySource = (EnumerablePropertySource<?>) propertySource;
                // Walk every key/value pair exposed by this property source.
                for (String propertyName : enumerablePropertySource.getPropertyNames()) {
                    String propertyVal = environment.getProperty(propertyName);
                    if (propertyVal != null && propertyVal.startsWith("ENC(") && propertyVal.endsWith(")")) {
                        // Strip the ENC( ... ) envelope to obtain the ciphertext.
                        String encryptedValue = propertyVal.substring(4, propertyVal.length() - 1);
                        try {
                            props.put(propertyName, AESUtil.decode(encryptedValue));
                        } catch (Exception e) {
                            // Fail fast and name the offending key so a misconfigured
                            // secret is obvious at boot (the original threw a bare
                            // RuntimeException with no context).
                            throw new IllegalStateException(
                                    "Failed to decrypt property '" + propertyName + "'", e);
                        }
                    }
                }
            }
        }
        // addFirst so decrypted values shadow the encrypted originals.
        if (!props.isEmpty()) {
            environment.getPropertySources()
                    .addFirst(new PropertiesPropertySource("decryptedProperties", props));
        }
    }
}

View File

@@ -0,0 +1,40 @@
package com.sdm.submit.config;
import io.minio.MinioClient;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
public class MinioConfig {
@Value("${minio.endpoint}")
private String endpoint;
@Value("${minio.port}") // API端口默认9000
private int port;
@Value("${minio.access-key}")
private String accessKey;
@Value("${minio.secret-key}")
private String secretKey;
@Value("${minio.bucket-name}")
private String bucketName;
@Value("${minio.secure}")
private boolean secure;
@Bean
public MinioClient minioClient() {
return MinioClient.builder()
.endpoint(endpoint, port, secure)
.credentials(accessKey, secretKey)
.build();
}
public String getBucketName() {
return bucketName;
}
}

View File

@@ -0,0 +1,73 @@
package com.sdm.submit.config;
import okhttp3.ConnectionPool;
import okhttp3.OkHttpClient;
import okhttp3.Protocol;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import java.security.SecureRandom;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
/**
 * Shared OkHttp client configuration.
 *
 * <p>SECURITY NOTE(review): this client trusts ALL server certificates and disables
 * hostname verification. That is only acceptable for trusted internal networks;
 * confirm it is intentional before pointing this client at external endpoints.
 */
@Configuration
public class OkHttpConfiguration {
    /** Client with 60s connect/read/write timeouts, pooling, and HTTP/1.1 only. */
    @Bean
    public OkHttpClient okHttpClient() {
        return new OkHttpClient.Builder()
                .sslSocketFactory(sslSocketFactory(), x509TrustManager())
                // Retry transparently on connection failures (e.g. stale pooled sockets).
                .retryOnConnectionFailure(true)
                .connectionPool(pool())
                .connectTimeout(60, TimeUnit.SECONDS)
                .readTimeout(60, TimeUnit.SECONDS)
                .writeTimeout(60, TimeUnit.SECONDS)
                // Hostname verification disabled -- see class-level security note.
                .hostnameVerifier((hostname, session) -> true)
                .protocols(Collections.singletonList(Protocol.HTTP_1_1))
                .build();
    }
    /** Trust manager that accepts every certificate chain -- see class-level note. */
    @Bean
    public X509TrustManager x509TrustManager() {
        return new X509TrustManager() {
            @Override
            public void checkClientTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException {
                // intentionally empty: all client certificates accepted
            }
            @Override
            public void checkServerTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException {
                // intentionally empty: all server certificates accepted
            }
            @Override
            public X509Certificate[] getAcceptedIssuers() {
                return new X509Certificate[0];
            }
        };
    }
    /**
     * TLS socket factory backed by the trust-all manager.
     *
     * @throws IllegalStateException if the TLS context cannot be initialised. The
     *     original implementation printed the stack trace and returned {@code null},
     *     which later surfaced as an opaque NullPointerException inside the OkHttp
     *     builder; failing fast here keeps the real cause attached.
     */
    @Bean
    public SSLSocketFactory sslSocketFactory() {
        try {
            SSLContext sslContext = SSLContext.getInstance("TLS");
            sslContext.init(null, new TrustManager[]{x509TrustManager()}, new SecureRandom());
            return sslContext.getSocketFactory();
        } catch (Exception e) {
            throw new IllegalStateException("Unable to initialise TLS context for OkHttp client", e);
        }
    }
    /** Connection pool: up to 5000 idle connections kept alive for 300 seconds. */
    @Bean
    public ConnectionPool pool() {
        return new ConnectionPool(5000, 300, TimeUnit.SECONDS);
    }
}

View File

@@ -0,0 +1,45 @@
package com.sdm.submit.config;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
/**
 * Redis template configuration.
 */
@Configuration
public class RedisConfig {
    /**
     * String-serialized template: keys, values, hash keys and hash values are all
     * stored as plain strings.
     */
    @Bean(name = "redisTemplate")
    public RedisTemplate<Object, Object> redisTemplate(RedisConnectionFactory factory) {
        StringRedisSerializer stringSerializer = new StringRedisSerializer();
        RedisTemplate<Object, Object> template = new RedisTemplate<>();
        template.setConnectionFactory(factory);
        template.setKeySerializer(stringSerializer);
        template.setHashKeySerializer(stringSerializer);
        template.setValueSerializer(stringSerializer);
        template.setHashValueSerializer(stringSerializer);
        return template;
    }

    /**
     * Binary-payload template: string keys, raw {@code byte[]} values.
     */
    @Bean(name = "bytesRedisTemplate")
    public RedisTemplate<String, byte[]> bytesRedisTemplate(RedisConnectionFactory connectionFactory) {
        RedisTemplate<String, byte[]> template = new RedisTemplate<>();
        template.setConnectionFactory(connectionFactory);
        template.setKeySerializer(new StringRedisSerializer());
        template.setValueSerializer(RedisSerializer.byteArray());
        template.afterPropertiesSet();
        return template;
    }
}

View File

@@ -0,0 +1,21 @@
package com.sdm.submit.config;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.SchedulingConfigurer;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.scheduling.config.ScheduledTaskRegistrar;
/**
* 解决多个定时任务单线程有任务不能执行的问题
*
*/
@Configuration
public class ScheduledTaskConfig implements SchedulingConfigurer {
@Override
public void configureTasks(ScheduledTaskRegistrar taskRegistrar) {
ThreadPoolTaskScheduler taskScheduler = new ThreadPoolTaskScheduler();
taskScheduler.setPoolSize(5);
taskScheduler.initialize();
taskRegistrar.setTaskScheduler(taskScheduler);
}
}

View File

@@ -0,0 +1,22 @@
package com.sdm.submit.config;
import io.swagger.v3.oas.models.ExternalDocumentation;
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.oas.models.info.Info;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * OpenAPI (Swagger) metadata for the job-submission module.
 */
@Configuration
public class SwaggerConfig {
    /** Top-level API document: title, description, version and external docs link. */
    @Bean
    public OpenAPI openAPI() {
        Info info = new Info()
                .title("SPDM系统-作业提交模块")
                .description("SPDM系统-作业提交模块接口文档")
                .version("V1");
        ExternalDocumentation externalDocs = new ExternalDocumentation()
                .description("作业提交模块")
                .url("/");
        return new OpenAPI().info(info).externalDocs(externalDocs);
    }
}

View File

@@ -0,0 +1,48 @@
package com.sdm.submit.config.mybatis;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Holds the per-thread routing key (master/slave) consumed by DynamicDataSource.
 *
 * @Author xuyundi
 * @Date 2023/7/21
 * @Note
 */
@Slf4j
public class DBContextHolder {
    private static final ThreadLocal<DBTypeEnum> contextHolder = new ThreadLocal<>();
    // Round-robin counter, reserved for multi-slave routing (see note in slave()).
    private static final AtomicInteger counter = new AtomicInteger(-1);
    public static void set(DBTypeEnum dbType) {
        contextHolder.set(dbType);
    }
    public static DBTypeEnum get() {
        return contextHolder.get();
    }
    /**
     * Clears the routing key for the current thread. Call after a request/task
     * completes: worker threads are pooled, so without removal a stale ThreadLocal
     * value leaks the previous task's routing decision into the next one.
     */
    public static void clear() {
        contextHolder.remove();
    }
    public static void master() {
        set(DBTypeEnum.MASTER);
        log.info("切换到master");
    }
    public static void slave() {
        set(DBTypeEnum.SLAVE);
        log.info("切换到slave");
        // With multiple slaves, rotate with the counter instead, e.g.:
        //   int index = counter.getAndIncrement() % slaveCount;
        //   set(index == 0 ? DBTypeEnum.SLAVE1 : DBTypeEnum.SLAVE2);
        // (reset counter periodically to avoid overflow)
    }
}

View File

@@ -0,0 +1,10 @@
package com.sdm.submit.config.mybatis;
/**
 * Routing keys for the read/write split: MASTER for writes (and transactional
 * work), SLAVE for plain reads. Used as lookup keys by DynamicDataSource.
 *
 * @Author xuyundi
 * @Date 2023/7/21
 * @Note
 */
public enum DBTypeEnum {
    MASTER, SLAVE
}

View File

@@ -0,0 +1,88 @@
package com.sdm.submit.config.mybatis;
import lombok.extern.slf4j.Slf4j;
import org.apache.ibatis.executor.Executor;
import org.apache.ibatis.executor.keygen.SelectKeyGenerator;
import org.apache.ibatis.mapping.BoundSql;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.mapping.SqlCommandType;
import org.apache.ibatis.plugin.*;
import org.apache.ibatis.session.ResultHandler;
import org.apache.ibatis.session.RowBounds;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
/**
 * MyBatis interceptor implementing master/slave routing via DBContextHolder.
 *
 * <p>Rules visible in the code below: statements running inside an active Spring
 * transaction are left untouched (the datasource was pinned at transaction start);
 * selectKey statements (e.g. SELECT LAST_INSERT_ID()) route to master; SELECTs whose
 * text contains insert/delete/update route to master; all other SELECTs route to
 * slave; non-SELECT commands route to master. Decisions are cached per
 * MappedStatement id, so the cache is bounded by the number of mapper methods.
 *
 * @Author xuyundi
 * @Date 2023/7/21
 * @Note
 */
@Slf4j
@Intercepts({
        @Signature(type = Executor.class, method = "update", args = {
                MappedStatement.class, Object.class}),
        @Signature(type = Executor.class, method = "query", args = {
                MappedStatement.class, Object.class, RowBounds.class,
                ResultHandler.class})})
public class DatabasePlugin implements Interceptor {
    // \u0020 is a literal space: matches SQL text containing insert/delete/update.
    private static final String REGEX = ".*insert\\u0020.*|.*delete\\u0020.*|.*update\\u0020.*";
    // Routing decision cache keyed by MappedStatement id.
    private static final Map<String, DBTypeEnum> cacheMap = new ConcurrentHashMap<>();
    @Override
    public Object intercept(Invocation invocation) throws Throwable {
        // Only route when NOT inside a Spring-managed transaction; transactional
        // work must stay on the datasource chosen when the transaction began.
        boolean synchronizationActive = TransactionSynchronizationManager.isSynchronizationActive();
        if (!synchronizationActive) {
            Object[] objects = invocation.getArgs();
            MappedStatement ms = (MappedStatement) objects[0];
            DBTypeEnum databaseType = null;
            if ((databaseType = cacheMap.get(ms.getId())) == null) {
                // Read path
                if (ms.getSqlCommandType().equals(SqlCommandType.SELECT)) {
                    // selectKey (e.g. SELECT LAST_INSERT_ID()) must read from master
                    // to observe the row just written there.
                    if (ms.getId().contains(SelectKeyGenerator.SELECT_KEY_SUFFIX)) {
                        databaseType = DBTypeEnum.MASTER;
                    } else {
                        // Normalize whitespace, then sniff the SQL text for writes.
                        BoundSql boundSql = ms.getSqlSource().getBoundSql(objects[1]);
                        String sql = boundSql.getSql().toLowerCase(Locale.CHINA).replaceAll("[\\t\\n\\r]", " ");
                        if (sql.matches(REGEX)) {
                            databaseType = DBTypeEnum.MASTER;
                        } else {
                            databaseType = DBTypeEnum.SLAVE;
                        }
                    }
                } else {
                    // insert/update/delete commands always go to master.
                    databaseType = DBTypeEnum.MASTER;
                }
                // log.warn("设置方法[{}] use [{}] Strategy, SqlCommandType [{}]..", ms.getId(), databaseType.name(), ms.getSqlCommandType().name());
                cacheMap.put(ms.getId(), databaseType);
            }
            DBContextHolder.set(databaseType);
        }
        return invocation.proceed();
    }
    @Override
    public Object plugin(Object target) {
        // Only wrap Executors; other MyBatis components pass through untouched.
        if (target instanceof Executor) {
            return Plugin.wrap(target, this);
        } else {
            return target;
        }
    }
    @Override
    public void setProperties(Properties properties) {
        // No configurable properties.
    }
}

View File

@@ -0,0 +1,17 @@
package com.sdm.submit.config.mybatis;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
import org.springframework.lang.Nullable;
/**
 * Routing datasource: selects the actual connection pool (master or slave) based
 * on the per-thread key stored in DBContextHolder.
 *
 * @Author xuyundi
 * @Date 2023/7/21
 * @Note
 */
public class DynamicDataSource extends AbstractRoutingDataSource {
    @Nullable
    @Override
    protected Object determineCurrentLookupKey() {
        // May return null (no key set for this thread); AbstractRoutingDataSource
        // then falls back to the configured default target datasource.
        return DBContextHolder.get();
    }
}

View File

@@ -0,0 +1,97 @@
package com.sdm.submit.config.mybatis;
import com.baomidou.mybatisplus.core.MybatisConfiguration;
import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import javax.sql.DataSource;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
 * MyBatis-Plus multi-datasource configuration (master/slave routing).
 *
 * @Author xuyundi
 * @Date 2023/7/21
 * @Note
 */
@Configuration
@MapperScan(basePackages = "com.sdm.submit.dao", sqlSessionFactoryRef = "mybatisSqlSessionFactoryAdaptor")
public class MybatisPlusConfig {
    // Toggles the read/write-split interceptor; lets local environments run
    // against a single database without affecting other environments.
    @Value("${spring.datasource.slave.enable}")
    private boolean slaveEnable;
    // (Legacy MyBatis-Plus PaginationInterceptor sample removed; re-add a paging
    // interceptor bean here if pagination is needed.)
    @Bean(name = "masterDataSource")
    @Qualifier("masterDataSource")
    @ConfigurationProperties(prefix = "spring.datasource.master")
    public DataSource masterDataSource() {
        return DataSourceBuilder.create().build();
    }
    @Bean(name = "slaveDataSource")
    @Qualifier("slaveDataSource")
    @ConfigurationProperties(prefix = "spring.datasource.slave")
    public DataSource slaveDataSource() {
        return DataSourceBuilder.create().build();
    }
    /**
     * Routing datasource over the master and slave pools.
     *
     * <p>NOTE(review): the DEFAULT target is the SLAVE datasource. When
     * {@code spring.datasource.slave.enable} is false, {@link DatabasePlugin} is
     * never registered, DBContextHolder is never populated, and EVERY statement
     * -- including writes -- runs against the slave. The environment files point
     * master and slave at the same URL, which masks this; confirm whether the
     * default should be master before the two ever point at different databases.
     *
     * @param master master connection pool
     * @param slave  slave connection pool
     * @return the routing datasource (primary DataSource bean)
     */
    @Bean
    @Primary
    public DynamicDataSource dataSource(@Qualifier("masterDataSource") DataSource master,
                                        @Qualifier("slaveDataSource") DataSource slave) {
        Map<Object, Object> targetDataSources = new HashMap<>();
        targetDataSources.put(DBTypeEnum.MASTER, master);
        targetDataSources.put(DBTypeEnum.SLAVE, slave);
        DynamicDataSource dataSource = new DynamicDataSource();
        dataSource.setTargetDataSources(targetDataSources); // AbstractRoutingDataSource API
        dataSource.setDefaultTargetDataSource(slave); // used when no routing key is set -- see NOTE above
        return dataSource;
    }
    /**
     * SqlSessionFactory wired to the routing datasource: loads mappers from
     * classpath*:mapper/*.xml, enables underscore-to-camelCase mapping, and adds
     * the read/write-split plugin only when slaveEnable is true.
     */
    @Bean(name = "mybatisSqlSessionFactoryAdaptor")
    public MybatisSqlSessionFactoryBean sqlSessionFactory(@Qualifier("masterDataSource") DataSource master,
                                                          @Qualifier("slaveDataSource") DataSource slave) throws IOException {
        MybatisSqlSessionFactoryBean fb = new MybatisSqlSessionFactoryBean();
        fb.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath*:mapper/*.xml"));
        fb.setDataSource(this.dataSource(master, slave));
        // Enable underscore -> camelCase column mapping.
        MybatisConfiguration configuration = new MybatisConfiguration();
        configuration.setMapUnderscoreToCamelCase(true);
        fb.setConfiguration(configuration);
        // Register the routing interceptor only when the slave datasource is enabled.
        if (slaveEnable) {
            fb.setPlugins(new DatabasePlugin());
        }
        return fb;
    }
}

View File

@@ -0,0 +1,74 @@
package com.sdm.submit.config.thread;
import com.sdm.common.common.mdc.MdcTaskDecorator;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;
/**
 * Shared thread-pool configuration for the submit module (JDK 17 + Spring Boot).
 *
 * <p>Tasks are wrapped with MdcTaskDecorator so MDC trace context propagates from
 * the submitting thread into pool threads.
 */
@Configuration
public class SubmitCommonThreadPoolConfig {
    /**
     * Core pool size: threads kept alive even when idle (unless
     * allowCoreThreadTimeOut is enabled). Rule of thumb: CPU-bound work = cores + 1,
     * IO-bound work = cores * 2.
     */
    private static final int CORE_POOL_SIZE = Runtime.getRuntime().availableProcessors() * 2;
    /**
     * Maximum pool size: extra threads are created up to this limit once the queue is full.
     */
    private static final int MAX_POOL_SIZE = Runtime.getRuntime().availableProcessors() * 4;
    /**
     * Queue capacity: tasks wait here while all core threads are busy.
     */
    private static final int QUEUE_CAPACITY = 100;
    /**
     * Keep-alive time (seconds) for idle threads above the core size.
     */
    private static final int KEEP_ALIVE_SECONDS = 60;
    /**
     * Thread name prefix, used to attribute log lines to this pool.
     */
    private static final String THREAD_NAME_PREFIX = "custom-thread-";
    /**
     * MDC-aware executor bean named "submitCommonExecutor" (the original comment
     * said "customExecutor", which was wrong). Use it via
     * {@code @Async("submitCommonExecutor")} or pass it to CompletableFuture.
     */
    @Bean(name = "submitCommonExecutor")
    public Executor customThreadPool() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        // Core thread count
        executor.setCorePoolSize(CORE_POOL_SIZE);
        // Maximum thread count
        executor.setMaxPoolSize(MAX_POOL_SIZE);
        // Queue capacity
        executor.setQueueCapacity(QUEUE_CAPACITY);
        // Idle-thread keep-alive
        executor.setKeepAliveSeconds(KEEP_ALIVE_SECONDS);
        // Thread name prefix
        executor.setThreadNamePrefix(THREAD_NAME_PREFIX);
        // Propagate MDC (trace ids) into pool threads.
        executor.setTaskDecorator(new MdcTaskDecorator());
        // Rejection policy once max threads + queue are exhausted. Options:
        // - AbortPolicy (default): throw RejectedExecutionException
        // - CallerRunsPolicy: run on the submitting thread, throttling producers
        // - DiscardPolicy: silently drop the new task
        // - DiscardOldestPolicy: drop the oldest queued task, then retry
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        // Must be called before use: builds the underlying ThreadPoolExecutor.
        executor.initialize();
        return executor;
    }
}

View File

@@ -0,0 +1,66 @@
package com.sdm.submit.controller;
import com.alibaba.fastjson2.JSONObject;
import com.sdm.common.common.SdmResponse;
import com.sdm.common.entity.req.system.LaunchApproveReq;
import com.sdm.common.feign.inter.data.IDataFeignClient;
import com.sdm.common.utils.log.CoreLogger;
import io.swagger.v3.oas.annotations.Operation;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
import java.time.LocalDateTime;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
/**
 * Demo/diagnostics controller for the MDC log-tracing component: exercises plain
 * SLF4J logging, the CoreLogger wrapper, async logging through the custom MDC
 * executor, and a Feign remote call so trace ids can be verified end to end.
 */
@Slf4j
@RestController
public class TestController {
    // Custom MDC-propagating executor (see SubmitCommonThreadPoolConfig).
    @Autowired
    @Qualifier(value = "submitCommonExecutor")
    private Executor submitCommonExecutor;
    @Autowired
    private IDataFeignClient dataFeignClient;
    /**
     * Emits log lines at every level, deliberately triggers an ArithmeticException
     * (1/0) to exercise error-level logging, fans two log tasks out to the async
     * pool (to check MDC survives the thread handoff), then performs a Feign call.
     *
     * @return fixed marker string "submit ok"
     */
    @GetMapping("/testLog")
    @Operation(summary = "测试日志打印", description = "测试日志打印")
    public String testLog() {
        log.debug("testLog start:这是一个普通debug日志");
        log.warn("testLog start:这是一个普通warn日志");
        log.info("testLog start:这是一个普通info日志");
        log.error("testLog start:这是一个普通日志");
        CoreLogger.debug("testLog start:{}", LocalDateTime.now());
        CoreLogger.info("testLog start:{}", LocalDateTime.now());
        String str1="aaaaa-str";
        String str2="bbbbb-str";
        CoreLogger.warn("testLog param1:{}, param2{}", str1,str2);
        // Deliberate division by zero: exercises the error-logging code paths.
        try {
            int m = 1/0;
        } catch (Exception e) {
            CoreLogger.error("testLog end error:{}", e.getMessage());
            CoreLogger.error("testLog end error Stack",e);
        }
        // Fan out to the async pool to verify MDC context propagation.
        for (int i = 0; i <2 ; i++) {
            CompletableFuture.runAsync(()->printLogs("测试自定义异步线程池日志打印"),submitCommonExecutor);
        }
        CoreLogger.info("开始测试feign远程调用");
        // Remote call so trace ids can be checked across service boundaries.
        SdmResponse sdmResponse = dataFeignClient.approveDataFile(new LaunchApproveReq());
        CoreLogger.info("测试feign远程调用结果:{}", JSONObject.toJSONString(sdmResponse));
        return "submit ok";
    }
    // Emits one line per level from the async worker thread.
    private void printLogs(String str) {
        CoreLogger.debug("testLog start:这是一个异步debug日志{}",str);
        CoreLogger.warn("testLog start:这是一个异步warn日志{}",str);
        CoreLogger.info("testLog start:这是一个异步info日志{}",str);
        CoreLogger.error("testLog start:这是一个异步日志:{}",str);
    }
}

View File

@@ -0,0 +1 @@
org.springframework.boot.env.EnvironmentPostProcessor=com.sdm.submit.config.DecryptEnvironmentPostProcessor

View File

@@ -0,0 +1,127 @@
server:
port: 7108
spring:
application:
name: submit
datasource:
username: root
password: mysql
jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
hikari:
maximum-pool-size: 450 # 连接池最大连接数(关键!)
minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁)
idle-timeout: 300000 # 空闲连接超时时间5分钟
max-lifetime: 600000 # 连接最大存活时间10分钟
connection-timeout: 30000 # 获取连接超时时间30秒避免线程阻塞
master:
username: root
password: mysql
jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
slave:
username: root
password: mysql
jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
enable: true
cloud:
nacos:
discovery:
server-addr: 192.168.65.161:8848
group: DEV_GROUP
enabled: true
# username: nacos
# password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
data:
redis:
# Redis默认情况下有16个分片(库)这里配置具体使用的分片默认是0
database: 0
# redis服务器地址填写自己的服务器地址
host: 192.168.2.166
# redis端口默认6379
port: 6379
#redis连接超时等待,10秒
timeout: PT10S
# redis访问密码默认为空
password:
lettuce:
pool:
# 连接池最大连接数(使用负值表示没有限制) 默认 8
max-active: 50
# 连接池中的最大空闲连接 默认 8
max-idle: 20
# 连接池中的最小空闲连接 默认 0
min-idle: 1
# 连接池最大阻塞等待时间(使用负值表示没有限制) 默认 -1这里配置10s
max-wait: PT10S
# password:
# sentinel:
# master: mymaster
# nodes: 10.18.109.50:26379,10.18.109.51:26379,10.18.109.52:26379
servlet:
multipart:
# 单个文件的最大值
max-file-size: 500MB
# 上传文件总的最大值
max-request-size: 10240MB
management:
endpoints:
web:
exposure:
include: health,info
endpoint:
health:
show-details: always
mybatis-plus:
configuration:
map-underscore-to-camel-case: true
auto-mapping-behavior: full
log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
# cache-enabled: true
mapper-locations: classpath*:mapper/**/*.xml
global-config:
# 逻辑删除配置
db-config:
# 删除前
logic-not-delete-value: 1
# 删除后
logic-delete-value: 0
# MyBatis SQL日志配置
logging:
level:
com.baomidou.mybatisplus.core.MybatisConfiguration: debug
com.baomidou.mybatisplus.core.override.MybatisMapperRegistry: trace
com.sdm.data.mapper: debug
java.sql: debug
java.sql.Connection: debug
java.sql.Statement: debug
java.sql.PreparedStatement: debug
lombok:
anyConstructor:
addConstructorProperties: true
#logging:
# config: ./config/logback.xml
#地址: https://play.min.io
#凭据:
#账号: minioadmin / 密码: minioadmin
#特点:
#由 MinIO 官方提供,用于演示和测试。
#公开可访问,但数据可能定期清理。
#支持 S3 API适合快速验证代码
minio:
endpoint: 192.168.65.161
port: 9000
access-key: minioadmin
secret-key: minioadmin
secure: false
bucket-name: spdm

View File

@@ -0,0 +1,117 @@
server:
port: 7108
spring:
application:
name: submit
datasource:
username: root
password: mysql
jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
hikari:
maximum-pool-size: 450 # 连接池最大连接数(关键!)
minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁)
idle-timeout: 300000 # 空闲连接超时时间5分钟
max-lifetime: 600000 # 连接最大存活时间10分钟
connection-timeout: 30000 # 获取连接超时时间30秒避免线程阻塞
master:
username: root
password: mysql
jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
slave:
username: root
password: mysql
jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
enable: true
cloud:
nacos:
discovery:
server-addr: 192.168.65.161:8848/nacos
group: LOCAL_GROUP
enabled: true
# username: nacos
# password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
data:
redis:
# Redis默认情况下有16个分片(库)这里配置具体使用的分片默认是0
database: 0
# redis服务器地址填写自己的服务器地址
host: 192.168.2.166
# redis端口默认6379
port: 6379
#redis连接超时等待,10秒
timeout: PT10S
# redis访问密码默认为空
password:
lettuce:
pool:
# 连接池最大连接数(使用负值表示没有限制) 默认 8
max-active: 50
# 连接池中的最大空闲连接 默认 8
max-idle: 20
# 连接池中的最小空闲连接 默认 0
min-idle: 1
# 连接池最大阻塞等待时间(使用负值表示没有限制) 默认 -1这里配置10s
max-wait: PT10S
# password:
# sentinel:
# master: mymaster
# nodes: 10.18.109.50:26379,10.18.109.51:26379,10.18.109.52:26379
servlet:
multipart:
# 单个文件的最大值
max-file-size: 500MB
# 上传文件总的最大值
max-request-size: 10240MB
mybatis-plus:
configuration:
map-underscore-to-camel-case: true
auto-mapping-behavior: full
log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
# cache-enabled: true
mapper-locations: classpath*:mapper/**/*.xml
global-config:
# 逻辑删除配置
db-config:
# 删除前
logic-not-delete-value: 1
# 删除后
logic-delete-value: 0
# MyBatis SQL日志配置
logging:
level:
com.baomidou.mybatisplus.core.MybatisConfiguration: debug
com.baomidou.mybatisplus.core.override.MybatisMapperRegistry: trace
com.sdm.data.mapper: debug
java.sql: debug
java.sql.Connection: debug
java.sql.Statement: debug
java.sql.PreparedStatement: debug
lombok:
anyConstructor:
addConstructorProperties: true
#logging:
# config: ./config/logback.xml
#地址: https://play.min.io
#凭据:
#账号: minioadmin / 密码: minioadmin
#特点:
#由 MinIO 官方提供,用于演示和测试。
#公开可访问,但数据可能定期清理。
#支持 S3 API适合快速验证代码
minio:
endpoint: 192.168.65.161
port: 9000
access-key: minioadmin
secret-key: minioadmin
secure: false
bucket-name: spdm

View File

@@ -0,0 +1,109 @@
server:
port: 7108
spring:
application:
name: submit
datasource:
hikari:
maximum-pool-size: 450 # 连接池最大连接数(关键!)
minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁)
idle-timeout: 300000 # 空闲连接超时时间5分钟
max-lifetime: 600000 # 连接最大存活时间10分钟
connection-timeout: 30000 # 获取连接超时时间30秒避免线程阻塞
master:
username: root
password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
jdbc-url: jdbc:mysql://172.27.2.238:3306/spdm_prod?useUnicode=true&characterEncoding=utf-8&useSSL=true&clientCertificateKeyStoreUrl=file:/opt/spdm/mysql_ssl/keystoremysql&clientCertificateKeyStorePassword=guangqi&trustCertificateKeyStoreUrl=file:/opt/spdm/mysql_ssl/truststoremysql&trustCertificateKeyStorePassword=guangqi&serverTimezone=Asia/Shanghai
# jdbc-url: jdbc:mysql://10.30.10.210:3306/spdm?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
slave:
username: root
password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
jdbc-url: jdbc:mysql://172.27.2.238:3306/spdm_prod?useUnicode=true&characterEncoding=utf-8&useSSL=true&clientCertificateKeyStoreUrl=file:/opt/spdm/mysql_ssl/keystoremysql&clientCertificateKeyStorePassword=guangqi&trustCertificateKeyStoreUrl=file:/opt/spdm/mysql_ssl/truststoremysql&trustCertificateKeyStorePassword=guangqi&serverTimezone=Asia/Shanghai
# jdbc-url: jdbc:mysql://10.30.10.210:3306/spdm?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
enable: true
cloud:
nacos:
discovery:
server-addr: 172.27.2.238:8848
group: PROD_GROUP
enabled: true
username: nacos
password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
namespace: 3
data:
redis:
# Redis默认情况下有16个分片(库)这里配置具体使用的分片默认是0
database: 1
# redis服务器地址填写自己的服务器地址
host: 172.27.2.238
# redis端口默认6379
port: 6379
#redis连接超时等待,10秒
timeout: PT10S
# redis访问密码默认为空
password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
lettuce:
pool:
# 连接池最大连接数(使用负值表示没有限制) 默认 8
max-active: 50
# 连接池中的最大空闲连接 默认 8
max-idle: 20
# 连接池中的最小空闲连接 默认 0
min-idle: 1
# 连接池最大阻塞等待时间(使用负值表示没有限制) 默认 -1这里配置10s
max-wait: PT10S
# password:
# sentinel:
# master: mymaster
# nodes: 10.18.109.50:26379,10.18.109.51:26379,10.18.109.52:26379
servlet:
multipart:
# 单个文件的最大值
max-file-size: 500MB
# 上传文件总的最大值
max-request-size: 10240MB
mybatis-plus:
configuration:
map-underscore-to-camel-case: true
auto-mapping-behavior: full
# log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
mapper-locations: classpath*:mapper/**/*.xml
global-config:
# 逻辑删除配置
db-config:
# 删除前
logic-not-delete-value: 1
# 删除后
logic-delete-value: 0
#showSql
#logging:
# level:
# com.sdm.dao: debug
lombok:
anyConstructor:
addConstructorProperties: true
ftp:
host: 172.27.3.135
port: 6318
#参考: MinIO 官方演示环境 https://play.min.io(账号/密码 minioadmin,公开可访问,数据会定期清理,支持 S3 API,可用于快速验证代码)
#NOTE(review): 下方 endpoint 指向开发环境 MinIO(192.168.65.161),此文件为生产配置,请确认是否应改为生产环境地址
minio:
endpoint: 192.168.65.161
port: 9000
access-key: minioadmin
secret-key: minioadmin
secure: false
bucket-name: spdm

View File

@@ -0,0 +1,101 @@
server:
port: 7108
spring:
application:
name: submit
datasource:
hikari:
maximum-pool-size: 450 # 连接池最大连接数(关键!)
minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁)
idle-timeout: 300000 # 空闲连接超时时间5分钟
max-lifetime: 600000 # 连接最大存活时间10分钟
connection-timeout: 30000 # 获取连接超时时间30秒避免线程阻塞
master:
username: root
password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
jdbc-url: jdbc:mysql://172.27.2.235:3306/spdm?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
# jdbc-url: jdbc:mysql://10.30.10.210:3306/spdm?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
slave:
username: root
password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
jdbc-url: jdbc:mysql://172.27.2.235:3306/spdm?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
# jdbc-url: jdbc:mysql://10.30.10.210:3306/spdm?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai
driver-class-name: com.mysql.cj.jdbc.Driver
enable: true
cloud:
nacos:
discovery:
server-addr: 172.27.2.238:8848
group: TEST_GROUP
enabled: true
username: nacos
password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
data:
redis:
# Redis默认情况下有16个分片(库)这里配置具体使用的分片默认是0
database: 0
# redis服务器地址填写自己的服务器地址
host: 172.27.2.238
# redis端口默认6379
port: 6379
#redis连接超时等待,10秒
timeout: PT10S
# redis访问密码默认为空
password: ENC(+QKYnI6gAYu1SbLaZQTkZA==)
lettuce:
pool:
# 连接池最大连接数(使用负值表示没有限制) 默认 8
max-active: 50
# 连接池中的最大空闲连接 默认 8
max-idle: 20
# 连接池中的最小空闲连接 默认 0
min-idle: 1
# 连接池最大阻塞等待时间(使用负值表示没有限制) 默认 -1这里配置10s
max-wait: PT10S
servlet:
multipart:
# 单个文件的最大值
max-file-size: 500MB
# 上传文件总的最大值
max-request-size: 10240MB
mybatis-plus:
configuration:
map-underscore-to-camel-case: true
auto-mapping-behavior: full
# log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
mapper-locations: classpath*:mapper/**/*.xml
global-config:
# 逻辑删除配置
db-config:
# 删除前
logic-not-delete-value: 1
# 删除后
logic-delete-value: 0
#showSql
#logging:
# level:
# com.sdm.dao: debug
lombok:
anyConstructor:
addConstructorProperties: true
#地址: https://play.min.io
#凭据:
#账号: minioadmin / 密码: minioadmin
#特点:
#由 MinIO 官方提供,用于演示和测试。
#公开可访问,但数据可能定期清理。
#支持 S3 API适合快速验证代码
minio:
endpoint: 192.168.65.161
port: 9000
access-key: minioadmin
secret-key: minioadmin
secure: false
bucket-name: spdm

View File

@@ -0,0 +1,3 @@
spring:
profiles:
active: dev

View File

@@ -0,0 +1,13 @@
#!/bin/bash
# Tail the Spring Boot application's live log file.
LOG_HOME="/home/app/data/logs"
LOG_FILE="${LOG_HOME}/running.log"

# Bail out early when the log file is missing (service probably not started yet).
if [ ! -f "${LOG_FILE}" ]; then
    echo "日志文件不存在:${LOG_FILE}(可能项目未启动)"
    exit 1
fi

echo "正在查看实时运行日志(按 Ctrl+C 退出)... 日志路径:${LOG_FILE}"
# exec replaces this shell with tail; Ctrl+C stops the tail as before.
exec tail -f "${LOG_FILE}"

View File

@@ -0,0 +1,25 @@
#!/bin/bash
# Spring Boot restart script: stop the service, then start it again.
# Shared base directory for the deploy scripts.
BASE_DIR="/home/app/data"
echo "=== 开始重启项目 ==="
# Stop first.
if [ -f "${BASE_DIR}/stop.sh" ]; then
    "${BASE_DIR}/stop.sh"
else
    echo "错误:未找到停止脚本 ${BASE_DIR}/stop.sh"
    exit 1
fi
# Then start. Pass --silent so start.sh skips its interactive `tail -f`;
# without it this restart script would block until the operator hit Ctrl+C.
if [ -f "${BASE_DIR}/start.sh" ]; then
    "${BASE_DIR}/start.sh" --silent
else
    echo "错误:未找到启动脚本 ${BASE_DIR}/start.sh"
    exit 1
fi
echo "=== 重启操作完成 ==="

View File

@@ -0,0 +1,74 @@
#!/bin/bash
# Spring Boot start script (does not touch the logback config; can tail the live log).
# Usage: start.sh [--silent]
#   --silent : start the service without tailing the log afterwards.
JAR_PATH="/home/app/data"
JAR_NAME="data-0.0.1-SNAPSHOT.jar"
FULL_JAR_PATH="${JAR_PATH}/${JAR_NAME}"
# Log location -- must stay in sync with logback.xml.
LOG_HOME="/home/app/data/logs"
LOG_FILE="${LOG_HOME}/running.log"
# JVM options (heap limits + heap dump on OOM).
JVM_OPTS="-Xms512m -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${LOG_HOME}/heapdump.hprof"

# Interactive mode (tail logs) by default; --silent disables tailing.
silent_mode=0
if [ "$1" = "--silent" ]; then
    silent_mode=1
fi

# Abort when the jar is missing.
check_jar_exists() {
    if [ ! -f "${FULL_JAR_PATH}" ]; then
        echo "ERROR: Jar包不存在路径${FULL_JAR_PATH}"
        exit 1
    fi
}

# Print PID(s) of any already-running instance (empty when not running).
get_running_pid() {
    ps -ef | grep "${JAR_NAME}" | grep -v "grep" | awk '{print $2}'
}

check_jar_exists

PID=$(get_running_pid)
if [ -n "${PID}" ]; then
    echo "项目已在运行中PID: ${PID}"
    exit 0
fi

# Make sure the log directory exists before redirecting output into it.
if [ ! -d "${LOG_HOME}" ]; then
    mkdir -p "${LOG_HOME}"
    echo "日志目录不存在,已自动创建:${LOG_HOME}"
fi

echo "正在启动项目..."
echo "======================================================================"
# JVM_OPTS is intentionally unquoted so each option becomes a separate argument.
nohup java ${JVM_OPTS} -jar "${FULL_JAR_PATH}" > "${LOG_FILE}" 2>&1 &
# Capture the PID directly from the shell ($!) instead of re-grepping ps:
# grepping races with process startup and can match unrelated processes.
NEW_PID=$!
# Give the JVM a moment to crash on fatal config errors, then confirm it is alive.
sleep 2
if kill -0 "${NEW_PID}" 2>/dev/null; then
    # Only tail in interactive mode; the banner belongs here too, so --silent stays quiet.
    if [ ${silent_mode} -eq 0 ]; then
        echo "实时日志如下(按 Ctrl+C 可退出查看,进程会继续运行)"
        tail -f "${LOG_FILE}"
    fi
    # Whatever way tail exits, the process itself was confirmed started.
    echo "======================================================================"
    echo "项目启动成功PID: ${NEW_PID}"
    echo "日志文件路径:${LOG_FILE}"
    exit 0
else
    echo "======================================================================"
    echo "项目启动失败!请查看日志:${LOG_FILE}"
    exit 1
fi

View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Report whether the Spring Boot application is currently running.
JAR_NAME="data-0.0.1-SNAPSHOT.jar"
LOG_HOME="/home/app/data/logs"
LOG_FILE="${LOG_HOME}/running.log"

# Look up the PID of any process running the target jar (empty if none).
get_running_pid() {
    ps -ef | grep "${JAR_NAME}" | grep -v "grep" | awk '{print $2}'
}

PID=$(get_running_pid)
if [ -z "${PID}" ]; then
    echo "项目未在运行中"
else
    echo "项目运行中PID: ${PID}"
    echo "日志文件路径:${LOG_FILE}"
fi

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Spring Boot stop script: graceful SIGTERM first, escalate to SIGKILL after 10s.
JAR_NAME="data-0.0.1-SNAPSHOT.jar"

# Print PID(s) of running instances -- may be several, one per line.
get_running_pid() {
    ps -ef | grep "${JAR_NAME}" | grep -v "grep" | awk '{print $2}'
}

PID=$(get_running_pid)
if [ -z "${PID}" ]; then
    echo "项目未在运行中,无需停止"
    exit 0
fi

echo "正在停止项目... PID: ${PID}"
# PID is deliberately UNQUOTED: get_running_pid can return multiple PIDs, and
# quoting would glue them into one invalid argument ("123\n456") that kill rejects;
# word-splitting passes each PID to kill as its own argument.
kill -15 ${PID}

# Wait up to 10 seconds for a graceful shutdown.
WAIT=0
while [ ${WAIT} -lt 10 ]; do
    if [ -z "$(get_running_pid)" ]; then
        echo "项目已优雅停止"
        exit 0
    fi
    sleep 1
    WAIT=$((WAIT + 1))
done

echo "优雅停止超时,强制终止进程... PID: ${PID}"
kill -9 ${PID}
sleep 2
if [ -z "$(get_running_pid)" ]; then
    echo "项目已强制停止"
else
    echo "ERROR: 进程终止失败请手动检查ps -ef | grep ${JAR_NAME}"
    exit 1
fi

View File

@@ -0,0 +1,89 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<!-- 彩色日志依赖的渲染类 -->
<conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" />
<conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" />
<conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" />
<!-- 控制台格式 -->
<property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr([%X{traceId}] %d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr([%15.15t]){faint} %clr(%logger){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}" />
<!-- 普通日志格式(无颜色) -->
<property name="FILE_LOG_PATTERN" value="[%X{traceId}] %d{yyyy-MM-dd HH:mm:ss.SSS} %5p ${PID:- } [%15.15t] %logger : %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}" />
<!-- 日志文件存储地址 -->
<property name="LOG_HOME" value="/home/app/submit/logs" />
    <!-- 1. 控制台输出(恢复原始配置,无 %if -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${CONSOLE_LOG_PATTERN}</pattern>
</encoder>
</appender>
<!-- 2. INFO级别日志输出到running.log -->
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/running.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<FileNamePattern>${LOG_HOME}/running.log.%d{yyyy-MM-dd}.%i.log</FileNamePattern>
<MaxHistory>30</MaxHistory>
<TotalSizeCap>500MB</TotalSizeCap>
<maxFileSize>10MB</maxFileSize>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
    <!-- 3. DEBUG级别日志输出到running_debug.log注意下方 root 级别为 INFO 时不会产生 DEBUG 事件,
         需将 root 或具体 logger 调整为 DEBUG此文件才会有内容 -->
<appender name="DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/running_debug.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<FileNamePattern>${LOG_HOME}/running_debug.log.%d{yyyy-MM-dd}.%i.log</FileNamePattern>
<MaxHistory>30</MaxHistory>
<TotalSizeCap>500MB</TotalSizeCap>
<maxFileSize>10MB</maxFileSize>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>DEBUG</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- 4. core.log 专用输出器(保留 callerInfo 格式) -->
<appender name="CORE_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/core.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<FileNamePattern>${LOG_HOME}/core.log.%d{yyyy-MM-dd}.%i.log</FileNamePattern>
<MaxHistory>30</MaxHistory>
<TotalSizeCap>500MB</TotalSizeCap>
<maxFileSize>10MB</maxFileSize>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <!-- 仅 core.log 显示真实调用位置(类名.方法名(行号)) -->
<pattern>[%X{traceId}] %d{yyyy-MM-dd HH:mm:ss.SSS} %5p ${PID:- } [%15.15t] %X{callerInfo} : %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<!-- 绑定 coreLogger → 输出到 core.log + 控制台 -->
<logger name="coreLogger" level="INFO" additivity="false">
<appender-ref ref="CORE_FILE" /> <!-- 核心日志写入 core.log -->
<appender-ref ref="STDOUT" /> <!-- 同时输出到控制台(显示 CoreLogger -->
</logger>
<!-- 全局root配置 -->
<root level="INFO">
<appender-ref ref="STDOUT" />
<appender-ref ref="INFO_FILE" />
<appender-ref ref="DEBUG_FILE" />
</root>
</configuration>