diff --git a/capability/src/main/resources/logback.xml b/capability/src/main/resources/logback.xml index a42fa36a..6b2d68a6 100644 --- a/capability/src/main/resources/logback.xml +++ b/capability/src/main/resources/logback.xml @@ -108,6 +108,6 @@ - - + + \ No newline at end of file diff --git a/common/src/main/java/com/sdm/common/common/UserNameResponseAdvice.java b/common/src/main/java/com/sdm/common/common/UserNameResponseAdvice.java new file mode 100644 index 00000000..2a8d7a93 --- /dev/null +++ b/common/src/main/java/com/sdm/common/common/UserNameResponseAdvice.java @@ -0,0 +1,366 @@ +package com.sdm.common.common; + +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.sdm.common.entity.req.system.UserQueryReq; +import com.sdm.common.entity.resp.system.CIDUserResp; +import com.sdm.common.service.UserNameCacheService; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.MethodParameter; +import org.springframework.http.MediaType; +import org.springframework.http.server.ServerHttpRequest; +import org.springframework.http.server.ServerHttpResponse; +import org.springframework.web.bind.annotation.RestController; +import org.springframework.web.bind.annotation.RestControllerAdvice; +import org.springframework.web.servlet.mvc.method.annotation.ResponseBodyAdvice; + +import java.lang.reflect.Field; +import java.util.*; +import java.util.stream.Collectors; + +@RestControllerAdvice +@Slf4j +public class UserNameResponseAdvice implements ResponseBodyAdvice { + + @Autowired + private UserNameCacheService userNameCacheService; + + @Override + public boolean supports(MethodParameter returnType, Class converterType) { + Class controllerClass = returnType.getContainingClass(); + String className = controllerClass.getSimpleName(); + String fullClassName = controllerClass.getName(); + + // 排除SysUserController + if ("SysUserController".equals(className) || 
fullClassName.contains("SysUserController")) { + return false; + } + + // 只处理RestController的方法 + return returnType.getContainingClass().isAnnotationPresent(RestController.class); + } + + @Override + public Object beforeBodyWrite(Object body, MethodParameter returnType, + MediaType selectedContentType, Class selectedConverterType, + ServerHttpRequest request, ServerHttpResponse response) { + long startTime = System.currentTimeMillis(); + try { + if (body == null) { + return null; + } + + processBody(body); + + long cost = System.currentTimeMillis() - startTime; + log.debug("用户名转换完成,耗时: {}ms", cost); + + } catch (Exception e) { + log.error("用户名转换处理失败", e); + } + + return body; + } + + private void processBody(Object body) { + if (body == null) return; + + // 如果是SdmResponse类型,处理其data字段 + if (isSdmResponse(body)) { + processSdmResponse(body); + } else if (body instanceof Collection) { + ((Collection) body).forEach(this::processBody); + } else if (body instanceof Page) { + ((Page) body).getRecords().forEach(this::processBody); + } else { + processSingleObjectWithChildren(body); + } + } + + /** + * 处理SdmResponse对象 + */ + private void processSdmResponse(Object sdmResponse) { + try { + Field dataField = sdmResponse.getClass().getDeclaredField("data"); + dataField.setAccessible(true); + Object data = dataField.get(sdmResponse); + + if (data != null) { + // 递归处理data字段 兼容处理双重嵌套的data结构 + processDoubleNestedData(data); + } + } catch (Exception e) { + log.warn("解析SdmResponse的data字段失败: {}", e.getMessage()); + } + } + + /** + * 处理双重嵌套的data结构 + */ + private void processDoubleNestedData(Object outerData) { + if (outerData == null) return; + + try { + // 检查外层data是否还有内层data字段 + if (hasDataField(outerData)) { + Object innerData = getDataValue(outerData); + + if (innerData != null) { + // 处理内层data + processInnerData(innerData); + } + } else { + // 如果没有内层data,直接处理外层data + processBody(outerData); + } + } catch (Exception e) { + log.warn("处理双重嵌套data结构失败: {}", e.getMessage()); + } + } + + 
/** + * 获取data字段或data键的值 + */ + private Object getDataValue(Object obj) { + if (obj == null) return null; + + try { + // 情况1:如果是Map,通过get方法获取 + if (obj instanceof Map) { + Map map = (Map) obj; + return map.get("data"); + } + + // 情况2:如果是普通对象,通过反射获取字段值 + Field dataField = obj.getClass().getDeclaredField("data"); + dataField.setAccessible(true); + return dataField.get(obj); + + } catch (Exception e) { + log.warn("获取data值失败: {}", e.getMessage()); + return null; + } + } + + /** + * 检查是否有data字段或data键 + */ + private boolean hasDataField(Object obj) { + if (obj == null) { + return false; + } + // 情况1:如果是Map类型,检查是否有"data"键 + if (obj instanceof Map) { + Map map = (Map) obj; + return map.containsKey("data"); + } + // 情况2:如果是普通Java对象,检查是否有data字段 + try { + Field dataField = obj.getClass().getDeclaredField("data"); + return dataField != null; + } catch (NoSuchFieldException e) { + return false; + } + } + + /** + * 处理内层data + */ + private void processInnerData(Object innerData) { + if (innerData == null) return; + + try { + // 内层data可能是列表、分页或单个对象 + if (innerData instanceof Collection) { + ((Collection) innerData).forEach(this::processBody); + } else if (innerData instanceof Page) { + ((Page) innerData).getRecords().forEach(this::processBody); + } else if (hasRecordsField(innerData)) { + // 处理分页结构的records字段 + processRecordsField(innerData); + } else { + // 单个对象 + processBody(innerData); + } + } catch (Exception e) { + log.warn("处理内层data失败: {}", e.getMessage()); + } + } + + /** + * 检查是否有records字段(分页结构) + */ + private boolean hasRecordsField(Object data) { + try { + Field recordsField = data.getClass().getDeclaredField("records"); + return recordsField != null; + } catch (NoSuchFieldException e) { + return false; + } + } + + /** + * 处理records字段(分页数据) + */ + private void processRecordsField(Object data) { + try { + Field recordsField = data.getClass().getDeclaredField("records"); + recordsField.setAccessible(true); + Object records = recordsField.get(data); + + if (records 
instanceof Collection) { + ((Collection) records).forEach(this::processBody); + } + } catch (Exception e) { + log.warn("处理records字段失败: {}", e.getMessage()); + } + } + + + /** + * 判断是否是SdmResponse类型 + */ + private boolean isSdmResponse(Object obj) { + if (obj == null) return false; + + // 通过类名判断 + String className = obj.getClass().getSimpleName(); + if ("SdmResponse".equals(className)) { + return true; + } + // 检查特定字段 + return hasSdmResponseFields(obj); + } + + private boolean hasSdmResponseFields(Object obj) { + try { + Class clazz = obj.getClass(); + Field codeField = clazz.getDeclaredField("code"); + Field messageField = clazz.getDeclaredField("message"); + Field dataField = clazz.getDeclaredField("data"); + + return codeField != null && messageField != null && dataField != null; + } catch (NoSuchFieldException e) { + return false; + } + } + + private void processSingleObjectWithChildren(Object obj) { + if (obj == null) return; + + try { + // 收集不重复的userId + Set userIds = collectUserIds(obj); + if (!userIds.isEmpty()) { + // 获取用户名称 + Map userNames = userNameCacheService.batchGetUserNames(userIds); + setUserNamesToObject(obj, userNames); + } + // 递归处理children字段 + processChildrenField(obj); + } catch (Exception e) { + log.warn("处理对象 {} 的用户名失败: {}", obj.getClass().getSimpleName(), e.getMessage()); + } + } + + /** + * 递归处理children字段 + */ + private void processChildrenField(Object obj) throws Exception { + List fields = getAllFields(obj); + + for (Field field : fields) { + if ("children".equals(field.getName())) { + field.setAccessible(true); + Object children = field.get(obj); + + if (children != null) { + // 递归处理children + processBody(children); + } + // 找到children字段后就可以退出循环了 + break; + } + } + } + + private Set collectUserIds(Object obj) throws Exception { + Set userIds = new HashSet<>(); + List fields = getAllFields(obj); + + for (Field field : fields) { + if (isUserIdField(field)) { + field.setAccessible(true); + Long userId = (Long) field.get(obj); + 
userIds.add(userId); + } + } + + return userIds; + } + + /** + * 获取对象的所有字段(包括父类) + */ + private List getAllFields(Object obj) { + List fields = new ArrayList<>(); + Class clazz = obj.getClass(); + + // 递归获取所有父类的字段 + while (clazz != null && clazz != Object.class) { + fields.addAll(Arrays.asList(clazz.getDeclaredFields())); + clazz = clazz.getSuperclass(); + } + + return fields; + } + + private boolean isUserIdField(Field field) { + String fieldName = field.getName(); + return "creator".equals(fieldName) || "updater".equals(fieldName) || "userId".equals(fieldName); + } + + private void setUserNamesToObject(Object obj, Map userNames) throws Exception { + List fields = getAllFields(obj); + + for (Field field : fields) { + if (isUserIdField(field)) { + field.setAccessible(true); + Long userId = (Long) field.get(obj); + String userName = userNames.get(userId); + if (userName != null) { + // 设置对应的name字段 + String nameFieldName = field.getName() + "Name"; + try { + Field nameField = getFieldRecursively(obj.getClass(), nameFieldName); + nameField.setAccessible(true); + nameField.set(obj, userName); + } catch (NoSuchFieldException e) { + // 如果没有对应的name字段,忽略 + log.debug("对象 {} 没有字段 {}", obj.getClass().getSimpleName(), nameFieldName); + } + } + } + } + } + + /** + * 递归获取字段(包括父类) + */ + private Field getFieldRecursively(Class clazz, String fieldName) throws NoSuchFieldException { + try { + // 在当前类中查找 + return clazz.getDeclaredField(fieldName); + } catch (NoSuchFieldException e) { + // 如果在当前类中没找到,递归到父类中查找 + Class superClass = clazz.getSuperclass(); + if (superClass != null && superClass != Object.class) { + return getFieldRecursively(superClass, fieldName); + } else { + throw new NoSuchFieldException("字段 '" + fieldName + "' 在类 " + clazz.getName() + " 及其父类中未找到"); + } + } + } +} + diff --git a/common/src/main/java/com/sdm/common/config/CacheConfig.java b/common/src/main/java/com/sdm/common/config/CacheConfig.java new file mode 100644 index 00000000..ada42515 --- /dev/null +++ 
b/common/src/main/java/com/sdm/common/config/CacheConfig.java @@ -0,0 +1,51 @@ +package com.sdm.common.config; + +import lombok.extern.slf4j.Slf4j; +import org.springframework.cache.Cache; +import org.springframework.cache.CacheManager; +import org.springframework.cache.annotation.EnableCaching; +import org.springframework.cache.concurrent.ConcurrentMapCache; +import org.springframework.cache.interceptor.CacheErrorHandler; +import org.springframework.cache.interceptor.SimpleCacheErrorHandler; +import org.springframework.cache.support.SimpleCacheManager; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import java.util.Arrays; + +@Configuration +@EnableCaching +@Slf4j +public class CacheConfig { + + /** + * 用户名缓存配置 + */ + @Bean + public CacheManager cacheManager() { + SimpleCacheManager cacheManager = new SimpleCacheManager(); + cacheManager.setCaches(Arrays.asList( + // 用户名缓存 + new ConcurrentMapCache("userNames") + // 可以添加其他缓存配置 + )); + return cacheManager; + } + + @Bean + public CacheErrorHandler cacheErrorHandler() { + return new SimpleCacheErrorHandler() { + @Override + public void handleCacheGetError(RuntimeException exception, Cache cache, Object key) { + log.warn("缓存获取失败, key: {}, 错误: {}", key, exception.getMessage()); + // 不抛出异常,继续执行 + } + + @Override + public void handleCachePutError(RuntimeException exception, Cache cache, Object key, Object value) { + log.warn("缓存存入失败, key: {}, 错误: {}", key, exception.getMessage()); + // 不抛出异常,继续执行 + } + }; + } +} \ No newline at end of file diff --git a/common/src/main/java/com/sdm/common/entity/constants/PermConstants.java b/common/src/main/java/com/sdm/common/entity/constants/PermConstants.java index bf469896..0d30629b 100644 --- a/common/src/main/java/com/sdm/common/entity/constants/PermConstants.java +++ b/common/src/main/java/com/sdm/common/entity/constants/PermConstants.java @@ -30,4 +30,7 @@ public class PermConstants { public static final String 
ENCODE_FILE_FLAG = ".sdmaes"; public static final String DECODE_FILE_FLAG = "AES_DECODE"; public static final int REDIS_EXPIRE_TIME = 3600 * 48; //超期时间48小时 + // 分片碎文件的后缀 + public static final String CHUNK_TEMPFILE_SUFFIX=".temp"; + } diff --git a/common/src/main/java/com/sdm/common/entity/pojo/BaseEntity.java b/common/src/main/java/com/sdm/common/entity/pojo/BaseEntity.java index 9258a4e9..c5ebbb10 100644 --- a/common/src/main/java/com/sdm/common/entity/pojo/BaseEntity.java +++ b/common/src/main/java/com/sdm/common/entity/pojo/BaseEntity.java @@ -14,10 +14,10 @@ public class BaseEntity extends BaseBean { @Schema(description = "更新者ID") @JsonFormat(shape = JsonFormat.Shape.STRING) - public long updater; + public Long updater; @Schema(description = "更新者名称") - public String updateName; + public String updaterName; @Schema(description = "更新时间") public String updateTime; @@ -31,7 +31,8 @@ public class BaseEntity extends BaseBean { @Schema(description = "创建者ID") @JsonFormat(shape = JsonFormat.Shape.STRING) - public long creator; + public Long creator; + public String creatorName; @Schema(description = "创建时间") public String createTime; diff --git a/common/src/main/java/com/sdm/common/entity/req/data/ChunkUploadMinioFileReq.java b/common/src/main/java/com/sdm/common/entity/req/data/ChunkUploadMinioFileReq.java new file mode 100644 index 00000000..3c9d7fd5 --- /dev/null +++ b/common/src/main/java/com/sdm/common/entity/req/data/ChunkUploadMinioFileReq.java @@ -0,0 +1,37 @@ +package com.sdm.common.entity.req.data; + +import lombok.Data; +import org.springframework.web.multipart.MultipartFile; + +@Data +public class ChunkUploadMinioFileReq { + + // 接口5.1 前端生成的,用于发起统一一次审批流的凭证 + private String uploadTaskId; + + // 接口5.1 返回的这个文件对应的业务数据主键id + private Long businessId; + + // 接口5.1 返回的 objectKey + private String objectKey; + + // 原始文件的名称 + private String sourceFileName; + + // 当前为第几分片 + private Integer chunk; + + // DigestUtils.md5Hex(chunkData[]) + private String chunkMd5; + + // 
分片总数 + private Integer chunkTotal; + + // 分块文件传输对象 + private MultipartFile file; + + // 单一文件维度。第一片请求不传,后面的请求必传,第一次请求成功后后端会返回,本次文件的父目录 + private String fileTempPath; + + +} diff --git a/common/src/main/java/com/sdm/common/entity/req/data/UploadFilesReq.java b/common/src/main/java/com/sdm/common/entity/req/data/UploadFilesReq.java index ab351842..c718edff 100644 --- a/common/src/main/java/com/sdm/common/entity/req/data/UploadFilesReq.java +++ b/common/src/main/java/com/sdm/common/entity/req/data/UploadFilesReq.java @@ -12,6 +12,12 @@ import java.util.List; @Schema(description = "文件上传请求参数") public class UploadFilesReq { + @Schema(description = "用户勾选的所有的文件的原始名称和大小,前端限制不能选择相同名称的文件,后端逻辑判断对应dirId下不能和历史文件名相同") + private List sourceFiles; + + @Schema(description = "本次新增数据的任务id,毫秒值时间戳即可") + private String uploadTaskId; + @Schema(description = "文件路径") private String path; diff --git a/common/src/main/java/com/sdm/common/entity/resp/data/BatchAddFileInfoResp.java b/common/src/main/java/com/sdm/common/entity/resp/data/BatchAddFileInfoResp.java new file mode 100644 index 00000000..60861165 --- /dev/null +++ b/common/src/main/java/com/sdm/common/entity/resp/data/BatchAddFileInfoResp.java @@ -0,0 +1,25 @@ +package com.sdm.common.entity.resp.data; + +import lombok.Data; + + +@Data +public class BatchAddFileInfoResp { + /** + * 这次任务上传的id 时间戳 + */ + private String uploadTaskId; + /** + * 文件名 + */ + private String sourceFileName; + /** + * 文件fileId + */ + private Long businessId; + /** + * MinIO的文件object_key 绝对路径名 + */ + private String objectKey; + +} diff --git a/common/src/main/java/com/sdm/common/entity/resp/data/ChunkUploadMinioFileResp.java b/common/src/main/java/com/sdm/common/entity/resp/data/ChunkUploadMinioFileResp.java new file mode 100644 index 00000000..e39856da --- /dev/null +++ b/common/src/main/java/com/sdm/common/entity/resp/data/ChunkUploadMinioFileResp.java @@ -0,0 +1,23 @@ +package com.sdm.common.entity.resp.data; + +import lombok.Data; + +@Data +public class 
ChunkUploadMinioFileResp { + + // 本次分片上传的结果 true 成功,false 失败 + private Boolean result; + + // 文件关联的业务数据Id + private Long businessId; + + // 分片上传的任务Id,用于回调处理的审批流创建 + private String uploadTaskId; + + // 分片文件的临时目录,第一次请求后,每次都会返回 + private String fileTempPath; + + // 失败的原因 + private String errMsg; + +} diff --git a/common/src/main/java/com/sdm/common/service/UserNameCacheService.java b/common/src/main/java/com/sdm/common/service/UserNameCacheService.java new file mode 100644 index 00000000..e5a639b1 --- /dev/null +++ b/common/src/main/java/com/sdm/common/service/UserNameCacheService.java @@ -0,0 +1,55 @@ +package com.sdm.common.service; + +import com.sdm.common.common.SdmResponse; +import com.sdm.common.entity.req.system.UserQueryReq; +import com.sdm.common.entity.resp.system.CIDUserResp; +import com.sdm.common.feign.inter.system.ISysUserFeignClient; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.cache.annotation.Cacheable; +import org.springframework.stereotype.Service; + +import java.util.*; +import java.util.stream.Collectors; + +@Service +@Slf4j +public class UserNameCacheService { + + @Autowired + private ISysUserFeignClient sysUserFeignClient; + + /** + * 批量获取用户名 - 带缓存 TODO 后续加入Redis的时候改成Redis + */ + @Cacheable(value = "userNames", key = "#userIds.toString()") + public Map batchGetUserNames(Set userIds) { + log.info("【缓存未命中】批量查询用户名,用户数量: {}", userIds.size()); + + // 批量调用用户服务 + SdmResponse> response = sysUserFeignClient.listUserByIds( + UserQueryReq.builder().userIds(new ArrayList<>(userIds)).build() + ); + + Map userMap = response.getData().stream() + .collect(Collectors.toMap( + CIDUserResp::getUserId, + CIDUserResp::getNickname + )); + + return userMap; + } + + /** + * 生成缓存key的静态方法 + */ + public static String generateKey(Set userIds) { + if (userIds == null || userIds.isEmpty()) { + return "empty"; + } + // 排序后拼接成字符串 + List sortedList = new ArrayList<>(userIds); + 
Collections.sort(sortedList); + return sortedList.toString(); + } +} diff --git a/data/src/main/java/com/sdm/data/config/thread/NonSensitiveTaskThreadPool.java b/data/src/main/java/com/sdm/data/config/thread/NonSensitiveTaskThreadPool.java new file mode 100644 index 00000000..c08e0ad2 --- /dev/null +++ b/data/src/main/java/com/sdm/data/config/thread/NonSensitiveTaskThreadPool.java @@ -0,0 +1,54 @@ +package com.sdm.data.config.thread; + +import com.sdm.common.mdc.MdcTaskDecorator; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; + +import java.util.concurrent.Executor; +import java.util.concurrent.ThreadPoolExecutor; + +/** + * 自定义线程池配置--非敏感业务使用,能接受可能长时间排队 + */ +@Configuration +public class NonSensitiveTaskThreadPool { + + @Value("${nonSensitiveTaskPool.coreSize:2}") + private int CORE_POOL_SIZE ; + @Value("${nonSensitiveTaskPool.maxSize:4}") + private int MAX_POOL_SIZE ; + @Value("${nonSensitiveTaskPool.queueSize:5000}") + private int QUEUE_CAPACITY ; + @Value("${nonSensitiveTaskPool.keepLive:60}") + private int KEEP_ALIVE_SECONDS ; + @Value("${nonSensitiveTaskPool.threadName:nonSensitiveTaskPool-}") + private String THREAD_NAME_PREFIX ; + + @Bean(name = "nonSensitiveTaskPool") + public Executor nonSensitiveTaskPool() { + ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); + // 核心线程数 + executor.setCorePoolSize(CORE_POOL_SIZE); + // 最大线程数 + executor.setMaxPoolSize(MAX_POOL_SIZE); + // 队列容量 + executor.setQueueCapacity(QUEUE_CAPACITY); + // 空闲线程存活时间 + executor.setKeepAliveSeconds(KEEP_ALIVE_SECONDS); + // 线程名称前缀 + executor.setThreadNamePrefix(THREAD_NAME_PREFIX); + executor.setTaskDecorator(new MdcTaskDecorator()); + // 线程池拒绝策略:当任务数超过最大线程数+队列容量时的处理方式 + // - ThreadPoolExecutor.AbortPolicy(默认):直接抛出 RejectedExecutionException + // - 
ThreadPoolExecutor.CallerRunsPolicy:由提交任务的线程执行(减缓提交速度,适用于并发不高的场景) + // - ThreadPoolExecutor.DiscardPolicy:直接丢弃新任务,不抛出异常 + // - ThreadPoolExecutor.DiscardOldestPolicy:丢弃队列中最旧的任务,再尝试提交新任务 + executor.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardPolicy()); + + // 初始化线程池(必须调用,否则线程池不生效) + executor.initialize(); + return executor; + } +} \ No newline at end of file diff --git a/data/src/main/java/com/sdm/data/controller/DataFileController.java b/data/src/main/java/com/sdm/data/controller/DataFileController.java index 5ca32764..4e7c8b84 100644 --- a/data/src/main/java/com/sdm/data/controller/DataFileController.java +++ b/data/src/main/java/com/sdm/data/controller/DataFileController.java @@ -2,9 +2,10 @@ package com.sdm.data.controller; import com.sdm.common.common.SdmResponse; import com.sdm.common.entity.req.data.*; -import com.sdm.common.entity.resp.PageDataResp; -import com.sdm.data.model.req.RenameFileReq; import com.sdm.common.entity.req.system.LaunchApproveReq; +import com.sdm.common.entity.resp.PageDataResp; +import com.sdm.common.entity.resp.data.BatchAddFileInfoResp; +import com.sdm.common.entity.resp.data.ChunkUploadMinioFileResp; import com.sdm.common.entity.resp.data.FileMetadataInfoResp; import com.sdm.common.feign.inter.data.IDataFeignClient; import com.sdm.data.model.entity.FileMetadataInfo; @@ -383,5 +384,27 @@ public class DataFileController implements IDataFeignClient { return IDataFileService.queryFileMetadataInfo(uuid, uuidOwnType); } + /** + * 分片上传文件到minio + * + * @param req + * @return SdmResponse + */ + @PostMapping("/chunkUploadToMinio") + @Operation(summary = "文件分片上传到minio") + public SdmResponse chunkUploadToMinio(ChunkUploadMinioFileReq req) { + return IDataFileService.chunkUploadToMinio(req); + } + + /** + * 文件信息入库准备发起评审 + */ + @PostMapping("/batchAddFileInfo") + @Operation(summary = "文件信息入库准备发起评审") + public SdmResponse> batchAddFileInfo(@RequestBody UploadFilesReq req) { + return IDataFileService.batchAddFileInfo(req); + } + + } \ 
No newline at end of file diff --git a/data/src/main/java/com/sdm/data/model/entity/FileMetadataInfo.java b/data/src/main/java/com/sdm/data/model/entity/FileMetadataInfo.java index dd96052f..98ffef6d 100644 --- a/data/src/main/java/com/sdm/data/model/entity/FileMetadataInfo.java +++ b/data/src/main/java/com/sdm/data/model/entity/FileMetadataInfo.java @@ -186,6 +186,14 @@ public class FileMetadataInfo implements Serializable { @TableField(value = "cidFlowId") private String cidFlowId; + @Schema(description= "uploadTaskId:上传新增数据的任务id,毫秒值时间戳") + @TableField(value = "uploadTaskId") + private String uploadTaskId; + + @Schema(description= "uploadStatus:文件上传状态,0上传中,1上传完成") + @TableField(value = "uploadStatus") + private String uploadStatus; + @Schema(description= "cidFlowReviewer:cid审核电子流程里面的评审人,只有列表展示使用") @TableField(value = "cidFlowReviewer", insertStrategy = FieldStrategy.NEVER,select = false,updateStrategy = FieldStrategy.NEVER) private String cidFlowReviewer; diff --git a/data/src/main/java/com/sdm/data/service/IDataFileService.java b/data/src/main/java/com/sdm/data/service/IDataFileService.java index e9ada202..e217f203 100644 --- a/data/src/main/java/com/sdm/data/service/IDataFileService.java +++ b/data/src/main/java/com/sdm/data/service/IDataFileService.java @@ -2,11 +2,12 @@ package com.sdm.data.service; import com.sdm.common.common.SdmResponse; import com.sdm.common.entity.req.data.*; -import com.sdm.common.entity.resp.PageDataResp; -import com.sdm.data.model.entity.FileMetadataInfo; -import com.sdm.data.model.req.RenameFileReq; import com.sdm.common.entity.req.system.LaunchApproveReq; +import com.sdm.common.entity.resp.PageDataResp; +import com.sdm.common.entity.resp.data.BatchAddFileInfoResp; +import com.sdm.common.entity.resp.data.ChunkUploadMinioFileResp; import com.sdm.common.entity.resp.data.FileMetadataInfoResp; +import com.sdm.data.model.entity.FileMetadataInfo; import com.sdm.data.model.req.*; import com.sdm.data.model.resp.KKFileViewURLFromMinioResp; 
import jakarta.servlet.http.HttpServletResponse; @@ -315,4 +316,8 @@ public interface IDataFileService { SdmResponse queryFileMetadataInfo(String uuid, String uuidOwnType); + SdmResponse chunkUploadToMinio(ChunkUploadMinioFileReq req); + + SdmResponse> batchAddFileInfo(UploadFilesReq req); + } \ No newline at end of file diff --git a/data/src/main/java/com/sdm/data/service/IMinioService.java b/data/src/main/java/com/sdm/data/service/IMinioService.java index 0690e04d..f36a21f1 100644 --- a/data/src/main/java/com/sdm/data/service/IMinioService.java +++ b/data/src/main/java/com/sdm/data/service/IMinioService.java @@ -42,7 +42,9 @@ public interface IMinioService { * @param directoryName 目录名称(objectKey) */ public void deleteDirectoryRecursively(String directoryName); - + + public void deleteDirectoryRecursively2(String directoryName,String bucketName); + /** * 递归删除指定目录下的所有对象。 * @@ -130,4 +132,8 @@ public interface IMinioService { String getMinioPresignedUrl(String objectKey); String getMinioPresignedUrl(String objectKey, String bucketName); + Boolean chunkUpload(String bucketName, MultipartFile file, String fileName); + + Boolean merge(String tempBucketName,String tempFilePath,String mergeBucketName,String fileName); + } \ No newline at end of file diff --git a/data/src/main/java/com/sdm/data/service/impl/MinioFileIDataFileServiceImpl.java b/data/src/main/java/com/sdm/data/service/impl/MinioFileIDataFileServiceImpl.java index dd653d27..852b12b7 100644 --- a/data/src/main/java/com/sdm/data/service/impl/MinioFileIDataFileServiceImpl.java +++ b/data/src/main/java/com/sdm/data/service/impl/MinioFileIDataFileServiceImpl.java @@ -8,6 +8,7 @@ import com.github.pagehelper.PageInfo; import com.sdm.common.common.SdmResponse; import com.sdm.common.common.ThreadLocalContext; import com.sdm.common.entity.constants.NumberConstants; +import com.sdm.common.entity.constants.PermConstants; import com.sdm.common.entity.enums.*; import com.sdm.common.entity.req.data.*; import 
com.sdm.common.entity.req.project.SpdmNodeListReq; @@ -15,6 +16,8 @@ import com.sdm.common.entity.req.system.LaunchApproveReq; import com.sdm.common.entity.req.system.UserListReq; import com.sdm.common.entity.req.system.UserQueryReq; import com.sdm.common.entity.resp.PageDataResp; +import com.sdm.common.entity.resp.data.BatchAddFileInfoResp; +import com.sdm.common.entity.resp.data.ChunkUploadMinioFileResp; import com.sdm.common.entity.resp.data.FileMetadataInfoResp; import com.sdm.common.entity.resp.project.SimulationNodeResp; import com.sdm.common.entity.resp.system.CIDUserResp; @@ -48,9 +51,11 @@ import org.assertj.core.util.DateUtil; import org.jetbrains.annotations.NotNull; import org.springframework.beans.BeanUtils; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; +import org.springframework.util.Assert; import org.springframework.util.StringUtils; import org.springframework.web.multipart.MultipartFile; @@ -62,6 +67,8 @@ import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.text.SimpleDateFormat; import java.util.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; import java.util.stream.Collectors; @@ -86,6 +93,9 @@ public class MinioFileIDataFileServiceImpl implements IDataFileService { @Value("${fileSystem.minio}") private String type; + @Value("${fileSystem.chunkBucket:spdm}") + private String chunkBucket; + @Autowired private IFileMetadataInfoService fileMetadataInfoService; @@ -122,6 +132,10 @@ public class MinioFileIDataFileServiceImpl implements IDataFileService { @Autowired private ISimulationNodeFeignClient isSimulationNodeFeignClient; + @Autowired + @Qualifier(value = "nonSensitiveTaskPool") + private Executor nonSensitiveTaskPool; + 
@Override public String getType() { return type; @@ -201,6 +215,193 @@ public class MinioFileIDataFileServiceImpl implements IDataFileService { return SdmResponse.success(dto); } + @Override + public SdmResponse chunkUploadToMinio(ChunkUploadMinioFileReq req) { + ChunkUploadMinioFileResp resp = new ChunkUploadMinioFileResp(); + // 基础路径配置 + // -2. 参数校验 + try { + validateReq(req); + } catch (Exception e) { + CoreLogger.error("validateReq error:{}", e.getMessage()); + return buildFailedResponse(resp,e.getMessage(),req); + } + // -1.确定文件夹 + String timestamp = String.valueOf(System.currentTimeMillis()); + // 合并目录 + String filePath = getMinioFolderPath(req.getObjectKey()); + // 0. 一个文件直接传 + if(Objects.equals(req.getChunkTotal(),NumberConstants.ONE)&& + Objects.equals(req.getChunk(),NumberConstants.ONE)){ + String finalFileName = filePath + req.getSourceFileName(); + Boolean b = minioService.chunkUpload(chunkBucket, req.getFile(), finalFileName); + if(!b){ + return buildFailedResponse(resp,"单一文件上传失败",req); + } + return buildSuccessResponse(resp,req,""); + } + // 碎片目录 + String tempDirPath = org.apache.commons.lang3.StringUtils.isBlank(req.getFileTempPath())? + filePath +"temp/"+timestamp+"/":req.getFileTempPath(); + // 1. 保存当前分片到临时目录 1 2 3 4 ....temp + String chunkFileName =tempDirPath+req.getChunk()+ PermConstants.CHUNK_TEMPFILE_SUFFIX; + // 片文件上传到minio + Boolean b = minioService.chunkUpload(chunkBucket, req.getFile(), chunkFileName); + if(!b){ + deleteTempFileAfterFailed(tempDirPath,chunkBucket); + return buildFailedResponse(resp,"chunkUpload第"+req.getChunk()+"次失败",req); + } + // 2. 判断分片是否已全部上传完毕 + if (req.getChunk() < req.getChunkTotal()) { + return buildSuccessResponse(resp,req,tempDirPath); + } + // 3. 
全部分片已经上传 => 自动合并 + String finalFileName = filePath + req.getSourceFileName(); + Boolean merge = minioService.merge(chunkBucket, tempDirPath, chunkBucket, finalFileName); + if(!merge){ + deleteTempFileAfterFailed(tempDirPath,chunkBucket); + return buildFailedResponse(resp,req.getSourceFileName()+"合并分片失败",req); + } + // 4. 合并完成后删除临时目录 + deleteTempFileAfterFailed(tempDirPath,chunkBucket); + return buildSuccessResponse(resp,req,tempDirPath); + } + + + @Override + @Transactional(rollbackFor = Exception.class) + public SdmResponse> batchAddFileInfo(UploadFilesReq req) { + Long dirId = null; + String uuid = req.getUuid(); + + if (uuid != null) { + FileMetadataInfo nodeMetadataInfo = fileMetadataInfoService.lambdaQuery().eq(FileMetadataInfo::getRelatedResourceUuid, req.getUuid()).one(); + dirId = nodeMetadataInfo.getId(); + } else if (req.getDirId() != null) { + dirId = req.getDirId(); + } else { + return SdmResponse.failed("目录ID不能为空"); + } + FileMetadataInfo dirMetadataInfo = fileMetadataInfoService.lambdaQuery().eq(FileMetadataInfo::getId, dirId).eq(FileMetadataInfo::getDataType, DataTypeEnum.DIRECTORY.getValue()).one(); + if (dirMetadataInfo == null) { + return SdmResponse.failed("目录不存在"); + } + + if (CollectionUtils.isNotEmpty(req.getSourceFiles())) { + List addFileInfoRespList = new ArrayList<>(); + for (UploadFilesReq fileReq : req.getSourceFiles()) { + String originalName = fileReq.getFileName(); + String versionSuffix = "_V1"; + String modifiedFileName; + + int dotIndex = originalName.lastIndexOf('.'); + if (dotIndex != -1) { + // 如果文件有后缀,则在文件名和后缀之间插入版本号 + modifiedFileName = originalName.substring(0, dotIndex) + versionSuffix + originalName.substring(dotIndex); + } else { + log.error("文件没有后缀"); + return SdmResponse.failed("文件名没有后缀"); + } + + String fileMinioObjectKey = getFileMinioObjectKey(dirMetadataInfo.getObjectKey() + modifiedFileName); + // 检查此文件夹下是否有历史同名文件 提示 + if 
(CollectionUtils.isNotEmpty(fileMetadataInfoService.lambdaQuery().eq(FileMetadataInfo::getObjectKey, fileMinioObjectKey).list())) { + return SdmResponse.failed("该文件夹下存在同名文件:" + fileReq.getFileName() + ",请确认是否覆盖"); + } + + boolean hasUploadPermission = fileUserPermissionService.hasFilePermission(dirMetadataInfo.getId(), ThreadLocalContext.getUserId(), FilePermissionEnum.UPLOAD); + if (!hasUploadPermission) { + return SdmResponse.failed("没有上传权限"); + } + try { + // 创建目录元数据并保存到数据库 + FileMetadataInfo fileInfo = createFileMetadata(fileMinioObjectKey, fileReq.getFileName(), req.getFileType(), + req.getProjectId(), req.getAnalysisDirectionId(), req.getRemarks(), dirMetadataInfo.getId(), fileReq.getSize()); + fileInfo.setUploadTaskId(req.getUploadTaskId()); + // 只有知识库的文件需要审核 + // 1 知识库文件夹 + boolean isknowledge = Objects.equals(DirTypeEnum.KNOWLEDGE_BASE_DIR.getValue(), dirMetadataInfo.getDirType()); + if (isknowledge) { + fileInfo.setApprovalStatus(ApprovalFileDataStatusEnum.PENDING.getKey()); + fileInfo.setApproveType(ApproveFileDataTypeEnum.UPLOAD_REVIEWING.getCode()); + } + + fileMetadataInfoService.save(fileInfo); + + // 需要保存文件的历史版本记录,同一文件的所有版本共享一个ID, + fileInfo.setFileGroupId(fileInfo.getId()); + fileMetadataInfoService.updateById(fileInfo); + + // 循环查询当前文件每一级父目录id,并保存为一条file_storage,用户后续文件搜索统计 + Long parentDirId = dirMetadataInfo.getId(); + FileStorage fileStorage = new FileStorage(); + + fileStorage.setFileId(fileInfo.getId()); + fileStorage.setFileName(fileInfo.getOriginalName()); + fileStorage.setUserId(ThreadLocalContext.getUserId()); + fileStorage.setFileBizType(fileInfo.getFileType()); + // 文件后缀 + fileStorage.setFileSuffix(getSuffixWithoutDot(fileInfo.getOriginalName())); + fileStorage.setFileSize(fileReq.getSize()); + while (parentDirId != null) { + fileStorage.setId(null); + fileStorage.setDirId(parentDirId); + fileStorageService.save(fileStorage); + parentDirId = fileMetadataInfoService.lambdaQuery().eq(FileMetadataInfo::getId, parentDirId).oneOpt() + 
.map(FileMetadataInfo::getParentId) + .orElse(null); + } + + // 调用审批流,开启审批; 上面先入库拿到主键id,审批流创建失败后再回退数据 + // String templateId, String templateName,String approveContents,int approveAction:1:新增 2:修改 3:删除 +// if(isknowledge){ +// String approveContents = getApproveContents(fileInfo.getId(), "知识库文件新增", NumberConstants.ONE, fileInfo, null); +// Pair approvePair = launchFileDataApprove(req.getTemplateId(), req.getTemplateName(), approveContents, NumberConstants.ONE); +// if(!approvePair.getLeft()|| org.apache.commons.lang3.StringUtils.isBlank(approvePair.getRight())) { +// log.error("uploadFiles create approveInit failed, params :{}", JSONObject.toJSONString(req)); +// // - 回退MinIO中已上传的文件(删除该文件)。catch 里统一操作了 +// //- 新增 file_metadata_info 信息不入表。 +// //- 向前端返回“上传文件失败”。 +// throw new RuntimeException("文件上传,创建审批流失败,cidFlowId:"+approvePair.getRight()); +// } +// // cid流程id +// fileInfo.setCidFlowId(approvePair.getRight()); +// } + + // 创建文件扩展信息并保存到数据库 + List fileMetadataExtensionList = new ArrayList<>(); + List fileMetadataExtensionRequestList = req.getFileMetadataExtensionRequest(); + if (fileMetadataExtensionRequestList != null && !fileMetadataExtensionRequestList.isEmpty()) { + fileMetadataExtensionRequestList.forEach(extensionRequest -> { + FileMetadataExtension fileMetadataExtension = new FileMetadataExtension(); + fileMetadataExtension.setTFilemetaId(fileInfo.getId()); + fileMetadataExtension.setExtensionKey(extensionRequest.getExtensionKey()); + fileMetadataExtension.setExtensionValue(extensionRequest.getExtensionValue()); + fileMetadataExtension.setDataType(Objects.toString(extensionRequest.getDataType(), "string")); // 默认为字符串类型,可根据需要调整 + fileMetadataExtensionList.add(fileMetadataExtension); + }); + } + fileMetadataExtensionService.saveBatch(fileMetadataExtensionList); + + // 创建默认权限记录 + createFilePermission(fileInfo.getId()); + + BatchAddFileInfoResp addFileInfoResp = new BatchAddFileInfoResp(); + addFileInfoResp.setSourceFileName(originalName); + 
addFileInfoResp.setBusinessId(fileInfo.getId()); + addFileInfoResp.setUploadTaskId(req.getUploadTaskId()); + addFileInfoResp.setObjectKey(fileMinioObjectKey); + addFileInfoRespList.add(addFileInfoResp); + } catch (Exception e) { + log.error("上传文件失败", e); + throw new RuntimeException("上传文件失败: " + e.getMessage(), e); + } + } + return SdmResponse.success(addFileInfoRespList); + } + return SdmResponse.success(); + } + /** * 校验审批回调请求参数的合法性 * @param req 审批回调请求对象 @@ -1934,4 +2135,74 @@ public class MinioFileIDataFileServiceImpl implements IDataFileService { } } + /** + * 参数校验 + */ + private void validateReq(ChunkUploadMinioFileReq req) { + // 基础参数校验 + Assert.hasText(req.getSourceFileName(), "原始文件名称不能为空"); + Assert.notNull(req.getChunk(), "分片编号不能为空"); + Assert.notNull(req.getChunkTotal(), "分片总数不能为空"); + Assert.notNull(req.getFile(), "分片文件不能为空"); + Assert.isTrue(!req.getFile().isEmpty(), "分片文件不能为空"); + // 业务逻辑校验 + Assert.isTrue(req.getChunk() >= 1 && req.getChunk() <= req.getChunkTotal(), + "分片编号非法:当前" + req.getChunk() + ",总分片" + req.getChunkTotal()); + Assert.isTrue(req.getChunkTotal() >= 1, "分片总数必须大于等于1"); + Assert.hasText(req.getObjectKey(), "文件存储位置不能为空"); + Assert.hasText(req.getUploadTaskId(), "文件上传任务Id不能为空"); + Assert.notNull(req.getBusinessId(), "文件绑定业务Id不能为空"); + if(req.getChunk() > NumberConstants.ONE) { + Assert.hasText(req.getFileTempPath(), "分片临时目录不允许为空"); + } + } + + // 包含业务数据的响应体 + private SdmResponse buildSuccessResponse(ChunkUploadMinioFileResp resp, ChunkUploadMinioFileReq req,String fileTempPath) { + resp.setResult(true); + resp.setBusinessId(req.getBusinessId()); + resp.setUploadTaskId(req.getUploadTaskId()); + resp.setFileTempPath(fileTempPath); + // 成功时,错误信息通常为空 + resp.setErrMsg(""); + return SdmResponse.success(resp); + } + + // 构建一个失败的响应对象 + private SdmResponse buildFailedResponse(ChunkUploadMinioFileResp resp, String errMsg,ChunkUploadMinioFileReq req) { + resp.setResult(false); + resp.setBusinessId(req.getBusinessId()); + 
resp.setUploadTaskId(req.getUploadTaskId()); + // 如果 errMsg 为 null,设置为空字符串 "" + resp.setErrMsg(errMsg != null ? errMsg : ""); + return SdmResponse.failed(resp); + } + + /** + * 从文件路径字符串中获取文件夹路径(仅适用于 '/' 作为分隔符的情况) + * @param filePath 完整文件路径(如:knowledge/20251111-yy/仿真地图 (46)_V1.xls) + * @return 文件夹路径(如:knowledge/20251111-yy/),若输入为空或无分隔符则返回原路径 + */ + private static String getMinioFolderPath(String filePath) { + // 1. 校验输入合法性 + if (filePath == null || filePath.trim().isEmpty()) { + return filePath; + } + // 2. 查找最后一个 '/' 分隔符的位置 + int lastSeparatorIndex = filePath.lastIndexOf("/"); + // 3. 若未找到分隔符(说明是单纯文件名,无路径),返回原字符串 + if (lastSeparatorIndex == -1) { + return filePath; + } + // 4. 截取从开头到最后一个分隔符(包含分隔符)的子字符串 + return filePath.substring(0, lastSeparatorIndex + 1); + } + + // 分片上传,异常的时候删除临时目录,假如后面断点续传,某些场景的删除要最后确定失败再删除 + private void deleteTempFileAfterFailed(String tempFilePath,String finalChunkBucket) { + CompletableFuture.runAsync(() -> { + minioService.deleteDirectoryRecursively2(tempFilePath, finalChunkBucket); + }, nonSensitiveTaskPool); + } + } \ No newline at end of file diff --git a/data/src/main/java/com/sdm/data/service/impl/SystemFileIDataFileServiceImpl.java b/data/src/main/java/com/sdm/data/service/impl/SystemFileIDataFileServiceImpl.java index 404a9859..2efd7c7f 100644 --- a/data/src/main/java/com/sdm/data/service/impl/SystemFileIDataFileServiceImpl.java +++ b/data/src/main/java/com/sdm/data/service/impl/SystemFileIDataFileServiceImpl.java @@ -2,20 +2,17 @@ package com.sdm.data.service.impl; import com.alibaba.fastjson2.JSONObject; import com.sdm.common.common.ResultCode; -import com.sdm.common.common.SdmResponse; import com.sdm.common.common.SdmIterator; +import com.sdm.common.common.SdmResponse; import com.sdm.common.common.ThreadLocalContext; import com.sdm.common.entity.constants.PermConstants; import com.sdm.common.entity.data.*; import com.sdm.common.entity.enums.UserRole; import com.sdm.common.entity.pojo.system.SysCompany; import 
com.sdm.common.entity.pojo.system.SysUserInfo; -import com.sdm.common.entity.req.data.CreateDirReq; -import com.sdm.common.entity.req.data.DelDirReq; -import com.sdm.common.entity.req.data.QueryDirReq; -import com.sdm.common.entity.req.data.UploadFilesReq; +import com.sdm.common.entity.req.data.*; +import com.sdm.common.entity.resp.data.BatchAddFileInfoResp; import com.sdm.common.entity.resp.data.FileMetadataInfoResp; -import com.sdm.data.model.req.RenameFileReq; import com.sdm.common.service.CommonService; import com.sdm.common.utils.*; import com.sdm.data.dao.DataMapper; @@ -1283,6 +1280,17 @@ public class SystemFileIDataFileServiceImpl implements IDataFileService { return null; } + @Override + public SdmResponse chunkUploadToMinio(ChunkUploadMinioFileReq req) { + // 系统盘分片存储暂时不支持 + return null; + } + + @Override + public SdmResponse<List<BatchAddFileInfoResp>> batchAddFileInfo(UploadFilesReq req) { + return null; + } + @Override public void downloadFile(DownloadFileReq req, HttpServletResponse response) { if (StringUtils.isNotBlank(req.getPath()) && req.getPath().contains("..")) { diff --git a/data/src/main/java/com/sdm/data/service/minio/MinioService.java b/data/src/main/java/com/sdm/data/service/minio/MinioService.java index 11933332..812bb4d2 100644 --- a/data/src/main/java/com/sdm/data/service/minio/MinioService.java +++ b/data/src/main/java/com/sdm/data/service/minio/MinioService.java @@ -1,5 +1,7 @@ package com.sdm.data.service.minio; +import com.alibaba.fastjson2.JSON; +import com.sdm.common.log.CoreLogger; import com.sdm.data.config.MinioConfig; import com.sdm.data.service.IMinioService; import io.minio.*; @@ -21,7 +23,9 @@ import java.io.IOException; import java.io.InputStream; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; +import java.text.DecimalFormat; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -171,6 +175,10 @@ public class MinioService
implements IMinioService { deleteDirectoryRecursively(directoryName, minioConfig.getSpdmBucket()); } + public void deleteDirectoryRecursively2(String directoryName,String bucketName) { + deleteDirectoryRecursively(directoryName, bucketName); + } + /** * 从MinIO删除文件 * @param minioObjectKey 文件名 @@ -423,4 +431,169 @@ public class MinioService implements IMinioService { } return presignedUrl; } + + /** + * description: 分片碎文件上传 + * + * @param bucketName 桶名称 + * @param file 文件 + * @param fileName /xx/xx/文件名,数字类型,1 2 3 ... .temp + * @author bo + * @date 2023/5/21 13:06 + */ + @Override + public Boolean chunkUpload(String bucketName, MultipartFile file, String fileName) { + InputStream inputStream = null; + try { + inputStream = file.getInputStream(); + minioClient.putObject(PutObjectArgs.builder() + .bucket(bucketName) + .object(fileName) + .stream(inputStream, file.getSize(), -1) + .build()); + return true; + } catch (Exception e) { + CoreLogger.error("chunkUpload error:{}",e.getMessage()); + return false; + } finally { + if (inputStream != null) { + try { + inputStream.close(); + } catch (Exception e) { + CoreLogger.error("chunkUpload finally error:{}",e.getMessage()); + } + } + } + } + + // 分片上传合并 + /** + * 将块文件合并到新桶,块文件必须满足名字是 1 2 3 ....
+ * + * @param tempBucketName 存块文件的桶 + * @param tempFilePath 存块文件的文件夹路径 + * @param mergeBucketName 存新文件的桶 + * @param fileName 存到新桶中的文件名称 + * @return boolean + */ + @Override + public Boolean merge(String tempBucketName,String tempFilePath,String mergeBucketName,String fileName) { + try { + List sourceObjectList = new ArrayList(); + List folderList = getFolderList(tempBucketName,tempFilePath); + List fileNames = new ArrayList<>(); + if (!folderList.isEmpty()) { + for (Object value : folderList) { + Map o = (Map) value; + String name = (String) o.get("fileName"); + fileNames.add(name); + } + } + if (!fileNames.isEmpty()) { + fileNames.sort((fileName1, fileName2) -> { + int num1 = getFileNumber(fileName1); + int num2 = getFileNumber(fileName2); + return Integer.compare(num1, num2); + }); + for (String name : fileNames) { + sourceObjectList.add(ComposeSource.builder().bucket(tempBucketName).object(name).build()); + } + } + minioClient.composeObject( + ComposeObjectArgs.builder() + .bucket(mergeBucketName) + .object(fileName) + .sources(sourceObjectList) + .build()); + return true; + } catch (Exception e) { + CoreLogger.error("merge error:{},fileName:{}",e.getMessage(),fileName); + return false; + } + } + + public List getFolderList(String bucketName, String tempFilePath) { + List items = new ArrayList<>(); + if(org.apache.commons.lang3.StringUtils.isBlank(tempFilePath)){ + CoreLogger.warn("getFolderList tempFilePath null"); + return items; + } + try { + // 1. 规范化 prefix:去除开头的 /,目录路径末尾补 /(根目录除外) + String prefix = tempFilePath; + // 去除开头的 / + prefix = prefix.replaceAll("^/", ""); + // 如果不是空字符串(根目录),且末尾没有 /,则补 / + if (!prefix.isEmpty() && !prefix.endsWith("/")) { + prefix += "/"; + } + // 2. 
调用 listObjects,使用规范化后的 prefix + Iterable> results = minioClient.listObjects( + ListObjectsArgs.builder() + .bucket(bucketName) + .prefix(prefix) // 关键:使用规范化后的前缀 + .recursive(false) // 只查当前前缀的直接子对象(即“当前目录”下的文件) + .build() + ); + Iterator> iterator = results.iterator(); + items = new ArrayList<>(); + String format = "{'fileName':'%s','fileSize':'%s'}"; + while (iterator.hasNext()) { + Item item = iterator.next().get(); + // 注意:item.objectName() 是完整的对象键(比如 a/b/c/1.txt),如果需要相对路径,可做截取 + items.add(JSON.parse((String.format(format, item.objectName(), + formatFileSize(item.size()))))); + } + } catch (Exception e) { + CoreLogger.error("getFolderList error:{},tempFilePath:{}",e.getMessage(),tempFilePath); + } + return items; + } + + /** + * description: 格式化文件大小 + * + * @param fileS 文件的字节长度 + * @author bo + * @date 2023/5/21 11:40 + */ + private static String formatFileSize(long fileS) { + DecimalFormat df = new DecimalFormat("#.00"); + String fileSizeString = ""; + String wrongSize = "0B"; + if (fileS == 0) { + return wrongSize; + } + if (fileS < 1024) { + fileSizeString = df.format((double) fileS) + " B"; + } else if (fileS < 1048576) { + fileSizeString = df.format((double) fileS / 1024) + " KB"; + } else if (fileS < 1073741824) { + fileSizeString = df.format((double) fileS / 1048576) + " MB"; + } else { + fileSizeString = df.format((double) fileS / 1073741824) + " GB"; + } + return fileSizeString; + } + + /** + * 从固定格式的文件名中截取数字。 + * 格式必须是:.../数字.temp + */ + private static int getFileNumber(String fileName) { + try { + int lastSlashIndex = fileName.lastIndexOf('/'); + int dotIndex = fileName.lastIndexOf('.'); + // 截取数字部分的字符串 + String numberStr = fileName.substring(lastSlashIndex + 1, dotIndex); + // 转换为整数 + return Integer.parseInt(numberStr); + } catch (Exception e) { + CoreLogger.error("getFileNumber error:{},fileName:{}",e.getMessage(),fileName); + throw new RuntimeException("临时文件名格式非数字.temp类型"); + } + } + + + } \ No newline at end of file diff --git 
a/data/src/main/resources/application-dev.yml b/data/src/main/resources/application-dev.yml index b0664055..7f6f38b0 100644 --- a/data/src/main/resources/application-dev.yml +++ b/data/src/main/resources/application-dev.yml @@ -10,11 +10,16 @@ spring: jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) - minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: mysql diff --git a/data/src/main/resources/application-local.yml b/data/src/main/resources/application-local.yml index fec28bad..a8651678 100644 --- a/data/src/main/resources/application-local.yml +++ b/data/src/main/resources/application-local.yml @@ -10,11 +10,16 @@ spring: jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: mysql diff --git a/data/src/main/resources/application-prod.yml b/data/src/main/resources/application-prod.yml index a22712d0..e31e2b8e 100644 --- a/data/src/main/resources/application-prod.yml +++ b/data/src/main/resources/application-prod.yml @@ -6,11 +6,16 @@ spring: name: data datasource: hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) - minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: ENC(+QKYnI6gAYu1SbLaZQTkZA==) diff --git a/data/src/main/resources/application-test.yml b/data/src/main/resources/application-test.yml index b3aca0cf..263cf8d3 100644 --- a/data/src/main/resources/application-test.yml +++ b/data/src/main/resources/application-test.yml @@ -6,11 +6,16 @@ spring: name: data datasource: hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: ENC(+QKYnI6gAYu1SbLaZQTkZA==) diff --git a/data/src/main/resources/logback.xml b/data/src/main/resources/logback.xml index 25695cf5..5779ce87 100644 --- a/data/src/main/resources/logback.xml +++ b/data/src/main/resources/logback.xml @@ -76,7 +76,6 @@ - @@ -130,6 +129,6 @@ - - + + \ No newline at end of file diff --git a/pbs/src/main/java/com/sdm/pbs/controller/PbsController.java b/pbs/src/main/java/com/sdm/pbs/controller/PbsController.java index 8cdfecfa..ce23d929 100644 --- a/pbs/src/main/java/com/sdm/pbs/controller/PbsController.java +++ b/pbs/src/main/java/com/sdm/pbs/controller/PbsController.java @@ -17,16 +17,6 @@ public class PbsController { @Resource private PbsService pbsService; - /** - * 获取run的版本结构 - * - * @return - */ - @PostMapping("/getRunVersion") - public SdmResponse getRunVersion(@RequestBody @Validated GetRunVersionReq req) { - return pbsService.getRunVersion(req); - } - /** * 获取run的结果 * diff --git a/pbs/src/main/java/com/sdm/pbs/model/bo/RunVersionInfo.java b/pbs/src/main/java/com/sdm/pbs/model/bo/RunVersionInfo.java deleted file mode 100644 index bc77ea77..00000000 --- a/pbs/src/main/java/com/sdm/pbs/model/bo/RunVersionInfo.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.sdm.pbs.model.bo; - -import com.sdm.common.entity.pojo.pbs.PbsRun; -import lombok.Data; - -import java.util.List; - -/** - * @Author xuyundi - * @Date 2024/3/5 - * @Note 
- */ -@Data -public class RunVersionInfo { - PbsRun pbsRun; - String taskStatus; - List children; -} diff --git a/pbs/src/main/java/com/sdm/pbs/model/req/GetRunVersionReq.java b/pbs/src/main/java/com/sdm/pbs/model/req/GetRunVersionReq.java deleted file mode 100644 index dd57e9ff..00000000 --- a/pbs/src/main/java/com/sdm/pbs/model/req/GetRunVersionReq.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.sdm.pbs.model.req; - -import jakarta.validation.constraints.NotNull; -import lombok.Data; - -/** - * @Author xuyundi - * @Date 2024/3/5 - * @Note - */ -@Data -public class GetRunVersionReq { - - @NotNull(message = "runId不能为空") - private Long runId; -} diff --git a/pbs/src/main/java/com/sdm/pbs/service/PbsService.java b/pbs/src/main/java/com/sdm/pbs/service/PbsService.java index 056cc8d9..e06713e2 100644 --- a/pbs/src/main/java/com/sdm/pbs/service/PbsService.java +++ b/pbs/src/main/java/com/sdm/pbs/service/PbsService.java @@ -12,8 +12,6 @@ import java.util.List; @Service public interface PbsService { - SdmResponse getRunVersion(GetRunVersionReq req); - List getRunResult(GetRunResultReq req); List getTaskByRunId(long runId); diff --git a/pbs/src/main/java/com/sdm/pbs/service/impl/PbsServiceImpl.java b/pbs/src/main/java/com/sdm/pbs/service/impl/PbsServiceImpl.java index cecb40df..e9f3ddb9 100644 --- a/pbs/src/main/java/com/sdm/pbs/service/impl/PbsServiceImpl.java +++ b/pbs/src/main/java/com/sdm/pbs/service/impl/PbsServiceImpl.java @@ -55,82 +55,6 @@ public class PbsServiceImpl implements PbsService { @Resource private ProjectMapper projectMapper; - @Override - public SdmResponse getRunVersion(GetRunVersionReq req) { - PbsRun pbsRun = pbsMapper.getPbsRunById(req.getRunId(), ThreadLocalContext.getTenantId()); - if (org.springframework.util.ObjectUtils.isEmpty(pbsRun)) { - return SdmResponse.failed("该任务不存在"); - } - RunVersionInfo finalVersionInfo; - RunVersionInfo runVersionInfo = getRunVersion(pbsRun); - List childRunBeans = pbsMapper.getPbsRunByParentId(pbsRun.getId()); - if 
(!CollectionUtils.isEmpty(childRunBeans)) { - List runVersionInfos = getRunVersionChild(childRunBeans); - runVersionInfo.setChildren(runVersionInfos); - } - if (pbsRun.getParentId() != 0) { - finalVersionInfo = getRunVersionParent(pbsRun.getParentId(), runVersionInfo); - } else { - finalVersionInfo = runVersionInfo; - } - return SdmResponse.success(finalVersionInfo); - } - - public List getRunVersionChild(List runs) { - List runVersionInfos = new ArrayList<>(); - for (PbsRun runBean : runs) { - RunVersionInfo versionInfo = getRunVersion(runBean); - List childRunBeans = pbsMapper.getPbsRunByParentId(runBean.getId()); - if (!CollectionUtils.isEmpty(childRunBeans)) { - List childVersionInfos = getRunVersionChild(childRunBeans); - versionInfo.setChildren(childVersionInfos); - } - runVersionInfos.add(versionInfo); - } - return runVersionInfos; - } - - public RunVersionInfo getRunVersionParent(long parentId, RunVersionInfo runVersionInfo) { - PbsRun runBean = pbsMapper.getPbsRunById(parentId, ThreadLocalContext.getTenantId()); - RunVersionInfo parentVersionInfo = getRunVersion(runBean); - List runVersionInfos = new ArrayList<>(); - runVersionInfos.add(runVersionInfo); - parentVersionInfo.setChildren(runVersionInfos); - if (runBean.getParentId() != 0) { - return getRunVersionParent(runBean.getParentId(), parentVersionInfo); - } - return parentVersionInfo; - } - - private RunVersionInfo getRunVersion(PbsRun runBean) { - RunVersionInfo versionInfo = new RunVersionInfo(); - versionInfo.setPbsRun(runBean); - List statuses = pbsMapper.getPbsTaskStatus(runBean.getId()); - if (statuses.size() == 1) { - versionInfo.setTaskStatus(statuses.get(0)); - } else { - boolean isFail = false; - boolean isDone = true; - for (String status : statuses) { - if (status.equals("FAIL") || status.equals("CANCEL") || status.equals("CANCELLING")) { - isFail = true; - } - if (!status.equals("DONE")) { - isDone = false; - } - if (isFail) { - versionInfo.setTaskStatus("FAIL"); - } else if (isDone) { - 
versionInfo.setTaskStatus("DONE"); - } else { - versionInfo.setTaskStatus("RUN"); - } - } - - } - return versionInfo; - } - @Override public List getRunResult(GetRunResultReq req) { return pbsMapper.getPbsRunResult(req.getRunId()); diff --git a/pbs/src/main/resources/logback.xml b/pbs/src/main/resources/logback.xml index 50a7d635..a190d059 100644 --- a/pbs/src/main/resources/logback.xml +++ b/pbs/src/main/resources/logback.xml @@ -105,6 +105,6 @@ - - + + \ No newline at end of file diff --git a/performance/src/main/resources/logback.xml b/performance/src/main/resources/logback.xml index 2b507257..de93c1ff 100644 --- a/performance/src/main/resources/logback.xml +++ b/performance/src/main/resources/logback.xml @@ -87,6 +87,6 @@ - - + + \ No newline at end of file diff --git a/project/src/main/java/com/sdm/project/controller/SimulationRunController.java b/project/src/main/java/com/sdm/project/controller/SimulationRunController.java index 0e0643b4..73547a1b 100644 --- a/project/src/main/java/com/sdm/project/controller/SimulationRunController.java +++ b/project/src/main/java/com/sdm/project/controller/SimulationRunController.java @@ -5,9 +5,11 @@ import com.sdm.common.entity.req.data.CreateDirReq; import com.sdm.common.entity.req.data.QueryDirReq; import com.sdm.common.entity.req.data.UploadFilesReq; import com.sdm.project.model.entity.SimulationRun; +import com.sdm.project.model.req.GetRunVersionReq; import com.sdm.project.model.req.ProjectTreeTagReq; import com.sdm.project.model.req.SpdmAddTaskRunReq; import com.sdm.project.model.req.SpdmTaskRunReq; +import com.sdm.project.model.resp.RunVersionInfoResp; import com.sdm.project.service.ISimulationRunService; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.tags.Tag; @@ -90,4 +92,24 @@ public class SimulationRunController { return runService.uploadRunFiles(req); } + /** + * 获取task下所有算例版本 + * + * @return + */ + @PostMapping("/getTaskRunVersion") + public SdmResponse> 
getTaskRunVersion(@RequestBody GetRunVersionReq req) { + return runService.getTaskRunVersion(req); + } + + /** + * 获取指定算例的版本结构 + * + * @return + */ + @PostMapping("/getRunVersion") + public SdmResponse getRunVersion(@RequestBody GetRunVersionReq req) { + return runService.getRunVersion(req); + } + } diff --git a/project/src/main/java/com/sdm/project/dao/SimulationRunMapper.java b/project/src/main/java/com/sdm/project/dao/SimulationRunMapper.java index aebfe60c..d79959fe 100644 --- a/project/src/main/java/com/sdm/project/dao/SimulationRunMapper.java +++ b/project/src/main/java/com/sdm/project/dao/SimulationRunMapper.java @@ -3,6 +3,8 @@ package com.sdm.project.dao; import com.sdm.project.model.entity.SimulationRun; import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import java.util.List; + /** *

* Mapper 接口 @@ -13,4 +15,6 @@ import com.baomidou.mybatisplus.core.mapper.BaseMapper; */ public interface SimulationRunMapper extends BaseMapper { + List findRunAndDescendantsIds(String runId); + } diff --git a/project/src/main/java/com/sdm/project/model/req/GetRunVersionReq.java b/project/src/main/java/com/sdm/project/model/req/GetRunVersionReq.java new file mode 100644 index 00000000..8814ea52 --- /dev/null +++ b/project/src/main/java/com/sdm/project/model/req/GetRunVersionReq.java @@ -0,0 +1,20 @@ +package com.sdm.project.model.req; + +import lombok.Data; + +/** + * @Author xuyundi + * @Date 2024/3/5 + * @Note + */ +@Data +public class GetRunVersionReq { + /** + * 算例uuid + */ + private String runId; + /** + * 任务uuid + */ + private String taskId; +} diff --git a/project/src/main/java/com/sdm/project/model/resp/RunVersionInfoResp.java b/project/src/main/java/com/sdm/project/model/resp/RunVersionInfoResp.java new file mode 100644 index 00000000..20c0435f --- /dev/null +++ b/project/src/main/java/com/sdm/project/model/resp/RunVersionInfoResp.java @@ -0,0 +1,82 @@ +package com.sdm.project.model.resp; + +import com.fasterxml.jackson.annotation.JsonFormat; +import com.sdm.project.model.entity.SimulationRun; +import io.swagger.v3.oas.annotations.media.Schema; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.time.LocalDateTime; +import java.util.List; + +@Data +public class RunVersionInfoResp { + private Integer id; + + private String uuid; + + @Schema(description = "算例名称") + private String runName; + + @Schema(description = "1:server 2:pbs 3:local only") + private Integer type; + + @Schema(description = "流程模板id") + private String taskId; + + @Schema(description = "流程模板id") + private String flowTemplate; + + @Schema(description = "Run状态 0:未执行 1:执行中 2:完成 3:失败") + private Integer status; + + @Schema(description = "Run总共的流程步骤") + private Integer totalStep; + + @Schema(description = "当前的流程步骤") + private Integer 
currentStep; + + @Schema(description = "当前的流程步骤名称") + private String currentStepName; + + @Schema(description = "Run执行结果 0:gray 1:red 2:yellow 3:green") + private Integer achieveStatus; + + @Schema(description = "run描述信息") + private String description; + + @Schema(description = "Run对应在文件系统中的路径") + private Long folderId; + + @Schema(description = "计算父版本Id") + private String parentId; + + @Schema(description = "是否个人模板") + private String isPersonalTemplate; + + @Schema(description = "租户id") + private Long tenantId; + + @Schema(description = "英文名") + private Long englishName; + + @Schema(description= "创建者ID") + private Long creator; + private String creatorName; + + @Schema(description= "创建时间") + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss") + private LocalDateTime createTime; + + @Schema(description= "更新者ID") + private Long updater; + private String updaterName; + + @Schema(description= "创建时间") + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss") + private LocalDateTime updateTime; + + List children; + +} diff --git a/project/src/main/java/com/sdm/project/service/ISimulationRunService.java b/project/src/main/java/com/sdm/project/service/ISimulationRunService.java index a83c4210..7bc20be0 100644 --- a/project/src/main/java/com/sdm/project/service/ISimulationRunService.java +++ b/project/src/main/java/com/sdm/project/service/ISimulationRunService.java @@ -6,9 +6,13 @@ import com.sdm.common.entity.req.data.QueryDirReq; import com.sdm.common.entity.req.data.UploadFilesReq; import com.sdm.project.model.entity.SimulationRun; import com.baomidou.mybatisplus.extension.service.IService; +import com.sdm.project.model.req.GetRunVersionReq; import com.sdm.project.model.req.ProjectTreeTagReq; import com.sdm.project.model.req.SpdmAddTaskRunReq; import com.sdm.project.model.req.SpdmTaskRunReq; +import com.sdm.project.model.resp.RunVersionInfoResp; + +import java.util.List; /** *

@@ -26,7 +30,7 @@ public interface ISimulationRunService extends IService { SdmResponse deleteTaskRun(SpdmTaskRunReq req); - SdmResponse queryTaskRun(SpdmTaskRunReq req); + SdmResponse> queryTaskRun(SpdmTaskRunReq req); SdmResponse createRunDir(CreateDirReq req); @@ -34,4 +38,8 @@ public interface ISimulationRunService extends IService { SdmResponse uploadRunFiles(UploadFilesReq req); + SdmResponse> getTaskRunVersion(GetRunVersionReq req); + + SdmResponse getRunVersion(GetRunVersionReq req); + } diff --git a/project/src/main/java/com/sdm/project/service/impl/NodeServiceImpl.java b/project/src/main/java/com/sdm/project/service/impl/NodeServiceImpl.java index 57c4b94c..13ce02ce 100644 --- a/project/src/main/java/com/sdm/project/service/impl/NodeServiceImpl.java +++ b/project/src/main/java/com/sdm/project/service/impl/NodeServiceImpl.java @@ -152,6 +152,12 @@ public class NodeServiceImpl extends ServiceImpl editNodeList = req.getEditNodeList(); if (CollectionUtils.isNotEmpty(editNodeList)) { for (SpdmNodeDetailReq editNode : editNodeList) { + // 项目/阶段名称重名校验 + if (CollectionUtils.isNotEmpty(this.lambdaQuery().eq(SimulationNode::getNodeName, editNode.getNodeName()).list())) { + if (NodeTypeEnum.PROJECT.getValue().equals(editNode.getNodeType())) { + return SdmResponse.failed("同名项目已存在,请检查"); + } + } if (StringUtils.isNotBlank(editNode.getEndTime()) && StringUtils.isNotBlank(editNode.getBeginTime())) { if (DateUtils.parse(editNode.getEndTime(), DateUtils.PATTERN_DEFAULT).before(DateUtils.parse(editNode.getBeginTime(), DateUtils.PATTERN_DEFAULT))) { return SdmResponse.failed("计划结束时间不能早于计划开始时间"); diff --git a/project/src/main/java/com/sdm/project/service/impl/SimulationRunServiceImpl.java b/project/src/main/java/com/sdm/project/service/impl/SimulationRunServiceImpl.java index 351a2e2c..3d941293 100644 --- a/project/src/main/java/com/sdm/project/service/impl/SimulationRunServiceImpl.java +++ b/project/src/main/java/com/sdm/project/service/impl/SimulationRunServiceImpl.java @@ 
-29,10 +29,8 @@ import com.sdm.project.model.po.NodeAllBase; import com.sdm.project.model.po.ProjectNodePo; import com.sdm.project.model.po.RunNodePo; import com.sdm.project.model.po.TaskNodePo; -import com.sdm.project.model.req.ProjectTreeReq; -import com.sdm.project.model.req.ProjectTreeTagReq; -import com.sdm.project.model.req.SpdmAddTaskRunReq; -import com.sdm.project.model.req.SpdmTaskRunReq; +import com.sdm.project.model.req.*; +import com.sdm.project.model.resp.RunVersionInfoResp; import com.sdm.project.service.ISimulationPerformanceService; import com.sdm.project.service.ISimulationRunService; import com.sdm.project.service.ISimulationTaskMemberService; @@ -46,10 +44,7 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; +import java.util.*; import java.util.stream.Collectors; import static com.sdm.project.service.impl.NodeServiceImpl.getTagProperty; @@ -609,4 +604,56 @@ public class SimulationRunServiceImpl extends ServiceImpl> getTaskRunVersion(GetRunVersionReq req) { + List runList = this.lambdaQuery().eq(SimulationRun::getTaskId, req.getTaskId()).list(); + if (CollectionUtils.isNotEmpty(runList)) { + List runRespList = runList.stream() + .map(source -> { + RunVersionInfoResp target = new RunVersionInfoResp(); + BeanUtils.copyProperties(source, target); + return target; + }).collect(Collectors.toList()); + return SdmResponse.success(buildRunTreeWithMap(runRespList)); + } + return SdmResponse.success(); + } + + @Override + public SdmResponse getRunVersion(GetRunVersionReq req) { + // 获取目标算例 + SimulationRun targetRun = this.lambdaQuery().eq(SimulationRun::getUuid, req.getRunId()).one(); + if (targetRun == null) { + return null; + } + // 获取该算例及其所有子孙节点的ID + List relatedRunIds = this.baseMapper.findRunAndDescendantsIds(req.getRunId()); 
+ // 获取相关算例 + List relatedRuns = this.lambdaQuery().in(SimulationRun::getUuid, relatedRunIds).list(); + List relatedRunsResp = relatedRuns.stream() + .map(source -> { + RunVersionInfoResp target = new RunVersionInfoResp(); + BeanUtils.copyProperties(source, target); + return target; + }).collect(Collectors.toList()); + // 构建子树 + buildRunTreeWithMap(relatedRunsResp); + return SdmResponse.success(relatedRunsResp.stream().filter(i -> StringUtils.equals(i.getUuid(), targetRun.getUuid())).findFirst().get()); + } + + private List buildRunTreeWithMap(List allRuns) { + // 按父节点ID分组 + Map> childrenMap = allRuns.stream().filter(run -> StringUtils.isNotEmpty(run.getParentId())) + .collect(Collectors.groupingBy(RunVersionInfoResp::getParentId)); + // 设置每个节点的子节点 + allRuns.forEach(run -> { + List children = childrenMap.getOrDefault(run.getUuid(), new ArrayList<>()); + // 对子节点排序 + children.sort(Comparator.comparing(RunVersionInfoResp::getCreateTime)); + run.setChildren(children); + }); + // 返回根节点 + return allRuns.stream().filter(run -> StringUtils.isEmpty(run.getParentId())).collect(Collectors.toList()); + } + } diff --git a/project/src/main/java/com/sdm/project/service/impl/TaskServiceImpl.java b/project/src/main/java/com/sdm/project/service/impl/TaskServiceImpl.java index 06d7814e..b7cf03e6 100644 --- a/project/src/main/java/com/sdm/project/service/impl/TaskServiceImpl.java +++ b/project/src/main/java/com/sdm/project/service/impl/TaskServiceImpl.java @@ -606,6 +606,9 @@ public class TaskServiceImpl implements ITaskService { public SdmResponse operation(SpdmTaskOpr taskOpr) { if (CollectionUtils.isNotEmpty(taskOpr.getTaskIds()) && ObjectUtils.isNotEmpty(taskOpr.getReq())) { SpdmTaskOprReq req = taskOpr.getReq(); + if (req.getProgress() > 100) { + return SdmResponse.failed("任务进度超过100%,请核查"); + } for (String taskId : taskOpr.getTaskIds()) { req.setTaskId(taskId); if (simulationTaskService.lambdaQuery().eq(SimulationTask::getUuid, taskId).count() <= 0) { diff --git 
a/project/src/main/resources/application-dev.yml b/project/src/main/resources/application-dev.yml index 526caa83..e3b64de8 100644 --- a/project/src/main/resources/application-dev.yml +++ b/project/src/main/resources/application-dev.yml @@ -10,11 +10,16 @@ spring: jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) - minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: mysql diff --git a/project/src/main/resources/application-local.yml b/project/src/main/resources/application-local.yml index 2fc986d2..d20ac8f9 100644 --- a/project/src/main/resources/application-local.yml +++ b/project/src/main/resources/application-local.yml @@ -10,11 +10,16 @@ spring: jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: mysql diff --git a/project/src/main/resources/application-prod.yml b/project/src/main/resources/application-prod.yml index 00e14878..1b12e489 100644 --- a/project/src/main/resources/application-prod.yml +++ b/project/src/main/resources/application-prod.yml @@ -6,11 +6,16 @@ spring: name: project datasource: hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) - minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: ENC(+QKYnI6gAYu1SbLaZQTkZA==) diff --git a/project/src/main/resources/application-test.yml b/project/src/main/resources/application-test.yml index a2b5bf35..7b8a2cf9 100644 --- a/project/src/main/resources/application-test.yml +++ b/project/src/main/resources/application-test.yml @@ -6,11 +6,16 @@ spring: name: project datasource: hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: ENC(+QKYnI6gAYu1SbLaZQTkZA==) diff --git a/project/src/main/resources/mapper/SimulationRunMapper.xml b/project/src/main/resources/mapper/SimulationRunMapper.xml index e5221776..ef4269e6 100644 --- a/project/src/main/resources/mapper/SimulationRunMapper.xml +++ b/project/src/main/resources/mapper/SimulationRunMapper.xml @@ -2,4 +2,14 @@ + + diff --git a/submit/src/main/resources/logback.xml b/submit/src/main/resources/logback.xml index b235cae9..03bfc928 100644 --- a/submit/src/main/resources/logback.xml +++ b/submit/src/main/resources/logback.xml @@ -126,6 +126,6 @@ - - + + \ No newline at end of file diff --git a/system/src/main/resources/application-dev.yml b/system/src/main/resources/application-dev.yml index bf521dda..62973374 100644 --- a/system/src/main/resources/application-dev.yml +++ b/system/src/main/resources/application-dev.yml @@ -10,11 +10,16 @@ spring: jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: mysql diff --git a/system/src/main/resources/application-local.yml b/system/src/main/resources/application-local.yml index 0922cd12..ae6a15a3 100644 --- a/system/src/main/resources/application-local.yml +++ b/system/src/main/resources/application-local.yml @@ -10,11 +10,16 @@ spring: jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: mysql diff --git a/system/src/main/resources/application-prod.yml b/system/src/main/resources/application-prod.yml index 4424f308..0d2c944c 100644 --- a/system/src/main/resources/application-prod.yml +++ b/system/src/main/resources/application-prod.yml @@ -6,11 +6,16 @@ spring: name: system datasource: hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) - minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: ENC(+QKYnI6gAYu1SbLaZQTkZA==) diff --git a/system/src/main/resources/application-test.yml b/system/src/main/resources/application-test.yml index b2cabf19..69dfaaaf 100644 --- a/system/src/main/resources/application-test.yml +++ b/system/src/main/resources/application-test.yml @@ -6,11 +6,16 @@ spring: name: system datasource: hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: ENC(+QKYnI6gAYu1SbLaZQTkZA==) diff --git a/system/src/main/resources/logback.xml b/system/src/main/resources/logback.xml index b98e38de..ee94afc9 100644 --- a/system/src/main/resources/logback.xml +++ b/system/src/main/resources/logback.xml @@ -114,6 +114,6 @@ - - + + \ No newline at end of file diff --git a/task/src/main/resources/application-dev.yml b/task/src/main/resources/application-dev.yml index 7291c0bf..05c609d9 100644 --- a/task/src/main/resources/application-dev.yml +++ b/task/src/main/resources/application-dev.yml @@ -11,11 +11,16 @@ spring: jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: mysql diff --git a/task/src/main/resources/application-local.yml b/task/src/main/resources/application-local.yml index 62b2a61e..124898c7 100644 --- a/task/src/main/resources/application-local.yml +++ b/task/src/main/resources/application-local.yml @@ -10,11 +10,16 @@ spring: jdbc-url: jdbc:mysql://192.168.65.161:3306/spdm_baseline?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s master: username: root password: 123456 diff --git a/task/src/main/resources/application-prod.yml b/task/src/main/resources/application-prod.yml index aa950593..355c5369 100644 --- a/task/src/main/resources/application-prod.yml +++ b/task/src/main/resources/application-prod.yml @@ -12,11 +12,16 @@ spring: # jdbc-url: jdbc:mysql://10.30.10.210:3306/spdm?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s slave: username: root password: ENC(+QKYnI6gAYu1SbLaZQTkZA==) diff --git a/task/src/main/resources/application-test.yml b/task/src/main/resources/application-test.yml index 875a1838..c81d5755 100644 --- a/task/src/main/resources/application-test.yml +++ b/task/src/main/resources/application-test.yml @@ -12,11 +12,16 @@ spring: # jdbc-url: jdbc:mysql://10.30.10.210:3306/spdm?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=Asia/Shanghai driver-class-name: com.mysql.cj.jdbc.Driver hikari: - maximum-pool-size: 450 # 连接池最大连接数(关键!) 
- minimum-idle: 50 # 最小空闲连接数(与最大一致,避免频繁创建销毁) - idle-timeout: 300000 # 空闲连接超时时间(5分钟) - max-lifetime: 600000 # 连接最大存活时间(10分钟) - connection-timeout: 30000 # 获取连接超时时间(30秒,避免线程阻塞) + # 设置连接池能够容纳的最大连接数。建议值:CPU核心数 * 2 + 有效磁盘I/O数。一个常见的经验值是 10-20。 + maximum-pool-size: 20 + # 连接池在空闲时保持的最小连接数。 + minimum-idle: 5 + # 一个连接在被标记为空闲之前可以保持空闲状态的最长时间(毫秒)。当连接的空闲时间超过此值后,它可能会被连接池 evict(驱逐)。 + idle-timeout: 60000 # 1 min + # 一个连接从被创建开始,其生命周期的最大时长(毫秒)。HikariCP的默认值就是30分钟,这是一个非常合理的设置。 + max-lifetime: 1800000 # 30 min(Hikari 默认) + # 应用程序尝试从连接池获取一个连接时,等待的最长时间(毫秒)。建议值:30-60秒。 + connection-timeout: 30000 # 30s slave: username: root password: ENC(+QKYnI6gAYu1SbLaZQTkZA==) diff --git a/task/src/main/resources/logback.xml b/task/src/main/resources/logback.xml index 78413dce..c4f7c12e 100644 --- a/task/src/main/resources/logback.xml +++ b/task/src/main/resources/logback.xml @@ -115,7 +115,7 @@ - - + + \ No newline at end of file