Data storage module optimization (数据存储模块优化)
@@ -6,11 +6,14 @@ import com.baomidou.mybatisplus.core.toolkit.ObjectUtils;
import com.sdm.common.common.SdmResponse;
import com.sdm.common.entity.resp.PageDataResp;
import com.sdm.common.log.annotation.SysLog;
import com.sdm.data.model.entity.FileStorage;
import com.sdm.data.model.entity.FileMetadataInfo;
import com.sdm.data.model.req.AddUserQuotaEntity;
import com.sdm.data.model.req.ListUserQuotaReq;
import com.sdm.data.model.req.QueryBigFileReq;
import com.sdm.data.model.resp.ListBigFileResp;
import com.sdm.data.service.DataStorageAnalysis;
import com.sdm.data.service.IFileMetadataInfoService;
import com.sdm.data.service.IFileStorageService;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
@@ -32,31 +35,38 @@ public class DataStorageAnalysisController {
@Autowired
private DataStorageAnalysis dataStorageAnalysis;

@Autowired
private IFileStorageService fileStorageService;

/**
* 根据nodeId(项目nodeId)获取指定查询类型(queryNodeType)文件空间占用(近几个月、增量查询)
* 查询项目存储空间占用:项目Uuid+项目queryNodeType
* 查询学科存储空间占用:学科字典名:MVH+学科queryNodeType
*
* @param queryNodeType 需要统计的节点类型
* @param uuids 节点uuid
* @param queryNodeNames 节点名称
* @return
*/
@GetMapping("/getNodeSizeByNodeType")
@Operation(summary = "根据nodeId(项目nodeId)获取指定查询类型(queryNodeType)文件空间占用")
public SdmResponse getNodeSizeByNodeType(
@Parameter(description = "查询节点类型:project,discipline") @RequestParam(value = "queryNodeType", required = false) String queryNodeType,
@Parameter(description = "节点id:项目Uuid列表或者学科字典名") @RequestParam(value = "uuids", required = false) List<String> uuids,
@Parameter(description = "节点名称") @RequestParam(value = "queryNodeNames", required = false) List<String> queryNodeNames,
@Parameter(description = "查询时间间隔(月)") @RequestParam(value = "intervalMonths", required = false) Integer intervalMonths,
@Parameter(description = "增量查询指定的月:2025-06") @RequestParam(value = "targetYm", required = false) String targetYm
) {

if (ObjectUtils.isEmpty(queryNodeType) || ObjectUtils.isEmpty(uuids)) {
if (ObjectUtils.isEmpty(queryNodeType)) {
return SdmResponse.success();
}
List<List<JSONObject>> result = new ArrayList<>();
for (String uuid : uuids) {
if(CollectionUtils.isEmpty(queryNodeNames)){
queryNodeNames = fileStorageService.getdefaultNodeSizeUUID(queryNodeType, 10);
}

for (String queryNodeName : queryNodeNames) {
try {
SdmResponse<List<JSONObject>> sdmResponse = dataStorageAnalysis.getNodeSizeByNodeType(queryNodeType, uuid, intervalMonths, targetYm);
SdmResponse<List<JSONObject>> sdmResponse = dataStorageAnalysis.getNodeSizeByNodeType(queryNodeType, queryNodeName, intervalMonths, targetYm);
if (CollectionUtils.isNotEmpty(sdmResponse.getData())) {
result.add(sdmResponse.getData());
}
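For orientation, a hedged client-side sketch of the reworked endpoint; host, port, path prefix and parameter values below are assumptions, not part of the commit. When queryNodeNames is omitted, the controller falls back to the ten largest node names via getdefaultNodeSizeUUID.

import org.springframework.web.client.RestTemplate;

public class NodeSizeQueryExample {
    public static void main(String[] args) {
        // Illustrative values only: "MVH" mirrors the discipline dictionary name mentioned in the Javadoc above.
        String url = "http://localhost:8080/getNodeSizeByNodeType"
                + "?queryNodeType=discipline"
                + "&queryNodeNames=MVH"
                + "&intervalMonths=6";          // or pass targetYm=2025-06 instead for a single-month increment
        String body = new RestTemplate().getForObject(url, String.class);
        System.out.println(body);
    }
}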
@@ -85,8 +95,8 @@ public class DataStorageAnalysisController {
// 存储系统大文件筛选
@PostMapping("/listBigFile")
@Operation(summary = "存储系统大文件筛选")
public SdmResponse<PageDataResp<List<FileStorage>>> listBigFile(@RequestBody QueryBigFileReq queryBigFileReq){
return dataStorageAnalysis.listBigFile(queryBigFileReq);
public SdmResponse<PageDataResp<List<ListBigFileResp>>> listBigFile(@RequestBody QueryBigFileReq queryBigFileReq){
return dataStorageAnalysis.queryBigFileDetail(queryBigFileReq);
}

//批量删除大文件
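A minimal request sketch for the reworked filter endpoint, assuming Lombok-generated setters on QueryBigFileReq and an illustrative time window; the response now carries ListBigFileResp rows instead of raw FileStorage entities.

// Hedged caller-side sketch: a component wired with the DataStorageAnalysis bean.
QueryBigFileReq req = new QueryBigFileReq();
req.setStartTime(LocalDateTime.of(2025, 1, 1, 0, 0));        // serialized as yyyy-MM-dd HH:mm:ss per @JsonFormat
req.setEndTime(LocalDateTime.of(2025, 6, 30, 23, 59, 59));
SdmResponse<PageDataResp<List<ListBigFileResp>>> page = dataStorageAnalysis.queryBigFileDetail(req);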
@@ -18,6 +18,8 @@ import java.util.List;
* @since 2025-11-04
*/
public interface FileStorageMapper extends BaseMapper<FileStorage> {
List<String> getdefaultNodeNameByNodeSize(@Param("queryNodeType") String queryNodeType, @Param("limitNum") Integer limitNum);

List<NodeSizeDTO> selectNodeSizeByNodeType(@Param("directoryIds") List<Long> directoryIds, @Param("intervalMonths") Integer intervalMonths, @Param("tenantId") Long tenantId);

/**
@@ -1,5 +1,6 @@
package com.sdm.data.model.req;

import com.fasterxml.jackson.annotation.JsonFormat;
import com.sdm.common.entity.req.data.BaseReq;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
@@ -24,12 +25,14 @@ public class QueryBigFileReq extends BaseReq {
* 文件创建时间 startTime
*/
@Schema(description = "文件创建搜索开始时间")
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
private LocalDateTime startTime;

/**
* 文件创建时间 endTime
*/
@Schema(description = "文件创建搜索结束时间")
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
private LocalDateTime endTime;

/**
@@ -0,0 +1,68 @@
package com.sdm.data.model.resp;

import com.baomidou.mybatisplus.annotation.FieldStrategy;
import com.baomidou.mybatisplus.annotation.TableField;
import com.fasterxml.jackson.annotation.JsonFormat;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;

import java.time.LocalDateTime;

@Data
public class ListBigFileResp {
@Schema(description = "文件id")
private Long id;

@Schema(description = "文件原始名称")
private String originalName;

//格式化后的文件大小
@Schema(description = "格式化后的文件大小")
private String formatFileSize;

@Schema(description= "approvalStatus")
private String approvalStatus;

// 返回前端 approveType 非0的,前端禁止下载、编辑、删除
@Schema(description= "审核类型:0-审核完成,1-文件上传审核中,2-文件修改审核中,3-文件删除审核中")
@TableField("approveType")
private Integer approveType;

//创建时间
@Schema(description = "创建时间")
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
private LocalDateTime createTime;

@Schema(description = "所属项目")
String ownProjectName;

@Schema(description = "所属项目id")
private String ownProjectId;

@Schema(description = "所属阶段")
String ownPhaseName;

@Schema(description = "所属阶段id")
private String ownPhaseId;

@Schema(description = "所属学科")
String ownDisciplineName;

@Schema(description = "所属学科id")
private String ownDisciplineId;

@Schema(description = "所属工况任务")
String owntaskName;

@Schema(description = "所属工况任务id")
private String owntaskId;

@Schema(description = "所属算列")
String ownRunName;

@Schema(description = "所属算列id")
private String ownRunId;

@Schema(description = "仿真执行人")
String executorName;
}
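The approveType field comment above implies client-side gating; a hedged illustration of how a consumer of this DTO might apply it (the stream below is illustrative, not part of the commit).

// Files still inside an approval flow (approveType != 0) should have download/edit/delete disabled.
List<ListBigFileResp> locked = respList.stream()
        .filter(r -> r.getApproveType() != null && r.getApproveType() != 0)
        .toList();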
@@ -8,6 +8,7 @@ import com.sdm.data.model.entity.FileStorageQuota;
import com.sdm.data.model.req.AddUserQuotaEntity;
import com.sdm.data.model.req.ListUserQuotaReq;
import com.sdm.data.model.req.QueryBigFileReq;
import com.sdm.data.model.resp.ListBigFileResp;

import java.util.List;
@@ -16,12 +17,12 @@ public interface DataStorageAnalysis {
* 根据类型获取文件空间占用
*
* @param queryNodeType 节点类型(project,discipline)
* @param uuid 节点id
* @param queryNodeName 节点id
* @param intervalMonths 查询时间间隔(月)
* @param targetYm 增量查询指定的月:2025-06
* @return 文件总大小(字节)
*/
SdmResponse<List<JSONObject>> getNodeSizeByNodeType(String queryNodeType, String uuid, Integer intervalMonths, String targetYm);
SdmResponse<List<JSONObject>> getNodeSizeByNodeType(String queryNodeType, String queryNodeName, Integer intervalMonths, String targetYm);

/**
* 根据用户id获取用户的空间占用
@@ -32,11 +33,18 @@ public interface DataStorageAnalysis {
*/
SdmResponse getDirectorySizeByUserId(List<Long> userIds, Integer intervalMonths, String targetYm);

/**
* 存储系统大文件查询,获取文件详细信息
*/
SdmResponse<PageDataResp<List<ListBigFileResp>>> queryBigFileDetail(QueryBigFileReq queryBigFileReq);


/**
* 存储系统大文件筛选
*/
SdmResponse<PageDataResp<List<FileStorage>>> listBigFile(QueryBigFileReq queryBigFileReq);


SdmResponse batchDeleteBigFile(List<Long> fileIds);

SdmResponse addUserQuota(AddUserQuotaEntity addUserQuota);
@@ -18,6 +18,7 @@ import java.util.List;
* @since 2025-11-04
*/
public interface IFileStorageService extends IService<FileStorage> {
List<String> getdefaultNodeSizeUUID(String queryNodeType, Integer limitNum);
List<NodeSizeDTO> selectNodeSizeByNodeType(List<Long> directoryIds, Integer intervalMonths, Long tenantId);

List<NodeSizeDTO> statDirStorageByTargetYm(List<Long> dirIds, String targetYm, Long tenantId);
@@ -6,12 +6,13 @@ import com.github.pagehelper.PageHelper;
import com.github.pagehelper.PageInfo;
import com.sdm.common.common.SdmResponse;
import com.sdm.common.common.ThreadLocalContext;
import com.sdm.common.entity.enums.ApproveFileDataTypeEnum;
import com.sdm.common.entity.enums.NodeTypeEnum;
import com.sdm.common.entity.req.system.UserListReq;
import com.sdm.common.entity.req.system.UserQueryReq;
import com.sdm.common.entity.resp.PageDataResp;
import com.sdm.common.entity.resp.system.CIDUserResp;
import com.sdm.common.feign.impl.system.SysUserFeignClientImpl;
import com.sdm.common.service.UserNameCacheService;
import com.sdm.common.utils.FileSizeUtils;
import com.sdm.common.utils.PageUtils;
import com.sdm.data.model.dto.NodeSizeDTO;
@@ -25,20 +26,19 @@ import com.sdm.common.entity.req.data.DelFileReq;
import com.sdm.data.model.req.ListUserQuotaReq;
import com.sdm.data.model.req.QueryBigFileReq;
import com.sdm.data.model.resp.FileStorageQuotaResp;
import com.sdm.data.model.resp.ListBigFileResp;
import com.sdm.data.service.*;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.jetbrains.annotations.NotNull;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;

import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -55,6 +55,9 @@ public class DataStorageAnalysisImpl implements DataStorageAnalysis {
@Autowired
SysUserFeignClientImpl sysUserFeignClient;

@Autowired
UserNameCacheService userNameCacheService;

@Autowired
@Lazy
IDataFileService dataFileService;
@@ -62,37 +65,19 @@ public class DataStorageAnalysisImpl implements DataStorageAnalysis {
@Autowired
IFileStorageQuotaService fileStorageQuotaService;

public SdmResponse<List<JSONObject>> getNodeSizeByNodeType(String queryNodeType, String uuid, Integer intervalMonths, String targetYm) {
List<FileMetadataInfo> nodeList = null;
String actualNodeName = uuid; // 存储实际的节点名称

if(NodeTypeEnum.PROJECT.getValue().equals(queryNodeType)){
// uuid是项目的uuid
nodeList = fileMetadataInfoService.lambdaQuery()
public SdmResponse<List<JSONObject>> getNodeSizeByNodeType(String queryNodeType, String queryNodeName, Integer intervalMonths, String targetYm) {
// uuid是项目的uuid
List<FileMetadataInfo> nodeList = fileMetadataInfoService.lambdaQuery()
.eq(FileMetadataInfo::getRelatedResourceUuidOwnType, queryNodeType)
.eq(FileMetadataInfo::getRelatedResourceUuid, uuid)
.eq(FileMetadataInfo::getOriginalName, queryNodeName)
.list();
actualNodeName = nodeList.get(0).getOriginalName(); // 对于项目,节点名称是项目的名字
}else if(NodeTypeEnum.DISCIPLINE.getValue().equals(queryNodeType)){
// uuid就是字典中配置的学科名:MHC,不再是uuid了
nodeList = fileMetadataInfoService.lambdaQuery()
.eq(FileMetadataInfo::getRelatedResourceUuidOwnType, queryNodeType)
.eq(FileMetadataInfo::getOriginalName, uuid)
.list();
actualNodeName = uuid; // 对于学科,节点名称就是传入的uuid值
}else {
log.error("不支持的节点类型: {}", queryNodeType);
return SdmResponse.success(new ArrayList<>());

if (ObjectUtils.isEmpty(nodeList)) {
log.error("获取节点信息失败,节点类型: {}, 标识符: {}", queryNodeType, queryNodeName);
return getEmptyNodeSize(queryNodeName);
}

Long tenantId = ThreadLocalContext.getTenantId();
if (ObjectUtils.isEmpty(nodeList)) {
log.error("获取节点信息失败,节点类型: {}, 标识符: {}", queryNodeType, uuid);
return getEmptyNodeSize(actualNodeName);
}

// List<AllNodeByProjectIdAndTypeResp> nodeLists = response.getData();


//将 nodeList的 relatedResourceUuid1-学科1 relatedResourceUuid2-学科1 转换成 valueToKeysMap: 学科1->[uuid1,uuid2] 学科2->[uuid3,uuid4]
Map<String, List<String>> nodeNameToUuidListMap = nodeList.stream()
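The comment above describes inverting the node list into name -> [uuid...] groups; the stream is cut off by the hunk boundary, so the following is a hedged reconstruction of the grouping idiom it describes, not the committed code.

// Group metadata rows by display name, collecting every related uuid under that name.
Map<String, List<String>> nodeNameToUuidListMapSketch = nodeList.stream()
        .collect(Collectors.groupingBy(
                FileMetadataInfo::getOriginalName,
                Collectors.mapping(FileMetadataInfo::getRelatedResourceUuid, Collectors.toList())));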
@@ -111,9 +96,9 @@ public class DataStorageAnalysisImpl implements DataStorageAnalysis {
.in(FileMetadataInfo::getRelatedResourceUuid, uuids)
.eq(FileMetadataInfo::getTenantId, tenantId)
.list().stream().collect(Collectors.toMap(FileMetadataInfo::getRelatedResourceUuid, FileMetadataInfo::getId));
if (CollectionUtils.isEmpty(uuidToDirIdMap)) {
log.error("获取节点ID映射失败,节点类型: {}, 标识符: {}", queryNodeType, uuid);
return getEmptyNodeSize(actualNodeName);
if (uuidToDirIdMap.isEmpty()) {
log.error("获取节点ID映射失败,节点类型: {}, 标识符: {}", queryNodeType, queryNodeName);
return getEmptyNodeSize(queryNodeName);
}

// fileMetadIds: uuid对应的fileid结合
@@ -132,7 +117,7 @@ public class DataStorageAnalysisImpl implements DataStorageAnalysis {

if (ObjectUtils.isEmpty(nodeSizeDTOS)) {
// 空间为空 也要返回nodeName
return getEmptyNodeSize(actualNodeName);
return getEmptyNodeSize(queryNodeName);
}
@@ -166,41 +151,146 @@ public class DataStorageAnalysisImpl implements DataStorageAnalysis {
}

@Override
public SdmResponse getDirectorySizeByUserId(List<Long> userIds, Integer intervalMonths, String targetYm) {
public SdmResponse<List<JSONObject>> getDirectorySizeByUserId(List<Long> userIds, Integer intervalMonths, String targetYm) {
Long tenantId = ThreadLocalContext.getTenantId();
if (ObjectUtils.isEmpty(userIds)) {
return SdmResponse.success();
}
List<UserTotalFileSizeDTO> totalFileSizeByCreator = null;

// 1. 获取存储统计数据
List<UserTotalFileSizeDTO> totalFileSizeByCreator = new ArrayList<>();

// 逻辑:如果 userIds 为空,则在 SQL 层通过 LIMIT 10 获取占用空间最大的前10名用户
// 注意:需要在 fileStorageService 的具体实现中增加对 userIds 为空的 LIMIT 10 处理
if (ObjectUtils.isNotEmpty(intervalMonths)) {
// 近几个月
totalFileSizeByCreator = fileStorageService.getTotalFileSizeByCreator(userIds, intervalMonths,tenantId);
}
if (ObjectUtils.isNotEmpty(targetYm)) {
//查询增量的
totalFileSizeByCreator = fileStorageService.getTotalFileSizeByCreatorAndTargetYm(userIds, targetYm,tenantId);
totalFileSizeByCreator = fileStorageService.getTotalFileSizeByCreator(userIds, intervalMonths, tenantId);
} else if (ObjectUtils.isNotEmpty(targetYm)) {
totalFileSizeByCreator = fileStorageService.getTotalFileSizeByCreatorAndTargetYm(userIds, targetYm, tenantId);
}

if (CollectionUtils.isEmpty(totalFileSizeByCreator)) return SdmResponse.success();
if (CollectionUtils.isEmpty(totalFileSizeByCreator)) {
return SdmResponse.success(new ArrayList<>());
}
// 组装返回结果 userName, totalFileSize(格式化大小)
Map<Long, Long> userIdToTotalSizeMap = totalFileSizeByCreator.stream().collect(Collectors.toMap(UserTotalFileSizeDTO::getUserId, UserTotalFileSizeDTO::getTotalSize));

// 2. 批量获取用户信息 (性能优化核心)
// 提取所有涉及到的 userId
List<Long> targetUserIds = new ArrayList<>();
if(CollectionUtils.isEmpty(userIds)){
targetUserIds = totalFileSizeByCreator.stream()
.map(UserTotalFileSizeDTO::getUserId)
.distinct()
.toList();
}else {
targetUserIds = userIds;
}


Map<Long, String> userIdToNicknameMap = userNameCacheService.batchGetUserNames(new HashSet<>(targetUserIds));

// 3. 组装结果
List<JSONObject> result = new ArrayList<>();
for (UserTotalFileSizeDTO userTotalFileSizeDTO : totalFileSizeByCreator) {
UserQueryReq req = new UserQueryReq();
req.setUserId(userTotalFileSizeDTO.getUserId());
req.setTenantId(ThreadLocalContext.getTenantId());

SdmResponse<CIDUserResp> cidUserRespSdmResponse = sysUserFeignClient.queryUserDetail(req);
if (!cidUserRespSdmResponse.isSuccess()) continue;

JSONObject tmp = new JSONObject();
tmp.put("userName", cidUserRespSdmResponse.getData().getNickname());
tmp.put("totalFileSize", FileSizeUtils.formatFileSizeToGB(new BigDecimal(userTotalFileSizeDTO.getTotalSize())));
result.add(tmp);
for (Map.Entry<Long, String> entry : userIdToNicknameMap.entrySet()){
JSONObject jsonObject = new JSONObject();
jsonObject.put("userName", entry.getValue());
Long totalSize = userIdToTotalSizeMap.getOrDefault(entry.getKey(), 0L);
jsonObject.put("totalFileSize", FileSizeUtils.formatFileSizeToGB(new BigDecimal(totalSize)));
result.add(jsonObject);
}

return SdmResponse.success(result);
}
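The rewritten getDirectorySizeByUserId replaces one sysUserFeignClient.queryUserDetail round trip per user with a single batched name lookup followed by in-memory assembly. A hedged sketch of the pattern with illustrative ids (the service and map names come from the hunk above):

// One batched resolution instead of N Feign calls, then pure in-memory pairing with the aggregated sizes.
Set<Long> ids = new HashSet<>(List.of(101L, 102L, 103L));                    // illustrative user ids
Map<Long, String> nicknames = userNameCacheService.batchGetUserNames(ids);  // single round trip
nicknames.forEach((userId, name) ->
        System.out.println(name + " -> " + userIdToTotalSizeMap.getOrDefault(userId, 0L) + " bytes"));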
@Override
public SdmResponse<PageDataResp<List<ListBigFileResp>>> queryBigFileDetail(QueryBigFileReq queryBigFileReq) {
queryBigFileReq.setApproveTypeList(ApproveFileDataTypeEnum.getVisibleInFileList());
// 1. 调用大文件查询服务
SdmResponse<PageDataResp<List<FileStorage>>> searchResult = listBigFile(queryBigFileReq);
PageDataResp<List<FileStorage>> pageDataResp = searchResult.getData();
List<FileStorage> fileStorages = pageDataResp.getData();

// 如果数据为空,提前返回
if (org.apache.commons.collections4.CollectionUtils.isEmpty(fileStorages)) {
return PageUtils.getJsonObjectSdmResponse(new ArrayList<>(), new PageInfo<>());
}

// 3. 获取当前页文件的详细元数据
List<Long> fileIdList = fileStorages.stream().map(FileStorage::getFileId).toList();
List<FileMetadataInfo> currentFiles = fileMetadataInfoService.lambdaQuery()
.eq(FileMetadataInfo::getTenantId, ThreadLocalContext.getTenantId())
.in(FileMetadataInfo::getId, fileIdList)
.list();

// 批量分层获取所有相关的父目录
// key 是 ID,value 是对应的元数据实体。用于在内存中快速查找。
Map<Long, FileMetadataInfo> parentCacheMap = new HashMap<>();
// 当前需要去数据库查的父级 ID 集合
Set<Long> nextFetchIds = currentFiles.stream()
.map(FileMetadataInfo::getParentId)
.filter(pid -> pid != null && pid != 0)
.collect(Collectors.toSet());

int safetyDepth = 0; // 防死循环计数器
// 只要还有没查过的父 ID,且深度在合理范围内(10层),就继续批量查
while (org.apache.commons.collections4.CollectionUtils.isNotEmpty(nextFetchIds) && safetyDepth < 10) {
// 一次性查出当前这一层所有的父节点信息
List<FileMetadataInfo> parents = fileMetadataInfoService.listByIds(nextFetchIds);
if (org.apache.commons.collections4.CollectionUtils.isEmpty(parents)) break;

nextFetchIds = new HashSet<>(); // 重置,准备收集下一层 ID
for (FileMetadataInfo p : parents) {
parentCacheMap.put(p.getId(), p);
// 如果这个父节点还有上级,且我们之前没查过这个上级,就加进下一次查询列表
if (p.getParentId() != null && p.getParentId() != 0 && !parentCacheMap.containsKey(p.getParentId())) {
nextFetchIds.add(p.getParentId());
}
}
safetyDepth++;
}
// 内存组装数据:将 FileMetadata 转换为 Response,并回溯层级信息
List<ListBigFileResp> finalResultList = currentFiles.stream().map(file -> {
ListBigFileResp resp = new ListBigFileResp();
BeanUtils.copyProperties(file, resp);
resp.setFormatFileSize(FileSizeUtils.formatFileSize(BigDecimal.valueOf(file.getFileSize())));

// 从 parentCacheMap 中回溯,设置项目、阶段、专业信息
Long pid = file.getParentId();
int limit = 0;
// 这里的循环完全在内存中进行,速度极快且不产生日志
while (pid != null && parentCacheMap.containsKey(pid) && limit < 15) {
FileMetadataInfo folder = parentCacheMap.get(pid);
String ownType = folder.getRelatedResourceUuidOwnType();

if (NodeTypeEnum.PROJECT.getValue().equals(ownType)) {
resp.setOwnProjectName(folder.getOriginalName());
resp.setOwnProjectId(folder.getRelatedResourceUuid());
} else if (NodeTypeEnum.PHASE.getValue().equals(ownType)) {
resp.setOwnPhaseName(folder.getOriginalName());
resp.setOwnPhaseId(folder.getRelatedResourceUuid());
} else if (NodeTypeEnum.DISCIPLINE.getValue().equals(ownType)) {
resp.setOwnDisciplineName(folder.getOriginalName());
resp.setOwnDisciplineId(folder.getRelatedResourceUuid());
} else if (NodeTypeEnum.TASK.getValue().equals(ownType)) {
resp.setOwntaskName(folder.getOriginalName());
resp.setOwntaskId(folder.getRelatedResourceUuid());
} else if (NodeTypeEnum.RUN.getValue().equals(ownType)) {
resp.setOwnRunName(folder.getOriginalName());
resp.setOwnRunId(folder.getRelatedResourceUuid());
}
pid = folder.getParentId();
limit++;
}
return resp;
}).toList();

// 6. 构造分页信息并返回
PageInfo<FileMetadataInfo> pageInfo = new PageInfo<>();
pageInfo.setTotal(pageDataResp.getTotal());
pageInfo.setPageNum(pageDataResp.getCurrentPage());
pageInfo.setPageSize(pageDataResp.getPageSize());

return PageUtils.getJsonObjectSdmResponse(finalResultList, pageInfo);
}
@Override
public SdmResponse<PageDataResp<List<FileStorage>>> listBigFile(QueryBigFileReq queryBigFileReq) {
List<FileStorage> list = getFileStorages(queryBigFileReq);
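queryBigFileDetail resolves each file's project/phase/discipline ancestry by fetching parents level by level (one batched lookup per depth) and then walking the chain in memory. A self-contained, hedged sketch of that traversal pattern, with a plain map standing in for the file_metadata_info table:

import java.util.*;

public class AncestorFetchSketch {
    // Illustrative stand-in for file_metadata_info rows: id -> parentId (0 means root).
    static final Map<Long, Long> PARENT = Map.of(5L, 4L, 4L, 3L, 3L, 0L);

    public static void main(String[] args) {
        Map<Long, Long> cache = new HashMap<>();          // plays the role of parentCacheMap
        Set<Long> next = new HashSet<>(Set.of(4L));       // parents of the current page of files
        int depth = 0;
        while (!next.isEmpty() && depth < 10) {           // same 10-level safety cap as in the diff
            Set<Long> following = new HashSet<>();
            for (Long id : next) {                        // one "batched" lookup per level
                Long parent = PARENT.get(id);
                cache.put(id, parent);
                if (parent != null && parent != 0 && !cache.containsKey(parent)) {
                    following.add(parent);
                }
            }
            next = following;
            depth++;
        }
        System.out.println(cache);                        // resolved parent links, e.g. {3=0, 4=3}
    }
}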
@@ -217,7 +217,7 @@ public class DimensionTemplateServiceImpl extends ServiceImpl<DimensionTemplateM
resultDir.addAll(dirInfos);

// 2、再获取节点文件夹
List<FileMetadataInfo> nodeDirInfos = fileMetadataInfoService.lambdaQuery().in(FileMetadataInfo::getId, fileIds).list();
List<FileMetadataInfo> nodeDirInfos = fileMetadataInfoService.lambdaQuery().in(FileMetadataInfo::getId, fileIds).orderByDesc(FileMetadataInfo::getCreateTime).list();
if(CollectionUtils.isNotEmpty(nodeDirInfos)) {
String chooseNodeType = nodeDirInfos.get(0).getRelatedResourceUuidOwnType();
List<String> chooseUuids = nodeDirInfos.stream().map(FileMetadataInfo::getRelatedResourceUuid).toList();
@@ -21,6 +21,11 @@ import java.util.List;
*/
@Service
public class FileStorageServiceImpl extends ServiceImpl<FileStorageMapper, FileStorage> implements IFileStorageService {
@Override
public List<String> getdefaultNodeSizeUUID(String queryNodeType, Integer limitNum) {
return baseMapper.getdefaultNodeNameByNodeSize(queryNodeType,limitNum);
}

@Override
public List<NodeSizeDTO> selectNodeSizeByNodeType(List<Long> directoryIds, Integer intervalMonths, Long tenantId) {
return baseMapper.selectNodeSizeByNodeType(directoryIds, intervalMonths, tenantId);
@@ -99,12 +99,7 @@ import java.util.stream.Collectors;
@ConditionalOnProperty(name = "fileSystem.chose", havingValue = "minio")
public class MinioFileIDataFileServiceImpl implements IDataFileService {
// fileData 知识库文件列表可见的数据
private final List<Integer> fileDatdList = Arrays.asList(
NumberConstants.ZERO,
NumberConstants.TWO,
NumberConstants.THREE,
NumberConstants.FOUR
);
private final List<Integer> fileDatdList = ApproveFileDataTypeEnum.getVisibleInFileList();

@Autowired
private IFileMetadataInfoService fileMetadataInfoService;
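The hard-coded NumberConstants list is replaced by ApproveFileDataTypeEnum.getVisibleInFileList(), so the visible-in-list values live next to the approval types themselves. The enum is not shown in this diff; the sketch below is only an assumption about its shape, inferred from the old values 0/2/3/4 and the approveType codes documented in ListBigFileResp:

// Hypothetical shape of the enum helper (not part of this commit).
public enum ApproveFileDataTypeEnumSketch {
    APPROVED(0), UPLOAD_REVIEW(1), MODIFY_REVIEW(2), DELETE_REVIEW(3);

    private final int code;
    ApproveFileDataTypeEnumSketch(int code) { this.code = code; }

    // Mirrors the old Arrays.asList(ZERO, TWO, THREE, FOUR); the meaning of 4 is not shown in this diff.
    public static java.util.List<Integer> getVisibleInFileList() {
        return java.util.List.of(0, 2, 3, 4);
    }
}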
@@ -1,6 +1,35 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.sdm.data.dao.FileStorageMapper">
<select id="getdefaultNodeNameByNodeSize" resultType="java.lang.String">
select
originalName
from
(
select
file_metadata_info.originalName,
SUM(dirSize.totalSize) AS totalSize
from
(
select
fs.dirId ,
SUM(fs.fileSize) AS totalSize
from
file_storage fs
where
fs.dirId
in(select id from file_metadata_info fmi where fmi.relatedResourceUuidOwnType = #{queryNodeType})
group by
fs.dirId) dirSize
left join file_metadata_info ON
dirSize.dirId = file_metadata_info.id
group by
file_metadata_info.originalName
order by
totalSize DESC
) tmp
LIMIT #{limitNum}
</select>

<select id="selectNodeSizeByNodeType" resultType="com.sdm.data.model.dto.NodeSizeDTO">
SELECT
@@ -62,61 +91,72 @@
userId,
SUM(fileSize) AS totalSize
FROM file_storage
WHERE
file_storage.tenantId = #{tenantId}
and
userId IN
<foreach collection="userIds" item="userId" open="(" separator="," close=")">
#{userId}
</foreach>

WHERE tenantId = #{tenantId}
<if test="userIds != null and userIds.size() > 0">
AND userId IN
<foreach collection="userIds" item="userId" open="(" separator="," close=")">
#{userId}
</foreach>
</if>
<if test="intervalMonths != null and intervalMonths > 0">
AND createTime >= DATE_SUB(NOW(), INTERVAL #{intervalMonths} MONTH)
</if>
GROUP BY userId;
GROUP BY userId
ORDER BY totalSize DESC
<!-- 只有没传 IDs 时才限制前 10 -->
<if test="userIds == null or userIds.size() == 0">
LIMIT 10
</if>
</select>
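With the <if> guards above, getTotalFileSizeByCreator now serves both an explicit user list and the "no users passed" case, where the LIMIT 10 branch returns the tenant's ten heaviest writers. A hedged Java-side illustration of the two call shapes (service wiring, ids and tenantId are illustrative):

// Explicit users: statistics restricted to the given ids.
List<UserTotalFileSizeDTO> some = fileStorageService.getTotalFileSizeByCreator(List.of(101L, 102L), 6, tenantId);
// No users passed: the mapper's LIMIT 10 branch returns the top-10 users by total size.
List<UserTotalFileSizeDTO> top10 = fileStorageService.getTotalFileSizeByCreator(Collections.emptyList(), 6, tenantId);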
<select id="getTotalFileSizeByCreatorAndTargetYm" resultType="com.sdm.data.model.dto.UserTotalFileSizeDTO">
<!-- 目标年月之前(历史累计,不包含目标年月) -->
WITH TargetUsers AS (
<choose>
<when test="userIds != null and userIds.size() > 0">
<!-- 直接使用传入的 ID -->
<foreach collection="userIds" item="uid" open="SELECT " separator=" UNION ALL SELECT " close="">
#{uid} as userId
</foreach>
</when>
<otherwise>
<!-- 先查出累计最大的前10人 ID -->
SELECT userId
FROM file_storage
WHERE tenantId = #{tenantId}
AND createYearMonth <= #{targetYm}
GROUP BY userId
ORDER BY SUM(fileSize) DESC
LIMIT 10
</otherwise>
</choose>
)
<!-- 使用交叉连接/条件聚合,一次扫描完成统计 -->
SELECT
userId,
'BEFORE' AS statDimension,
SUM(fileSize) AS totalSize
t.userId,
s.statDimension,
SUM(s.totalSize) as totalSize
FROM TargetUsers t
INNER JOIN (
-- 历史累计
SELECT userId, 'BEFORE' as statDimension, fileSize
FROM file_storage
WHERE
file_storage.tenantId = #{tenantId}
and
userId IN
<foreach collection="userIds" item="userId" open="(" separator="," close=")">
#{userId}
</foreach>
AND createYearMonth < #{targetYm}
GROUP BY userId
WHERE tenantId = #{tenantId} AND createYearMonth < #{targetYm}

UNION ALL

<!-- 目标年月(当月增量,仅包含目标年月) -->
SELECT
userId AS dirId,
'INCREMENT' AS statDimension,
SUM(fileSize) AS totalSize
-- 当月增量
SELECT userId, 'INCREMENT' as statDimension, fileSize
FROM file_storage
WHERE
file_storage.tenantId = #{tenantId}
and
userId IN
<foreach collection="userIds" item="userId" open="(" separator="," close=")">
#{userId}
</foreach>
AND createYearMonth = #{targetYm}
GROUP BY userId
WHERE tenantId = #{tenantId} AND createYearMonth = #{targetYm}
) s ON t.userId = s.userId
GROUP BY t.userId, s.statDimension
</select>
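The query above yields up to two rows per user, tagged BEFORE (history strictly before targetYm) and INCREMENT (the target month itself). A hedged sketch of how a caller might fold them into a cumulative total; the DTO accessors are taken from their usage elsewhere in this diff, while the rows variable is illustrative:

// Cumulative usage through the target month = BEFORE + INCREMENT per user.
Map<Long, Long> cumulative = new HashMap<>();
for (UserTotalFileSizeDTO row : rows) {
    cumulative.merge(row.getUserId(), row.getTotalSize(), Long::sum);
}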
<select id="selectBigFiles" resultType="com.sdm.data.model.entity.FileStorage">
SELECT
distinct
file_storage.fileName,file_storage.fileId,file_storage.userGroupId,file_storage.userId,file_storage.fileBizType,file_storage.fileSuffix
file_storage.fileName,file_storage.fileId,file_storage.userGroupId,file_storage.userId,file_storage.fileBizType,file_storage.fileSuffix,file_storage.updateTime
FROM file_storage
left join file_metadata_info on file_storage.fileId = file_metadata_info.id
<where>
@@ -160,6 +200,7 @@
#{userId}
</foreach>
</if>
order by file_storage.updateTime desc
</where>
</select>
</mapper>