1. Add big-file upload support to `OssClient`, with three core methods: initialize the multipart upload, upload a file chunk, and merge the chunks.

```java
/**
 * Big file upload - initialize the multipart upload and generate its uploadId.
 * Note: the object path generated from the file name here must stay the same
 * for every chunk upload and for the final merge.
 *
 * @return the initialization result containing the uploadId
 */
public InitiateMultipartUploadResult initBigFileUpload(String fileName, String contentType) {
    try {
        // build the full object path (prefix folder + file suffix)
        String fileSuffix = getFileSuffix(fileName);
        String path = getPath(properties.getPrefix(), fileSuffix);
        // build the multipart-upload initialization request
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(properties.getBucketName(), path);
        // set the object metadata
        ObjectMetadata metadata = new ObjectMetadata();
        if (StringUtils.isBlank(contentType)) {
            // fall back to a generic binary type (assumed default; adjust to suit your project)
            metadata.setContentType("application/octet-stream");
        } else {
            metadata.setContentType(contentType);
        }
        request.setObjectMetadata(metadata);
        // set the access policy
        request.setCannedACL(getAccessPolicy().getAcl());
        return client.initiateMultipartUpload(request);
    } catch (Exception e) {
        throw new OssException("大文件上传初始化失败，请检查配置信息:[" + e.getMessage() + "]");
    }
}

/**
 * Big file upload - upload one chunk.
 *
 * @param uploadId    upload id returned by the initialization
 * @param path        object path generated during initialization
 * @param partNumber  chunk sequence number
 * @param partSize    chunk size
 * @param inputStream chunk input stream
 * @return upload result (part number + eTag)
 */
public UploadPartResult bigFileUploadPart(String uploadId, String path, int partNumber, long partSize, InputStream inputStream) {
    try {
        // build the part-upload request
        UploadPartRequest request = new UploadPartRequest();
        request.setBucketName(properties.getBucketName()); // target bucket
        request.setKey(path);                              // object key in the bucket
        request.setUploadId(uploadId);
        request.setPartNumber(partNumber);
        request.setPartSize(partSize);
        request.withInputStream(new ByteArrayInputStream(IOUtils.toByteArray(inputStream)));
        return client.uploadPart(request); // perform the upload
    } catch (Exception e) {
        throw new OssException("大文件上传失败，第" + partNumber + "部分上传失败，请检查配置信息:[" + e.getMessage() + "]");
    }
}

/**
 * Big file upload - complete (merge) the upload.
 *
 * @param uploadId  upload id returned by the initialization
 * @param path      object path generated during initialization
 * @param fileName  original file name
 * @param partETags ETags of the successfully uploaded chunks
 * @return result containing the file URL and file name
 */
public UploadResult completeBigFileUpload(String uploadId, String path, String fileName, List<PartETag> partETags) {
    try {
        // build the completion request
        CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(properties.getBucketName(), path, uploadId, partETags);
        CompleteMultipartUploadResult result = client.completeMultipartUpload(request);
        // return UploadResult.builder().url(getUrl() + "/" + path).filename(fileName).build();
        return UploadResult.builder().url(getUrl() + "/" + result.getKey()).filename(fileName).build();
    } catch (Exception e) {
        throw new OssException("大文件上传失败，请检查配置信息:[" + e.getMessage() + "]");
    }
}

/**
 * Get the file suffix.
 *
 * @param fileName file name
 * @return suffix including the leading dot, or an empty string
 */
public String getFileSuffix(String fileName) {
    if (StringUtils.isBlank(fileName)) {
        return "";
    }
    int lastDotIndex = fileName.lastIndexOf(".");
    if (lastDotIndex > 0) {
        return fileName.substring(lastDotIndex);
    }
    return "";
}
```
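Taken together, the three methods are meant to be called in sequence: initialize once, upload every part against the same path, then complete with the collected `PartETag`s. A minimal call-sequence sketch, assuming an `OssClient` instance named `client` and a pre-sliced list of byte arrays named `chunks` (both names are illustrative, not part of the original code):

```java
// Illustrative call sequence for the three methods above; `client` and `chunks` are assumed inputs.
InitiateMultipartUploadResult init = client.initBigFileUpload("demo.mp4", "video/mp4");
String uploadId = init.getUploadId();
String path = init.getKey(); // must stay identical for every part and for the completion call

List<PartETag> partETags = new ArrayList<>();
int partNumber = 1;
for (byte[] chunk : chunks) {
    UploadPartResult part = client.bigFileUploadPart(
        uploadId, path, partNumber++, chunk.length, new ByteArrayInputStream(chunk));
    // keep the part number and ETag of each uploaded chunk for the final merge
    partETags.add(new PartETag(part.getPartNumber(), part.getETag()));
}

UploadResult result = client.completeBigFileUpload(uploadId, path, "demo.mp4", partETags);
```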
2. Add an `ISysOssBigService` to the system module to handle the big-file workflow.

```java
package com.ruoyi.system.service;

import com.ruoyi.system.domain.vo.BigFileUploadStatus;
import org.springframework.web.multipart.MultipartFile;

/**
 * Big file upload service interface
 *
 * @author Lion Li
 */
public interface ISysOssBigService {

    /**
     * Initialize a big file upload.
     *
     * @param fileMd5         file MD5 (one per file, generated by the front end)
     * @param fileSize        file size
     * @param fileName        file name
     * @param fileContentType content type
     * @return the uploadId produced by the initialization
     */
    String initBigFileUpload(String fileMd5, Long fileSize, String fileName, String fileContentType);

    /**
     * Upload one file chunk.
     *
     * @param fileMd5     file MD5 (one per file, generated by the front end)
     * @param file        chunk file
     * @param chunkNumber chunk sequence number
     * @param totalChunks total number of chunks
     * @return the current upload status
     */
    BigFileUploadStatus uploadFileChunk(String fileMd5, MultipartFile file, Integer chunkNumber, Integer totalChunks);

    /**
     * Merge the chunks once they have all been uploaded.
     *
     * @param fileMd5 file MD5 (one per file, generated by the front end)
     * @return the final upload status
     */
    BigFileUploadStatus integrateBigFile(String fileMd5);

    /**
     * Get the upload status of a big file.
     *
     * @param fileMd5 file MD5 (one per file, generated by the front end)
     * @return the upload status
     */
    BigFileUploadStatus getUploadStatus(String fileMd5);
}
```
The implementation stores the upload state in Redis, keyed by the file MD5:

```java
/**
 * Big file upload service implementation
 *
 * @author Lion Li
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class SysOssBigServiceImpl implements ISysOssBigService {

    private static final String UPLOAD_STATUS_KEY = "big:upload:status:"; // Redis key prefix for the upload status
    private static final long EXPIRE_DAYS = 1L;                           // Redis expiration: one day
    private static final String INIT_UPLOAD = "initUpload";               // status: initialized
    private static final String UPLOADING = "uploading";                  // status: uploading
    private static final String FINISH_UPLOAD = "finishUpload";           // status: finished

    private final SysOssMapper ossMapper;

    /**
     * Initialize a big file upload and return the uploadId.
     */
    @Override
    public String initBigFileUpload(String fileMd5, Long fileSize, String fileName, String fileContentType) {
        if (StrUtil.isEmptyIfStr(fileName)) {
            return null;
        }
        // initialize the multipart upload
        OssClient client = OssFactory.instance();
        InitiateMultipartUploadResult initUploadResult = client.initBigFileUpload(fileName, fileContentType);
        log.debug("初始化大文件上传结果: 上传id={}, 上传路径={}", initUploadResult.getUploadId(), initUploadResult.getKey());
        // record the upload status
        BigFileUploadStatus status = new BigFileUploadStatus();
        status.setFileMd5(fileMd5);
        status.setFileName(fileName);
        status.setFileSize(fileSize);
        status.setUploadId(initUploadResult.getUploadId());
        status.setPath(initUploadResult.getKey());
        status.setStatus(INIT_UPLOAD);
        status.setProgress(0.0);
        status.setUploadParts(new ConcurrentHashMap<>());
        this.saveUploadStatus(status);
        return status.getUploadId();
    }

    /**
     * Upload one file chunk and update the stored progress.
     */
    @Override
    public BigFileUploadStatus uploadFileChunk(String fileMd5, MultipartFile file, Integer chunkNumber, Integer totalChunks) {
        // load the upload status
        BigFileUploadStatus status = this.getUploadStatus(fileMd5);
        if (Objects.isNull(status)) {
            return null;
        }
        // upload the chunk
        try {
            OssClient client = OssFactory.instance();
            UploadPartResult uploadPartResult = client.bigFileUploadPart(status.getUploadId(), status.getPath(),
                chunkNumber, file.getSize(), file.getInputStream());
            if (Objects.nonNull(uploadPartResult)) {
                status.setStatus(UPLOADING);
                // record the uploaded part
                PartInfo partInfo = new PartInfo(uploadPartResult.getPartNumber(), uploadPartResult.getETag());
                status.getUploadParts().put(uploadPartResult.getPartNumber(), partInfo);
                // compute the upload progress
                double progress = (double) status.getUploadParts().size() / totalChunks * 100;
                status.setProgress(progress);
                log.info("文件{}分片{}/{}上传成功", fileMd5, chunkNumber, totalChunks);
                // persist the updated status
                this.saveUploadStatus(status);
                return status;
            } else {
                throw new ServiceException("大文件切片上传失败，请检查云服务器接口");
            }
        } catch (IOException e) {
            log.error("分片上传失败", e);
            return null;
        }
    }

    /**
     * Merge the uploaded chunks and record the file in the OSS table.
     */
    @Override
    public BigFileUploadStatus integrateBigFile(String fileMd5) {
        BigFileUploadStatus status = this.getUploadStatus(fileMd5);
        if (Objects.isNull(status)) {
            return null;
        }
        List<PartETag> parts = status.getUploadParts()
            .values()
            .stream()
            .map(info -> new PartETag(info.getPartNumber(), info.getETag()))
            .collect(Collectors.toList());
        // complete the multipart upload
        OssClient client = OssFactory.instance();
        UploadResult uploadResult = client.completeBigFileUpload(status.getUploadId(), status.getPath(), status.getFileName(), parts);
        // update the status
        status.setStatus(FINISH_UPLOAD);
        status.setFileUrl(uploadResult.getUrl());
        status.setProgress(100.0);
        this.saveUploadStatus(status);
        // insert a record into the OSS file table
        String[] fileSubStr = status.getFileName().split("\\.");
        SysOss oss = new SysOss();
        oss.setFileName(status.getPath());
        oss.setOriginalName(status.getFileName());
        oss.setFileSuffix("." + fileSubStr[fileSubStr.length - 1]);
        oss.setUrl(uploadResult.getUrl());
        oss.setService(client.getConfigKey());
        ossMapper.insert(oss);
        log.info("文件{}上传完成，URL: {}", status.getFileMd5(), oss.getUrl());
        return status;
    }

    /**
     * Save the upload status to Redis.
     */
    private void saveUploadStatus(BigFileUploadStatus status) {
        log.info("保存大文件上传状态到Redis: key={}, status={}", UPLOAD_STATUS_KEY + status.getFileMd5(), status);
        RedisUtils.setCacheObject(UPLOAD_STATUS_KEY + status.getFileMd5(), status, Duration.ofDays(EXPIRE_DAYS));
    }

    /**
     * Get the upload status of a big file from Redis.
     */
    @Override
    public BigFileUploadStatus getUploadStatus(String fileMd5) {
        return RedisUtils.getCacheObject(UPLOAD_STATUS_KEY + fileMd5);
    }
}
```

3. Entities referenced by the implementation.

```java
@Data
public class BigFileUploadStatus {
    // file MD5, used for instant upload and resumable upload; effectively the primary key
    private String fileMd5;
    // file name
    private String fileName;
    // file size
    private Long fileSize;
    // content type
    private String fileContentType;
    // uploadId generated by the backend
    private String uploadId;
    // upload status
    private String status;
    // upload progress
    private Double progress = 0.0;
    // final file URL
    private String fileUrl;
    // storage path used while uploading
    private String path;
    // upload progress of each chunk
    private Map<Integer, PartInfo> uploadParts;
}
```
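The per-chunk `PartInfo` object stored in `uploadParts` is not shown in the post (the entity listing repeats `BigFileUploadStatus` instead). A minimal sketch of what it could look like, assuming it only needs to carry the values that `integrateBigFile` reads back (the class body below is an assumption, not the original code):

```java
// Hypothetical PartInfo entity; only the fields that integrateBigFile() reads back are assumed here.
@Data
@NoArgsConstructor
@AllArgsConstructor
public class PartInfo {
    // chunk sequence number (1-based)
    private Integer partNumber;
    // ETag returned by the OSS for this chunk
    private String eTag;
}
```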
4. Write the controller.

```java
/**
 * Big file upload controller
 *
 * @author Lion Li
 */
@Slf4j
@Validated
@RequiredArgsConstructor
@RestController
@RequestMapping("/system/big/oss")
public class SysBigOssController {

    private final ISysOssBigService sysOssBigService;

    /**
     * Check the file status (instant-upload check).
     */
    @PostMapping("/check")
    public R<String> checkFile(@RequestBody BigFileUploadRequest request) {
        String fileMd5 = request.getFileMd5();
        if (StrUtil.isEmptyIfStr(fileMd5)) {
            return R.fail("未获取到文件Md5信息");
        }
        // check whether the file has been uploaded before
        BigFileUploadStatus status = sysOssBigService.getUploadStatus(fileMd5);
        if (Objects.isNull(status) || status.getProgress() == 0.0) {
            return R.ok("文件未上传", null);
        } else {
            return R.ok("文件已存在", status.getFileUrl());
        }
    }

    /**
     * Initialize the multipart upload.
     */
    @PostMapping("/init")
    public R<String> initUpload(@RequestBody BigFileUploadRequest request) {
        String uploadId = sysOssBigService.initBigFileUpload(request.getFileMd5(), request.getFileSize(),
            request.getFileName(), request.getFileContentType());
        return StrUtil.isEmptyIfStr(uploadId) ? R.fail("初始化失败") : R.ok("初始化成功");
    }

    /**
     * Upload one chunk.
     */
    @PostMapping("/chunk")
    public R<String> uploadChunk(@RequestParam MultipartFile file,
                                 @RequestParam String fileMd5,
                                 @RequestParam Integer chunkNumber,
                                 @RequestParam Integer totalChunks) {
        BigFileUploadStatus status = sysOssBigService.uploadFileChunk(fileMd5, file, chunkNumber, totalChunks);
        return Objects.nonNull(status) ? R.ok("切片上传成功") : R.fail("切片上传失败");
    }

    /**
     * Complete the upload (merge the chunks).
     */
    @PostMapping("/complete")
    public R<BigFileUploadStatus> completeUpload(@RequestBody BigFileUploadRequest bigFileUploadRequest) {
        BigFileUploadStatus status = sysOssBigService.integrateBigFile(bigFileUploadRequest.getFileMd5());
        return Objects.nonNull(status) && "finishUpload".equals(status.getStatus())
            ? R.ok("文件上传成功", status) : R.fail("文件上传失败");
    }

    /**
     * Get the upload progress.
     */
    @GetMapping("/progress")
    public R<BigFileUploadStatus> getProgress(String fileMd5) {
        BigFileUploadStatus status = sysOssBigService.getUploadStatus(fileMd5);
        return R.ok(status);
    }
}
```
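The `BigFileUploadRequest` body object used by the controller is not shown in the post. A minimal sketch inferred from the getters called above (field names are assumptions based on those getters):

```java
// Hypothetical request body for /check, /init and /complete, inferred from the getters
// used in SysBigOssController; adjust the fields to match your actual front-end payload.
@Data
public class BigFileUploadRequest {
    // file MD5 computed by the front end
    private String fileMd5;
    // original file name
    private String fileName;
    // file size in bytes
    private Long fileSize;
    // MIME type reported by the browser
    private String fileContentType;
}
```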
5. Add a big-file upload component to the front end.

```vue
<template>
  <div class="big-file-upload">
    <el-upload
      ref="uploadRef"
      :auto-upload="false"
      :on-change="handleFileChange"
      :show-file-list="false"
      action=""
      class="upload-demo"
    >
      <el-button type="primary">选择大文件</el-button>
    </el-upload>

    <div v-if="selectedFile" class="file-info">
      <p>文件名: {{ selectedFile.name }}</p>
      <p>文件大小: {{ formatFileSize(selectedFile.size) }}</p>
      <p>上传进度: {{ uploadProgress }}%</p>
      <el-progress :percentage="uploadProgress" :status="uploadStatus" />
      <div class="upload-actions">
        <el-button type="primary" :loading="uploading" :disabled="uploading" @click="startUpload">
          {{ uploading ? '上传中...' : '开始上传' }}
        </el-button>
        <el-button @click="cancelUpload" :disabled="!uploading">
          取消上传
        </el-button>
      </div>
    </div>

    <div v-if="uploadResult" class="upload-result">
      <el-alert
        title="上传成功"
        type="success"
        :description="`文件地址: ${uploadResult.fileUrl}`"
        show-icon
        closable
      />
    </div>
  </div>
</template>

<script setup>
import { ref, computed } from 'vue';
import { ElMessage, ElMessageBox } from 'element-plus';
import SparkMD5 from 'spark-md5';
import { checkFileExists, completeUpload, initUpload } from '@/api/system/oss';
import axios from 'axios';
import { getToken } from '@/utils/auth';

// reactive state
const uploadRef = ref();
const selectedFile = ref(null);
const uploading = ref(false);
const uploadProgress = ref(0);
const uploadResult = ref(null);
const uploadStatus = ref('');

// configuration
const CHUNK_SIZE = 5 * 1024 * 1024; // 5MB per chunk
const MAX_CONCURRENT = 3;           // max concurrent chunk uploads

// computed
const uploadStatusText = computed(() => {
  if (uploadProgress.value === 100) return 'success';
  if (uploading.value) return 'success';
  return undefined;
});

// handlers
const handleFileChange = (file) => {
  selectedFile.value = file.raw;
  uploadProgress.value = 0;
  uploadResult.value = null;
  uploadStatus.value = '';
  // console.log(selectedFile.value);
};

const formatFileSize = (bytes) => {
  if (bytes === 0) return '0 B';
  const k = 1024;
  const sizes = ['B', 'KB', 'MB', 'GB'];
  const i = Math.floor(Math.log(bytes) / Math.log(k));
  return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
};

const startUpload = async () => {
  if (!selectedFile.value) {
    ElMessage.warning('请先选择文件');
    return;
  }
  uploading.value = true;
  uploadProgress.value = 0;
  uploadResult.value = null;
  try {
    // 1. compute the file MD5
    const fileMd5 = await calculateFileMD5(selectedFile.value);
    // 2. check whether the file already exists (instant upload)
    const checkResult = await checkFileExists({ fileMd5: fileMd5 });
    if (checkResult.code === 200 && checkResult.msg === '文件已存在') {
      ElMessage.success('文件已存在，无需重复上传');
      uploadResult.value = { fileUrl: checkResult.data };
      uploadProgress.value = 100;
      uploading.value = false;
      return;
    }
    // 3. initialize the upload
    const initResult = await initUpload({
      fileMd5: fileMd5,
      fileName: selectedFile.value.name,
      fileSize: selectedFile.value.size,
      fileContentType: selectedFile.value.type
    });
    if (initResult.code !== 200 && initResult.msg === '初始化失败') {
      ElMessage.error('文件上传初始化失败，请联系管理员');
    }
    const uploadId = initResult.data;
    // 4. upload the chunks
    await uploadChunks(selectedFile.value, fileMd5, uploadId);
    // 5. complete the upload
    const completeResult = await completeUpload(fileMd5, uploadId);
    console.log(completeResult);
    if (completeResult.code === 200) {
      ElMessage.success('文件上传成功');
      uploadResult.value = completeResult.data;
      uploadProgress.value = 100;
    } else {
      ElMessage.error('文件上传失败，请联系管理员!');
    }
  } catch (error) {
    // console.error('上传失败:', error)
    ElMessage.error('文件上传失败');
    uploadStatus.value = 'exception';
  } finally {
    uploading.value = false;
  }
};

const cancelUpload = () => {
  uploading.value = false;
  ElMessage.info('上传已取消');
};

// utilities
const calculateFileMD5 = (file) => {
  return new Promise((resolve, reject) => {
    const spark = new SparkMD5.ArrayBuffer();
    const fileReader = new FileReader();
    const chunkSize = 2 * 1024 * 1024; // 2MB chunks for the MD5 calculation
    const chunks = Math.ceil(file.size / chunkSize);
    let currentChunk = 0;

    fileReader.onload = (e) => {
      spark.append(e.target.result);
      currentChunk++;
      if (currentChunk < chunks) {
        loadNext();
      } else {
        resolve(spark.end());
      }
    };

    fileReader.onerror = () => {
      reject(new Error('文件读取失败'));
    };

    const loadNext = () => {
      const start = currentChunk * chunkSize;
      const end = Math.min(start + chunkSize, file.size);
      fileReader.readAsArrayBuffer(file.slice(start, end));
    };

    loadNext();
  });
};

const uploadChunks = async (file, fileMd5, uploadId) => {
  const totalChunks = Math.ceil(file.size / CHUNK_SIZE);
  // const uploadedChunks = new Set();

  // build one upload task per chunk
  const uploadTasks = [];
  for (let i = 0; i < totalChunks; i++) {
    uploadTasks.push(() => uploadChunk(file, file.name, fileMd5, uploadId, i + 1, totalChunks));
  }

  // limit the number of concurrent uploads
  const executing = new Set();
  const results = [];
  for (const task of uploadTasks) {
    const p = task().then(result => {
      executing.delete(p);
      return result;
    });
    executing.add(p);
    results.push(p);
    if (executing.size >= MAX_CONCURRENT) {
      await Promise.race(executing);
    }
  }
  await Promise.all(results);
};

const uploadChunk = async (file, fileName, fileMd5, uploadId, chunkNumber, totalChunks) => {
  // console.log('开始上传');
  const start = (chunkNumber - 1) * CHUNK_SIZE;        // start offset of this chunk
  const end = Math.min(start + CHUNK_SIZE, file.size); // end offset of this chunk
  const chunk = file.slice(start, end);                // slice the chunk out of the file

  const formData = new FormData();
  formData.append('file', chunk);
  formData.append('uploadId', uploadId);
  formData.append('fileMd5', fileMd5);
  formData.append('fileName', fileName);
  formData.append('chunkNumber', chunkNumber);
  formData.append('totalChunks', totalChunks);

  try {
    axios.defaults.headers['Authorization'] = 'Bearer ' + getToken();
    // create an axios instance
    const service = axios.create({
      // baseURL is the common prefix of every request URL
      baseURL: import.meta.env.VITE_APP_BASE_API,
      // timeout
      timeout: 10000
    });
    const response = await service.post('/system/big/oss/chunk', formData, {
      headers: { 'Content-Type': 'multipart/form-data' }
    });
    // console.log(response);
    if (response.data.code === 200) {
      // update the progress bar
      const progress = Math.round((chunkNumber / totalChunks) * 100);
      uploadProgress.value = Math.max(uploadProgress.value, progress);
      return response.data;
    } else {
      ElMessage.error('文件分片上传失败!');
    }
  } catch (error) {
    console.error(`分片 ${chunkNumber} 上传失败:`, error);
    throw error;
  }
};
</script>

<style scoped>
/*.big-file-upload {
  padding: 20px;
}*/
.file-info {
  margin-top: 20px;
  padding: 15px;
  border: 1px solid #e4e7ed;
  border-radius: 4px;
}
.upload-actions {
  margin-top: 15px;
}
.upload-result {
  margin-top: 20px;
}
</style>
```
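The component imports `checkFileExists`, `initUpload`, and `completeUpload` from `@/api/system/oss`, which the post does not show. A minimal sketch of what those wrappers could look like, assuming the project's shared `@/utils/request` axios wrapper and the controller routes defined above (the function signatures and payload shapes are assumptions inferred from how the component calls them):

```js
// Hypothetical additions to the @/api/system/oss module; routes match SysBigOssController,
// while the payload shapes are only inferred from the component above.
import request from '@/utils/request';

// check whether a file with this MD5 has already been uploaded (instant upload)
export function checkFileExists(data) {
  return request({ url: '/system/big/oss/check', method: 'post', data });
}

// initialize the multipart upload; data = { fileMd5, fileName, fileSize, fileContentType }
export function initUpload(data) {
  return request({ url: '/system/big/oss/init', method: 'post', data });
}

// ask the server to merge the uploaded chunks
export function completeUpload(fileMd5, uploadId) {
  return request({ url: '/system/big/oss/complete', method: 'post', data: { fileMd5, uploadId } });
}
```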