MinIO Multipart (Chunked) File Upload with Upload Progress
Straight to the good stuff.
MinIO's upload logic is that any file larger than 5 MB is uploaded in parts (multipart upload). The SDK does not expose this capability publicly, but since version 8.0 it can be reached through a workaround.
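That workaround (assuming the backend uses the MinIO Java SDK, where versions 8.0–8.3 keep the low-level multipart calls as protected methods on the client; 8.4+ moves them to the ...Async methods of MinioAsyncClient) is usually to subclass the client and re-expose those methods. A minimal sketch, not the referenced blog's actual code; the class name CustomMinioClient and the wrapper method names are placeholders:

import com.google.common.collect.Multimap;
import io.minio.CreateMultipartUploadResponse;
import io.minio.ListPartsResponse;
import io.minio.MinioClient;
import io.minio.ObjectWriteResponse;
import io.minio.messages.Part;

// Re-expose the protected multipart methods of the MinIO Java SDK (8.0–8.3 style).
public class CustomMinioClient extends MinioClient {

    public CustomMinioClient(MinioClient client) {
        super(client);
    }

    // Start a multipart upload and return its uploadId.
    public String initMultiPartUpload(String bucket, String region, String objectName,
                                      Multimap<String, String> headers,
                                      Multimap<String, String> extraQueryParams) throws Exception {
        CreateMultipartUploadResponse response =
                this.createMultipartUpload(bucket, region, objectName, headers, extraQueryParams);
        return response.result().uploadId();
    }

    // List the parts the server has already received for an uploadId (needed for their ETags).
    public ListPartsResponse listMultipart(String bucket, String region, String objectName,
                                           Integer maxParts, Integer partNumberMarker, String uploadId,
                                           Multimap<String, String> extraHeaders,
                                           Multimap<String, String> extraQueryParams) throws Exception {
        return this.listParts(bucket, region, objectName, maxParts, partNumberMarker,
                uploadId, extraHeaders, extraQueryParams);
    }

    // Merge all uploaded parts into the final object.
    public ObjectWriteResponse mergeMultipartUpload(String bucket, String region, String objectName,
                                                    String uploadId, Part[] parts,
                                                    Multimap<String, String> extraHeaders,
                                                    Multimap<String, String> extraQueryParams) throws Exception {
        return this.completeMultipartUpload(bucket, region, objectName, uploadId, parts,
                extraHeaders, extraQueryParams);
    }
}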


Core logic
Backend code
My backend code is adapted from another blog post, which covers that side thoroughly; tweak it a little and it works.
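Since that blog's code is not reproduced here, below is a rough sketch of what the two endpoints the frontend calls (/multipart/init and /multipart/complete) typically do on top of the CustomMinioClient above: start the multipart upload, presign one PUT URL per part, and later list the uploaded parts and merge them. MultipartService, the bucket name, and the response keys (uploadId, objectName, uploadUrls, matching what the frontend reads) are my placeholders, not the referenced backend; a real backend may also skip multipart for small single-part files, which is why the frontend checks response.uploadId before merging.

import com.google.common.collect.HashMultimap;
import io.minio.GetPresignedObjectUrlArgs;
import io.minio.ListPartsResponse;
import io.minio.http.Method;
import io.minio.messages.Part;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

// Sketch of the service behind /multipart/init and /multipart/complete.
public class MultipartService {

    private final CustomMinioClient client;   // wraps the regular MinioClient
    private final String bucket = "files";    // placeholder bucket name

    public MultipartService(CustomMinioClient client) {
        this.client = client;
    }

    // /multipart/init: start the multipart upload and presign one PUT URL per part.
    public Map<String, Object> init(String objectName, int partCount) throws Exception {
        String uploadId = client.initMultiPartUpload(
                bucket, null, objectName, HashMultimap.create(), HashMultimap.create());

        List<String> uploadUrls = new ArrayList<>();
        for (int partNumber = 1; partNumber <= partCount; partNumber++) {
            Map<String, String> query = new HashMap<>();
            query.put("uploadId", uploadId);
            query.put("partNumber", String.valueOf(partNumber));
            uploadUrls.add(client.getPresignedObjectUrl(
                    GetPresignedObjectUrlArgs.builder()
                            .method(Method.PUT)
                            .bucket(bucket)
                            .object(objectName)
                            .expiry(1, TimeUnit.DAYS)
                            .extraQueryParams(query)
                            .build()));
        }

        Map<String, Object> result = new HashMap<>();
        result.put("uploadId", uploadId);
        result.put("objectName", objectName);
        result.put("uploadUrls", uploadUrls);
        return result;
    }

    // /multipart/complete: collect the uploaded parts' ETags and merge them.
    public void complete(String objectName, String uploadId) throws Exception {
        ListPartsResponse listed = client.listMultipart(
                bucket, null, objectName, 1000, 0, uploadId, null, null);
        List<Part> partList = listed.result().partList();
        Part[] parts = new Part[partList.size()];
        for (int i = 0; i < partList.size(); i++) {
            parts[i] = new Part(partList.get(i).partNumber(), partList.get(i).etag());
        }
        client.mergeMultipartUpload(bucket, null, objectName, uploadId, parts, null, null);
    }
}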
Frontend code
import {init, mergeMultipartUpload, upload} from "@/net/api";
import SparkMD5 from 'spark-md5'

const CHUNK_SIZE = 5 * 1024 * 1024

export let files = []

// Slice each file, compute its MD5, ask the backend for presigned part URLs,
// upload the parts in parallel, then ask the backend to merge them.
const addFiles = async (f, roomId, cb) => {
    for (let file of f) {
        const data = {
            // file name
            name: file.name + '.' + file.extension,
            // file type
            type: file.type,
            // file MD5
            md5: '',
            // chunk list
            chunks: [],
            // upload progress (percentage)
            percentage: 0,
            // uploaded size (formatted)
            uploaded: '',
            // upload start time
            uploadTime: 0,
            // upload speed
            speed: 0,
            // chunking progress
            chunkedProgress: 0,
            // total size in bytes
            fileSizeByte: file.size,
            // total size (formatted)
            fileSize: formatSize(file.size),
            // original file
            file: file,
            // url
            url: '',
        }
        await inputChange(data, (list, len) => {
            const listLen = list.length
            data.chunkedProgress = listLen <= 0 ? 0 : Math.round((listLen / len) * 10000) / 100.0
        })
        console.log('Chunking finished:', data.md5)
        const response = await init({
            filename: file.name + '.' + file.extension,
            partCount: data.chunks.length,
            md5: data.md5
        })
        const uploadUrls = response.uploadUrls
        const promises = []
        data.uploadTime = new Date().getTime()
        console.log('Upload started:', data.uploadTime)
        for (let i = 0; i < uploadUrls.length; i++) {
            promises.push(new Promise((resolve) => {
                upload(uploadUrls[i], data.chunks[i].file, (loaded, total) => {
                    data.chunks[i].uploadProgress = loaded
                    data.chunks[i].total = total
                }).then(() => {
                    resolve()
                })
            }))
        }
        const t = setInterval(() => {
            countSpeed(data)
        }, 300)
        Promise.all(promises).then(async () => {
            files.push(data)
            clearInterval(t)
            if (response.uploadId) {
                await mergeMultipartUpload({
                    uploadId: response.uploadId,
                    objectName: response.objectName
                })
                console.log('File merge finished!')
            }
            data.url = response.objectName
            if (files.length === f.length) {
                files = []
                cb({...data.file, url: data.url}, true)
            } else {
                cb({...data.file, url: data.url}, false)
            }
        })
    }
}

// Aggregate per-chunk progress into the overall percentage, uploaded size and speed.
const countSpeed = (data) => {
    let uploaded = 0
    data.chunks.forEach(x => {
        uploaded += x.uploadProgress
    })
    const useTime = new Date().getTime() - data.uploadTime
    const speed = uploaded / (useTime / 1000)
    data.percentage = uploaded <= 0 ? 0 : Math.round((uploaded / data.fileSizeByte) * 10000) / 100.0
    data.uploaded = formatSize(uploaded)
    data.speed = formatSize(speed) + '/s'
    console.log('Total size:', data.fileSize, ' Speed:', data.speed, ' Percentage:', data.percentage, ' Uploaded:', data.uploaded)
}

// Slice the file and compute its MD5.
const inputChange = async (file, cb) => {
    if (file) {
        const fileChunkList = await createFileChunk(file.file, (data, size) => {
            cb(data, size)
        })
        file.md5 = await getFileChunkMd5(fileChunkList)
        file.chunks = fileChunkList
    }
}

// Split the raw blob into CHUNK_SIZE pieces.
const createFileChunk = async (file, cb) => {
    const list = []
    const type = file.type
    let size = 0
    const len = Math.ceil(file.size / CHUNK_SIZE)
    while (size < file.size) {
        const data = {
            file: file.blob.slice(size, size + CHUNK_SIZE),
            type: type,
            uploadProgress: 0,
            total: 0
        }
        list.push(data)
        size += CHUNK_SIZE
        cb(list, len)
    }
    return list
}

// Compute the whole-file MD5 incrementally over the chunk list with SparkMD5.
const getFileChunkMd5 = (fileChunkList) => {
    return new Promise((resolve) => {
        // total number of chunks
        const chunkSize = fileChunkList.length
        // index of the chunk currently being read
        let currentChunk = 0
        // SparkMD5 ArrayBuffer instance
        const spark = new SparkMD5.ArrayBuffer()
        const fileReader = new FileReader()
        fileReader.onload = (e) => {
            try {
                spark.append(e.target.result)
            } catch (error) {
                console.log('MD5 error at chunk: ' + currentChunk)
            }
            currentChunk++
            if (currentChunk < chunkSize) {
                loadNext()
            } else {
                resolve(spark.end())
            }
        }
        fileReader.onerror = function () {
            console.warn('MD5: file read error')
        }
        function loadNext() {
            fileReader.readAsArrayBuffer(fileChunkList[currentChunk].file)
        }
        loadNext()
    })
}

// Human-readable size.
const formatSize = (size) => {
    if (size < 1024) {
        return size.toFixed(0) + ' bytes'
    } else if (size < 1024 * 1024) {
        return (size / 1024.0).toFixed(0) + ' KB'
    } else if (size < 1024 * 1024 * 1024) {
        return (size / 1024.0 / 1024.0).toFixed(1) + ' MB'
    } else {
        return (size / 1024.0 / 1024.0 / 1024.0).toFixed(1) + ' GB'
    }
}

export {
    addFiles
}
Request code
import request from "@/utils/request";

export function init(data) {
    return request({
        url: '/multipart/init',
        method: 'post',
        data: data
    })
}

export function upload(url, data, cb) {
    return request({
        url: url,
        method: 'put',
        onUploadProgress: (event) => {
            cb(event.loaded, event.total)
        },
        headers: {
            'Content-Type': 'application/octet-stream'
        },
        data: data
    })
}

export function mergeMultipartUpload(data) {
    return request({
        url: '/multipart/complete',
        method: 'put',
        data: data
    })
}
Usage example: multiple files can be passed in; roomId is just an identifier from my own business logic. After each file finishes uploading, the callback receives the current file and a flag indicating whether all files are done. Tweak it to fit your needs.
await addFiles(files, roomId, (file, isOver) => {
    sendFileMessage({
        name: file.name + '.' + file.extension,
        size: file.size,
        type: file.type,
        url: file.url,
    }, roomId, isOver)
})
Recommended: an open-source IM project by the author.
信使 (Messenger) — the core code shown in this post can be found there.

