From c18352f65c4cabd210b5e9220cafc933523ccb8e Mon Sep 17 00:00:00 2001
From: zy <276996223@qq.com>
Date: Sun, 26 Jan 2025 17:09:08 +0800
Subject: [PATCH] =?UTF-8?q?=E4=B8=8A=E4=BC=A0=E5=9B=BE=E7=89=87=E6=8E=A7?=
=?UTF-8?q?=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/components/UploadFile/src/InnerImage.vue | 501 +++++++++++++++
.../UploadFile/src/InnerUploadImg.vue | 205 +++++++
.../UploadFile/src/InnerUploader.vue | 568 ++++++++++++++++++
.../UploadFile/src/sliceFileUPload.ts | 221 +++++++
4 files changed, 1495 insertions(+)
create mode 100644 src/components/UploadFile/src/InnerImage.vue
create mode 100644 src/components/UploadFile/src/InnerUploadImg.vue
create mode 100644 src/components/UploadFile/src/InnerUploader.vue
create mode 100644 src/components/UploadFile/src/sliceFileUPload.ts
diff --git a/src/components/UploadFile/src/InnerImage.vue b/src/components/UploadFile/src/InnerImage.vue
new file mode 100644
index 0000000..c6ae50f
--- /dev/null
+++ b/src/components/UploadFile/src/InnerImage.vue
@@ -0,0 +1,501 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/components/UploadFile/src/InnerUploadImg.vue b/src/components/UploadFile/src/InnerUploadImg.vue
new file mode 100644
index 0000000..3c2a418
--- /dev/null
+++ b/src/components/UploadFile/src/InnerUploadImg.vue
@@ -0,0 +1,205 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/components/UploadFile/src/InnerUploader.vue b/src/components/UploadFile/src/InnerUploader.vue
new file mode 100644
index 0000000..fecfa50
--- /dev/null
+++ b/src/components/UploadFile/src/InnerUploader.vue
@@ -0,0 +1,568 @@
+
+
+
+
+
+
+
+ {{ file.name }}
+
+ {{ formatDate(file.createTime, 'YYYY年M月D日') }}
+
+ {{ niceBytes(file.size) }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/components/UploadFile/src/sliceFileUPload.ts b/src/components/UploadFile/src/sliceFileUPload.ts
new file mode 100644
index 0000000..239d847
--- /dev/null
+++ b/src/components/UploadFile/src/sliceFileUPload.ts
@@ -0,0 +1,221 @@
+import SparkMD5 from 'spark-md5'
+import * as FileApi from '@/api/infra/file'
+import axios from 'axios'
+import Queue from 'promise-queue-plus'
+
+/**
+ * Chunked (multipart) file upload.
+ *
+ * Resolves an upload task for the file (resuming an existing one when the
+ * server already knows this file's MD5), uploads the missing chunks, then
+ * asks the server to merge them into the final file.
+ *
+ * @param options upload parameters:
+ * {
+ *   identifier: unique file identifier,
+ *   option: file object,
+ *   configId: upload-configuration id,
+ *   url: upload URL,
+ *   path: file name,
+ *   name: file name,
+ *   size: file size,
+ *   file: the File object
+ * }
+ * @returns the stored path when the file is already fully uploaded, the
+ *          merge-API result after a successful upload, or undefined when a
+ *          chunk failed or no task could be obtained.
+ */
+export const sliceFileUpload = async (options: any) => {
+  const file = options.file
+  // Fetch an existing upload task (resume) or initialize a new one.
+  const task = await getTaskInfo(options)
+  if (task) {
+    const { finished, path, taskRecord } = task
+    const { fileIdentifier: identifier } = taskRecord
+    if (finished) {
+      // Server already holds the merged file — return its path directly.
+      return path
+    } else {
+      // Upload every chunk the server does not have yet; resolves with the
+      // list of failed chunks (empty array means full success).
+      const errorList: any = await handleUpload(file, taskRecord, options)
+      if (errorList.length > 0) {
+        // NOTE(review): failures return undefined, which callers cannot
+        // distinguish from the "no task" case — consider rejecting instead.
+        return
+      }
+      // All chunks are on the server — merge them into the final file.
+      return await FileApi.merge({
+        identifier,
+        configId: options.configId,
+        path: options.fileName,
+        name: options.fileName,
+        url: options.url,
+        size: file.size,
+        chunkSize: DEFAULT_SIZE
+      })
+    }
+  }
+}
+
+// Per-file upload queues, keyed by file uid, so an in-flight upload can be
+// stopped when its file is removed from the list.
+// NOTE(review): `ref({}).value` immediately unwraps the ref, leaving a plain
+// object with no reactivity; `ref` is presumably auto-imported — verify.
+const fileUploadChunkQueue = ref({}).value
+
+// Chunk size for splitting the file: 100 MiB per part.
+const DEFAULT_SIZE = 100 * 1024 * 1024
+
+// Incrementally compute the file's MD5 digest (used as the upload task
+// identifier) by feeding fixed-size slices to SparkMD5, so large files are
+// never read into memory in one piece. Resolves with the hex digest string.
+const md5 = (file: any, chunkSize = DEFAULT_SIZE) => {
+  return new Promise((resolve, reject) => {
+    const startMs = new Date().getTime()
+    // Vendor-prefixed slice fallbacks for legacy browsers.
+    const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice
+    const chunks = Math.ceil(file.size / chunkSize)
+    let currentChunk = 0
+    const spark = new SparkMD5.ArrayBuffer() // accumulates ArrayBuffer chunks into the digest
+    const fileReader = new FileReader() // reads each slice of the file
+    fileReader.onload = function (e: any) {
+      spark.append(e.target.result)
+      currentChunk++
+      if (currentChunk < chunks) {
+        loadNext()
+      } else {
+        // Finish the MD5 computation; returns the hexadecimal result.
+        const md5 = spark.end()
+        // console.log('文件md5计算结束,总耗时:', (new Date().getTime() - startMs) / 1000, 's')
+        resolve(md5)
+      }
+    }
+    fileReader.onerror = function (e) {
+      reject(e)
+    }
+
+    // Read the next slice; the final slice is clamped to the file size.
+    function loadNext() {
+      // console.log('当前part number:', currentChunk, '总块数:', chunks)
+      const start = currentChunk * chunkSize
+      let end = start + chunkSize
+      end > file.size && (end = file.size)
+      fileReader.readAsArrayBuffer(blobSlice.call(file, start, end))
+    }
+    loadNext()
+  })
+}
+
+/**
+ * Fetch the upload task for this file, initializing one when none exists.
+ *
+ * The task is identified by the file's MD5, so re-uploading identical
+ * content resumes the previous task instead of starting over.
+ */
+const getTaskInfo = async (option: any) => {
+  let task
+  // The MD5 of the whole file uniquely identifies the upload task.
+  const identifier = await md5(option.file)
+  const initTaskData = {
+    identifier,
+    path: option.fileName,
+    name: option.fileName,
+    url: option.url,
+    size: option.file.size,
+    configId: option.configId,
+    chunkSize: DEFAULT_SIZE
+  }
+  const res = await FileApi.taskInfo(initTaskData)
+  task = res
+  if (!task) {
+    // No existing task — create one server-side before uploading chunks.
+    const initres = await FileApi.initTask(initTaskData)
+    task = initres
+  }
+  return task
+}
+
+/**
+ * Upload every chunk that is not yet on the server. Not entered when the
+ * file is already fully uploaded and merged.
+ *
+ * Resolves with the list of rejection reasons for chunks that failed after
+ * all retries (an empty array means every chunk uploaded successfully).
+ */
+const handleUpload = (file, taskRecord, options) => {
+  let lastUploadedSize = 0 // bytes uploaded in earlier sessions (resume)
+  let uploadedSize = 0 // bytes counted as uploaded so far (incl. resumed parts)
+  const totalSize = file.size || 0 // total file size
+  const startMs = new Date().getTime() // when this upload session started
+  const { exitPartList, chunkSize, chunkNum, fileIdentifier } = taskRecord
+
+  // Average speed (bytes/s) of this session only: bytes carried over from a
+  // previous session are subtracted out.
+  const getSpeed = () => {
+    // uploaded total - previously uploaded total (resume) = bytes uploaded this session
+    const intervalSize = uploadedSize - lastUploadedSize
+    const nowMs = new Date().getTime()
+    // elapsed time (s)
+    const intervalTime = (nowMs - startMs) / 1000
+    return intervalSize / intervalTime
+  }
+
+  // Upload a single chunk: request a pre-signed URL for this part number,
+  // then PUT the blob to it. Resolves with the part number and size.
+  const uploadNext = async (partNumber) => {
+    const start = Number(chunkSize) * (partNumber - 1)
+    const end = start + Number(chunkSize)
+    const blob = file.slice(start, end)
+    const data = await FileApi.preSignUrl({
+      identifier: fileIdentifier,
+      partNumber: partNumber,
+      configId: options.configId,
+      path: options.fileName,
+      name: options.fileName,
+      size: blob.size,
+      chunkSize: DEFAULT_SIZE
+    })
+    if (data) {
+      console.log('上传地址:', data)
+      await axios.request({
+        url: data,
+        method: 'PUT',
+        data: blob,
+        headers: {
+          'Content-Type': 'application/octet-stream'
+        }
+      })
+      return Promise.resolve({
+        partNumber: partNumber,
+        uploadedSize: blob.size
+      })
+    }
+    return Promise.reject(`分片${partNumber}, 获取上传地址失败`)
+  }
+
+  /**
+   * Update the upload progress.
+   * @param increment number of bytes to add to the uploaded progress
+   */
+  const updateProcess = (increment) => {
+    // NOTE(review): `new Number(...)` creates a boxed Number object (a plain
+    // Number(increment) would suffice), and the loop below advances in fixed
+    // 1000-byte steps, so uploadedSize can overshoot `increment` (and even
+    // totalSize) by up to `factor` per call — the reported percent is
+    // approximate.
+    increment = new Number(increment)
+    const { onProgress } = options
+    const factor = 1000 // advance the progress 1000 bytes at a time
+    let from = 0
+    // Grow the progress bar gradually instead of jumping a whole chunk.
+    while (from <= increment) {
+      from += factor
+      uploadedSize += factor
+      // NOTE(review): Math.round already yields an integer, so the
+      // trailing toFixed(2) adds no precision here.
+      const percent = Math.round((uploadedSize / totalSize) * 100).toFixed(2)
+      onProgress({ percent: percent })
+    }
+
+    const speed = getSpeed()
+    const remainingTime = speed != 0 ? Math.ceil((totalSize - uploadedSize) / speed) + 's' : '未知'
+    console.log('剩余大小:', (totalSize - uploadedSize) / 1024 / 1024, 'mb')
+    console.log('当前速度:', (speed / 1024 / 1024).toFixed(2), 'mbps')
+    console.log('预计完成:', remainingTime)
+  }
+
+  return new Promise((resolve) => {
+    const failArr: any = []
+    // Up to 5 chunks in flight at once; each failed chunk is retried 3 times.
+    const queue = Queue(5, {
+      retry: 3, //Number of retries
+      retryIsJump: false, //retry now?
+      workReject: function (reason: any, queue) {
+        // Record the failure; the queue keeps processing the other chunks.
+        failArr.push(reason)
+      },
+      queueEnd: function (queue) {
+        // All work done (success or retries exhausted) — report failures.
+        resolve(failArr)
+      }
+    })
+    // Remember the queue so the upload can be stopped if the file is removed.
+    fileUploadChunkQueue[file.uid] = queue
+    for (let partNumber = 1; partNumber <= chunkNum; partNumber++) {
+      const exitPart = (exitPartList || []).find((exitPart) => exitPart.partNumber == partNumber)
+      if (exitPart) {
+        // Chunk already uploaded in a previous session: count it toward the
+        // total and remember its size for the speed calculation (resume).
+        lastUploadedSize += Number(exitPart.size)
+        updateProcess(exitPart.size)
+      } else {
+        queue.push(() =>
+          uploadNext(partNumber).then((res) => {
+            // Update the progress only after this chunk finished uploading.
+            updateProcess(res.uploadedSize)
+          })
+        )
+      }
+    }
+    if (queue.getLength() == 0) {
+      // Every chunk was already uploaded but not merged — resolve so the
+      // caller proceeds straight to the merge step.
+      resolve(failArr)
+      return
+    }
+    queue.start()
+  })
+}