There are already plenty of detailed tutorials and sample code online covering the implementation details of large file uploads. As the saying goes, real knowledge comes from practice, so this article simply records how I gradually built the feature during my own development work and how I solved some of the problems I ran into along the way. If anything here is wrong, corrections are very welcome!
File chunking, merging, cleanup, and resumable upload
Chunking a file simply means calling the slice method of the File object. The file being sliced is shown in Figure 1 below, and the file object at slice time looks like Figure 2, so the slicing is done on the file object's raw property, which yields Blob data; each Blob is then sent to the backend upload API. Once all chunks are uploaded they are merged and the chunk files are deleted (if the merge API returns an array, some chunks failed to upload and must be re-uploaded). The code below is an excerpt from my component; files is the file object coming from the upload component, and its raw property holds the file that actually gets sliced.
import { reactive } from 'vue';
import SparkMD5 from 'spark-md5';
import pMap from 'p-map';

const state = reactive({
  hash: '', // MD5 of the whole file, computed once and reused for every chunk
  uploading: false,
});

// Cut the native File (files.raw) into 5 MB Blobs; the last chunk may be smaller.
const chunkSize = 1024 * 1024 * 5;
const fileChunks = [];
for (let i = 0; i < files.size; i += chunkSize) {
  const chunk = files.raw.slice(i, i + chunkSize);
  fileChunks.push(chunk);
}
// Compute the MD5 of the whole file by appending each chunk to SparkMD5 in
// turn, so the file never has to be read into memory in one piece.
const calculateFileHash = (fileChunks) => {
  return new Promise((resolve) => {
    const spark = new SparkMD5.ArrayBuffer();
    function _read(i) {
      if (i >= fileChunks.length) return;
      const reader = new FileReader();
      reader.readAsArrayBuffer(fileChunks[i]);
      reader.onload = (e) => {
        spark.append(e.target.result);
        // Read the next chunk, or resolve with the final hash once all are appended.
        i < fileChunks.length - 1 ? _read(i + 1) : resolve(spark.end());
      };
    }
    _read(0);
  });
};
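As a quick sanity check, the helper can be exercised on its own; the resolved value is the same 32-character MD5 that later prefixes every chunk name (the hash in the comment is illustrative only):

// Usage sketch: compute the whole-file hash from the chunk list built above.
calculateFileHash(fileChunks).then((hash) => {
  console.log(hash); // e.g. "e99a18c428cb38d5f260853678922e03"
});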
// resSet holds the 1-based numbers of chunks that still need to be re-uploaded
// after a failed merge; it is empty on the first pass.
let resSet = [];

const mapper = async (chunk, index) => {
  // Compute the whole-file hash lazily when the first chunk is uploaded.
  if (!state.hash) {
    state.hash = await calculateFileHash(fileChunks);
  }
  // Each chunk is named "<fileHash>_<chunkNumber>"; on a re-upload pass the
  // original chunk number is taken from resSet instead of the current index.
  const chunkHash = resSet.length ? `${state.hash}_${resSet.splice(0, 1)[0]}` : `${state.hash}_${index + 1}`;
  const fd = new FormData();
  fd.append('file', chunk);
  fd.append('chunkHash', chunkHash);
  fd.append('mark', 1);
  fd.append('markVal', xxx);
  const result = await uploadReqeust(fd);
  return result;
};
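uploadReqeust is a thin request wrapper that is not shown in this excerpt. A minimal sketch, assuming axios and a hypothetical /api/upload/chunk endpoint (both the library choice and the path are illustrative, not the project's actual API):

import axios from 'axios';

// Hypothetical chunk-upload wrapper: POST the FormData and unwrap the response body.
const uploadReqeust = (fd) =>
  axios
    .post('/api/upload/chunk', fd, {
      headers: { 'Content-Type': 'multipart/form-data' },
    })
    .then((resp) => resp.data);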
// Ask the backend to merge the uploaded chunks into the final file. If the
// response carries filePath the merge succeeded; otherwise unUploadFiles lists
// the chunk numbers that never arrived and have to be uploaded again.
const merge = async (fileHash, fileName, filesPath, chunkNums) => {
  const res = await mergeFiles(fileHash, fileName, filesPath, chunkNums);
  if (res.filePath) {
    state.uploading = false;
    // arr is defined elsewhere in the component (not shown in this excerpt);
    // it is used to trim the returned path down to the directory we emit.
    const dir = res.filePath.slice(res.filePath.indexOf(arr[0]), res.filePath.length);
    ctx.emit('uploadSuccess', dir);
    // Clean up the chunk files on the server, then clear the cached hash.
    deleteSliceFile(fileHash, filesPath)
      .then(() => (state.hash = ''))
      .catch((err) => console.log(err));
    return dir;
  }
  return res.unUploadFiles;
};
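mergeFiles and deleteSliceFile are likewise thin request wrappers (reusing the axios import from the sketch above). A rough sketch, assuming hypothetical /api/upload/merge and /api/upload/chunks endpoints and the response shape merge relies on ({ filePath } on success, { unUploadFiles } when chunks are missing):

// Hypothetical wrappers for the merge and cleanup endpoints; the paths,
// parameter names and response shape are illustrative only.
const mergeFiles = (fileHash, fileName, filesPath, chunkNums) =>
  axios
    .post('/api/upload/merge', { fileHash, fileName, filesPath, chunkNums })
    .then((resp) => resp.data);

const deleteSliceFile = (fileHash, filesPath) =>
  axios.delete('/api/upload/chunks', { params: { fileHash, filesPath } });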
// Upload the chunks with at most three requests in flight at a time.
return pMap(fileChunks, mapper, { concurrency: 3, stopOnError: false })
  .then(async (res) => {
    // res is the array of per-chunk upload responses; each one is assumed to
    // carry the chunk directory in filePath, so it is read from the first.
    const result = await merge(state.hash, files.name, res[0].filePath, chunkNums);
    if (Array.isArray(result)) {
      // The merge reported missing chunks: rebuild the list from their
      // 1-based numbers, upload them again, then retry the merge.
      resSet = result;
      const reUploadList = [];
      for (const item of resSet) {
        reUploadList.push(fileChunks[item - 1]);
      }
      pMap(reUploadList, mapper, { concurrency: 3, stopOnError: false })
        .then(async (res2) => {
          await merge(state.hash, files.name, res2[0].filePath, chunkNums);
        });
    }
  })
  .catch(errFn);
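A note on the p-map options: concurrency: 3 keeps at most three chunk uploads in flight at a time, and stopOnError: false means one failed chunk does not abort the rest; p-map collects the failures and rejects with an AggregateError only after every chunk has been attempted, which is what the final catch(errFn) receives. In this excerpt the re-upload pass runs only once, so if the second merge still reports missing chunks the same cycle would have to be repeated.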