前端大文件分片上传
前言
分片上传(Chunked Upload)是前端处理大文件上传的核心技术,能有效提升上传稳定性、支持断点续传、避免内存溢出、提升用户体验。下面通过以下 5 个核心步骤一步步实现。
✅ 第一步:前端文件分片(切片)
目标:将大文件按固定大小切分成多个小块(chunks)。
// Split a large file into fixed-size Blob chunks (default: 5 MB each).
// Blob.slice is cheap — it returns views onto the file, not data copies.
function createChunks(file, chunkSize = 5 * 1024 * 1024) {
  const chunks = [];
  for (let offset = 0; offset < file.size; ) {
    const next = Math.min(offset + chunkSize, file.size);
    chunks.push(file.slice(offset, next));
    offset = next;
  }
  return chunks;
}
✅ 第二步:为每个分片生成唯一标识(用于服务端识别)
目标:确保每个分片可被服务端唯一识别并关联到原文件。
// Grab the picked file, fingerprint it, and build one upload task per chunk.
const file = e.target.files[0];
// Whole-file content hash (e.g. via SparkMD5) — identifies the file server-side.
const fileHash = await calculateFileHash(file);
const chunks = createChunks(file);
// Each task carries everything the server needs to store and later merge the piece.
const uploadTasks = chunks.map((chunk, index) => ({
  chunk,
  chunkName: `${fileHash}-${index}`, // e.g. "a1b2c3-0", "a1b2c3-1"
  index,
  total: chunks.length,
  fileHash, // global file identifier
}));
💡 关键点:
文件级唯一 ID:可用文件内容 hash(如 SparkMD5)或 file.lastModified + file.name(后者不保证唯一,仅适合简易场景;内容 hash 更可靠)。
分片命名规范:{fileId}-{index},便于服务端排序合并。
✅ 第三步:并发上传分片(带重试机制)
目标:高效上传所有分片,并处理失败重试。
/**
 * Upload one chunk, retrying on failure.
 *
 * Bug fix: `fetch` only rejects on network failure — an HTTP 4xx/5xx
 * response resolves normally, so server errors used to count as success
 * and were never retried. We now check `res.ok` and treat a non-2xx
 * status as a failure eligible for retry (consistent with the full demo
 * implementation later in this article).
 *
 * @param {{chunk: Blob, chunkName: string, fileHash: string, index: number}} chunkInfo
 * @param {number} retry - remaining retry attempts (default 3)
 * @throws {Error} when every attempt fails
 */
async function uploadChunk(chunkInfo, retry = 3) {
  const formData = new FormData();
  formData.append('chunk', chunkInfo.chunk);
  formData.append('chunkName', chunkInfo.chunkName);
  formData.append('fileHash', chunkInfo.fileHash);
  formData.append('index', chunkInfo.index);
  try {
    const res = await fetch('/upload-chunk', {
      method: 'POST',
      body: formData,
    });
    if (!res.ok) throw new Error(`HTTP ${res.status}`); // surface server errors so they retry
  } catch (err) {
    if (retry > 0) {
      await new Promise(r => setTimeout(r, 1000)); // back off 1 s, then retry
      return uploadChunk(chunkInfo, retry - 1);
    }
    throw err;
  }
}
// Concurrent upload (bounded concurrency, e.g. 3 at a time)
// Upload every task in fixed-size batches to cap concurrency (default 3).
// A batch must fully settle before the next one starts; any failure
// (after uploadChunk's own retries) rejects the whole run.
async function uploadAllChunks(uploadTasks, concurrency = 3) {
  const results = [];
  let cursor = 0;
  while (cursor < uploadTasks.length) {
    const batch = uploadTasks.slice(cursor, cursor + concurrency);
    await Promise.all(batch.map((task) => uploadChunk(task)));
    results.push(...batch);
    cursor += concurrency;
  }
  return results;
}
✅ 第四步:通知服务端合并分片
目标:所有分片上传成功后,触发服务端合并。
// Once every chunk has been uploaded, ask the server to stitch them together.
await fetch('/merge-chunks', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    fileHash,
    fileName: file.name,
    totalChunks: chunks.length,
  }),
});
✅ 第五步:支持断点续传(可选但重要)
目标:上传中断后,下次可从中断处继续。
实现思路:
- 上传前,先询问服务端:“这个 fileHash 已经上传了哪些分片?”
- 前端只上传缺失的分片。
// Before uploading, ask the server which chunk indexes it already holds.
const response = await fetch(`/check-uploaded?fileHash=${fileHash}`);
const uploadedChunks = await response.json(); // e.g. [0, 1, 2]
// Keep only the chunks the server is still missing.
const pendingTasks = uploadTasks.filter(
  (task) => !uploadedChunks.includes(task.index)
);
关键点:
服务端需记录每个 fileHash 的已上传分片索引。
前端根据返回结果跳过已上传分片。
总结
📌 总结:5 步实现分片上传
| 步骤 | 关键逻辑 |
|---|---|
| 1️⃣ 切片 | File.slice() 按固定大小分割 |
| 2️⃣ 标识 | 生成文件唯一 ID + 分片命名 |
| 3️⃣ 上传 | 并发 + 重试上传每个分片 |
| 4️⃣ 合并 | 通知服务端按序合并 |
| 5️⃣ 断点续传 | 查询已传分片,跳过重复 |
demo案例
1.html结构
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <title>分片上传示例</title>
</head>
<body>
  <input type="file" id="fileInput" />
  <div id="progress">未上传</div>
  <button id="uploadBtn" disabled>开始上传</button>
  <!-- SparkMD5 via CDN -->
  <script src="https://cdn.jsdelivr.net/npm/spark-md5@3.0.2/spark-md5.min.js"></script>
  <script src="upload.js"></script>
</body>
</html>
2. JavaScript 完整逻辑
// ==================== Configuration ====================
const CHUNK_SIZE = 5 * 1024 * 1024; // 5 MB per chunk
const CONCURRENCY = 3;              // parallel uploads per batch
const RETRY_TIMES = 3;              // retry attempts per chunk

// Currently selected file (null until the user picks one)
let selectedFile = null;

// ==================== DOM handles ====================
const fileInput = document.getElementById('fileInput');
const uploadBtn = document.getElementById('uploadBtn');
const progressDiv = document.getElementById('progress');

// Enable the upload button only once a file has been chosen.
fileInput.addEventListener('change', (e) => {
  selectedFile = e.target.files[0];
  uploadBtn.disabled = !selectedFile;
});

// Kick off the upload pipeline on click.
uploadBtn.addEventListener('click', startUpload);

// ==================== Core functions ====================
// 1. Whole-file MD5 (unique file identifier)
// Compute an MD5 of the whole file by streaming it through SparkMD5 one
// CHUNK_SIZE slice at a time, so large files never sit in memory at once.
// Resolves with the hex digest, or a timestamp string if reading fails.
function calculateFileHash(file) {
  return new Promise((resolve) => {
    const spark = new SparkMD5.ArrayBuffer();
    const reader = new FileReader();
    const totalChunks = Math.ceil(file.size / CHUNK_SIZE);
    let chunkIndex = 0;

    // Read the next slice; reader.onload folds it into the running hash.
    const readChunk = () => {
      const start = chunkIndex * CHUNK_SIZE;
      const end = Math.min(start + CHUNK_SIZE, file.size);
      reader.readAsArrayBuffer(file.slice(start, end));
    };

    reader.onload = (e) => {
      spark.append(e.target.result);
      chunkIndex++;
      if (chunkIndex < totalChunks) {
        readChunk();
      } else {
        resolve(spark.end());
      }
    };

    // Best-effort fallback: a timestamp is not content-addressed, but it
    // keeps the upload flow working when the file cannot be read.
    reader.onerror = () => resolve('' + new Date().getTime());

    readChunk();
  });
}
// 2. Split the file into chunks
// Cut the file into chunkSize-byte Blob slices (views onto the file, not copies).
function createChunks(file, chunkSize = CHUNK_SIZE) {
  const total = Math.ceil(file.size / chunkSize);
  return Array.from({ length: total }, (_, i) =>
    file.slice(i * chunkSize, Math.min((i + 1) * chunkSize, file.size))
  );
}
// 3. Upload a single chunk (with retries)
// Upload one chunk, retrying up to `retry` more times on network errors
// or non-2xx responses, with a 1 s pause between attempts.
async function uploadChunk(chunkInfo, retry = RETRY_TIMES) {
  const formData = new FormData();
  formData.append('chunk', chunkInfo.chunk);
  formData.append('chunkName', chunkInfo.chunkName);
  formData.append('fileHash', chunkInfo.fileHash);
  formData.append('index', chunkInfo.index);
  formData.append('fileName', chunkInfo.fileName);
  try {
    const res = await fetch('/upload-chunk', { method: 'POST', body: formData });
    if (!res.ok) throw new Error(`HTTP ${res.status}`);
  } catch (err) {
    if (retry <= 0) throw err; // retries exhausted — surface the failure
    await new Promise((wake) => setTimeout(wake, 1000)); // back off 1 s
    return uploadChunk(chunkInfo, retry - 1);
  }
}
// 4. Ask which chunks are already on the server (resumable uploads)
// Return the Set of chunk indexes the server already holds for this hash.
// Any failure degrades gracefully to "nothing uploaded yet" (empty Set).
async function getUploadedChunks(fileHash) {
  try {
    const res = await fetch(`/check-uploaded?fileHash=${fileHash}`);
    if (res.ok) {
      const payload = await res.json();
      return new Set(payload.uploadedChunks || []);
    }
  } catch (e) {
    console.warn('查询已上传分片失败,从头开始:', e);
  }
  return new Set();
}
// 5. Merge chunks
// Tell the server every chunk is uploaded so it can assemble the final file.
// Throws when the merge endpoint reports a non-2xx status.
async function mergeChunks(fileHash, fileName, totalChunks) {
  const payload = JSON.stringify({ fileHash, fileName, totalChunks });
  const res = await fetch('/merge-chunks', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: payload,
  });
  if (!res.ok) throw new Error('合并失败');
}
// 6. Main upload pipeline
// Main pipeline: hash → slice → skip already-uploaded → upload → merge.
async function startUpload() {
  if (!selectedFile) return;
  uploadBtn.disabled = true;

  // Step 1: fingerprint the file so the server can recognise it.
  progressDiv.textContent = '正在计算文件哈希...';
  const fileHash = await calculateFileHash(selectedFile);
  console.log('文件 Hash:', fileHash);

  // Step 2: slice into chunks.
  const chunks = createChunks(selectedFile);
  const total = chunks.length;

  // Step 3: resumable upload — find out what is already server-side,
  // then build tasks only for the missing chunks.
  const uploadedSet = await getUploadedChunks(fileHash);
  const uploadTasks = chunks
    .map((chunk, index) => ({
      chunk,
      chunkName: `${fileHash}-${index}`,
      index,
      fileHash,
      fileName: selectedFile.name,
    }))
    .filter((task) => !uploadedSet.has(task.index)); // skip finished chunks

  // Everything already uploaded? Jump straight to the merge.
  if (uploadTasks.length === 0) {
    progressDiv.textContent = '文件已上传,正在合并...';
    await mergeChunks(fileHash, selectedFile.name, total);
    progressDiv.textContent = '✅ 上传完成!';
    return;
  }

  // Step 4: upload the remaining chunks in bounded-concurrency batches,
  // updating the progress label after each batch settles.
  let uploadedCount = uploadedSet.size; // chunks already on the server
  progressDiv.textContent = `已跳过 ${uploadedSet.size} 片,开始上传...`;
  for (let offset = 0; offset < uploadTasks.length; offset += CONCURRENCY) {
    const batch = uploadTasks.slice(offset, offset + CONCURRENCY);
    await Promise.all(batch.map((task) => uploadChunk(task)));
    uploadedCount += batch.length;
    const percent = Math.min(100, Math.floor((uploadedCount / total) * 100));
    progressDiv.textContent = `上传中... ${percent}% (${uploadedCount}/${total})`;
  }

  // Step 5: merge.
  progressDiv.textContent = '正在合并分片...';
  await mergeChunks(fileHash, selectedFile.name, total);
  progressDiv.textContent = '✅ 上传成功!';
}
