Preface
In web development, uploading large files is an essential feature. This article shows how to integrate MinIO with Spring Boot (plus a Vue frontend) to build a simple large-file upload site.
I. Project Overview
Project download
gitee:https://gitee.com/wusupweilgy/springboot-vue.git
1. Running the project
2. Tech stack
Frontend: Vue 2, Element UI components, axios
Backend: Spring Boot, MinIO, MyBatis-Plus, Redis
3. Features
- Resumable upload (breakpoint continuation)
- Chunked (multipart) upload
- Upload progress bar in the frontend
4. Flow diagrams
The upload takes three basic steps: check whether the file has already been uploaded, obtain the list of presigned upload URLs for the chunks, and upload the chunks and merge them. The process is split into three flow diagrams below to show the details, but it is really one continuous flow.
Flow 1: check whether the file is already uploaded
Flow 2: obtain the upload URLs
Flow 3: upload the chunks and merge them
Redis caches the upload task: when an upload is resumed, the previous task record is looked up to find which chunks are already stored, so only the missing chunks need to be uploaded.
MySQL stores the metadata of files whose upload has completed. If the same file is uploaded again and a record already exists, nothing is re-uploaded and the stored file information is returned directly.
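To make the flow concrete before diving into the code, here is a minimal sketch of how the frontend drives those three steps. It is a simplification of the real handler() in FileView.vue below, not the actual implementation: it assumes the checkUpload/initUpload/mergeUpload helpers from @/api/upload and the same chunkSize constant and axios import used there, and it omits progress tracking, concurrency, retries and fields such as fileType.
// Sketch: upload one file in three steps (check -> presigned URLs -> PUT chunks -> merge)
async function uploadOneFile(file, md5, totalChunks) {
  // Step 1: has this file (or part of it) been uploaded before?
  const check = await checkUpload(md5)
  if (check.code === 1) return check.data.url                            // already stored, reuse the URL
  const uploaded = check.code === 2 ? check.data.chunkUploadedList : []  // resumable: chunks already on MinIO
  // Step 2: initialize the upload and get one presigned URL per chunk
  const init = await initUpload({
    fileName: file.name, fileSize: file.size, chunkSize, chunkNum: totalChunks,
    fileMd5: md5, contentType: 'application/octet-stream', chunkUploadedList: uploaded
  })
  const { uploadId, urlList } = init.data
  // Step 3: PUT each missing chunk straight to MinIO, then ask the backend to merge
  for (let i = 0; i < totalChunks; i++) {
    if (uploaded.includes(i + 1)) continue                               // skip chunks from a previous attempt
    const start = i * chunkSize
    const blob = file.slice(start, Math.min(start + chunkSize, file.size))
    await axios.put(urlList[i], blob, { headers: { 'Content-Type': 'application/octet-stream' } })
  }
  if (uploadId === 'SingleFileUpload') return                            // single-file upload needs no merge
  const merged = await mergeUpload({
    uploadId, fileName: file.name, fileMd5: md5,
    chunkNum: urlList.length, chunkSize, fileSize: file.size
  })
  return merged.data                                                     // final file URL
}
The code below implements exactly this flow, plus progress bars, limited concurrency and multi-file handling.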
II. Frontend Implementation
1. spark-md5 is used to compute the file's MD5. Install it with:
npm install spark-md5 --save
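spark-md5's ArrayBuffer mode is what FileView.vue uses below: the file is read slice by slice with FileReader, each slice is appended to the running hash, and end() returns the hex digest, so even very large files never have to sit in memory all at once. A minimal sketch of that pattern (computeFileMd5 is a hypothetical helper; the real code lives in getFileMd5 of FileView.vue):
import SparkMD5 from 'spark-md5'

// Compute a file's MD5 incrementally, one chunk at a time
function computeFileMd5(file, chunkSize) {
  return new Promise((resolve, reject) => {
    const spark = new SparkMD5.ArrayBuffer()
    const reader = new FileReader()
    const totalChunks = Math.ceil(file.size / chunkSize)
    let currentChunk = 0
    const loadNext = () => {
      const start = currentChunk * chunkSize
      const end = Math.min(start + chunkSize, file.size)
      reader.readAsArrayBuffer(file.slice(start, end))
    }
    reader.onerror = () => reject(new Error('file read failed'))
    reader.onload = e => {
      spark.append(e.target.result)            // feed this slice into the running hash
      currentChunk++
      currentChunk < totalChunks ? loadNext() : resolve(spark.end())  // end() returns the hex MD5
    }
    loadNext()
  })
}
The MD5 is what the backend keys on: it is the Redis key for in-progress uploads and the lookup column in MySQL for completed ones.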
2.FileView.vue
<template>
<div class="container">
<div style="display:none;">
<video width="500" height="240" controls id="upvideo">
</video>
</div>
<h2>上传示例</h2>
<el-upload class="upload-demo" ref="upload" action="https://jsonplaceholder.typicode.com/posts/"
:on-remove="handleRemove" :on-change="handleFileChange" :file-list="uploadFileList"
:show-file-list="false"
:auto-upload="false" multiple>
<el-button slot="trigger" type="primary" plain>选择文件</el-button>
<el-button style="margin-left: 5px;" type="success" @click="handler" plain>上传</el-button>
<el-button type="danger" @click="clearFileHandler" plain>清空</el-button>
</el-upload>
<table style="margin-top: 20px">
<th style="display:inline-block;font-size: 10px;color: #909399;margin-left: 100px">
文件名
</th>
<th style="display:inline-block;;font-size: 12px;color: #909399;margin-left: 191px;">
文件大小
</th>
<th style="display:inline-block;;font-size: 12px;color: #909399;margin-left: 100px">
上传进度
</th>
<th style="display:inline-block;;font-size: 12px;color: #909399;margin-left: 155px">
状态
</th>
</table>
<!-- 文件列表 -->
<div class="file-list-wrapper">
<el-collapse>
<el-collapse-item v-for="(item, index) in uploadFileList" :key="index">
<template slot="title">
<div class="upload-file-item">
<div class="file-info-item file-name" :title="item.name">{{ item.name }}</div>
<div class="file-info-item file-size">{{ item.size | transformByte }}</div>
<div class="file-info-item file-progress">
<span class="file-progress-label"></span>
<el-progress :percentage="item.uploadProgress" class="file-progress-value"/>
</div>
<div class="file-info-item file-size"><span></span>
<el-tag v-if="item.status === '等待上传'" size="medium" type="info">等待上传</el-tag>
<el-tag v-else-if="item.status === '校验MD5'" size="medium" type="warning">校验MD5</el-tag>
<el-tag v-else-if="item.status === '正在上传'" size="medium">正在上传</el-tag>
<el-tag v-else-if="item.status === '上传成功'" size="medium" type="success">上传完成</el-tag>
<el-tag v-else size="medium">正在上传</el-tag>
<!-- <el-tag v-else size="medium" type="danger">上传错误</el-tag>-->
</div>
</div>
</template>
<div class="file-chunk-list-wrapper">
<!-- 分片列表 -->
<el-table :data="item.chunkList" max-height="400" style="width: 100%">
<el-table-column prop="chunkNumber" label="分片序号" width="180">
</el-table-column>
<el-table-column prop="progress" label="上传进度">
<template v-slot="{ row }">
<el-progress v-if="!row.status || row.progressStatus === 'normal'"
:percentage="row.progress"/>
<el-progress v-else :percentage="row.progress" :status="row.progressStatus"
:text-inside="true" :stroke-width="16"/>
</template>
</el-table-column>
<el-table-column prop="status" label="状态" width="180">
</el-table-column>
</el-table>
</div>
</el-collapse-item>
</el-collapse>
</div>
</div>
</template>
<script>
import {checkUpload, initUpload, mergeUpload, uploadFileInfo} from "@/api/upload";
import {fileSuffixTypeUtil} from "@/utils/FileUtil"
import axios from 'axios'
import SparkMD5 from 'spark-md5'
const FILE_UPLOAD_ID_KEY = 'file_upload_id'
const chunkSize = 10 * 1024 * 1024 //10mb
let currentFileIndex = 0
const FileStatus = {
wait: '等待上传',
getMd5: '校验MD5',
chip: '正在创建序列',
uploading: '正在上传',
success: '上传成功',
error: '上传错误'
}
export default {
data() {
return {
// 上传并发数
simultaneousUploads: 3,
uploadIdInfo: null,
uploadFileList: [],
}
},
methods: {
/**
* Start uploading the files in the list
*/
handler() {
const self = this
//判断文件列表是否为空
if (this.uploadFileList.length === 0) {
this.$message.error('请先选择文件')
return
}
if (currentFileIndex >= this.uploadFileList.length) {
this.$message.success("文件上传成功")
return;
}
//当前操作文件
const currentFile = this.uploadFileList[currentFileIndex]
console.log("当前操作文件:", currentFile)
//debugger
//update the file's status tag
currentFile.status = FileStatus.getMd5
currentFile.chunkUploadedList = []
//capture a video cover image (disabled)
//this.ScreenshotVideo(currentFile.raw);
// 1. compute the file MD5
this.getFileMd5(currentFile.raw, async (md5, totalChunks) => {
console.log("md5值", md5)
// 2. check whether the file has already been uploaded
const checkResult = await self.checkFileUploadedByMd5(md5)
console.log("检查是否已上传-->",checkResult)
// debugger
if (checkResult.code === 1) {
//self.$message.success(`上传成功,文件地址:${checkResult.data.url}`)
console.log('上传成功文件访问地址:' + checkResult.data.url)
currentFile.status = FileStatus.success
currentFile.uploadProgress = 100
//the file was uploaded before, so move on to the next one
currentFileIndex++;
this.handler()
return
} else if (checkResult.code === 2) { // upload already in progress
// fetch the list of chunks that have been uploaded
console.log("上传中:", checkResult)
currentFile.status = FileStatus.uploading
let chunkUploadedList = checkResult.data.chunkUploadedList
console.log("chunkUploadedList",chunkUploadedList)
currentFile.chunkUploadedList = chunkUploadedList
console.log("成功上传的分片信息", chunkUploadedList)
} else { // not uploaded yet
console.log('未上传')
}
// 3. create the chunks
currentFile.status = FileStatus.chip;
//split the file into chunks
let fileChunks = self.createFileChunk(currentFile.raw, chunkSize);
//rename the file (disabled)
//let fileName = this.getNewFileName(currentFile)
// determine the file type
//let type = currentFile.name.substring(currentFile.name.lastIndexOf(".") + 1)
let type = fileSuffixTypeUtil(currentFile.name)
let param = {
fileName: currentFile.name,
fileSize: currentFile.size,
chunkSize: chunkSize,
chunkNum: totalChunks,
fileMd5: md5,
contentType: 'application/octet-stream',
fileType: type,
//uploadId:localStorage.getItem("file_upload_id"),
chunkUploadedList: currentFile.chunkUploadedList // 1-based indexes of chunks already uploaded
}
// debugger
// 4. fetch the presigned upload URLs
let uploadIdInfoResult = await self.getFileUploadUrls(param)
//debugger
let uploadIdInfo = uploadIdInfoResult.data
console.log("获取上传url-->", uploadIdInfo)
let uploadUrls = uploadIdInfo.urlList
self.$set(currentFile, 'chunkList', [])
if (uploadUrls !== undefined) {
if (fileChunks.length !== uploadUrls.length) {
self.$message.error('文件分片上传地址获取错误')
return
}
}
fileChunks.map((chunkItem, index) => {
if(currentFile.chunkUploadedList.indexOf(index+1)!==-1){
currentFile.chunkList.push({
chunkNumber: index + 1,
chunk: chunkItem,
uploadUrl: uploadUrls[index],
progress: 100,
progressStatus:'success',
status: '上传成功'
})
}else{
currentFile.chunkList.push({
chunkNumber: index + 1,
chunk: chunkItem,
uploadUrl: uploadUrls[index],
progress: 0,
status: '—'
})
}
})
console.log("所有分片信息:",currentFile.chunkList)
let tempFileChunks = []
currentFile.chunkList.forEach((item) => {
tempFileChunks.push(item)
})
//update the status
currentFile.status = FileStatus.uploading
// remove chunks that are already uploaded from the pending list
tempFileChunks = self.processUploadChunkList(tempFileChunks)
console.log("删除已上传的分片-->", tempFileChunks)
// 5. upload the chunks
await self.uploadChunkBase(tempFileChunks)
console.log('---上传完成---')
//single-file upload or multipart upload?
if (uploadIdInfo.uploadId === "SingleFileUpload") {
console.log("单文件上传");
//update the status
currentFile.status = FileStatus.success
//advance the file index
currentFileIndex++;
//recursively upload the next file
this.handler()
return
} else {
// 6. merge the chunks
console.log("合并文件-->",currentFile)
const mergeResult =await self.mergeFile({
uploadId: uploadIdInfo.uploadId,
fileName: currentFile.name,
fileMd5: md5,
fileType: type,
chunkNum:uploadIdInfo.urlList.length,
chunkSize:chunkSize,
fileSize:currentFile.size
})
//check the merge result
if (!mergeResult.data) {
currentFile.status = FileStatus.error
self.$message.error(mergeResult.error)
} else {
localStorage.removeItem(FILE_UPLOAD_ID_KEY)
currentFile.status = FileStatus.success
console.log('文件访问地址:' + mergeResult.data)
//advance the file index
currentFileIndex++;
//recursively upload the next file
this.handler()
}
}
})
},
/**
* Save the uploaded file's information to the database
* @param {*} currentFile the uploaded file
* @param {*} fileName file name
* @param {*} url file URL
* @param {*} md5 file MD5
*/
saveFileInfoToDB(currentFile, fileName, url, md5) {
let userInfoCache = JSON.parse(localStorage.getItem('userInfo'))
let VideoFileInfo = {
userId: userInfoCache.id,
fileRealName: currentFile.name,
fileName: fileName,
fileSize: currentFile.size,
fileMd5: md5,
fileAddress: url,
// imgAddress: imgInfoUrl,
bucketName: 'video',
fileType: 'video',
}
console.log(VideoFileInfo);
uploadFileInfo(VideoFileInfo).then(res => {
console.log(res.data);
if (res.status == 200) {
this.$message.success("文件信息存储成功");
//recursively upload the next file
if (this.uploadFileList.length > currentFileIndex) {
this.handler()
}
} else {
this.$message.error("文件信息存储失败");
}
})
},
/**
* Clear the file list
*/
clearFileHandler() {
this.uploadFileList = []
this.uploadIdInfo = null
this.fileIndex = 0
currentFileIndex = 0
},
/**
* Handle a change of the upload file list
* @param {*} file
* @param {*} fileList
*/
handleFileChange(file, fileList) {
this.initFileProperties(file)
this.uploadFileList = fileList
},
//initialize per-file properties
initFileProperties(file) {
file.chunkList = []
file.status = FileStatus.wait
file.progressStatus = 'warning'
file.uploadProgress = 0
},
/**
* Handle removal of a file from the list
* @param {*} file
* @param {*} fileList
*/
handleRemove(file, fileList) {
this.uploadFileList = fileList
},
/**
* Check the format of the file to upload
* @param {*} file
*/
beforeUploadVideo(file) {
let type = file.name.substring(file.name.lastIndexOf(".") + 1);
if (
[
"mp4",
"ogg",
"flv",
"avi",
"wmv",
"rmvb"
].indexOf(type) == -1
) {
this.$message.error("请上传正确的视频格式");
return false;
}
},
getNewFileName(file, md5) {
return new Date().getTime() + file.name
//return md5+"-"+ file.name
},
/**
* Read the file chunk by chunk and compute its MD5
* @param file
* @param callback
*/
getFileMd5(file, callback) {
const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice
const fileReader = new FileReader()
// 计算分片数
const totalChunks = Math.ceil(file.size / chunkSize)
console.log('总分片数:' + totalChunks)
let currentChunk = 0
const spark = new SparkMD5.ArrayBuffer()
loadNext()
fileReader.onload = function (e) {
try {
spark.append(e.target.result)
} catch (error) {
console.log('获取Md5错误:' + currentChunk)
}
if (currentChunk < totalChunks) {
currentChunk++
loadNext()
} else {
callback(spark.end(), totalChunks)
}
}
fileReader.onerror = function () {
console.warn('读取Md5失败,文件读取错误')
}
function loadNext() {
const start = currentChunk * chunkSize
const end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize
// slice the file and read the slice as an ArrayBuffer
fileReader.readAsArrayBuffer(blobSlice.call(file, start, end))
}
},
/**
* Split the file into chunks
*/
createFileChunk(file, size = chunkSize) {
const fileChunkList = []
let count = 0
while (count < file.size) {
fileChunkList.push({
file: file.slice(count, count + size),
})
count += size
}
return fileChunkList
},
/**
* Process the pending chunk list: remove any chunk that has already been uploaded
*/
processUploadChunkList(chunkList) {
const currentFile = this.uploadFileList[currentFileIndex]
let chunkUploadedList = currentFile.chunkUploadedList
if (chunkUploadedList === undefined || chunkUploadedList === null || chunkUploadedList.length === 0) {
return chunkList
}
//
for (let i = chunkList.length - 1; i >= 0; i--) {
const chunkItem = chunkList[i]
for (let j = 0; j < chunkUploadedList.length; j++) {
if (chunkItem.chunkNumber === chunkUploadedList[j]) {
chunkList.splice(i, 1)
break
}
}
}
return chunkList
},
/**
* Upload the chunks with limited concurrency
* @param chunkList
* @returns {Promise<unknown>}
*/
uploadChunkBase(chunkList) {
const self = this
let successCount = 0
let totalChunks = chunkList.length
return new Promise((resolve, reject) => {
const handler = () => {
if (chunkList.length) {
const chunkItem = chunkList.shift()
// PUT the raw binary directly to the presigned URL; wrapping it in FormData would corrupt the stored file
axios.put(chunkItem.uploadUrl, chunkItem.chunk.file, {
// per-chunk upload progress callback
onUploadProgress: self.checkChunkUploadProgress(chunkItem),
headers: {
'Content-Type': 'application/octet-stream'
}
}).then(response => {
if (response.status === 200) {
console.log('分片:' + chunkItem.chunkNumber + ' 上传成功')
successCount++
// 继续上传下一个分片
// debugger
handler()
} else {
console.log('上传失败:' + response.status + ',' + response.statusText)
}
})
.catch(error => {
// 更新状态
console.log('分片:' + chunkItem.chunkNumber + ' 上传失败,' + error)
// 重新添加到队列中
chunkList.push(chunkItem)
handler()
})
}
if (successCount >= totalChunks) {
resolve()
}
}
// start the configured number of concurrent workers
for (let i = 0; i < this.simultaneousUploads; i++) {
handler()
}
})
},
/**
* Get the presigned URLs for direct upload
* @param fileParam
* @returns {Promise<AxiosResponse<any>>}
*/
getFileUploadUrls(fileParam) {
return initUpload(fileParam)
},
/**
* Check by MD5 whether the file has been uploaded before
* @param md5
* @returns {Promise<unknown>}
*/
checkFileUploadedByMd5(md5) {
return new Promise((resolve, reject) => {
checkUpload(md5).then(response => {
console.log("md5-->:", response);
resolve(response)
}).catch(error => {
reject(error)
})
})
},
/**
* Ask the backend to merge the uploaded chunks into the final file
*/
mergeFile(fileParam) {
const self = this;
return new Promise((resolve, reject) => {
mergeUpload(fileParam).then(response => {
console.log(response);
let data = response
if (!data) {
// merge failed: resolve an explicit error payload so the caller can show a message
resolve({ data: null, error: FileStatus.error })
} else {
data.msg = FileStatus.success
resolve(data)
}
})
// .catch(error => {
// self.$message.error('合并文件失败:' + error)
// file.status = FileStatus.error
// reject()
// })
})
},
/**
* Build the per-chunk upload progress callback
*/
checkChunkUploadProgress(item) {
return p => {
item.progress = parseInt(String((p.loaded / p.total) * 100))
console.log("进度:", this.uploadFileList[currentFileIndex].uploadProgress)
this.updateChunkUploadStatus(item)
}
},
updateChunkUploadStatus(item) {
let status = FileStatus.uploading
let progressStatus = 'normal'
if (item.progress >= 100) {
status = FileStatus.success
progressStatus = 'success'
}
let chunkIndex = item.chunkNumber - 1
let currentChunk = this.uploadFileList[currentFileIndex].chunkList[chunkIndex]
// 修改状态
currentChunk.status = status
currentChunk.progressStatus = progressStatus
// 更新状态
this.$set(this.uploadFileList[currentFileIndex].chunkList, chunkIndex, currentChunk)
// 获取文件上传进度
this.getCurrentFileProgress()
},
getCurrentFileProgress() {
const currentFile = this.uploadFileList[currentFileIndex]
if (!currentFile || !currentFile.chunkList) {
return
}
const chunkList = currentFile.chunkList
const uploadedSize = chunkList.map((item) => item.chunk.file.size * item.progress).reduce((acc, cur) => acc + cur, 0)
// progress (in %) = uploaded size / total file size, since item.progress is already a percentage
let progress = parseInt((uploadedSize / currentFile.size).toFixed(2))
// debugger
currentFile.uploadProgress = progress
this.$set(this.uploadFileList,currentFileIndex, currentFile)
},
},
filters: {
transformByte(size) {
if (!size) {
return '0B'
}
const unitSize = 1024
if (size < unitSize) {
return size + ' B'
}
// KB
if (size < Math.pow(unitSize, 2)) {
return (size / unitSize).toFixed(2) + ' KB';
}
// MB
if (size < Math.pow(unitSize, 3)) {
return (size / Math.pow(unitSize, 2)).toFixed(2) + ' MB'
}
// GB
if (size < Math.pow(unitSize, 4)) {
return (size / Math.pow(unitSize, 3)).toFixed(2) + ' GB';
}
// TB
return (size / Math.pow(unitSize, 4)).toFixed(2) + ' TB';
}
}
}
</script>
<style scoped>
.container {
width: 750px;
margin: 0 auto;
}
.file-list-wrapper {
margin-top: 20px;
}
h2 {
text-align: center;
}
.file-info-item {
margin: 0 10px;
}
.upload-file-item {
display: flex;
}
.file-progress {
display: flex;
align-items: center;
}
.file-progress-value {
width: 150px;
}
.file-name {
width: 250px;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.file-size {
width: 100px;
margin-left: 60px;
}
.uploader-example {
width: 880px;
padding: 15px;
margin: 40px auto 0;
font-size: 12px;
box-shadow: 0 0 10px rgba(0, 0, 0, .4);
}
.uploader-example .uploader-btn {
margin-right: 4px;
}
.uploader-example .uploader-list {
max-height: 440px;
overflow: auto;
overflow-x: hidden;
overflow-y: auto;
}
</style>
3. FileUtil.js (determine the file type)
/**
* @param fileName - file name
* @returns false when the name has no extension;
*          'image' for images, 'txt' for txt, 'excel' for Excel, 'word' for Word,
*          'pdf' for PDF, 'ppt' for PPT, 'video' for video, 'radio' for audio,
*          'other' for everything else
* @author ljw
**/
export function fileSuffixTypeUtil(fileName){
// 后缀获取
var suffix = "";
// 获取类型结果
var result = "";
try {
var flieArr = fileName.split(".");
suffix = flieArr[flieArr.length - 1];
} catch (err) {
suffix = "";
}
// fileName无后缀返回 false
if (!suffix) {
result = false;
return result;
}
// 图片格式
var imglist = ["png", "jpg", "jpeg", "bmp", "gif"];
// 进行图片匹配
result = imglist.some(function (item) {
return item == suffix;
});
if (result) {
result = "image";
return result;
}
// 匹配txt
var txtlist = ["txt"];
result = txtlist.some(function (item) {
return item == suffix;
});
if (result) {
result = "txt";
return result;
}
// 匹配 excel
var excelist = ["xls", "xlsx"];
result = excelist.some(function (item) {
return item == suffix;
});
if (result) {
result = "excel";
return result;
}
// 匹配 word
var wordlist = ["doc", "docx"];
result = wordlist.some(function (item) {
return item == suffix;
});
if (result) {
result = "word";
return result;
}
// 匹配 pdf
var pdflist = ["pdf"];
result = pdflist.some(function (item) {
return item == suffix;
});
if (result) {
result = "pdf";
return result;
}
// 匹配 ppt
var pptlist = ["ppt"];
result = pptlist.some(function (item) {
return item == suffix;
});
if (result) {
result = "ppt";
return result;
}
// 匹配 视频
var videolist = ["mp4", "m2v", "mkv","ogg", "flv", "avi", "wmv", "rmvb"];
result = videolist.some(function (item) {
return item == suffix;
});
if (result) {
result = "video";
return result;
}
// 匹配 音频
var radiolist = ["mp3", "wav", "wmv"];
result = radiolist.some(function (item) {
return item == suffix;
});
if (result) {
result = "radio";
return result;
}
// 其他 文件类型
result = "other";
return result;
};
4. upload.js (backend request helpers)
import request from '@/utils/request'
//upload screenshot (cover) images
export function uploadScreenshot(data){
return request({
url:'upload/multipart/uploadScreenshot',
method:'post',
data
})
}
//save uploaded file information
export function uploadFileInfo(data){
return request({
url:'upload/multipart/uploadFileInfo',
method:'post',
data
})
}
// check whether the file has been uploaded
export function checkUpload(MD5) {
return request({
url: `upload/multipart/check?md5=${MD5}`,
method: 'get',
})
};
// initialize the upload
export function initUpload(data) {
return request({
url: `upload/multipart/init`,
method: 'post',
data
})
};
// merge the uploaded chunks
export function mergeUpload(data) {
return request({
url: `upload/multipart/merge`,
method: 'post',
data
})
};
5.request.js
import axios from 'axios'
import router from "@/router";
import ElementUI from "element-ui";
const request = axios.create({
baseURL: `http://localhost:9090`,
timeout: 30000
})
// request interceptor
// pre-process every request before it is sent,
// e.g. attach a token or encrypt request parameters in one place
request.interceptors.request.use(config => {
config.headers['Content-Type'] = 'application/json;charset=utf-8';
return config
}, error => {
return Promise.reject(error)
});
// response interceptor
// post-process every response in one place
request.interceptors.response.use(
response => {
let res = response.data;
// if the response is a file (blob), return it as-is
if (response.config.responseType === 'blob') {
return res
}
// tolerate string payloads from the server
if (typeof res === 'string') {
res = res ? JSON.parse(res) : res
console.log(res)
}
return res;
},
error => {
console.log('err' + error) // for debug
return Promise.reject(error)
}
)
export default request
III. Backend Implementation
1. Database
/*
Navicat Premium Data Transfer
Source Server : local
Source Server Type : MySQL
Source Server Version : 80028
Source Host : localhost:3306
Source Schema : shiro_jwt_vue_file
Target Server Type : MySQL
Target Server Version : 80028
File Encoding : 65001
Date: 01/05/2023 13:52:06
*/
SET NAMES utf8mb4;
SET FOREIGN_KEY_CHECKS = 0;
-- ----------------------------
-- Table structure for files
-- ----------------------------
DROP TABLE IF EXISTS `files`;
CREATE TABLE `files` (
`id` int(0) NOT NULL AUTO_INCREMENT COMMENT 'id',
`upload_id` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL COMMENT '分片上传uploadId',
`file_md5` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL COMMENT '文件md5',
`url` varchar(500) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL COMMENT '下载链接',
`file_name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL COMMENT '文件名称',
`bucket_name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL COMMENT '桶名',
`file_type` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL COMMENT '文件类型',
`file_size` bigint(0) NULL DEFAULT NULL COMMENT '文件大小(byte)',
`chunk_size` bigint(0) NULL DEFAULT NULL COMMENT '每个分片的大小(byte)',
`chunk_num` int(0) NULL DEFAULT NULL COMMENT '分片数量',
`is_delete` tinyint(1) NULL DEFAULT 0 COMMENT '是否删除',
`enable` tinyint(1) NULL DEFAULT 1 COMMENT '是否禁用链接',
`create_time` datetime(0) NULL DEFAULT NULL COMMENT '创建时间',
`update_time` datetime(0) NULL DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 262 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci ROW_FORMAT = Dynamic;
SET FOREIGN_KEY_CHECKS = 1;
2. Dependencies (pom.xml)
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.50</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>3.5.2</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>8.0.30</version>
</dependency>
<dependency>
<groupId>io.minio</groupId>
<artifactId>minio</artifactId>
<version>8.3.1</version>
</dependency>
<dependency>
<groupId>com.squareup.okhttp3</groupId>
<artifactId>okhttp</artifactId>
<version>4.9.2</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
<dependency>
<groupId>cn.hutool</groupId>
<artifactId>hutool-all</artifactId>
<version>5.7.20</version>
</dependency>
<!-- bean-validation annotations; since Spring Boot 2.3 this starter must be added separately -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-validation</artifactId>
<!-- the version is managed by Spring Boot -->
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-dependencies</artifactId>
<version>${spring-boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
3.application.yml
server:
  port: 9090
spring:
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://localhost:3306/shiro_jwt_vue_file?serverTimezone=Asia/Shanghai&useUnicode=true&useSSL=false
    username: root
    password: mysql
  redis:
    # Redis server address
    host: 127.0.0.1
    # Redis server port
    port: 6379
    # database index to use, default 0
    database: 0
    # connection timeout (ms)
    timeout: 1800000
    # password, if required
    #password: "123456"
    lettuce:
      pool:
        # maximum blocking wait time; negative means no limit
        max-wait: -1
        # maximum number of idle connections in the pool
        max-idle: 5
        # minimum number of idle connections in the pool
        min-idle: 0
        # maximum number of connections in the pool; negative means no limit
        max-active: 20
  jackson:
    date-format: yyyy-MM-dd HH:mm:ss
    time-zone: GMT+8
mybatis-plus:
  mapper-locations: classpath:mapper/*.xml
  configuration:
    log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
minio:
  endpoint: http://localhost:9000
  accesskey: minioadmin
  secretkey: minioadmin
  expiry: 1 # expiry of the presigned chunk URLs, in days
  breakpoint-time: 1 # how long a resumable-upload task is kept in Redis, in days
4. Controller: file upload
package com.wusuowei.miniouploadfile.controller;
import com.wusuowei.miniouploadfile.model.vo.FileUploadInfo;
import com.wusuowei.miniouploadfile.service.UploadService;
import com.wusuowei.miniouploadfile.utils.MinioUtils;
import com.wusuowei.miniouploadfile.utils.R;
import com.wusuowei.miniouploadfile.utils.RedisUtil;
import com.wusuowei.miniouploadfile.utils.RespEnum;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import javax.annotation.Resource;
/**
* MinIO upload flow
*
* 1. Check whether the file already exists in the database
*
* 2. Initialize the upload from the file info and return presigned chunk URLs; the frontend uploads the chunks to those URLs
*
* 3. After the chunks are uploaded, merge them into the final object
*
* 4. Save the file information to the database
*/
@RestController
@Slf4j
@RequestMapping("upload")
public class FileMinioController {
@Resource
private UploadService uploadService;
@Resource
private RedisUtil redisUtil;
@Resource
private MinioUtils minioUtils;
/**
* @description 获取上传文件
* @param fileMD5 文件md5
* @return {@link R }
* @author LGY
* @date 2023/04/26 16:00
*/
@GetMapping("/getUploadingFile/{fileMD5}")
public R getUploadingFile(@PathVariable String fileMD5) {
if (StringUtils.isBlank(fileMD5)) {
return R.error();
}
FileUploadInfo fileUploadInfo = (FileUploadInfo) redisUtil.get(fileMD5);
if (fileUploadInfo != null) {
// 查询上传后的分片数据
fileUploadInfo.setChunkUploadedList(minioUtils.getChunkByFileMD5(fileUploadInfo.getFileName(), fileUploadInfo.getUploadId(), fileUploadInfo.getFileType()));
return R.ok().setData(fileUploadInfo);
}
return R.error();
}
/**
* 校验文件是否存在
*
* @param md5 String
* @return ResponseResult<Object>
*/
@GetMapping("/multipart/check")
public R checkFileUploadedByMd5(@RequestParam("md5") String md5) {
log.info("REST: 通过查询 <{}> 文件是否存在、是否进行断点续传", md5);
if (StringUtils.isEmpty(md5)) {
log.error("查询文件是否存在、入参无效");
return R.error(RespEnum.ACCESS_PARAMETER_INVALID);
}
return uploadService.getByFileMD5(md5);
}
/**
* 分片初始化
*
* @param fileUploadInfo 文件信息
* @return ResponseResult<Object>
*/
@PostMapping("/multipart/init")
public R initMultiPartUpload(@RequestBody FileUploadInfo fileUploadInfo) {
log.info("REST: 通过 <{}> 初始化上传任务", fileUploadInfo);
return R.ok().setData(uploadService.initMultiPartUpload(fileUploadInfo));
}
/**
* 完成上传
*
* @param fileUploadInfo 文件信息
* @return ResponseResult<Object>
*/
@PostMapping("/multipart/merge")
public R completeMultiPartUpload(@RequestBody FileUploadInfo fileUploadInfo) {
log.info("REST: 通过 {} 合并上传任务", fileUploadInfo);
//合并文件
String url = uploadService.mergeMultipartUpload(fileUploadInfo);
//获取上传文件地址
if (StringUtils.isNotBlank(url)) {
return R.ok().setData(url);
}
return R.error();
}
@PostMapping("/multipart/uploadScreenshot")
public R uploaduploadScreenshot(@RequestPart("photos") MultipartFile[] photos,
@RequestParam("buckName") String buckName) {
log.info("REST: 上传文件信息 <{}> ", photos);
for (MultipartFile photo : photos) {
if (!photo.isEmpty()) {
uploadService.upload(photo, buckName);
}
}
return R.ok();
}
@RequestMapping("/createBucket")
public void createBucket(@RequestParam("bucketName") String bucketName) {
String bucket = minioUtils.createBucket(bucketName);
}
}
5.UploadService
package com.wusuowei.miniouploadfile.service;
import com.wusuowei.miniouploadfile.model.vo.FileUploadInfo;
import com.wusuowei.miniouploadfile.utils.R;
import org.springframework.web.multipart.MultipartFile;
import java.util.Map;
public interface UploadService {
/**
* 分片上传初始化
*
* @param fileUploadInfo
* @return Map<String, Object>
*/
Map<String, Object> initMultiPartUpload(FileUploadInfo fileUploadInfo);
/**
* 完成分片上传
*
* @param fileUploadInfo
* @return String
*/
String mergeMultipartUpload(FileUploadInfo fileUploadInfo);
/**
* 通过 md5 获取已上传的数据
* @param md5 String
* @return Mono<Map<String, Object>>
*/
R getByFileMD5(String md5);
/**
* 获取文件地址
* @param bucketName
* @param fileName
*
*/
String getFliePath(String bucketName, String fileName);
/**
* 单文件上传
* @param file
* @param bucketName
* @return
*/
String upload(MultipartFile file, String bucketName);
}
6.UploadServiceImpl
package com.wusuowei.miniouploadfile.service.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.wusuowei.miniouploadfile.mapper.FilesMapper;
import com.wusuowei.miniouploadfile.model.vo.FileUploadInfo;
import com.wusuowei.miniouploadfile.model.po.Files;
import com.wusuowei.miniouploadfile.service.UploadService;
import com.wusuowei.miniouploadfile.utils.MinioUtils;
import com.wusuowei.miniouploadfile.utils.R;
import com.wusuowei.miniouploadfile.utils.RedisUtil;
import com.wusuowei.miniouploadfile.utils.RespEnum;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
import javax.annotation.Resource;
import java.util.List;
import java.util.Map;
@Slf4j
@Service
public class UploadServiceImpl implements UploadService {
@Resource
private FilesMapper filesMapper;
@Resource
private MinioUtils minioUtils;
@Resource
private RedisUtil redisUtil;
@Value("${minio.breakpoint-time}")
private Integer breakpointTime;
/**
* 通过 md5 获取已上传的数据(断点续传)
*
* @param md5 String
* @return Mono<Map < String, Object>>
*/
@Override
public R getByFileMD5(String md5) {
log.info("tip message: 通过 <{}> 查询redis是否存在", md5);
// 从redis获取文件名称和id
FileUploadInfo fileUploadInfo = (FileUploadInfo) redisUtil.get(md5);
if (fileUploadInfo != null) {
// 正在上传,查询上传后的分片数据
List<Integer> chunkList = minioUtils.getChunkByFileMD5(fileUploadInfo.getFileName(), fileUploadInfo.getUploadId(), fileUploadInfo.getFileType());
fileUploadInfo.setChunkUploadedList(chunkList);
return R.ok(RespEnum.UPLOADING).setData(fileUploadInfo);
}
log.info("tip message: 通过 <{}> 查询mysql是否存在", md5);
// 查询数据库是否上传成功
Files one = filesMapper.selectOne(new LambdaQueryWrapper<Files>().eq(Files::getFileMd5, md5));
if (one != null) {
FileUploadInfo mysqlsFileUploadInfo = new FileUploadInfo();
BeanUtils.copyProperties(one, mysqlsFileUploadInfo);
return R.ok(RespEnum.UPLOADSUCCESSFUL).setData(mysqlsFileUploadInfo);
}
return R.ok(RespEnum.NOT_UPLOADED);
}
/**
* 文件分片上传
*
* @param fileUploadInfo
* @return Mono<Map < String, Object>>
*/
@Override
public Map<String, Object> initMultiPartUpload(FileUploadInfo fileUploadInfo) {
FileUploadInfo redisFileUploadInfo = (FileUploadInfo) redisUtil.get(fileUploadInfo.getFileMd5());
if (redisFileUploadInfo != null) {
fileUploadInfo = redisFileUploadInfo;
}
log.info("tip message: 通过 <{}> 开始初始化<分片上传>任务", fileUploadInfo);
// 获取桶
String bucketName = minioUtils.getBucketName(fileUploadInfo.getFileType());
// 单文件上传
if (fileUploadInfo.getChunkNum() == 1) {
log.info("tip message: 当前分片数量 <{}> 进行单文件上传", fileUploadInfo.getChunkNum());
saveFileToDB(fileUploadInfo);
return minioUtils.getUploadObjectUrl(fileUploadInfo.getFileName(), bucketName);
}
// 分片上传
else {
log.info("tip message: 当前分片数量 <{}> 进行分片上传", fileUploadInfo.getChunkNum());
Map<String, Object> map = minioUtils.initMultiPartUpload(fileUploadInfo, fileUploadInfo.getFileName(), fileUploadInfo.getChunkNum(), fileUploadInfo.getContentType(), bucketName);
String uploadId = (String) map.get("uploadId");
fileUploadInfo.setUploadId(uploadId);
redisUtil.set(fileUploadInfo.getFileMd5(),fileUploadInfo,breakpointTime*60*60*24);
return map;
}
}
/**
* 文件合并
*
* @param
* @return String
*/
@Override
public String mergeMultipartUpload(FileUploadInfo fileUploadInfo) {
log.info("tip message: 通过 <{}> 开始合并<分片上传>任务", fileUploadInfo);
FileUploadInfo redisFileUploadInfo = (FileUploadInfo) redisUtil.get(fileUploadInfo.getFileMd5());
if(redisFileUploadInfo!=null){
fileUploadInfo.setFileName(redisFileUploadInfo.getFileName());
}
boolean result = minioUtils.mergeMultipartUpload(fileUploadInfo.getFileName(), fileUploadInfo.getUploadId(), fileUploadInfo.getFileType());
//合并成功
if (result) {
//存入数据库
Files files = saveFileToDB(fileUploadInfo);
redisUtil.del(fileUploadInfo.getFileMd5());
return files.getUrl();
}
return null;
}
@Override
public String getFliePath(String bucketName, String fileName) {
return minioUtils.getFliePath(bucketName, fileName);
}
@Override
public String upload(MultipartFile file, String bucketName) {
minioUtils.upload(file, bucketName);
// build the access path from the original filename (file.getName() is the form field name, not the filename)
return getFliePath(bucketName, file.getOriginalFilename());
}
private Files saveFileToDB(FileUploadInfo fileUploadInfo) {
String url = this.getFliePath(fileUploadInfo.getFileType().toLowerCase(), fileUploadInfo.getFileName());
//存入数据库
Files files = new Files();
BeanUtils.copyProperties(fileUploadInfo, files);
files.setBucketName(fileUploadInfo.getFileType());
files.setUrl(url);
filesMapper.insert(files);
return files;
}
}
7.FilesMapper
package com.wusuowei.miniouploadfile.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.wusuowei.miniouploadfile.model.po.Files;
/**
* <p>
* Mapper 接口
* </p>
*
* @author LGY
*/
public interface FilesMapper extends BaseMapper<Files> {
}
8. Files and FileUploadInfo entity classes
package com.wusuowei.miniouploadfile.model.po;
import com.baomidou.mybatisplus.annotation.*;
import lombok.Data;
import java.io.Serializable;
import java.time.LocalDateTime;
/**
* <p>
*
* </p>
*
* @author LGY
*/
@Data
@TableName("files")
public class Files implements Serializable {
private static final long serialVersionUID = 1L;
@TableId(value = "id", type = IdType.AUTO)
private Integer id;
@TableField("upload_id")
private String uploadId;
@TableField("file_md5")
private String fileMd5;
@TableField("url")
private String url;
@TableField("file_name")
private String fileName;
@TableField("bucket_name")
private String bucketName;
@TableField("file_type")
private String fileType;
@TableField("file_size")
private Long fileSize;
@TableField("chunk_size")
private Long chunkSize;
@TableField("chunk_num")
private Integer chunkNum;
@TableField("is_delete")
@TableLogic(value = "0",delval = "1")
private Boolean isDelete;
@TableField("enable")
private Boolean enable;
@TableField(value = "create_time", fill = FieldFill.INSERT)
private LocalDateTime createTime;
@TableField(value = "update_time", fill = FieldFill.INSERT_UPDATE)
private LocalDateTime updateTime;
}
package com.wusuowei.miniouploadfile.model.vo;
import lombok.Data;
import lombok.experimental.Accessors;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
import java.util.List;
@Data
@Accessors(chain = true)
public class FileUploadInfo {
@NotBlank(message = "文件名不能为空")
private String fileName;
@NotNull(message = "文件大小不能为空")
private Long fileSize;
@NotBlank(message = "Content-Type不能为空")
private String contentType;
@NotNull(message = "分片数量不能为空")
private Integer chunkNum;
@NotBlank(message = "uploadId 不能为空")
private String uploadId;
private Long chunkSize;
// 桶名称
//private String bucketName;
//md5
private String fileMd5;
//文件类型
private String fileType;
//已上传的分片索引+1
private List<Integer> chunkUploadedList;
}
9.MinioUtils
package com.wusuowei.miniouploadfile.utils;
import cn.hutool.core.text.CharSequenceUtil;
import cn.hutool.core.util.StrUtil;
import com.google.common.collect.HashMultimap;
import com.wusuowei.miniouploadfile.config.CustomMinioClient;
import com.wusuowei.miniouploadfile.model.vo.FileUploadInfo;
import io.minio.*;
import io.minio.errors.*;
import io.minio.http.Method;
import io.minio.messages.Part;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.web.multipart.MultipartFile;
import javax.annotation.PostConstruct;
import java.io.IOException;
import java.io.InputStream;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@Slf4j
@Component
public class MinioUtils {
@Value(value = "${minio.endpoint}")
private String endpoint;
@Value(value = "${minio.accesskey}")
private String accesskey;
@Value(value = "${minio.secretkey}")
private String secretkey;
@Value(value = "${minio.expiry}")
private Integer expiry;
private CustomMinioClient customMinioClient;
/**
* Built manually here: injecting the client with Spring's autowiring fails
*/
@PostConstruct
public void init() {
MinioClient minioClient = MinioClient.builder()
.endpoint(endpoint)
.credentials(accesskey, secretkey)
.build();
customMinioClient = new CustomMinioClient(minioClient);
}
/**
* 单文件签名上传
*
* @param objectName 文件全路径名称
* @param bucketName 桶名称
* @return /
*/
public Map<String, Object> getUploadObjectUrl(String objectName, String bucketName) {
try {
log.info("tip message: 通过 <{}-{}> 开始单文件上传<minio>", objectName, bucketName);
Map<String, Object> resMap = new HashMap();
List<String> partList = new ArrayList<>();
String url = customMinioClient.getPresignedObjectUrl(
GetPresignedObjectUrlArgs.builder()
.method(Method.PUT)
.bucket(bucketName)
.object(objectName)
.expiry(expiry, TimeUnit.DAYS)
.build());
log.info("tip message: 单个文件上传、成功");
partList.add(url);
resMap.put("uploadId", "SingleFileUpload");
resMap.put("urlList", partList);
return resMap;
} catch (Exception e) {
log.error("error message: 单个文件上传失败、原因:", e);
// 返回 文件上传失败
return null;
}
}
/**
* Initialize a multipart upload
*
* @param fileUploadInfo
* @param objectName full object (file) name
* @param chunkNum number of chunks
* @param contentType content type; leaving the default octet-stream makes the file non-previewable in the browser
* @param bucketName bucket name
* @return Map<String, Object>
*/
public Map<String, Object> initMultiPartUpload(FileUploadInfo fileUploadInfo, String objectName, int chunkNum, String contentType, String bucketName) {
log.info("tip message: 通过 <{}-{}-{}-{}> 开始初始化<分片上传>数据", objectName, chunkNum, contentType, bucketName);
Map<String, Object> resMap = new HashMap<>();
try {
if (CharSequenceUtil.isBlank(contentType)) {
contentType = "application/octet-stream";
}
HashMultimap<String, String> headers = HashMultimap.create();
headers.put("Content-Type", contentType);
//获取uploadId
String uploadId = null;
if(StringUtils.isBlank(fileUploadInfo.getUploadId())){
uploadId = customMinioClient.initMultiPartUpload(bucketName, null, objectName, headers, null);
}else{
uploadId = fileUploadInfo.getUploadId();
}
resMap.put("uploadId", uploadId);
fileUploadInfo.setUploadId(uploadId);
fileUploadInfo.setChunkNum(chunkNum);
List<String> partList = new ArrayList<>();
Map<String, String> reqParams = new HashMap<>();
reqParams.put("uploadId", uploadId);
for (int i = 1; i <= chunkNum; i++) {
reqParams.put("partNumber", String.valueOf(i));
String uploadUrl = customMinioClient.getPresignedObjectUrl(
GetPresignedObjectUrlArgs.builder()
.method(Method.PUT)
.bucket(bucketName)
.object(objectName)
.expiry(1, TimeUnit.DAYS)
.extraQueryParams(reqParams)
.build());
partList.add(uploadUrl);
}
log.info("tip message: 文件初始化<分片上传>、成功");
resMap.put("urlList", partList);
return resMap;
} catch (Exception e) {
log.error("error message: 初始化分片上传失败、原因:", e);
// 返回 文件上传失败
return R.error(RespEnum.UPLOAD_FILE_FAILED);
}
}
/**
* 分片上传完后合并
*
* @param objectName 文件全路径名称
* @param uploadId 返回的uploadId
* @param bucketName 桶名称
* @return boolean
*/
public boolean mergeMultipartUpload(String objectName, String uploadId, String bucketName) {
try {
log.info("tip message: 通过 <{}-{}-{}> 合并<分片上传>数据", objectName, uploadId, bucketName);
//currently supports at most 1000 parts
Part[] parts = new Part[1000];
// 查询上传后的分片数据
ListPartsResponse partResult = customMinioClient.listMultipart(bucketName, null, objectName, 1000, 0, uploadId, null, null);
int partNumber = 1;
for (Part part : partResult.result().partList()) {
parts[partNumber - 1] = new Part(partNumber, part.etag());
partNumber++;
}
// 合并分片
customMinioClient.mergeMultipartUpload(bucketName, null,objectName, uploadId, parts, null, null);
} catch (Exception e) {
log.error("error message: 合并失败、原因:", e);
//TODO: remove the cached upload task from Redis
return false;
}
return true;
}
/**
* Get the parts already uploaded for an in-progress multipart upload
*
* @param objectName full object (file) name
* @param uploadId the uploadId returned at initialization
* @param bucketName bucket name
* @return List<Integer> part numbers that are already uploaded
*/
public List<Integer> getChunkByFileMD5(String objectName, String uploadId, String bucketName) {
log.info("通过 <{}-{}-{}> 查询<minio>上传分片数据", objectName, uploadId, bucketName);
try {
// 查询上传后的分片数据
ListPartsResponse partResult = customMinioClient.listMultipart(bucketName, null, objectName, 1000, 0, uploadId, null, null);
return partResult.result().partList().stream().map(Part::partNumber).collect(Collectors.toList());
} catch (Exception e) {
log.error("error message: 查询上传后的分片信息失败、原因:", e);
return null;
}
}
/**
* 获取文件下载地址
*
* @param bucketName 桶名称
* @param fileName 文件名
* @return
*/
public String getFliePath(String bucketName, String fileName) {
return StrUtil.format("{}/{}/{}", endpoint, bucketName, fileName);//文件访问路径
}
/**
* 创建一个桶
*
* @return
*/
public String createBucket(String bucketName) {
try {
BucketExistsArgs bucketExistsArgs = BucketExistsArgs.builder().bucket(bucketName).build();
//如果桶存在
if (customMinioClient.bucketExists(bucketExistsArgs)) {
return bucketName;
}
MakeBucketArgs makeBucketArgs = MakeBucketArgs.builder().bucket(bucketName).build();
customMinioClient.makeBucket(makeBucketArgs);
return bucketName;
} catch (Exception e) {
log.error("创建桶失败:{}", e.getMessage());
throw new RuntimeException(e);
}
}
/**
* 根据文件类型获取minio桶名称
*
* @param fileType
* @return
*/
public String getBucketName(String fileType) {
try {
//String bucketName = getProperty(fileType.toLowerCase());
if (fileType != null && !fileType.equals("")) {
//判断桶是否存在
String bucketName2 = createBucket(fileType.toLowerCase());
if (bucketName2 != null && !bucketName2.equals("")) {
return bucketName2;
}else{
return fileType;
}
}
} catch (Exception e) {
log.error("Error reading bucket name ");
}
return fileType;
}
/**
* 读取配置文件
*
* @param fileType
* @return
* @throws IOException
*/
private String getProperty(String fileType) throws IOException {
Properties SysLocalPropObject = new Properties();
//判断桶关系配置文件是否为空
if (SysLocalPropObject.isEmpty()) {
InputStream is = getClass().getResourceAsStream("/BucketRelation.properties");
SysLocalPropObject.load(is);
is.close();
}
return SysLocalPropObject.getProperty("bucket." + fileType);
}
/**
* 文件上传
*
* @param file 文件
* @return Boolean
*/
public String upload(MultipartFile file, String bucketName) {
String originalFilename = file.getOriginalFilename();
if (StringUtils.isBlank(originalFilename)) {
throw new RuntimeException();
}
String objectName = originalFilename; // store under the original filename; file.getName() would return the form field name
try {
PutObjectArgs objectArgs = PutObjectArgs.builder().bucket(bucketName).object(objectName)
.stream(file.getInputStream(), file.getSize(), -1).contentType(file.getContentType()).build();
//文件名称相同会覆盖
customMinioClient.putObject(objectArgs);
} catch (Exception e) {
e.printStackTrace();
return null;
}
// build a presigned GET URL so the uploaded file can be accessed
GetPresignedObjectUrlArgs build = GetPresignedObjectUrlArgs.builder().bucket(bucketName).object(objectName).method(Method.GET).build();
String url = null;
try {
url = customMinioClient.getPresignedObjectUrl(build);
} catch (ErrorResponseException | InsufficientDataException | InternalException | InvalidKeyException
| InvalidResponseException | IOException | NoSuchAlgorithmException | XmlParserException | ServerException e) {
e.printStackTrace();
}
return url;
}
// /**
// * 写入配置文件
// */
// public void setProperty(String bucketName) {
// String tempPath = Objects.requireNonNull(getClass().getResource("/BucketRelation.properties")).getPath();
// OutputStream os;
// try {
// os = new FileOutputStream(tempPath);
// SysLocalPropObject.setProperty(bucketName, bucketName);
// SysLocalPropObject.store(os, "Update " + bucketName + " " + bucketName);
// os.close();
// } catch (IOException e) {
// }
// }
}
// @Autowired
// private MinioProp minioProp;
//
//
// @Autowired
// private MinioClient minioClient;
//
//
//
// /**
// * 列出所有的桶
// */
// public List<String> listBuckets() throws Exception {
// List<Bucket> list = minioClient.listBuckets();
// List<String> names = new ArrayList<>();
// list.forEach(b -> {
// names.add(b.name());
// });
// return names;
// }
//
// /**
// * 列出一个桶中的所有文件和目录
// */
// public List<Fileinfo> listFiles(String bucket) throws Exception {
// Iterable<Result<Item>> results = minioClient.listObjects(
// ListObjectsArgs.builder().bucket(bucket).recursive(true).build());
//
// List<Fileinfo> infos = new ArrayList<>();
// results.forEach(r->{
// Fileinfo info = new Fileinfo();
// try {
// Item item = r.get();
// info.setFilename(item.objectName());
// info.setDirectory(item.isDir());
// infos.add(info);
// } catch (Exception e) {
// e.printStackTrace();
// }
// });
// return infos;
// }
//
// /**
// * 下载一个文件
// */
// public InputStream download(String bucket, String objectName) throws Exception {
// InputStream stream = minioClient.getObject(
// GetObjectArgs.builder().bucket(bucket).object(objectName).build());
// return stream;
// }
//
// /**
// * 删除一个桶
// */
// public void deleteBucket(String bucket) throws Exception {
// minioClient.removeBucket(RemoveBucketArgs.builder().bucket(bucket).build());
// }
//
// /**
// * 删除一个对象
// */
// public void deleteObject(String bucket, String objectName) throws Exception {
// minioClient.removeObject(RemoveObjectArgs.builder().bucket(bucket).object(objectName).build());
// }
//
//
// /**
// * 创建一个桶
// */
// public void createBucket(String bucketName) {
// BucketExistsArgs bucketExistsArgs = BucketExistsArgs.builder().bucket(bucketName).build();
// MakeBucketArgs makeBucketArgs = MakeBucketArgs.builder().bucket(bucketName).build();
// try {
// if (minioClient.bucketExists(bucketExistsArgs))
// return;
// minioClient.makeBucket(makeBucketArgs);
// } catch (Exception e) {
// log.error("创建桶失败:{}", e.getMessage());
// throw new RuntimeException(e);
// }
// }
//
// /**
// * 上传一个文件
// * @param file 文件
// * @param bucketName 存储桶
// * @return
// */
// public JSONObject uploadFile(MultipartFile file, String bucketName) throws Exception {
// JSONObject res = new JSONObject();
// res.put("code", 0);
// // 判断上传文件是否为空
// if (null == file || 0 == file.getSize()) {
// res.put("msg", "上传文件不能为空");
// return res;
// }
// // 判断存储桶是否存在
// createBucket(bucketName);
// // 文件名
// String originalFilename = file.getOriginalFilename();
// // 新的文件名 = 存储桶名称_时间戳.后缀名
// String fileName = bucketName + "_" + System.currentTimeMillis() + originalFilename.substring(originalFilename.lastIndexOf("."));
// // 开始上传
// InputStream inputStream = file.getInputStream();
// PutObjectArgs args = PutObjectArgs.builder().bucket(bucketName).object(fileName)
// .stream(inputStream,inputStream.available(),-1).build();
// minioClient.putObject(args);
// res.put("code", 1);
// res.put("msg", minioProp.getEndpoint() + "/" + bucketName + "/" + fileName);
// return res;
// }
10.RedisUtil
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
@Component
public class RedisUtil {
@Autowired
private RedisTemplate redisTemplate;
/**
* 指定缓存失效时间
* @param key 键
* @param time 时间(秒)
* @return
*/
public boolean expire(String key, long time) {
try {
if (time > 0) {
redisTemplate.expire(key, time, TimeUnit.SECONDS);
}
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 根据key 获取过期时间
* @param key 键 不能为null
* @return 时间(秒) 返回0代表为永久有效
*/
public long getExpire(String key) {
return redisTemplate.getExpire(key, TimeUnit.SECONDS);
}
/**
* 判断key是否存在
* @param key 键
* @return true 存在 false不存在
*/
public boolean hasKey(String key) {
try {
return redisTemplate.hasKey(key);
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 删除缓存
* @param key 可以传一个值 或多个
*/
@SuppressWarnings("unchecked")
public void del(String... key) {
if (key != null && key.length > 0) {
if (key.length == 1) {
redisTemplate.delete(key[0]);
} else {
redisTemplate.delete((Collection<String>) CollectionUtils.arrayToList(key));
}
}
}
// ============================String=============================
/**
* 普通缓存获取
* @param key 键
* @return 值
*/
public Object get(String key) {
return key == null ? null : redisTemplate.opsForValue().get(key);
}
/**
* 普通缓存放入
* @param key 键
* @param value 值
* @return true成功 false失败
*/
public boolean set(String key, Object value) {
try {
redisTemplate.opsForValue().set(key, value);
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 普通缓存放入并设置时间
* @param key 键
* @param value 值
* @param time 时间(秒) time要大于0 如果time小于等于0 将设置无限期
* @return true成功 false 失败
*/
public boolean set(String key, Object value, long time) {
try {
if (time > 0) {
redisTemplate.opsForValue().set(key, value, time, TimeUnit.SECONDS);
} else {
set(key, value);
}
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 递增
* @param key 键
* @param delta 要增加几(大于0)
* @return
*/
public long incr(String key, long delta) {
if (delta < 0) {
throw new RuntimeException("递增因子必须大于0");
}
return redisTemplate.opsForValue().increment(key, delta);
}
/**
* 递减
* @param key 键
* @param delta 要减少几(小于0)
* @return
*/
public long decr(String key, long delta) {
if (delta < 0) {
throw new RuntimeException("递减因子必须大于0");
}
return redisTemplate.opsForValue().increment(key, -delta);
}
// ================================Map=================================
/**
* 获取list缓存的长度
* @param key 键
* @return
*/
public long hGetMapSize(String key) {
try {
return redisTemplate.opsForHash().size(key);
} catch (Exception e) {
e.printStackTrace();
return 0;
}
}
/**
* HashGet
* @param key 键 不能为null
* @param item 项 不能为null
* @return 值
*/
public Object hget(String key, String item) {
return redisTemplate.opsForHash().get(key, item);
}
/**
* 获取hashKey对应的所有键值
* @param key 键
* @return 对应的多个键值
*/
public Map<Object, Object> hmget(String key) {
return redisTemplate.opsForHash().entries(key);
}
/**
* HashSet
* @param key 键
* @param map 对应多个键值
* @return true 成功 false 失败
*/
public boolean hmset(String key, Map<String, Object> map) {
try {
redisTemplate.opsForHash().putAll(key, map);
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* HashSet 并设置时间
* @param key 键
* @param map 对应多个键值
* @param time 时间(秒)
* @return true成功 false失败
*/
public boolean hmset(String key, Map<String, Object> map, long time) {
try {
redisTemplate.opsForHash().putAll(key, map);
if (time > 0) {
expire(key, time);
}
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 向一张hash表中放入数据,如果不存在将创建
* @param key 键
* @param item 项
* @param value 值
* @return true 成功 false失败
*/
public boolean hset(String key, String item, Object value) {
try {
redisTemplate.opsForHash().put(key, item, value);
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 向一张hash表中放入数据,如果不存在将创建
* @param key 键
* @param item 项
* @param value 值
* @param time 时间(秒) 注意:如果已存在的hash表有时间,这里将会替换原有的时间
* @return true 成功 false失败
*/
public boolean hset(String key, String item, Object value, long time) {
try {
redisTemplate.opsForHash().put(key, item, value);
if (time > 0) {
expire(key, time);
}
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 删除hash表中的值
* @param key 键 不能为null
* @param item 项 可以使多个 不能为null
*/
public void hdel(String key, Object... item) {
redisTemplate.opsForHash().delete(key, item);
}
/**
* 判断hash表中是否有该项的值
* @param key 键 不能为null
* @param item 项 不能为null
* @return true 存在 false不存在
*/
public boolean hHasKey(String key, String item) {
return redisTemplate.opsForHash().hasKey(key, item);
}
/**
* hash递增 如果不存在,就会创建一个 并把新增后的值返回
* @param key 键
* @param item 项
* @param by 要增加几(大于0)
* @return
*/
public double hincr(String key, String item, double by) {
return redisTemplate.opsForHash().increment(key, item, by);
}
/**
* hash递减
* @param key 键
* @param item 项
* @param by 要减少记(小于0)
* @return
*/
public double hdecr(String key, String item, double by) {
return redisTemplate.opsForHash().increment(key, item, -by);
}
// ============================set=============================
/**
* 根据key获取Set中的所有值
* @param key 键
* @return
*/
public Set<Object> sGet(String key) {
try {
return redisTemplate.opsForSet().members(key);
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
/**
* 根据value从一个set中查询,是否存在
* @param key 键
* @param value 值
* @return true 存在 false不存在
*/
public boolean sHasKey(String key, Object value) {
try {
return redisTemplate.opsForSet().isMember(key, value);
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 将数据放入set缓存
* @param key 键
* @param values 值 可以是多个
* @return 成功个数
*/
public long sSet(String key, Object... values) {
try {
return redisTemplate.opsForSet().add(key, values);
} catch (Exception e) {
e.printStackTrace();
return 0;
}
}
/**
* 将set数据放入缓存
* @param key 键
* @param time 时间(秒)
* @param values 值 可以是多个
* @return 成功个数
*/
public long sSetAndTime(String key, long time, Object... values) {
try {
Long count = redisTemplate.opsForSet().add(key, values);
if (time > 0) {
expire(key, time);
}
return count;
} catch (Exception e) {
e.printStackTrace();
return 0;
}
}
/**
* 获取set缓存的长度
* @param key 键
* @return
*/
public long sGetSetSize(String key) {
try {
return redisTemplate.opsForSet().size(key);
} catch (Exception e) {
e.printStackTrace();
return 0;
}
}
/**
* 移除值为value的
* @param key 键
* @param values 值 可以是多个
* @return 移除的个数
*/
public long setRemove(String key, Object... values) {
try {
Long count = redisTemplate.opsForSet().remove(key, values);
return count;
} catch (Exception e) {
e.printStackTrace();
return 0;
}
}
// ===============================list=================================
/**
* 获取list缓存的内容
* @param key 键
* @param start 开始
* @param end 结束 0 到 -1代表所有值
* @return
*/
public List<Object> lGet(String key, long start, long end) {
try {
return redisTemplate.opsForList().range(key, start, end);
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
/**
* 获取list缓存的长度
* @param key 键
* @return
*/
public long lGetListSize(String key) {
try {
return redisTemplate.opsForList().size(key);
} catch (Exception e) {
e.printStackTrace();
return 0;
}
}
/**
* 通过索引 获取list中的值
* @param key 键
* @param index 索引 index>=0时, 0 表头,1 第二个元素,依次类推;index<0时,-1,表尾,-2倒数第二个元素,依次类推
* @return
*/
public Object lGetIndex(String key, long index) {
try {
return redisTemplate.opsForList().index(key, index);
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
/**
* 将list放入缓存
* @param key 键
* @param value 值
* @return
*/
public boolean lSet(String key, Object value) {
try {
redisTemplate.opsForList().rightPush(key, value);
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 将list放入缓存
* @param key 键
* @param value 值
* @param time 时间(秒)
* @return
*/
public boolean lSet(String key, Object value, long time) {
try {
redisTemplate.opsForList().rightPush(key, value);
if (time > 0) {
expire(key, time);
}
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 将list放入缓存
* @param key 键
* @param value 值
* @return
*/
public boolean lSet(String key, List<Object> value) {
try {
redisTemplate.opsForList().rightPushAll(key, value);
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 将list放入缓存
*
* @param key 键
* @param value 值
* @param time 时间(秒)
* @return
*/
public boolean lSet(String key, List<Object> value, long time) {
try {
redisTemplate.opsForList().rightPushAll(key, value);
if (time > 0) {
expire(key, time);
}
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 根据索引修改list中的某条数据
* @param key 键
* @param index 索引
* @param value 值
* @return
*/
public boolean lUpdateIndex(String key, long index, Object value) {
try {
redisTemplate.opsForList().set(key, index, value);
return true;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
/**
* 移除N个值为value
* @param key 键
* @param count 移除多少个
* @param value 值
* @return 移除的个数
*/
public long lRemove(String key, long count, Object value) {
try {
Long remove = redisTemplate.opsForList().remove(key, count, value);
return remove;
} catch (Exception e) {
e.printStackTrace();
return 0;
}
}
}
11.CustomMinioClient
import com.google.common.collect.Multimap;
import io.minio.CreateMultipartUploadResponse;
import io.minio.ListPartsResponse;
import io.minio.MinioClient;
import io.minio.ObjectWriteResponse;
import io.minio.errors.*;
import io.minio.messages.Part;
import java.io.IOException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
public class CustomMinioClient extends MinioClient {
/**
* Wrap an existing MinioClient so its protected multipart APIs become accessible
* @param client the configured MinioClient to delegate to
*/
public CustomMinioClient(MinioClient client) {
super(client);
}
/**
* Initialize a multipart upload and return the uploadId
*
* @param bucket String bucket name
* @param region String
* @param object String object (file) name
* @param headers Multimap<String, String> request headers
* @param extraQueryParams Multimap<String, String>
* @return String the uploadId
*/
public String initMultiPartUpload(String bucket, String region, String object, Multimap<String, String> headers, Multimap<String, String> extraQueryParams) throws IOException, InvalidKeyException, NoSuchAlgorithmException, InsufficientDataException, ServerException, InternalException, XmlParserException, InvalidResponseException, ErrorResponseException {
CreateMultipartUploadResponse response = this.createMultipartUpload(bucket, region, object, headers, extraQueryParams);
return response.result().uploadId();
}
/**
* Merge the uploaded parts into the final object
*
* @param bucketName String bucket name
* @param region String
* @param objectName String object (file) name
* @param uploadId String uploadId of the multipart upload
* @param parts Part[] uploaded parts
* @param extraHeaders Multimap<String, String>
* @param extraQueryParams Multimap<String, String>
* @return ObjectWriteResponse
*/
public ObjectWriteResponse mergeMultipartUpload(String bucketName, String region, String objectName, String uploadId, Part[] parts, Multimap<String, String> extraHeaders, Multimap<String, String> extraQueryParams) throws IOException, NoSuchAlgorithmException, InsufficientDataException, ServerException, InternalException, XmlParserException, InvalidResponseException, ErrorResponseException, InvalidKeyException {
return this.completeMultipartUpload(bucketName, region, objectName, uploadId, parts, extraHeaders, extraQueryParams);
}
/**
* Query the parts that have already been uploaded for a multipart upload
*
* @param bucketName String bucket name
* @param region String
* @param objectName String object (file) name
* @param maxParts Integer maximum number of parts to return
* @param partNumberMarker Integer part number to start listing from
* @param uploadId String uploadId of the multipart upload
* @param extraHeaders Multimap<String, String>
* @param extraQueryParams Multimap<String, String>
* @return ListPartsResponse
*/
public ListPartsResponse listMultipart(String bucketName, String region, String objectName, Integer maxParts, Integer partNumberMarker, String uploadId, Multimap<String, String> extraHeaders, Multimap<String, String> extraQueryParams) throws NoSuchAlgorithmException, InsufficientDataException, IOException, InvalidKeyException, ServerException, XmlParserException, ErrorResponseException, InternalException, InvalidResponseException {
return this.listParts(bucketName, region, objectName, maxParts, partNumberMarker, uploadId, extraHeaders, extraQueryParams);
}
}
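The multipart methods called above (createMultipartUpload, completeMultipartUpload, listParts) are protected in the minio Java SDK, which is the whole reason for this subclass. As a rough usage sketch, assuming the same SDK version the project uses (where MinioClient exposes a protected copy constructor and these methods), a CustomMinioClient can be built from a configured MinioClient and used to start a multipart upload; the endpoint, credentials, bucket and object names below are placeholders.
import com.google.common.collect.HashMultimap;
import io.minio.MinioClient;

public class CustomMinioClientDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint and credentials: replace with your own MinIO settings.
        MinioClient minioClient = MinioClient.builder()
                .endpoint("http://127.0.0.1:9000")
                .credentials("minioadmin", "minioadmin")
                .build();
        CustomMinioClient customClient = new CustomMinioClient(minioClient);

        // Start a multipart upload and keep the uploadId; presigned URLs for each
        // part number can then be generated and handed to the front end.
        String uploadId = customClient.initMultiPartUpload(
                "upload-bucket", null, "demo.mp4",
                HashMultimap.create(), HashMultimap.create());
        System.out.println("uploadId = " + uploadId);
    }
}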
12.WebAppConfigurer
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.alibaba.fastjson.support.config.FastJsonConfig;
import com.alibaba.fastjson.support.spring.FastJsonHttpMessageConverter;
import org.springframework.boot.autoconfigure.http.HttpMessageConverters;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.converter.HttpMessageConverter;
import org.springframework.web.servlet.config.annotation.CorsRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
/**
* @description Web application configuration (CORS and JSON converter)
* @author LGY
* @date 2023/03/14 20:09
* @version 1.0.0
*/
@Configuration
public class WebAppConfigurer implements WebMvcConfigurer {
@Override
public void addCorsMappings(CorsRegistry registry) {
registry.addMapping("/**")
.allowCredentials(true)
.allowedOriginPatterns("*")
.allowedMethods("POST", "GET", "PUT", "OPTIONS", "DELETE")
.allowedHeaders("*");
}
// Do not serialize null entity properties when returning data to the front end
@Bean
public HttpMessageConverters fastJsonHttpMessageConverters() {
FastJsonHttpMessageConverter fastConverter = new FastJsonHttpMessageConverter();
FastJsonConfig fastJsonConfig = new FastJsonConfig();
fastJsonConfig.setSerializerFeatures(SerializerFeature.PrettyFormat);
fastConverter.setFastJsonConfig(fastJsonConfig);
HttpMessageConverter<?> converter = fastConverter;
return new HttpMessageConverters(converter);
}
}
13.MybatisPlusConfig
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
import lombok.extern.slf4j.Slf4j;
import org.apache.ibatis.reflection.MetaObject;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.time.LocalDateTime;
/**
* MyBatis-Plus configuration class
*
*/
@Slf4j
@Configuration
@MapperScan("com.wusuowei.miniouploadfile.mapper")
public class MybatisPlusConfig implements MetaObjectHandler {
/**
* New pagination plugin.
* MybatisConfiguration#useDeprecatedExecutor = false must be set
* to avoid caching problems (the property will be removed together with the old plugin)
*/
@Bean
public MybatisPlusInterceptor mybatisPlusInterceptor() {
MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
interceptor.addInnerInterceptor(new PaginationInnerInterceptor(DbType.MYSQL));
return interceptor;
}
@Override
public void insertFill(MetaObject metaObject) {
log.info("start insert fill ....");
this.strictInsertFill(metaObject, "createTime", LocalDateTime.class, LocalDateTime.now()); // available since 3.3.0 (recommended)
// this.fillStrategy(metaObject, "createTime", LocalDateTime.now()); // alternative; 3.3.0 has a bug, upgrade to a later version such as `3.3.1.8-SNAPSHOT`
/* use only one of the two calls above; the older fill methods are deprecated (note that strictInsertFill has several overloads, see the source for details) */
}
@Override
public void updateFill(MetaObject metaObject) {
log.info("start update fill ....");
this.strictUpdateFill(metaObject, "updateTime", LocalDateTime.class, LocalDateTime.now()); // available since 3.3.0 (recommended)
// this.fillStrategy(metaObject, "updateTime", LocalDateTime.now()); // alternative; 3.3.0 has a bug, upgrade to a later version such as `3.3.1.8-SNAPSHOT`
/* use only one of the two calls above; the older fill methods are deprecated (note that strictUpdateFill has several overloads, see the source for details) */
}
}
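The insertFill/updateFill handlers above only fill entity fields that are annotated for automatic fill. A minimal sketch of such an entity is shown below; the class, table and field names are illustrative assumptions rather than the project's actual schema.
import com.baomidou.mybatisplus.annotation.FieldFill;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;
import java.time.LocalDateTime;

// Illustrative entity: table and field names are assumptions.
@Data
@TableName("file_info")
public class FileInfoDemo {
    private Long id;
    private String fileMd5;
    private String fileName;

    // filled by insertFill() when the record is inserted
    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createTime;

    // filled by updateFill() when the record is updated
    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updateTime;
}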
14.R (unified response wrapper)
package com.wusuowei.miniouploadfile.utils;
import com.alibaba.fastjson.JSON;
import java.util.HashMap;
import java.util.Map;
/**
* Response data wrapper
*
* @author Mark sunlightcs@gmail.com
*/
public class R extends HashMap<String, Object> {
private static final long serialVersionUID = 1L;
public R setData(Object data) {
put("data",data);
return this;
}
// deserialize the data field with fastjson
public <T> T getData(Class<T> typeReference) {
Object data = get("data"); // by default this is a Map
String jsonString = JSON.toJSONString(data);
T t = JSON.parseObject(jsonString, typeReference);
return t;
}
public R() {
put("code", 200);
put("msg", "success");
}
public static R error() {
return error(5000, "未知异常,请联系管理员");
}
public static R error(String msg) {
return error(500, msg);
}
public static R error(RespEnum respEnum) {
return error(respEnum.getCode(), respEnum.getMessage());
}
public static R error(int code, String msg) {
R r = new R();
r.put("code", code);
r.put("msg", msg);
return r;
}
public static R ok(RespEnum respEnum) {
return ok(respEnum.getCode(), respEnum.getMessage());
}
public static R ok(String msg) {
R r = new R();
r.put("msg", msg);
return r;
}
public static R ok(int code, String msg) {
R r = new R();
r.put("code", code);
r.put("msg", msg);
return r;
}
public static R ok(Map<String, Object> map) {
R r = new R();
r.putAll(map);
return r;
}
public static R ok() {
return new R();
}
public R put(String key, Object value) {
super.put(key, value);
return this;
}
public Integer getCode() {
return (Integer) this.get("code");
}
}
15.RespEnum
package com.wusuowei.miniouploadfile.utils;
public enum RespEnum {
UPLOADSUCCESSFUL(1, "上传成功"),
UPLOADING(2, "上传中"),
NOT_UPLOADED(3, "未上传"),
ACCESS_PARAMETER_INVALID(1001,"访问参数无效"),
UPLOAD_FILE_FAILED(1002,"文件上传失败"),
DATA_NOT_EXISTS(1003,"数据不存在"),
;
private final Integer code;
private final String message;
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
RespEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
}
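To tie R and RespEnum together, here is a minimal controller sketch showing how a check-upload endpoint might wrap its result before the front end reads code/msg/data; the controller name, mapping path and the lookup logic are illustrative assumptions, not the project's actual implementation.
import com.wusuowei.miniouploadfile.utils.R;
import com.wusuowei.miniouploadfile.utils.RespEnum;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

// Illustrative controller: path and logic are assumptions.
@RestController
public class UploadCheckDemoController {

    @GetMapping("/upload/check")
    public R checkUpload(@RequestParam("md5") String md5) {
        boolean alreadyUploaded = false; // a real implementation would look the MD5 up in MySQL
        if (alreadyUploaded) {
            // file already exists: return its stored info directly (placeholder URL here)
            return R.ok(RespEnum.UPLOADSUCCESSFUL).setData("http://127.0.0.1:9000/upload-bucket/demo.mp4");
        }
        return R.ok(RespEnum.NOT_UPLOADED);
    }
}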
Summary
This article includes quite a lot of pasted code, so you may prefer to download the full project from my gitee repository and run it. The features have been tested and refined many times; if you find any errors or missing pieces in the code, please leave a comment. If this article happened to help you, I hope you can give the author a like 😶🌫️😶🌫️😶🌫️