Table of Contents
- Preface
- 1. Applying for Alibaba Cloud OSS
- 2. Usage Steps
- Summary
Preface
Alibaba Cloud Object Storage Service (OSS) is a massive, secure, low-cost, and highly reliable cloud storage service. It offers 99.9999999999% (twelve 9s) data durability and 99.995% data availability, and provides multiple storage classes so you can optimize storage costs.
You can move large amounts of data into or out of OSS with the APIs, SDKs, or migration tools that Alibaba Cloud provides. Once your data is stored in OSS, you can use Standard storage as the primary storage for mobile apps, large websites, image sharing, or frequently accessed audio and video, or choose the cheaper, longer-retention classes Infrequent Access (IA), Archive, Cold Archive, or Deep Cold Archive for data that is rarely accessed.
1. Applying for Alibaba Cloud OSS
Go to the Alibaba Cloud official website.
Choose Object Storage Service (OSS).
Click "Activate Now".
Create a bucket.
Enter a bucket name.
Choose the region closest to you.
Leave the remaining settings at their defaults.
Obtain your AccessKeyID and AccessKeySecret.
2. Usage Steps
1. Add the OSS dependency
<!-- Aliyun OSS SDK -->
<dependency>
    <groupId>com.aliyun.oss</groupId>
    <artifactId>aliyun-sdk-oss</artifactId>
    <version>3.8.0</version>
</dependency>
2. Alibaba Cloud configuration
Create an aliyunOSS.properties file under resources:
# The AccessKeyID obtained earlier
aliyun.AccessKeyID=
# The AccessKeySecret obtained earlier
aliyun.AccessKeySecret=
# The name of the bucket you created
aliyun.Buckets=
# The bucket's public endpoint, shown on the bucket's Overview page, e.g. oss-cn-hangzhou.aliyuncs.com
aliyun.EndPoint=
# Custom key prefix
aliyun.prefix=
# Maximum size of a single upload, in MB
aliyun.MAX_SIZE=
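The utility class below reads these values through a small PropertiesReader helper, which the article does not show. The following is only a minimal sketch of what such a helper could look like; the class name and method signatures are assumptions based on how it is called below, not code from the original project.
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class PropertiesReader {
    private static final Properties PROPS = new Properties();

    static {
        // Load aliyunOSS.properties from the classpath once at startup
        try (InputStream in = PropertiesReader.class.getClassLoader()
                .getResourceAsStream("aliyunOSS.properties")) {
            if (in != null) {
                PROPS.load(in);
            }
        } catch (IOException e) {
            throw new IllegalStateException("Failed to load aliyunOSS.properties", e);
        }
    }

    /** Returns the raw string value for the given key, or null if it is missing. */
    public static String get(String key) {
        return PROPS.getProperty(key);
    }

    /** Returns the value for the given key parsed as an Integer, or null if it is missing or blank. */
    public static Integer getInteger(String key) {
        String value = PROPS.getProperty(key);
        return (value == null || value.trim().isEmpty()) ? null : Integer.valueOf(value.trim());
    }
}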
3. The OSS utility class
/**
 * @Description OSS chunked-upload utility
 * @Author LuoAC
 * @Date 2023/6/19 16:38
 */
public class OssUploadUtil {
    /**
     * Alibaba Cloud configuration parameters
     */
    private static String accessKeyId = null;
    private static String accessKeySecret = null;
    private static String endpoint = null;
    private static String bucketName = null;
    private static Integer MAX_SIZE = null;
    /**
     * Key prefix used for objects stored in OSS
     */
    private static String file_prefix = null;
    private static volatile OSSClient ossClient = null;
    private static volatile RedisCache redisManager = null;

    static {
        // Initialize the AccessKeyID
        accessKeyId = PropertiesReader.get("aliyun.AccessKeyID");
        // Initialize the AccessKeySecret
        accessKeySecret = PropertiesReader.get("aliyun.AccessKeySecret");
        // Initialize the endpoint
        endpoint = PropertiesReader.get("aliyun.EndPoint");
        // Initialize the bucket name
        bucketName = PropertiesReader.get("aliyun.Buckets");
        // Initialize the key prefix
        file_prefix = PropertiesReader.get("aliyun.prefix");
        // Maximum file size
        MAX_SIZE = PropertiesReader.getInteger("aliyun.MAX_SIZE");
    }
    /**
     * @Description Returns the initial chunk-upload status for a file. The returned map contains:
     *              skipUpload - true if the file already exists in OSS (instant upload, nothing to re-upload)
     *              key        - the object key
     *              bucket     - the bucket name
     *              uploadId   - the multipart upload id
     *              uploaded   - the chunk numbers already uploaded (List<Integer>)
     * @Author LuoAC
     * @Date 2023/6/9 9:55
     */
    public static Map<String, Object> ossCheck(UploadChunkFileParam param) {
        RedisCache redisManager = initRedisManager();
        // MD5 of the file
        String identifier = param.getIdentifier();
        // File extension
        String suffix = param.getSuffix();
        // Object key of the file
        String key = identifier + "." + suffix;
        Map<String, Object> map = MapUtil.newHashMap();
        // If the file was already uploaded, it can be "uploaded" instantly
        if (checkExist(key)) {
            map.put("skipUpload", true);
            map.put("key", key);
            map.put("bucket", bucketName);
            return map;
        }
        // Check whether this is the first upload or a resumed upload
        String uploadId = redisManager.getCacheObject(key);
        // First upload
        if (StringUtils.isEmpty(uploadId)) {
            String uploadIdNew = uploadChunkInit(key);
            map.put("skipUpload", false);
            map.put("uploadId", uploadIdNew);
            map.put("uploaded", null);
            redisManager.setCacheObject(key, uploadIdNew);
            return map;
        } else {
            // Resumed upload: return the chunks that are already uploaded
            Map<String, String> uploadedCache = redisManager.hmget(CacheConstants.REDIS_ALI_OSS_KEY + uploadId);
            List<Integer> uploaded = Lists.newArrayList();
            for (Map.Entry<String, String> entry : uploadedCache.entrySet()) {
                uploaded.add(JSONUtil.toBean(entry.getValue(), PartETag.class).getPartNumber());
            }
            map.put("skipUpload", false);
            map.put("uploadId", uploadId);
            map.put("uploaded", uploaded);
            return map;
        }
    }
    /**
     * Chunked upload
     *
     * @param param upload parameters
     * @return upload status map
     */
    public static Map<String, Object> uploadChunk(UploadChunkFileParam param) {
        return uploadChunk(param.getUploadId(), param.getIdentifier(), param.getSuffix(), param.getFile(), param.getChunkNumber(),
                param.getCurrentChunkSize(), param.getTotalChunks(), param.getFilename(), param.getProjectId(), param.getDirId());
    }

    /**
     * Chunked upload
     * 1. Check whether the file has already been uploaded
     * 2. Check whether this is the first upload; if so, create an upload id (uploadId)
     * 3. Check whether this is a resumed upload; if so, return the chunks already uploaded
     * 4. Upload the chunk to Alibaba Cloud OSS and record the upload info in Redis
     * 5. Check whether all chunks are uploaded; if so, merge the chunks into the original file
     *
     * @param uploadId   upload id
     * @param identifier the file's key in OSS (its MD5)
     * @param suffix     file extension
     * @param file       the file chunk
     * @param chunkIndex chunk index
     * @param chunkSize  chunk size
     * @param chunkCount total number of chunks
     * @param fileName   original file name
     * @param projectId  project id
     * @param dirId      directory id
     * @return upload status map
     */
    public static Map<String, Object> uploadChunk(String uploadId, String identifier, String suffix, MultipartFile file, Integer chunkIndex,
                                                  long chunkSize, Integer chunkCount, String fileName, Integer projectId, String dirId) {
        ossClient = initOSS();
        String key = identifier + "." + suffix;
        try {
            Map<String, Object> map = MapUtil.newHashMap();
            RedisCache redisManager = initRedisManager();
            // Upload this chunk
            PartETag partETag = uploadChunkPart(uploadId, key, file.getInputStream(), chunkIndex, chunkSize, chunkCount);
            // Cache the finished chunk under the uploadId
            redisManager.hset(CacheConstants.REDIS_ALI_OSS_KEY + uploadId, chunkIndex + ",", JSONUtil.toJsonStr(partETag));
            // Fetch the info of all chunks uploaded so far
            Map<String, String> dataMap = redisManager.hmget(CacheConstants.REDIS_ALI_OSS_KEY + uploadId);
            List<PartETag> partETagList = Lists.newArrayList();
            // Number of chunks already uploaded
            int i = 0;
            for (Map.Entry<String, String> entry : dataMap.entrySet()) {
                partETagList.add(JSONUtil.toBean(entry.getValue(), PartETag.class));
                i++;
            }
            List<Integer> list = partETagList.stream().map(PartETag::getPartNumber).collect(Collectors.toList());
            // Upload progress as a fraction (e.g. 0.25); multiply by 100 for a percentage
            String percent = String.format("%.2f", (double) i / chunkCount);
            // Cache the per-user upload progress
            UploadChunkFileParam uploadChunkFileParam = new UploadChunkFileParam();
            uploadChunkFileParam.setPercent(percent);
            uploadChunkFileParam.setFilename(fileName);
            uploadChunkFileParam.setProjectId(projectId);
            uploadChunkFileParam.setDirId(dirId);
            uploadChunkFileParam.setUploaded(list);
            uploadChunkFileParam.setIdentifier(identifier);
            uploadChunkFileParam.setSuffix(suffix);
            uploadChunkFileParam.setUploadId(uploadId);
            redisManager.setCacheMapValue(CacheConstants.REDIS_ALI_OSS_KEY + SecurityUtils.getUserId(), uploadId + "," + key, uploadChunkFileParam);
            // Check whether all chunks have been uploaded
            if (dataMap.keySet().size() == chunkCount) {
                uploadChunkComplete(uploadId, key, partETagList);
                for (String mapKey : dataMap.keySet()) {
                    redisManager.hdel(CacheConstants.REDIS_ALI_OSS_KEY + uploadId, mapKey);
                }
                redisManager.deleteObject(key);
                redisManager.hdel(CacheConstants.REDIS_ALI_OSS_KEY + SecurityUtils.getUserId(), uploadId + "," + key);
                map.put("skipUpload", true);
                map.put("uploadId", uploadId);
                map.put("key", key);
                map.put("bucket", bucketName);
                map.put("fileName", fileName);
                ossClient.setObjectAcl(bucketName, key, CannedAccessControlList.PublicRead);
            } else {
                map.put("uploaded", list);
                map.put("skipUpload", false);
                map.put("uploadId", uploadId);
            }
            return map;
        } catch (Exception e) {
            e.printStackTrace();
            throw new CustomException("Upload failed: " + e.getMessage());
        }
    }
    /**
     * Upload a single chunk
     *
     * @param uploadId   upload id
     * @param key        object key
     * @param instream   input stream of the chunk
     * @param chunkIndex chunk index
     * @param chunkSize  chunk size
     * @return the PartETag returned by OSS for this chunk
     */
    public static PartETag uploadChunkPart(String uploadId, String key, InputStream instream,
                                           Integer chunkIndex, long chunkSize, Integer chunkCount) {
        ossClient = initOSS();
        try {
            UploadPartRequest partRequest = new UploadPartRequest();
            // Target bucket in Alibaba Cloud OSS
            partRequest.setBucketName(bucketName);
            // Object key
            partRequest.setKey(key);
            // Multipart upload id
            partRequest.setUploadId(uploadId);
            // Chunk input stream
            partRequest.setInputStream(instream);
            // Chunk size. Every chunk except the last one must be at least 100 KB; the last chunk has no minimum.
            partRequest.setPartSize(chunkSize);
            System.out.println(chunkSize + " " + chunkIndex + " " + uploadId);
            // Part number. Each uploaded chunk has a part number in the range 1~10000; values outside this range make OSS return an InvalidArgument error.
            partRequest.setPartNumber(chunkIndex);
            // Chunks need not be uploaded in order (they can even come from different clients); OSS assembles the final file by part number.
            UploadPartResult uploadPartResult = ossClient.uploadPart(partRequest);
            // Each successful part upload returns a PartETag, which is stored in Redis.
            return uploadPartResult.getPartETag();
        } catch (Exception e) {
            e.printStackTrace();
            throw new CustomException("Chunk upload failed: " + e.getMessage());
        }
    }
    /**
     * Merge the chunks into the final object
     *
     * @param uploadId  upload id
     * @param key       object key
     * @param chunkTags PartETags of the uploaded chunks
     * @return the result of completing the multipart upload
     */
    public static CompleteMultipartUploadResult uploadChunkComplete(String uploadId, String key, List<PartETag> chunkTags) {
        ossClient = initOSS();
        try {
            CompleteMultipartUploadRequest completeMultipartUploadRequest =
                    new CompleteMultipartUploadRequest(bucketName, key, uploadId, chunkTags);
            CompleteMultipartUploadResult result = ossClient.completeMultipartUpload(completeMultipartUploadRequest);
            return result;
        } catch (Exception e) {
            e.printStackTrace();
            throw new CustomException("Failed to merge chunks: " + e.getMessage());
        }
    }
    /**
     * @Description Initialize the OSSClient lazily with double-checked locking
     * @Author LuoAC
     * @Date 2023/6/8 10:53
     */
    private static OSSClient initOSS() {
        if (ossClient == null) {
            synchronized (OSSClient.class) {
                if (ossClient == null) {
                    ossClient = new OSSClient(endpoint, new DefaultCredentialProvider(accessKeyId, accessKeySecret),
                            new ClientConfiguration());
                }
            }
        }
        return ossClient;
    }
    /**
     * Initialize a multipart upload and obtain its uploadId
     *
     * @param key object key
     * @return the uploadId
     */
    public static String uploadChunkInit(String key) {
        if (StringUtils.isEmpty(key)) {
            throw new CustomException("key must not be empty");
        }
        ossClient = initOSS();
        try {
            // Create the multipart upload request
            InitiateMultipartUploadRequest uploadRequest = new InitiateMultipartUploadRequest(bucketName, key);
            // Initialize the multipart upload
            InitiateMultipartUploadResult result = ossClient.initiateMultipartUpload(uploadRequest);
            // Return the uploadId, the unique identifier of this multipart upload; it is used for follow-up operations such as cancelling or listing the upload.
            return result.getUploadId();
        } catch (Exception e) {
            e.printStackTrace();
            throw new CustomException("Failed to initialize multipart upload: " + e.getMessage());
        }
    }
    /**
     * @Description Check whether the given key already exists in the bucket
     * @Author LuoAC
     * @Date 2023/6/8 14:00
     */
    public static Boolean checkExist(String key) {
        ossClient = initOSS();
        return ossClient.doesObjectExist(bucketName, key);
    }
    /**
     * @Description Obtain the RedisCache bean lazily with double-checked locking
     * @Author LuoAC
     * @Date 2023/6/8 14:11
     */
    private static RedisCache initRedisManager() {
        if (redisManager == null) {
            synchronized (RedisCache.class) {
                if (redisManager == null) {
                    redisManager = SpringUtils.getBean(RedisCache.class);
                }
            }
        }
        return redisManager;
    }
}
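The utility class also depends on several project-specific helpers that are not shown in the article: RedisCache, SpringUtils, and SecurityUtils come from the author's own project and are not reproduced here, while CustomException and CacheConstants.REDIS_ALI_OSS_KEY are referenced but never defined. For completeness, here is a minimal sketch of the last two (each in its own file), under the assumption that CustomException is a plain runtime exception and REDIS_ALI_OSS_KEY is simply a Redis key prefix; the prefix value below is a placeholder, not taken from the original project.
/** Simple unchecked exception used by the upload utility to report failures. */
public class CustomException extends RuntimeException {
    public CustomException(String message) {
        super(message);
    }
}

/** Cache key constants used by the upload utility. */
public class CacheConstants {
    /** Redis key prefix for chunk-upload bookkeeping (placeholder value). */
    public static final String REDIS_ALI_OSS_KEY = "ali_oss:";
}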
4. The upload parameter class
package com.hzzd.web.services.domain;
import com.baomidou.mybatisplus.annotation.TableField;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import org.springframework.web.multipart.MultipartFile;
import java.util.List;
/**
 * @Description Chunked-upload request parameters
 * @Author LuoAC
 * @Date 2023/6/7 17:18
 */
@Data
public class UploadChunkFileParam {
    /**
     * File transfer task ID: the MD5 hash of the file
     */
    private String identifier;
    /**
     * Full file name, e.g. 123.png
     */
    private String filename;
    /** File extension */
    private String suffix;
    /**
     * Subject type -- used by other business logic in my project and can be ignored here
     */
    private String objectType;
    /**
     * Total number of chunks
     */
    private int totalChunks;
    /**
     * Size of each chunk
     */
    private long chunkSize;
    /**
     * Index of the current chunk
     */
    private int chunkNumber;
    /**
     * Size of the current chunk
     */
    private long currentChunkSize;
    /**
     * The chunk file being transferred
     */
    private MultipartFile file;
    /**
     * Upload id of the OSS multipart upload
     */
    private String uploadId;
    /**
     * Object key of the file in OSS
     */
    private String key;
    /**
     * Upload progress (fraction of chunks uploaded)
     */
    private String percent;
    @ApiModelProperty("project id")
    private Integer projectId;
    @ApiModelProperty("directory id")
    private String dirId;
    /**
     * Chunk numbers that have already been uploaded
     */
    private List<Integer> uploaded;
}
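To wire the utility into a Spring Boot application, the two static methods are typically exposed through a small REST controller that the front-end uploader calls once before the upload (check) and once per chunk (upload). The article does not include such a controller, so the following is only a sketch; the controller name and request paths are assumptions.
import java.util.Map;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/oss/chunk")
public class OssChunkUploadController {

    /**
     * Called once before uploading: checks for instant upload / resumable upload
     * and returns skipUpload, uploadId, and the chunk numbers already uploaded.
     */
    @GetMapping("/check")
    public Map<String, Object> check(UploadChunkFileParam param) {
        return OssUploadUtil.ossCheck(param);
    }

    /**
     * Called once per chunk (multipart form POST): uploads the chunk and, when the
     * last chunk arrives, merges all parts into the final object in OSS.
     */
    @PostMapping("/upload")
    public Map<String, Object> upload(UploadChunkFileParam param) {
        return OssUploadUtil.uploadChunk(param);
    }
}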
Summary
These are my notes on OSS chunked upload. This concludes the article on chunked and resumable file upload to Alibaba Cloud OSS with Spring Boot.