Mirror of https://github.com/halo-dev/plugin-s3.git
Synced 2025-10-15 06:39:08 +00:00

Compare commits

6 commits: c9f13d4b5f, 72af0fcdac, 21b752dd25, b5c2c50654, 1158ea7ae8, 11087a9915

README.md (66 lines changed)
@@ -2,6 +2,64 @@

Provides object storage policies using the S3 protocol for Halo 2.0, supporting Alibaba Cloud, Tencent Cloud, Qiniu Cloud, and other object storage providers compatible with the S3 protocol.
## Usage

1. Download the latest JAR file from [Releases](https://github.com/halo-sigs/plugin-s3/releases).
2. Upload the JAR file in the plugin management page of the Halo console to install it.
3. Open attachment management in the console.
4. Click the storage policy button in the upper-right corner; in the upper-right corner of the storage policy dialog you can create a new S3 Object Storage policy.
5. Once created, you can select the new S3 Object Storage policy when uploading.
## Configuration Guide

### Endpoint Access Style

Choose the access style according to the compatibility table below. If your provider is not listed, check its S3 compatibility documentation or test it yourself.

> Style explanation:<br/>
> With the Endpoint set to `s3.example.com`,<br/>
> Path Style: the SDK requests `s3.example.com/<bucket-name>/<object-key>`<br/>
> Virtual Hosted Style: the SDK requests `<bucket-name>.s3.example.com/<object-key>`
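In SDK terms this choice is a single flag on the service configuration of the AWS SDK v2 client that this plugin builds. A minimal sketch, with an illustrative class name that is not part of the plugin:

```java
import software.amazon.awssdk.services.s3.S3Configuration;

class AccessStyleSketch {
    // Path Style           -> s3.example.com/<bucket-name>/<object-key>
    // Virtual Hosted Style -> <bucket-name>.s3.example.com/<object-key>
    static S3Configuration serviceConfiguration(boolean pathStyle) {
        return S3Configuration.builder()
            .pathStyleAccessEnabled(pathStyle) // true = Path Style, false = Virtual Hosted Style
            .chunkedEncodingEnabled(false)     // the handler in this change also disables chunked encoding
            .build();
    }
}
```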
### Endpoint

Always fill in the Endpoint **without** the bucket-name here; the SDK handles the access style automatically.

To find the S3 Endpoint for your provider, search its documentation for keywords such as s3, Endpoint, or access domain; it is usually the same as the provider's own Endpoint.

> For example, Baidu Cloud offers both `s3.bj.bcebos.com` and `<bucket-name>.s3.bj.bcebos.com` as Endpoints; fill in `s3.bj.bcebos.com`.
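This is also why the public link of an uploaded attachment can be derived from the bucket-less Endpoint. The sketch below mirrors the external-link logic in this change set; the class and parameter names are hypothetical:

```java
class ExternalLinkSketch {
    // A custom domain wins if configured; otherwise the bucket is prefixed
    // onto the bucket-less Endpoint (virtual-hosted form).
    static String externalLink(String protocol, String domain, String bucket,
                               String endpoint, String objectKey) {
        if (domain == null || domain.isBlank()) {
            return protocol + "://" + bucket + "." + endpoint + "/" + objectKey;
        }
        return protocol + "://" + domain + "/" + objectKey;
    }
}
```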
### Access Key & Access Secret

These are the same as the Access Key and Access Secret of the provider's own API; see the provider's documentation for details.

### Bucket Name

Must match the bucket name shown in the provider's console.
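In the AWS SDK v2 that this change migrates to, the key pair becomes an `AwsBasicCredentials` and the bucket name is passed on every request rather than being baked into the client. A hedged sketch with illustrative names:

```java
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;

class CredentialsAndBucketSketch {
    // The Access Key / Access Secret pair from the provider's console.
    static AwsBasicCredentials credentials(String accessKey, String accessSecret) {
        return AwsBasicCredentials.create(accessKey, accessSecret);
    }

    // The bucket name is supplied per request rather than on the client.
    static HeadObjectRequest headRequest(String bucket, String objectKey) {
        return HeadObjectRequest.builder()
            .bucket(bucket) // bucket name exactly as shown in the provider's console
            .key(objectKey)
            .build();
    }
}
```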
### Region

This can usually be left blank.

> If the other settings are confirmed correct but access still fails, look up the English Region identifier in the provider's documentation and fill it in, for example `cn-east-1`.
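Putting the settings above together, the handler introduced in this change builds its asynchronous client from exactly these fields (see `buildS3AsyncClient` further down in this diff). The following is a condensed sketch with placeholder values:

```java
import java.net.URI;

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.S3Configuration;

class ClientSketch {
    static S3AsyncClient build() {
        return S3AsyncClient.builder()
            .region(Region.of("cn-east-1"))                          // Region, e.g. cn-east-1
            .endpointOverride(URI.create("https://s3.example.com"))  // protocol + Endpoint, without bucket name
            .credentialsProvider(() -> AwsBasicCredentials.create(
                "<access-key>", "<access-secret>"))                  // Access Key & Access Secret
            .serviceConfiguration(S3Configuration.builder()
                .chunkedEncodingEnabled(false)
                .pathStyleAccessEnabled(false)                       // Endpoint access style
                .build())
            .build();
    }
}
```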
## Compatibility of Selected Object Storage Providers

|Provider|Documentation|Compatible Access Style|Compatibility|
| ----- | ---- | ----- | ----- |
|Alibaba Cloud|https://help.aliyun.com/document_detail/410748.html|Virtual Hosted Style|✅|
|Tencent Cloud|[https://cloud.tencent.com/document/product/436/41284](https://cloud.tencent.com/document/product/436/41284)|Virtual Hosted Style / <br>Path Style|✅|
|Qiniu Cloud|https://developer.qiniu.com/kodo/4088/s3-access-domainname|Virtual Hosted Style / <br>Path Style|✅|
|Baidu Cloud|https://cloud.baidu.com/doc/BOS/s/Fjwvyq9xo|Virtual Hosted Style / <br>Path Style|✅|
|JD Cloud|https://docs.jdcloud.com/cn/object-storage-service/api/regions-and-endpoints|Virtual Hosted Style|✅|
|Kingsoft Cloud|https://docs.ksyun.com/documents/6761|Virtual Hosted Style|✅|
|QingCloud|https://docsv3.qingcloud.com/storage/object-storage/s3/intro/|Virtual Hosted Style / <br>Path Style|✅|
|NetEase Shufan|[https://sf.163.com/help/documents/89796157866430464](https://sf.163.com/help/documents/89796157866430464)|Virtual Hosted Style|✅|
|Self-hosted MinIO|\-|Path Style|✅|
|Huawei Cloud|The documentation does not state S3 compatibility and support tickets do not guarantee it, but it works in practice|Virtual Hosted Style|❓|
|UCloud|Only supports 8 MB parts, which this plugin does not support yet<br>[https://docs.ucloud.cn/ufile/s3/s3\_introduction](https://docs.ucloud.cn/ufile/s3/s3_introduction)|\-|❎|
|Upyun|Does not support the S3 protocol yet|\-|❎|
## Development Environment

```bash
@@ -31,11 +89,3 @@ plugin:
```

After the build completes, the plugin JAR is available in the `build/libs` directory; upload it via plugin management in the Halo console to install it.
## Usage

1. Download the latest JAR file from [Releases](https://github.com/halo-sigs/plugin-s3/releases).
2. Upload the JAR file in the plugin management page of the Halo console to install it.
3. Open attachment management in the console.
4. Click the storage policy button in the upper-right corner; in the upper-right corner of the storage policy dialog you can create a new S3 Object Storage policy.
5. Once created, you can select the new S3 Object Storage policy when uploading.
@@ -27,8 +27,8 @@ dependencies {

    compileOnly files("lib/halo-2.0.0-SNAPSHOT-plain.jar")

    implementation platform('com.amazonaws:aws-java-sdk-bom:1.12.360')
    implementation 'com.amazonaws:aws-java-sdk-s3'
    implementation platform('software.amazon.awssdk:bom:2.19.8')
    implementation 'software.amazon.awssdk:s3'
    implementation "javax.xml.bind:jaxb-api:2.3.1"
    implementation "javax.activation:activation:1.1.1"
    implementation "org.glassfish.jaxb:jaxb-runtime:2.3.3"
@@ -1 +1 @@
version=1.2.0-SNAPSHOT
version=1.3.0-SNAPSHOT
@@ -1,25 +1,16 @@
package run.halo.s3os;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.SdkClientException;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.pf4j.Extension;
import org.springframework.core.io.buffer.DataBufferUtils;
import org.springframework.core.io.buffer.DataBuffer;
import org.springframework.http.MediaType;
import org.springframework.http.MediaTypeFactory;
import org.springframework.web.server.ServerErrorException;
import org.springframework.web.server.ServerWebInputException;
import org.springframework.web.util.UriUtils;
import reactor.core.Exceptions;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
import run.halo.app.core.extension.attachment.Attachment;
import run.halo.app.core.extension.attachment.Attachment.AttachmentSpec;
import run.halo.app.core.extension.attachment.Constant;
@@ -28,76 +19,67 @@ import run.halo.app.core.extension.attachment.endpoint.AttachmentHandler;
import run.halo.app.extension.ConfigMap;
import run.halo.app.extension.Metadata;
import run.halo.app.infra.utils.JsonUtils;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.core.SdkResponse;
import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.http.SdkHttpResponse;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.S3Configuration;
import software.amazon.awssdk.services.s3.model.*;

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.Supplier;
import java.util.concurrent.ConcurrentHashMap;

@Slf4j
@Extension
public class S3OsAttachmentHandler implements AttachmentHandler {

    private static final String OBJECT_KEY = "s3os.plugin.halo.run/object-key";
    private static final int MULTIPART_MIN_PART_SIZE = 5 * 1024 * 1024;
    private final Map<String, Object> uploadingFile = new ConcurrentHashMap<>();

    @Override
    public Mono<Attachment> upload(UploadContext uploadContext) {
        return Mono.just(uploadContext).filter(context -> this.shouldHandle(context.policy()))
            .flatMap(context -> {
                final var properties = getProperties(context.configMap());
                return upload(context, properties).map(
                    objectDetail -> this.buildAttachment(context, properties, objectDetail));
            });
            .flatMap(context -> {
                final var properties = getProperties(context.configMap());
                return upload(context, properties).map(
                    objectDetail -> this.buildAttachment(context, properties, objectDetail));
            });
    }

    @Override
    public Mono<Attachment> delete(DeleteContext deleteContext) {
        return Mono.just(deleteContext).filter(context -> this.shouldHandle(context.policy()))
            .doOnNext(context -> {
                var annotations = context.attachment().getMetadata().getAnnotations();
                if (annotations == null || !annotations.containsKey(OBJECT_KEY)) {
                    return;
                }
                var objectName = annotations.get(OBJECT_KEY);
                var properties = getProperties(deleteContext.configMap());
                var client = buildOsClient(properties);
                ossExecute(() -> {
                    log.info("{}/{} is being deleted from S3ObjectStorage", properties.getBucket(),
                        objectName);
                    client.deleteObject(properties.getBucket(), objectName);
                    log.info("{}/{} was deleted successfully from S3ObjectStorage", properties.getBucket(),
                        objectName);
                    return null;
                }, client::shutdown);
            }).map(DeleteContext::attachment);
    }
            .flatMap(context -> {
                var annotations = context.attachment().getMetadata().getAnnotations();
                if (annotations == null || !annotations.containsKey(OBJECT_KEY)) {
                    return Mono.just(context);
                }
                var objectName = annotations.get(OBJECT_KEY);
                var properties = getProperties(deleteContext.configMap());
                var client = buildS3AsyncClient(properties);
                return Mono.fromFuture(client.deleteObject(DeleteObjectRequest.builder()
                        .bucket(properties.getBucket())
                        .key(objectName)
                        .build()))
                    .doFinally(signalType -> client.close())
                    .map(response -> {
                        checkResult(response, "delete object");
                        log.info("Delete object {} from bucket {} successfully",
                            objectName, properties.getBucket());
                        return context;
                    });

    <T> T ossExecute(Supplier<T> runnable, Runnable finalizer) {
        try {
            return runnable.get();
        } catch (AmazonServiceException ase) {
            log.error("""
                Caught an AmazonServiceException, which means your request made it to S3ObjectStorage, but was
                rejected with an error response for some reason.
                Error message: {}
                """, ase.getMessage());
            throw Exceptions.propagate(ase);
        } catch (SdkClientException sce) {
            log.error("""
                Caught an SdkClientException, which means the client encountered a serious internal
                problem while trying to communicate with S3ObjectStorage, such as not being able to access
                the network.
                Error message: {}
                """, sce.getMessage());
            throw Exceptions.propagate(sce);
        } finally {
            if (finalizer != null) {
                finalizer.run();
            }
        }
    })
            .map(DeleteContext::attachment);
    }

    S3OsProperties getProperties(ConfigMap configMap) {
@@ -110,100 +92,212 @@ public class S3OsAttachmentHandler implements AttachmentHandler {
        String externalLink;
        if (StringUtils.isBlank(properties.getDomain())) {
            var host = properties.getBucket() + "." + properties.getEndpoint();
            externalLink = properties.getProtocol() + "://" + host + "/" + objectDetail.objectName();
            externalLink = properties.getProtocol() + "://" + host + "/" + objectDetail.objectKey();
        } else {
            externalLink = properties.getProtocol() + "://" + properties.getDomain() + "/" + objectDetail.objectName();
            externalLink = properties.getProtocol() + "://" + properties.getDomain() + "/" + objectDetail.objectKey();
        }

        var metadata = new Metadata();
        metadata.setName(UUID.randomUUID().toString());
        metadata.setAnnotations(
            Map.of(OBJECT_KEY, objectDetail.objectName(), Constant.EXTERNAL_LINK_ANNO_KEY,
                UriUtils.encodePath(externalLink, StandardCharsets.UTF_8)));
            Map.of(OBJECT_KEY, objectDetail.objectKey(), Constant.EXTERNAL_LINK_ANNO_KEY,
                UriUtils.encodePath(externalLink, StandardCharsets.UTF_8)));

        var objectMetadata = objectDetail.objectMetadata();
        var spec = new AttachmentSpec();
        spec.setSize(objectMetadata.getContentLength());
        spec.setSize(objectMetadata.contentLength());
        spec.setDisplayName(uploadContext.file().filename());
        spec.setMediaType(objectMetadata.getContentType());
        spec.setMediaType(objectMetadata.contentType());

        var attachment = new Attachment();
        attachment.setMetadata(metadata);
        attachment.setSpec(spec);
        log.info("Upload object {} to bucket {} successfully", objectDetail.objectKey(), properties.getBucket());
        return attachment;
    }

    AmazonS3 buildOsClient(S3OsProperties properties) {
        return AmazonS3ClientBuilder.standard()
            .withCredentials(new AWSStaticCredentialsProvider(
                new BasicAWSCredentials(properties.getAccessKey(), properties.getAccessSecret())))
            .withEndpointConfiguration(
                new AwsClientBuilder.EndpointConfiguration(
                    properties.getEndpointProtocol() + "://" + properties.getEndpoint(),
                    properties.getRegion()))
            .withPathStyleAccessEnabled(false)
            .withChunkedEncodingDisabled(true)
    S3AsyncClient buildS3AsyncClient(S3OsProperties properties) {
        return S3AsyncClient.builder()
            .region(Region.of(properties.getRegion()))
            .endpointOverride(URI.create(properties.getEndpointProtocol() + "://" + properties.getEndpoint()))
            .credentialsProvider(() -> AwsBasicCredentials.create(properties.getAccessKey(),
                properties.getAccessSecret()))
            .serviceConfiguration(S3Configuration.builder()
                .chunkedEncodingEnabled(false)
                .pathStyleAccessEnabled(properties.getEnablePathStyleAccess())
                .build())
            .build();
    }

    Mono<ObjectDetail> upload(UploadContext uploadContext, S3OsProperties properties) {
        return Mono.fromCallable(() -> {
            var client = buildOsClient(properties);
            // build object name
            var originFilename = uploadContext.file().filename();
            var objectName = properties.getObjectName(originFilename);
        var originFilename = uploadContext.file().filename();
        var objectKey = properties.getObjectName(originFilename);
        var contentType = MediaTypeFactory.getMediaType(originFilename)
            .orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
        var uploadingMapKey = properties.getBucket() + "/" + objectKey;
        // deduplication of uploading files
        if (uploadingFile.put(uploadingMapKey, uploadingMapKey) != null) {
            return Mono.error(new ServerWebInputException("文件 " + originFilename + " 已存在,建议更名后重试。"));
        }

            var pos = new PipedOutputStream();
            var pis = new PipedInputStream(pos);
            DataBufferUtils.write(uploadContext.file().content(), pos)
                .subscribeOn(Schedulers.boundedElastic()).doOnComplete(() -> {
                    try {
                        pos.close();
                    } catch (IOException ioe) {
                        // close the stream quietly
                        log.warn("Failed to close output stream", ioe);
        var s3client = buildS3AsyncClient(properties);

        var uploadState = new UploadState(properties.getBucket(), objectKey);

        return Mono
            // check whether file exists
            .fromFuture(s3client.headObject(HeadObjectRequest.builder()
                .bucket(properties.getBucket())
                .key(objectKey)
                .build()))
            .onErrorResume(NoSuchKeyException.class, e -> {
                var builder = HeadObjectResponse.builder();
                builder.sdkHttpResponse(SdkHttpResponse.builder().statusCode(404).build());
                return Mono.just(builder.build());
            })
            .flatMap(response -> {
                if (response != null && response.sdkHttpResponse() != null && response.sdkHttpResponse().isSuccessful()) {
                    return Mono.error(new ServerWebInputException("文件 " + originFilename + " 已存在,建议更名后重试。"));
                } else {
                    return Mono.just(uploadState);
                }
            }).subscribe(DataBufferUtils.releaseConsumer());

            final var bucket = properties.getBucket();
            var metadata = new ObjectMetadata();
            var contentType = MediaTypeFactory.getMediaType(originFilename)
                .orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
            metadata.setContentType(contentType);
            var request = new PutObjectRequest(bucket, objectName, pis, metadata);
            log.info("Uploading {} into S3ObjectStorage {}/{}/{}", originFilename,
                properties.getEndpoint(), bucket, objectName);

            return ossExecute(() -> {
                var result = client.putObject(request);
                if (log.isDebugEnabled()) {
                    debug(result);
                }
                var objectMetadata = client.getObjectMetadata(bucket, objectName);
                return new ObjectDetail(bucket, objectName, objectMetadata);
            }, client::shutdown);
        }).subscribeOn(Schedulers.boundedElastic());
            })
            // init multipart upload
            .flatMap(state -> Mono.fromFuture(s3client.createMultipartUpload(
                CreateMultipartUploadRequest.builder()
                    .bucket(properties.getBucket())
                    .contentType(contentType)
                    .key(objectKey)
                    .build())))
            .flatMapMany((response) -> {
                checkResult(response, "createMultipartUpload");
                uploadState.setUploadId(response.uploadId());
                return uploadContext.file().content();
            })
            // buffer to part
            .windowUntil((buffer) -> {
                uploadState.buffered += buffer.readableByteCount();
                if (uploadState.buffered >= MULTIPART_MIN_PART_SIZE) {
                    uploadState.buffered = 0;
                    return true;
                } else {
                    return false;
                }
            })
            // upload part
            .concatMap((window) -> window.collectList().flatMap((bufferList) -> {
                var buffer = S3OsAttachmentHandler.concatBuffers(bufferList);
                return uploadPart(uploadState, buffer, s3client);
            }))
            .reduce(uploadState, (state, completedPart) -> {
                state.completedParts.put(completedPart.partNumber(), completedPart);
                return state;
            })
            // complete multipart upload
            .flatMap((state) -> Mono
                .fromFuture(s3client.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                    .bucket(state.bucket)
                    .uploadId(state.uploadId)
                    .multipartUpload(CompletedMultipartUpload.builder()
                        .parts(state.completedParts.values())
                        .build())
                    .key(state.objectKey)
                    .build())
                ))
            // get object metadata
            .flatMap((response) -> {
                checkResult(response, "completeUpload");
                return Mono.fromFuture(s3client.headObject(
                    HeadObjectRequest.builder()
                        .bucket(properties.getBucket())
                        .key(objectKey)
                        .build()
                ));
            })
            // build object detail
            .map((response) -> {
                checkResult(response, "getMetadata");
                return new ObjectDetail(properties.getBucket(), objectKey, response);
            })
            // close client
            .doFinally((signalType) -> {
                uploadingFile.remove(uploadingMapKey);
                s3client.close();
            });
    }

    void debug(PutObjectResult result) {
        log.debug("""
            PutObjectResult: VersionId: {}, ETag: {}, ContentMd5: {}, ExpirationTime: {}, ExpirationTimeRuleId: {},
            response RawMetadata: {}, UserMetadata: {}
            """, result.getVersionId(), result.getETag(), result.getContentMd5(), result.getExpirationTime(),
            result.getExpirationTimeRuleId(), result.getMetadata().getRawMetadata(),
            result.getMetadata().getUserMetadata());

    private Mono<CompletedPart> uploadPart(UploadState uploadState, ByteBuffer buffer, S3AsyncClient s3client) {
        final int partNumber = ++uploadState.partCounter;
        return Mono
            .fromFuture(s3client.uploadPart(UploadPartRequest.builder()
                    .bucket(uploadState.bucket)
                    .key(uploadState.objectKey)
                    .partNumber(partNumber)
                    .uploadId(uploadState.uploadId)
                    .contentLength((long) buffer.capacity())
                    .build(),
                AsyncRequestBody.fromPublisher(Mono.just(buffer))))
            .map((uploadPartResult) -> {
                checkResult(uploadPartResult, "uploadPart");
                return CompletedPart.builder()
                    .eTag(uploadPartResult.eTag())
                    .partNumber(partNumber)
                    .build();
            });
    }

    private static void checkResult(SdkResponse result, String operation) {
        log.info("operation: {}, result: {}", operation, result);
        if (result.sdkHttpResponse() == null || !result.sdkHttpResponse().isSuccessful()) {
            log.error("Failed to upload object, response: {}", result.sdkHttpResponse());
            throw new ServerErrorException("对象存储响应错误,无法将对象上传到S3对象存储", null);
        }
    }

    private static ByteBuffer concatBuffers(List<DataBuffer> buffers) {
        int partSize = 0;
        for (DataBuffer b : buffers) {
            partSize += b.readableByteCount();
        }

        ByteBuffer partData = ByteBuffer.allocate(partSize);
        buffers.forEach((buffer) -> {
            partData.put(buffer.toByteBuffer());
        });

        // Reset read pointer to first byte
        partData.rewind();

        return partData;
    }

    boolean shouldHandle(Policy policy) {
        if (policy == null || policy.getSpec() == null ||
            policy.getSpec().getTemplateName() == null) {
            policy.getSpec().getTemplateName() == null) {
            return false;
        }
        String templateName = policy.getSpec().getTemplateName();
        return "s3os".equals(templateName);
    }

    record ObjectDetail(String bucketName, String objectName, ObjectMetadata objectMetadata) {
    record ObjectDetail(String bucketName, String objectKey, HeadObjectResponse objectMetadata) {
    }

    @Data
    static class UploadState {
        String bucket;
        String objectKey;
        String uploadId;
        int partCounter;
        Map<Integer, CompletedPart> completedParts = new HashMap<>();
        int buffered = 0;

        UploadState(String bucket, String objectKey) {
            this.bucket = bucket;
            this.objectKey = objectKey;
        }
    }

}
@@ -10,6 +10,8 @@ class S3OsProperties {

    private Protocol endpointProtocol = Protocol.https;

    private Boolean enablePathStyleAccess = false;

    private String endpoint;

    private String accessKey;
@@ -19,7 +19,7 @@ spec:
      label: Bucket 桶名称
      validation: required
    - $formkit: select
      name: endpoint_protocol
      name: endpointProtocol
      label: Endpoint 访问协议
      options:
        - label: HTTPS
@@ -27,9 +27,20 @@ spec:
        - label: HTTP
          value: http
      validation: required
    - $formkit: select
      name: enablePathStyleAccess
      label: Endpoint 访问风格
      options:
        - label: Virtual Hosted Style
          value: false
        - label: Path Style
          value: true
      value: false
      validation: required
    - $formkit: text
      name: endpoint
      label: EndPoint
      placeholder: 请填写不带bucket-name的Endpoint
      validation: required
      help: 协议头请在上方设置,此处无需以"http://"或"https://"开头,系统会自动拼接
    - $formkit: password
@@ -4,7 +4,7 @@ metadata:
  name: PluginS3ObjectStorage
spec:
  enabled: true
  version: 1.2.0
  version: 1.3.0
  requires: ">=2.0.0"
  author:
    name: longjuan
@@ -16,4 +16,4 @@ spec:
  displayName: "对象存储(Amazon S3 协议)"
  description: "提供兼容 Amazon S3 协议的对象存储策略,兼容阿里云、腾讯云、七牛云等"
  license:
    - name: "MIT"
    - name: "GPL-3.0"