diff --git a/.github/dead_link_check_config.json b/.github/dead_link_check_config.json index f893ab4b37..b7e8c583df 100644 --- a/.github/dead_link_check_config.json +++ b/.github/dead_link_check_config.json @@ -12,6 +12,9 @@ { "pattern": "^https://console.cloud.tencent.com/cos/bucket" }, + { + "pattern": "^https://docs.buf.build/" + }, { "pattern": "^#" } diff --git a/.github/workflows/markdown-linter.yml b/.github/workflows/markdown-checker.yml similarity index 91% rename from .github/workflows/markdown-linter.yml rename to .github/workflows/markdown-checker.yml index fb78521208..8c34de1bc8 100644 --- a/.github/workflows/markdown-linter.yml +++ b/.github/workflows/markdown-checker.yml @@ -7,7 +7,7 @@ on: jobs: check: - name: "🍀 Markdown Lint" + name: "🍀 Markdown Validation" runs-on: ubuntu-latest steps: - name: Check out code diff --git a/.github/workflows/proto-checker.yml b/.github/workflows/proto-checker.yml new file mode 100644 index 0000000000..4eecc477d4 --- /dev/null +++ b/.github/workflows/proto-checker.yml @@ -0,0 +1,25 @@ +name: Layotto Env Pipeline 🌊 + +on: + pull_request: + branches: + - main + +jobs: + check: + name: "🍀 Proto Validation" + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: buf-setup + uses: bufbuild/buf-setup-action@v1 + with: + version: '1.6.0' + + - name: Install buf-lint + uses: bufbuild/buf-lint-action@v1 + + - name: Check comments in proto files + run: buf lint diff --git a/Makefile b/Makefile index 8f2ec095c3..d8b980204d 100644 --- a/Makefile +++ b/Makefile @@ -75,6 +75,7 @@ include make/golang.mk include make/image.mk include make/wasm.mk include make/ci.mk +include make/deploy.mk include make/proto.mk # ============================================================================== @@ -165,6 +166,34 @@ wasm.image: wasm.image.push: @$(MAKE) go.wasm.image.push +# ============================================================================== +## deploy: Deploy Layotto to Kubernetes +# 
============================================================================== +.PHONY: deploy +deploy: + @$(MAKE) deploy.k8s + +# ============================================================================== +## deploy.standalone: Deploy Layotto to Kubernetes in Standalone Mode +# ============================================================================== +.PHONY: deploy.standalone +deploy.standalone: + @$(MAKE) deploy.k8s.standalone + +# ============================================================================== +## undeploy: Remove Layotto in Kubernetes +# ============================================================================== +.PHONY: undeploy +undeploy: + @$(MAKE) undeploy.k8s + +# ============================================================================== +## undeploy.standalone: Remove Layotto in Kubernetes in Standalone Mode +# ============================================================================== +.PHONY: undeploy.standalone +undeploy.standalone: + @$(MAKE) undeploy.k8s.standalone + # ============================================================================== ## check: Run all go checks of code sources. # ============================================================================== @@ -254,6 +283,13 @@ clean: proto.doc: @$(MAKE) proto.gen.doc +# ============================================================================== +## proto: Generate code and documentation based on the proto files. 
+# ============================================================================== +.PHONY: proto +proto: + @$(MAKE) proto.gen.all + # ============================================================================== ## proto.init: Install protoc-gen-go and protoc-gen-go-grpc # ============================================================================== @@ -291,6 +327,9 @@ ARGS: This option is available when using: make build.multiarch/image.multiarch/push.multiarch Example: make image.multiarch IMAGES="layotto" PLATFORMS="linux_amd64 linux_arm64" Supported Platforms: linux_amd64 linux_arm64 darwin_amd64 darwin_arm64 + NAMESPACE The namepace to deploy. Default is `default`. + This option is available when using: make deploy/deploy.standalone/undeploy/undeploy.standalone + Example: make deploy NAMESPACE="layotto" endef export USAGE_OPTIONS diff --git a/README.md b/README.md index 3151d8d3c5..fcea8768c3 100644 --- a/README.md +++ b/README.md @@ -126,7 +126,7 @@ Layotto enriches the CNCF CLOUD N | Platform | Link | | :----------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 💬 [DingTalk](https://www.dingtalk.com/en) (preferred) | Search the group number: 31912621 or scan the QR code below
| +| 💬 [DingTalk](https://www.dingtalk.com/en) (preferred) | Search the group number: 31912621 or scan the QR code below
| [comment]: <> (| 💬 [Wechat](https://www.wechat.com/en/) | Scan the QR code below and she will invite you into the wechat group
) diff --git a/buf.yaml b/buf.yaml new file mode 100644 index 0000000000..de14b310c9 --- /dev/null +++ b/buf.yaml @@ -0,0 +1,11 @@ +version: v1 +lint: + use: + - MINIMAL + - COMMENT_ENUM + - COMMENT_ENUM_VALUE + - COMMENT_FIELD + - COMMENT_MESSAGE + - COMMENT_ONEOF + - COMMENT_RPC + - COMMENT_SERVICE diff --git a/cmd/layotto/main.go b/cmd/layotto/main.go index d91b0e199c..cf320b3cd1 100644 --- a/cmd/layotto/main.go +++ b/cmd/layotto/main.go @@ -22,8 +22,16 @@ import ( "strconv" "time" + "mosn.io/layotto/components/oss" + + aws_oss "mosn.io/layotto/components/oss/aws" + + aliyun_oss "mosn.io/layotto/components/oss/aliyun" + "mosn.io/mosn/pkg/istio" + aliyun_file "mosn.io/layotto/components/file/aliyun" + "github.com/dapr/components-contrib/secretstores" "github.com/dapr/components-contrib/secretstores/aws/parameterstore" "github.com/dapr/components-contrib/secretstores/aws/secretmanager" @@ -34,14 +42,15 @@ import ( secretstore_env "github.com/dapr/components-contrib/secretstores/local/env" secretstore_file "github.com/dapr/components-contrib/secretstores/local/file" + "mosn.io/layotto/components/file/aws" + "mosn.io/layotto/components/file/minio" + "mosn.io/layotto/components/file/qiniu" + "mosn.io/layotto/components/file/tencentcloud" + "mosn.io/layotto/pkg/grpc/default_api" secretstores_loader "mosn.io/layotto/pkg/runtime/secretstores" "mosn.io/layotto/components/file/local" - "mosn.io/layotto/components/file/s3/alicloud" - "mosn.io/layotto/components/file/s3/aws" - "mosn.io/layotto/components/file/s3/minio" - mock_state "mosn.io/layotto/pkg/mock/components/state" dbindings "github.com/dapr/components-contrib/bindings" @@ -126,10 +135,6 @@ import ( sequencer_redis "mosn.io/layotto/components/sequencer/redis" sequencer_zookeeper "mosn.io/layotto/components/sequencer/zookeeper" - // File - "mosn.io/layotto/components/file/s3/qiniu" - "mosn.io/layotto/components/file/s3/tencentcloud" - // Actuator _ "mosn.io/layotto/pkg/actuator" 
"mosn.io/layotto/pkg/actuator/health" @@ -200,6 +205,8 @@ import ( _ "mosn.io/mosn/pkg/upstream/servicediscovery/dubbod" _ "mosn.io/layotto/diagnostics/exporter_iml" + + s3ext "mosn.io/layotto/pkg/grpc/extension/s3" ) // loggerForDaprComp is constructed for reusing dapr's components. @@ -251,6 +258,7 @@ func NewRuntimeGrpcServer(data json.RawMessage, opts ...grpc.ServerOption) (mgrp // register your gRPC API here runtime.WithGrpcAPI( default_api.NewGrpcAPI, + s3ext.NewS3Server, ), // Hello runtime.WithHelloFactory( @@ -269,14 +277,17 @@ func NewRuntimeGrpcServer(data json.RawMessage, opts ...grpc.ServerOption) (mgrp // File runtime.WithFileFactory( - file.NewFileFactory("aliOSS", alicloud.NewAliCloudOSS), - file.NewFileFactory("minioOSS", minio.NewMinioOss), - file.NewFileFactory("awsOSS", aws.NewAwsOss), - file.NewFileFactory("tencentCloudOSS", tencentcloud.NewTencentCloudOSS), + file.NewFileFactory("aliyun.oss", aliyun_file.NewAliyunFile), + file.NewFileFactory("minio", minio.NewMinioOss), + file.NewFileFactory("aws.s3", aws.NewAwsFile), + file.NewFileFactory("tencent.oss", tencentcloud.NewTencentCloudOSS), file.NewFileFactory("local", local.NewLocalStore), - file.NewFileFactory("qiniuOSS", qiniu.NewQiniuOSS), + file.NewFileFactory("qiniu.oss", qiniu.NewQiniuOSS), + ), + runtime.WithOssFactory( + oss.NewFactory("aws.oss", aws_oss.NewAwsOss), + oss.NewFactory("aliyun.oss", aliyun_oss.NewAliyunOss), ), - // PubSub runtime.WithPubSubFactory( pubsub.NewFactory("redis", func() dapr_comp_pubsub.PubSub { diff --git a/cmd/layotto_multiple_api/main.go b/cmd/layotto_multiple_api/main.go index 1f692a244b..20d02dfa9a 100644 --- a/cmd/layotto_multiple_api/main.go +++ b/cmd/layotto_multiple_api/main.go @@ -22,9 +22,14 @@ import ( "strconv" "time" - "mosn.io/layotto/cmd/layotto_multiple_api/helloworld/component" - "mosn.io/layotto/components/custom" - "mosn.io/layotto/pkg/grpc/dapr" + "mosn.io/layotto/components/oss" + + aws_oss "mosn.io/layotto/components/oss/aws" + + 
aliyun_oss "mosn.io/layotto/components/oss/aliyun" + + aliyun_file "mosn.io/layotto/components/file/aliyun" + "mosn.io/layotto/components/file/local" "mosn.io/mosn/pkg/istio" @@ -47,15 +52,21 @@ import ( _ "mosn.io/layotto/pkg/wasm/uninstall" _ "mosn.io/layotto/pkg/wasm/update" - "mosn.io/layotto/components/file/local" - "mosn.io/layotto/components/file/s3/alicloud" - "mosn.io/layotto/components/file/s3/aws" - "mosn.io/layotto/components/file/s3/minio" + _ "mosn.io/mosn/pkg/filter/stream/grpcmetric" dbindings "github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings/http" "mosn.io/pkg/log" + "mosn.io/layotto/cmd/layotto_multiple_api/helloworld/component" + "mosn.io/layotto/components/custom" + aws_file "mosn.io/layotto/components/file/aws" + "mosn.io/layotto/components/file/minio" + "mosn.io/layotto/components/file/qiniu" + "mosn.io/layotto/components/file/tencentcloud" + "mosn.io/layotto/pkg/grpc/dapr" + s3ext "mosn.io/layotto/pkg/grpc/extension/s3" + "mosn.io/layotto/components/configstores/etcdv3" "mosn.io/layotto/components/file" "mosn.io/layotto/components/sequencer" @@ -134,10 +145,6 @@ import ( sequencer_redis "mosn.io/layotto/components/sequencer/redis" sequencer_zookeeper "mosn.io/layotto/components/sequencer/zookeeper" - // File - "mosn.io/layotto/components/file/s3/qiniu" - "mosn.io/layotto/components/file/s3/tencentcloud" - // Actuator _ "mosn.io/layotto/pkg/actuator" "mosn.io/layotto/pkg/actuator/health" @@ -151,7 +158,6 @@ import ( mgrpc "mosn.io/mosn/pkg/filter/network/grpc" _ "mosn.io/mosn/pkg/filter/network/proxy" _ "mosn.io/mosn/pkg/filter/stream/flowcontrol" - _ "mosn.io/mosn/pkg/filter/stream/grpcmetric" _ "mosn.io/mosn/pkg/metrics/sink" _ "mosn.io/mosn/pkg/metrics/sink/prometheus" _ "mosn.io/mosn/pkg/network" @@ -262,6 +268,7 @@ func NewRuntimeGrpcServer(data json.RawMessage, opts ...grpc.ServerOption) (mgrp // Currently it only support Dapr's InvokeService,secret API,state API and InvokeBinding API. 
// Note: this feature is still in Alpha state and we don't recommend that you use it in your production environment. dapr.NewDaprAPI_Alpha, + s3ext.NewS3Server, ), // Hello runtime.WithHelloFactory( @@ -280,12 +287,18 @@ func NewRuntimeGrpcServer(data json.RawMessage, opts ...grpc.ServerOption) (mgrp // File runtime.WithFileFactory( - file.NewFileFactory("aliOSS", alicloud.NewAliCloudOSS), - file.NewFileFactory("minioOSS", minio.NewMinioOss), - file.NewFileFactory("awsOSS", aws.NewAwsOss), - file.NewFileFactory("tencentCloudOSS", tencentcloud.NewTencentCloudOSS), + file.NewFileFactory("aliyun.oss", aliyun_file.NewAliyunFile), + file.NewFileFactory("minio", minio.NewMinioOss), + file.NewFileFactory("aws.s3", aws_file.NewAwsFile), + file.NewFileFactory("tencent.oss", tencentcloud.NewTencentCloudOSS), file.NewFileFactory("local", local.NewLocalStore), - file.NewFileFactory("qiniuOSS", qiniu.NewQiniuOSS), + file.NewFileFactory("qiniu.oss", qiniu.NewQiniuOSS), + ), + + //OSS + runtime.WithOssFactory( + oss.NewFactory("aws.oss", aws_oss.NewAwsOss), + oss.NewFactory("aliyun.oss", aliyun_oss.NewAliyunOss), ), // PubSub diff --git a/cmd/layotto_without_xds/main.go b/cmd/layotto_without_xds/main.go index f836efa81e..243e177fc4 100644 --- a/cmd/layotto_without_xds/main.go +++ b/cmd/layotto_without_xds/main.go @@ -22,6 +22,18 @@ import ( "strconv" "time" + "mosn.io/layotto/components/oss" + + aws_oss "mosn.io/layotto/components/oss/aws" + + aliyun_oss "mosn.io/layotto/components/oss/aliyun" + + "mosn.io/layotto/components/file/aliyun" + aws_file "mosn.io/layotto/components/file/aws" + "mosn.io/layotto/components/file/minio" + "mosn.io/layotto/components/file/qiniu" + "mosn.io/layotto/components/file/tencentcloud" + "github.com/dapr/components-contrib/secretstores" "github.com/dapr/components-contrib/secretstores/aws/parameterstore" "github.com/dapr/components-contrib/secretstores/aws/secretmanager" @@ -36,9 +48,6 @@ import ( secretstores_loader 
"mosn.io/layotto/pkg/runtime/secretstores" "mosn.io/layotto/components/file/local" - "mosn.io/layotto/components/file/s3/alicloud" - "mosn.io/layotto/components/file/s3/aws" - "mosn.io/layotto/components/file/s3/minio" mock_state "mosn.io/layotto/pkg/mock/components/state" @@ -123,10 +132,6 @@ import ( sequencer_redis "mosn.io/layotto/components/sequencer/redis" sequencer_zookeeper "mosn.io/layotto/components/sequencer/zookeeper" - // File - "mosn.io/layotto/components/file/s3/qiniu" - "mosn.io/layotto/components/file/s3/tencentcloud" - // Actuator _ "mosn.io/layotto/pkg/actuator" "mosn.io/layotto/pkg/actuator/health" @@ -254,12 +259,12 @@ func NewRuntimeGrpcServer(data json.RawMessage, opts ...grpc.ServerOption) (mgrp // File runtime.WithFileFactory( - file.NewFileFactory("aliOSS", alicloud.NewAliCloudOSS), - file.NewFileFactory("minioOSS", minio.NewMinioOss), - file.NewFileFactory("awsOSS", aws.NewAwsOss), - file.NewFileFactory("tencentCloudOSS", tencentcloud.NewTencentCloudOSS), + file.NewFileFactory("aliyun.oss", aliyun.NewAliyunFile), + file.NewFileFactory("minio", minio.NewMinioOss), + file.NewFileFactory("aws.s3", aws_file.NewAwsFile), + file.NewFileFactory("tencent.oss", tencentcloud.NewTencentCloudOSS), file.NewFileFactory("local", local.NewLocalStore), - file.NewFileFactory("qiniuOSS", qiniu.NewQiniuOSS), + file.NewFileFactory("qiniu.oss", qiniu.NewQiniuOSS), ), // PubSub @@ -394,6 +399,12 @@ func NewRuntimeGrpcServer(data json.RawMessage, opts ...grpc.ServerOption) (mgrp }), ), + //OSS + runtime.WithOssFactory( + oss.NewFactory("aws.oss", aws_oss.NewAwsOss), + oss.NewFactory("aliyun.oss", aliyun_oss.NewAliyunOss), + ), + // Sequencer runtime.WithSequencerFactory( runtime_sequencer.NewFactory("etcd", func() sequencer.Store { diff --git a/components/file/s3/alicloud/oss.go b/components/file/aliyun/file.go similarity index 65% rename from components/file/s3/alicloud/oss.go rename to components/file/aliyun/file.go index c2f538be44..c31151844f 100644 --- 
a/components/file/s3/alicloud/oss.go +++ b/components/file/aliyun/file.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package alicloud +package aliyun import ( "context" @@ -26,34 +26,27 @@ import ( "github.com/aliyun/aliyun-oss-go-sdk/oss" "mosn.io/layotto/components/file" - loss "mosn.io/layotto/components/file/s3" + "mosn.io/layotto/components/file/util" + "mosn.io/layotto/components/pkg/utils" ) const ( - endpointKey = "endpoint" storageTypeKey = "storageType" ) -// AliCloudOSS is a binding for an AliCloud OSS storage bucketKey -type AliCloudOSS struct { - metadata map[string]*OssMetadata - client map[string]*oss.Client +// AliyunFile is a binding for an AliCloud OSS storage bucketKey +type AliyunFile struct { + client *oss.Client } -type OssMetadata struct { - Endpoint string `json:"endpoint"` - AccessKeyID string `json:"accessKeyID"` - AccessKeySecret string `json:"accessKeySecret"` -} - -func NewAliCloudOSS() file.File { - oss := &AliCloudOSS{metadata: make(map[string]*OssMetadata), client: make(map[string]*oss.Client)} +func NewAliyunFile() file.File { + oss := &AliyunFile{} return oss } // Init does metadata parsing and connection creation -func (s *AliCloudOSS) Init(ctx context.Context, metadata *file.FileConfig) error { - m := make([]*OssMetadata, 0) +func (s *AliyunFile) Init(ctx context.Context, metadata *file.FileConfig) error { + m := make([]*utils.OssMetadata, 0) err := json.Unmarshal(metadata.Metadata, &m) if err != nil { return file.ErrInvalid @@ -63,17 +56,16 @@ func (s *AliCloudOSS) Init(ctx context.Context, metadata *file.FileConfig) error if !s.checkMetadata(v) { return file.ErrInvalid } - client, err := s.getClient(v) + client, err := oss.New(v.Endpoint, v.AccessKeyID, v.AccessKeySecret) if err != nil { return err } - s.metadata[v.Endpoint] = v - s.client[v.Endpoint] = client + s.client = client } return nil } -func (s *AliCloudOSS) Put(ctx context.Context, st *file.PutFileStu) error { +func (s *AliyunFile) Put(ctx context.Context, 
st *file.PutFileStu) error { storageType := st.Metadata[storageTypeKey] if storageType == "" { storageType = "Standard" @@ -82,7 +74,7 @@ func (s *AliCloudOSS) Put(ctx context.Context, st *file.PutFileStu) error { if err != nil { return fmt.Errorf("put file[%s] fail,err: %s", st.FileName, err.Error()) } - fileNameWithoutBucket, err := loss.GetFileName(st.FileName) + fileNameWithoutBucket, err := util.GetFileName(st.FileName) if err != nil { return fmt.Errorf("put file[%s] fail,err: %s", st.FileName, err.Error()) } @@ -94,12 +86,12 @@ func (s *AliCloudOSS) Put(ctx context.Context, st *file.PutFileStu) error { return nil } -func (s *AliCloudOSS) Get(ctx context.Context, st *file.GetFileStu) (io.ReadCloser, error) { +func (s *AliyunFile) Get(ctx context.Context, st *file.GetFileStu) (io.ReadCloser, error) { bucket, err := s.getBucket(st.FileName, st.Metadata) if err != nil { return nil, fmt.Errorf("get file[%s] fail, err: %s", st.FileName, err.Error()) } - fileNameWithoutBucket, err := loss.GetFileName(st.FileName) + fileNameWithoutBucket, err := util.GetFileName(st.FileName) if err != nil { return nil, fmt.Errorf("get file[%s] fail, err: %s", st.FileName, err.Error()) } @@ -107,13 +99,13 @@ func (s *AliCloudOSS) Get(ctx context.Context, st *file.GetFileStu) (io.ReadClos return bucket.GetObject(fileNameWithoutBucket) } -func (s *AliCloudOSS) List(ctx context.Context, request *file.ListRequest) (*file.ListResp, error) { +func (s *AliyunFile) List(ctx context.Context, request *file.ListRequest) (*file.ListResp, error) { bucket, err := s.getBucket(request.DirectoryName, request.Metadata) if err != nil { return nil, fmt.Errorf("list directory[%s] fail, err: %s", request.DirectoryName, err.Error()) } resp := &file.ListResp{} - prefix := loss.GetFilePrefixName(request.DirectoryName) + prefix := util.GetFilePrefixName(request.DirectoryName) object, err := bucket.ListObjectsV2(oss.StartAfter(request.Marker), oss.MaxKeys(int(request.PageSize)), oss.Prefix(prefix)) if err != 
nil { return nil, fmt.Errorf("list directory[%s] fail, err: %s", request.DirectoryName, err.Error()) @@ -134,12 +126,12 @@ func (s *AliCloudOSS) List(ctx context.Context, request *file.ListRequest) (*fil return resp, nil } -func (s *AliCloudOSS) Del(ctx context.Context, request *file.DelRequest) error { +func (s *AliyunFile) Del(ctx context.Context, request *file.DelRequest) error { bucket, err := s.getBucket(request.FileName, request.Metadata) if err != nil { return fmt.Errorf("del file[%s] fail, err: %s", request.FileName, err.Error()) } - fileNameWithoutBucket, err := loss.GetFileName(request.FileName) + fileNameWithoutBucket, err := util.GetFileName(request.FileName) if err != nil { return fmt.Errorf("del file[%s] fail, err: %s", request.FileName, err.Error()) } @@ -150,14 +142,14 @@ func (s *AliCloudOSS) Del(ctx context.Context, request *file.DelRequest) error { return nil } -func (s *AliCloudOSS) Stat(ctx context.Context, request *file.FileMetaRequest) (*file.FileMetaResp, error) { +func (s *AliyunFile) Stat(ctx context.Context, request *file.FileMetaRequest) (*file.FileMetaResp, error) { resp := &file.FileMetaResp{} resp.Metadata = make(map[string][]string) bucket, err := s.getBucket(request.FileName, request.Metadata) if err != nil { return nil, fmt.Errorf("stat file[%s] fail, err: %s", request.FileName, err.Error()) } - fileNameWithoutBucket, err := loss.GetFileName(request.FileName) + fileNameWithoutBucket, err := util.GetFileName(request.FileName) if err != nil { return nil, fmt.Errorf("stat file[%s] fail, err: %s", request.FileName, err.Error()) } @@ -190,37 +182,22 @@ func (s *AliCloudOSS) Stat(ctx context.Context, request *file.FileMetaRequest) ( return resp, nil } -func (s *AliCloudOSS) checkMetadata(m *OssMetadata) bool { +func (s *AliyunFile) checkMetadata(m *utils.OssMetadata) bool { if m.AccessKeySecret == "" || m.Endpoint == "" || m.AccessKeyID == "" { return false } return true } -func (s *AliCloudOSS) getClient(metadata *OssMetadata) 
(*oss.Client, error) { - client, err := oss.New(metadata.Endpoint, metadata.AccessKeyID, metadata.AccessKeySecret) - if err != nil { - return nil, err - } - return client, nil -} - -func (s *AliCloudOSS) getBucket(fileName string, metaData map[string]string) (*oss.Bucket, error) { +func (s *AliyunFile) getBucket(fileName string, metaData map[string]string) (*oss.Bucket, error) { var ossClient *oss.Client var err error - // get oss client - if _, ok := metaData[endpointKey]; ok { - ossClient = s.client[endpointKey] - } else { - // if user not specify endpoint, try to use default client - ossClient, err = s.selectClient() - if err != nil { - return nil, err - } - } - // get oss bucket - bucketName, err := loss.GetBucketName(fileName) + bucketName, err := util.GetBucketName(fileName) + if err != nil { + return nil, err + } + ossClient, err = s.getClient() if err != nil { return nil, err } @@ -231,13 +208,9 @@ func (s *AliCloudOSS) getBucket(fileName string, metaData map[string]string) (*o return bucket, nil } -func (s *AliCloudOSS) selectClient() (*oss.Client, error) { - if len(s.client) == 1 { - for _, client := range s.client { - return client, nil - } - } else { - return nil, fmt.Errorf("should specific endpoint in metadata") +func (s *AliyunFile) getClient() (*oss.Client, error) { + if s.client == nil { + return nil, utils.ErrNotInitClient } - return nil, nil + return s.client, nil } diff --git a/components/file/s3/alicloud/oss_test.go b/components/file/aliyun/file_test.go similarity index 76% rename from components/file/s3/alicloud/oss_test.go rename to components/file/aliyun/file_test.go index c7ad7c5da1..c29e565735 100644 --- a/components/file/s3/alicloud/oss_test.go +++ b/components/file/aliyun/file_test.go @@ -14,14 +14,14 @@ * limitations under the License. 
*/ -package alicloud +package aliyun import ( "context" "io" "testing" - "github.com/aliyun/aliyun-oss-go-sdk/oss" + "mosn.io/layotto/components/pkg/utils" "github.com/stretchr/testify/assert" @@ -41,7 +41,7 @@ const ( func TestInit(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := NewAliyunFile() err := oss.Init(context.TODO(), &fc) assert.Equal(t, err.Error(), "invalid argument") fc.Metadata = []byte(data) @@ -49,34 +49,14 @@ func TestInit(t *testing.T) { assert.Nil(t, err) } -func TestSelectClient(t *testing.T) { - ossObject := &AliCloudOSS{metadata: make(map[string]*OssMetadata), client: make(map[string]*oss.Client)} - - client, err := ossObject.selectClient() - assert.Equal(t, err.Error(), "should specific endpoint in metadata") - assert.Nil(t, client) - - client1 := &oss.Client{} - ossObject.client["127.0.0.1"] = client1 - client, err = ossObject.selectClient() - assert.Equal(t, client, client1) - assert.Nil(t, err) - - client2 := &oss.Client{} - ossObject.client["0.0.0.0"] = client2 - client, err = ossObject.selectClient() - assert.Equal(t, err.Error(), "should specific endpoint in metadata") - assert.Nil(t, client) -} - func TestGetBucket(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := NewAliyunFile() fc.Metadata = []byte(data) err := oss.Init(context.TODO(), &fc) assert.Nil(t, err) - ac := oss.(*AliCloudOSS) + ac := oss.(*AliyunFile) mt := make(map[string]string) bucket, err := ac.getBucket("/", mt) @@ -94,34 +74,23 @@ func TestGetBucket(t *testing.T) { func TestGetClient(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := &AliyunFile{} fc.Metadata = []byte(data) err := oss.Init(context.TODO(), &fc) assert.Nil(t, err) - ac := oss.(*AliCloudOSS) - mt := &OssMetadata{ - Endpoint: "endpoint", - AccessKeyID: "ak", - AccessKeySecret: "ak", - } - - //TODO test empty endpoint/ak/sk , now will get panic - - client, err := ac.getClient(mt) - assert.Nil(t, err) - assert.NotNil(t, client) + 
assert.NotNil(t, oss.client) } func TestCheckMetadata(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := NewAliyunFile() fc.Metadata = []byte(data) err := oss.Init(context.TODO(), &fc) assert.Nil(t, err) - ac := oss.(*AliCloudOSS) - mt := &OssMetadata{ + ac := oss.(*AliyunFile) + mt := &utils.OssMetadata{ Endpoint: "", AccessKeyID: "", AccessKeySecret: "", @@ -138,7 +107,7 @@ func TestCheckMetadata(t *testing.T) { func TestPut(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := NewAliyunFile() fc.Metadata = []byte(data) err := oss.Init(context.TODO(), &fc) assert.Nil(t, err) @@ -156,7 +125,7 @@ func TestPut(t *testing.T) { func TestGet(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := NewAliyunFile() fc.Metadata = []byte(data) err := oss.Init(context.TODO(), &fc) assert.Nil(t, err) @@ -177,7 +146,7 @@ func TestGet(t *testing.T) { func TestStat(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := NewAliyunFile() fc.Metadata = []byte(data) err := oss.Init(context.TODO(), &fc) assert.Nil(t, err) @@ -198,7 +167,7 @@ func TestStat(t *testing.T) { func TestList(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := NewAliyunFile() fc.Metadata = []byte(data) err := oss.Init(context.TODO(), &fc) assert.Nil(t, err) @@ -220,7 +189,7 @@ func TestList(t *testing.T) { func TestDel(t *testing.T) { fc := file.FileConfig{} - oss := NewAliCloudOSS() + oss := NewAliyunFile() fc.Metadata = []byte(data) err := oss.Init(context.TODO(), &fc) assert.Nil(t, err) diff --git a/components/file/s3/aws/oss.go b/components/file/aws/file.go similarity index 59% rename from components/file/s3/aws/oss.go rename to components/file/aws/file.go index 11a3ffe198..474cefc0c0 100644 --- a/components/file/s3/aws/oss.go +++ b/components/file/aws/file.go @@ -24,76 +24,61 @@ import ( "io" "strings" + "mosn.io/layotto/components/pkg/utils" + "github.com/aws/aws-sdk-go-v2/aws" aws_config 
"github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "mosn.io/layotto/components/file" - loss "mosn.io/layotto/components/file/s3" + "mosn.io/layotto/components/file/util" ) const ( - endpointKey = "endpoint" defaultCredentialsSource = "provider" ) -var ( - ErrNotSpecifyEndpoint error = errors.New("should specific endpoint in metadata") -) - // AwsOss is a binding for aws oss storage. type AwsOss struct { - client map[string]*s3.Client - meta map[string]*AwsOssMetaData + client *s3.Client } -// AwsOssMetaData describe a aws-oss instance. -type AwsOssMetaData struct { - Region string `json:"region"` // eg. us-west-2 - EndPoint string `json:"endpoint"` // eg. protocol://service-code.region-code.amazonaws.com - AccessKeyID string `json:"accessKeyID"` - AccessKeySecret string `json:"accessKeySecret"` -} - -func NewAwsOss() file.File { - return &AwsOss{ - client: make(map[string]*s3.Client), - meta: make(map[string]*AwsOssMetaData), - } +func NewAwsFile() file.File { + return &AwsOss{} } // Init instance by config. func (a *AwsOss) Init(ctx context.Context, config *file.FileConfig) error { - m := make([]*AwsOssMetaData, 0) + m := make([]*utils.OssMetadata, 0) err := json.Unmarshal(config.Metadata, &m) if err != nil { return errors.New("invalid config for aws oss") } for _, data := range m { - if !data.isAwsMetaValid() { + if !a.isAwsMetaValid(data) { return errors.New("invalid config for aws oss") } client, err := a.createOssClient(data) if err != nil { continue } - a.client[data.EndPoint] = client - a.meta[data.EndPoint] = data + a.client = client } return nil } // isAwsMetaValid check if the metadata valid. 
-func (am *AwsOssMetaData) isAwsMetaValid() bool { - if am.AccessKeySecret == "" || am.EndPoint == "" || am.AccessKeyID == "" { +func (a *AwsOss) isAwsMetaValid(v *utils.OssMetadata) bool { + if v.AccessKeySecret == "" || v.Endpoint == "" || v.AccessKeyID == "" { return false } return true } // createOssClient by input meta info. -func (a *AwsOss) createOssClient(meta *AwsOssMetaData) (*s3.Client, error) { +func (a *AwsOss) createOssClient(meta *utils.OssMetadata) (*s3.Client, error) { optFunc := []func(options *aws_config.LoadOptions) error{ aws_config.WithRegion(meta.Region), aws_config.WithCredentialsProvider(credentials.StaticCredentialsProvider{ @@ -112,62 +97,50 @@ func (a *AwsOss) createOssClient(meta *AwsOssMetaData) (*s3.Client, error) { // Put file to aws oss. func (a *AwsOss) Put(ctx context.Context, st *file.PutFileStu) error { - bucket, err := loss.GetBucketName(st.FileName) + //var bodySize int64 + bucket, err := util.GetBucketName(st.FileName) if err != nil { - return fmt.Errorf("awsoss put file[%s] fail,err: %s", st.FileName, err.Error()) + return fmt.Errorf("aws.s3 put file[%s] fail,err: %s", st.FileName, err.Error()) } - key, err := loss.GetFileName(st.FileName) + key, err := util.GetFileName(st.FileName) if err != nil { - return fmt.Errorf("awsoss put file[%s] fail,err: %s", st.FileName, err.Error()) - } - input := &s3.PutObjectInput{ - Bucket: &bucket, - Key: &key, - Body: st.DataStream, + return fmt.Errorf("aws.s3 put file[%s] fail,err: %s", st.FileName, err.Error()) } - client, err := a.selectClient(st.Metadata) + client, err := a.selectClient() if err != nil { return err } - _, err = client.PutObject(context.TODO(), input) + uploader := manager.NewUploader(client) + _, err = uploader.Upload(context.TODO(), &s3.PutObjectInput{Bucket: &bucket, Key: &key, Body: st.DataStream}) + if err != nil { return err } return nil } -// selectClient choose aws client from exist client-map, key is endpoint, value is client instance. 
-func (a *AwsOss) selectClient(meta map[string]string) (*s3.Client, error) { - // exist specific client with key endpoint - if ep, ok := meta[endpointKey]; ok { - if client, ok := a.client[ep]; ok { - return client, nil - } +func (a *AwsOss) selectClient() (*s3.Client, error) { + if a.client == nil { + return nil, utils.ErrNotInitClient } - // if not specify endpoint, select default one - if len(a.client) == 1 { - for _, client := range a.client { - return client, nil - } - } - return nil, ErrNotSpecifyEndpoint + return a.client, nil } // Get object from aws oss. func (a *AwsOss) Get(ctx context.Context, st *file.GetFileStu) (io.ReadCloser, error) { - bucket, err := loss.GetBucketName(st.FileName) + bucket, err := util.GetBucketName(st.FileName) if err != nil { - return nil, fmt.Errorf("awsoss get file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("aws.s3 get file[%s] fail,err: %s", st.FileName, err.Error()) } - key, err := loss.GetFileName(st.FileName) + key, err := util.GetFileName(st.FileName) if err != nil { - return nil, fmt.Errorf("awsoss get file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("aws.s3 get file[%s] fail,err: %s", st.FileName, err.Error()) } input := &s3.GetObjectInput{ Bucket: &bucket, Key: &key, } - client, err := a.selectClient(st.Metadata) + client, err := a.selectClient() if err != nil { return nil, err } @@ -180,18 +153,13 @@ func (a *AwsOss) Get(ctx context.Context, st *file.GetFileStu) (io.ReadCloser, e // List objects from aws oss. 
func (a *AwsOss) List(ctx context.Context, st *file.ListRequest) (*file.ListResp, error) { - bucket, err := loss.GetBucketName(st.DirectoryName) + bucket, err := util.GetBucketName(st.DirectoryName) if err != nil { return nil, fmt.Errorf("list bucket[%s] fail, err: %s", st.DirectoryName, err.Error()) } - prefix := loss.GetFilePrefixName(st.DirectoryName) - input := &s3.ListObjectsInput{ - Bucket: &bucket, - MaxKeys: st.PageSize, - Marker: &st.Marker, - Prefix: &prefix, - } - client, err := a.selectClient(st.Metadata) + prefix := util.GetFilePrefixName(st.DirectoryName) + input := &s3.ListObjectsInput{Bucket: &bucket, MaxKeys: st.PageSize, Marker: &st.Marker, Prefix: &prefix} + client, err := a.selectClient() if err != nil { return nil, fmt.Errorf("list bucket[%s] fail, err: %s", st.DirectoryName, err.Error()) } @@ -216,19 +184,19 @@ func (a *AwsOss) List(ctx context.Context, st *file.ListRequest) (*file.ListResp // Del object in aws oss. func (a *AwsOss) Del(ctx context.Context, st *file.DelRequest) error { - bucket, err := loss.GetBucketName(st.FileName) + bucket, err := util.GetBucketName(st.FileName) if err != nil { - return fmt.Errorf("awsoss put file[%s] fail,err: %s", st.FileName, err.Error()) + return fmt.Errorf("aws.s3 put file[%s] fail,err: %s", st.FileName, err.Error()) } - key, err := loss.GetFileName(st.FileName) + key, err := util.GetFileName(st.FileName) if err != nil { - return fmt.Errorf("awsoss put file[%s] fail,err: %s", st.FileName, err.Error()) + return fmt.Errorf("aws.s3 put file[%s] fail,err: %s", st.FileName, err.Error()) } input := &s3.DeleteObjectInput{ Bucket: &bucket, Key: &key, } - client, err := a.selectClient(st.Metadata) + client, err := a.selectClient() if err != nil { return err } @@ -239,19 +207,19 @@ func (a *AwsOss) Del(ctx context.Context, st *file.DelRequest) error { return nil } func (a *AwsOss) Stat(ctx context.Context, st *file.FileMetaRequest) (*file.FileMetaResp, error) { - bucket, err := loss.GetBucketName(st.FileName) + 
bucket, err := util.GetBucketName(st.FileName) if err != nil { - return nil, fmt.Errorf("awsoss stat file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("aws.s3 stat file[%s] fail,err: %s", st.FileName, err.Error()) } - key, err := loss.GetFileName(st.FileName) + key, err := util.GetFileName(st.FileName) if err != nil { - return nil, fmt.Errorf("awsoss stat file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("aws.s3 stat file[%s] fail,err: %s", st.FileName, err.Error()) } input := &s3.HeadObjectInput{ Bucket: &bucket, Key: &key, } - client, err := a.selectClient(st.Metadata) + client, err := a.selectClient() if err != nil { return nil, err } @@ -260,13 +228,13 @@ func (a *AwsOss) Stat(ctx context.Context, st *file.FileMetaRequest) (*file.File if strings.Contains(err.Error(), "no such key") { return nil, file.ErrNotExist } - return nil, fmt.Errorf("awsoss stat file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("aws.s3 stat file[%s] fail,err: %s", st.FileName, err.Error()) } resp := &file.FileMetaResp{} resp.Size = out.ContentLength resp.LastModified = out.LastModified.String() resp.Metadata = make(map[string][]string) - resp.Metadata[loss.ETag] = append(resp.Metadata[loss.ETag], *out.ETag) + resp.Metadata[util.ETag] = append(resp.Metadata[util.ETag], *out.ETag) for k, v := range out.Metadata { resp.Metadata[k] = append(resp.Metadata[k], v) } diff --git a/components/file/s3/aws/oss_test.go b/components/file/aws/file_test.go similarity index 58% rename from components/file/s3/aws/oss_test.go rename to components/file/aws/file_test.go index 4a553dc91c..8edc9f65bf 100644 --- a/components/file/s3/aws/oss_test.go +++ b/components/file/aws/file_test.go @@ -18,8 +18,16 @@ package aws import ( "context" + "fmt" + "reflect" "testing" + "mosn.io/layotto/components/pkg/utils" + + "mosn.io/layotto/components/oss" + + "github.com/jinzhu/copier" + "github.com/aws/aws-sdk-go-v2/service/s3" 
"github.com/stretchr/testify/assert" @@ -28,6 +36,7 @@ import ( const cfg = `[ { + "buckets":["bucket1"], "endpoint": "protocol://service-code.region-code.amazonaws.com", "accessKeyID": "accessKey", "accessKeySecret": "secret", @@ -36,56 +45,28 @@ const cfg = `[ ]` func TestAwsOss_Init(t *testing.T) { - oss := NewAwsOss() + oss := NewAwsFile() err := oss.Init(context.TODO(), &file.FileConfig{}) assert.Equal(t, err.Error(), "invalid config for aws oss") err = oss.Init(context.TODO(), &file.FileConfig{Metadata: []byte(cfg)}) assert.Equal(t, nil, err) } -func TestAwsOss_SelectClient(t *testing.T) { - oss := &AwsOss{ - client: make(map[string]*s3.Client), - meta: make(map[string]*AwsOssMetaData), - } - err := oss.Init(context.TODO(), &file.FileConfig{Metadata: []byte(cfg)}) - assert.Equal(t, nil, err) - - // not specify endpoint, select default client - meta := map[string]string{} - _, err = oss.selectClient(meta) - assert.Nil(t, err) - - // specify endpoint equal config - meta["endpoint"] = "protocol://service-code.region-code.amazonaws.com" - client, _ := oss.selectClient(meta) - assert.NotNil(t, client) - - // specicy not exist endpoint, select default one - meta["endpoint"] = "protocol://cn-northwest-1.region-code.amazonaws.com" - client, err = oss.selectClient(meta) - assert.Nil(t, err) - assert.NotNil(t, client) - // new client with endpoint - oss.client["protocol://cn-northwest-1.region-code.amazonaws.com"] = &s3.Client{} - client, _ = oss.selectClient(meta) - assert.NotNil(t, client) -} - func TestAwsOss_IsAwsMetaValid(t *testing.T) { - mt := &AwsOssMetaData{} - assert.False(t, mt.isAwsMetaValid()) + mt := &utils.OssMetadata{} + a := AwsOss{} + assert.False(t, a.isAwsMetaValid(mt)) mt.AccessKeyID = "a" - assert.False(t, mt.isAwsMetaValid()) - mt.EndPoint = "a" - assert.False(t, mt.isAwsMetaValid()) + assert.False(t, a.isAwsMetaValid(mt)) + mt.Endpoint = "a" + assert.False(t, a.isAwsMetaValid(mt)) mt.AccessKeySecret = "a" - assert.True(t, mt.isAwsMetaValid()) + 
assert.True(t, a.isAwsMetaValid(mt)) } func TestAwsOss_Put(t *testing.T) { - oss := NewAwsOss() + oss := NewAwsFile() err := oss.Init(context.TODO(), &file.FileConfig{Metadata: []byte(cfg)}) assert.Equal(t, nil, err) @@ -93,15 +74,15 @@ func TestAwsOss_Put(t *testing.T) { FileName: "", } err = oss.Put(context.Background(), req) - assert.Equal(t, err.Error(), "awsoss put file[] fail,err: invalid fileName format") + assert.Equal(t, err.Error(), "aws.s3 put file[] fail,err: invalid fileName format") req.FileName = "/a.txt" err = oss.Put(context.Background(), req) - assert.Equal(t, err.Error(), "awsoss put file[/a.txt] fail,err: invalid fileName format") + assert.Equal(t, err.Error(), "aws.s3 put file[/a.txt] fail,err: invalid fileName format") } func TestAwsOss_Get(t *testing.T) { - oss := NewAwsOss() + oss := NewAwsFile() err := oss.Init(context.TODO(), &file.FileConfig{Metadata: []byte(cfg)}) assert.Equal(t, nil, err) @@ -110,15 +91,36 @@ func TestAwsOss_Get(t *testing.T) { } err = oss.Put(context.Background(), putReq) - assert.Equal(t, err.Error(), "awsoss put file[/a.txt] fail,err: invalid fileName format") + assert.Equal(t, err.Error(), "aws.s3 put file[/a.txt] fail,err: invalid fileName format") req := &file.GetFileStu{ FileName: "", } _, err = oss.Get(context.Background(), req) - assert.Equal(t, err.Error(), "awsoss get file[] fail,err: invalid fileName format") + assert.Equal(t, err.Error(), "aws.s3 get file[] fail,err: invalid fileName format") req.FileName = "/a.txt" _, err = oss.Get(context.Background(), req) - assert.Equal(t, err.Error(), "awsoss get file[/a.txt] fail,err: invalid fileName format") + assert.Equal(t, err.Error(), "aws.s3 get file[/a.txt] fail,err: invalid fileName format") +} + +type fun = func() (string, error) + +func TestCopier(t *testing.T) { + hello := "hello" + target := &oss.ListObjectsOutput{} + source := &s3.ListObjectsOutput{Delimiter: &hello, EncodingType: "encoding type"} + re := reflect.TypeOf(source) + h, _ := 
re.Elem().FieldByName("EncodingType") + fmt.Println(h.Type.Name(), h.Type.Kind()) + err := copier.Copy(target, source) + if err != nil { + t.Fail() + } + var s fun + if s == nil { + fmt.Printf("s is nil \n") + } + fmt.Println(target) + } diff --git a/components/file/errors.go b/components/file/errors.go index db949b33b0..4a6a18f3f2 100644 --- a/components/file/errors.go +++ b/components/file/errors.go @@ -25,5 +25,5 @@ var ( ErrPermission = errors.New("permission denied") ErrExist = errors.New("file already exists") ErrNotExist = errors.New("file does not exist") - ErrExpired = errors.New("file does not exist") + ErrExpired = errors.New("file expired") ) diff --git a/components/file/s3/minio/oss.go b/components/file/minio/oss.go similarity index 78% rename from components/file/s3/minio/oss.go rename to components/file/minio/oss.go index 50b27aee56..5cca66763b 100644 --- a/components/file/s3/minio/oss.go +++ b/components/file/minio/oss.go @@ -24,12 +24,13 @@ import ( "io" "strconv" + "mosn.io/layotto/components/file/util" + "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7" "mosn.io/layotto/components/file" - loss "mosn.io/layotto/components/file/s3" ) const ( @@ -88,13 +89,13 @@ func (m *MinioOss) Put(ctx context.Context, st *file.PutFileStu) error { var ( size int64 = -1 ) - bucket, err := loss.GetBucketName(st.FileName) + bucket, err := util.GetBucketName(st.FileName) if err != nil { - return fmt.Errorf("minioOss put file[%s] fail,err: %s", st.FileName, err.Error()) + return fmt.Errorf("minio put file[%s] fail,err: %s", st.FileName, err.Error()) } - key, err := loss.GetFileName(st.FileName) + key, err := util.GetFileName(st.FileName) if err != nil { - return fmt.Errorf("minioOss put file[%s] fail,err: %s", st.FileName, err.Error()) + return fmt.Errorf("minio put file[%s] fail,err: %s", st.FileName, err.Error()) } core, err := m.selectClient(st.Metadata) if err != nil { @@ -115,13 +116,13 @@ func (m *MinioOss) Put(ctx context.Context, st 
*file.PutFileStu) error { } func (m *MinioOss) Get(ctx context.Context, st *file.GetFileStu) (io.ReadCloser, error) { - bucket, err := loss.GetBucketName(st.FileName) + bucket, err := util.GetBucketName(st.FileName) if err != nil { - return nil, fmt.Errorf("minioOss get file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("minio get file[%s] fail,err: %s", st.FileName, err.Error()) } - key, err := loss.GetFileName(st.FileName) + key, err := util.GetFileName(st.FileName) if err != nil { - return nil, fmt.Errorf("minioOss get file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("minio get file[%s] fail,err: %s", st.FileName, err.Error()) } core, err := m.selectClient(st.Metadata) if err != nil { @@ -135,12 +136,12 @@ func (m *MinioOss) Get(ctx context.Context, st *file.GetFileStu) (io.ReadCloser, } func (m *MinioOss) List(ctx context.Context, st *file.ListRequest) (*file.ListResp, error) { - bucket, err := loss.GetBucketName(st.DirectoryName) + bucket, err := util.GetBucketName(st.DirectoryName) marker := "" if err != nil { - return nil, fmt.Errorf("MinioOss list bucket[%s] fail, err: %s", st.DirectoryName, err.Error()) + return nil, fmt.Errorf("minio list bucket[%s] fail, err: %s", st.DirectoryName, err.Error()) } - prefix := loss.GetFilePrefixName(st.DirectoryName) + prefix := util.GetFilePrefixName(st.DirectoryName) core, err := m.selectClient(st.Metadata) if err != nil { @@ -165,33 +166,33 @@ func (m *MinioOss) List(ctx context.Context, st *file.ListRequest) (*file.ListRe } func (m *MinioOss) Del(ctx context.Context, st *file.DelRequest) error { - bucket, err := loss.GetBucketName(st.FileName) + bucket, err := util.GetBucketName(st.FileName) if err != nil { - return fmt.Errorf("minioOss del file[%s] fail,err: %s", st.FileName, err.Error()) + return fmt.Errorf("minio del file[%s] fail,err: %s", st.FileName, err.Error()) } - key, err := loss.GetFileName(st.FileName) + key, err := util.GetFileName(st.FileName) if err != 
nil { - return fmt.Errorf("minioOss del file[%s] fail,err: %s", st.FileName, err.Error()) + return fmt.Errorf("minio del file[%s] fail,err: %s", st.FileName, err.Error()) } core, err := m.selectClient(st.Metadata) if err != nil { - return fmt.Errorf("minioOss del file[%s] fail,err: %s", st.FileName, err.Error()) + return fmt.Errorf("minio del file[%s] fail,err: %s", st.FileName, err.Error()) } return core.Client.RemoveObject(ctx, bucket, key, minio.RemoveObjectOptions{}) } func (m *MinioOss) Stat(ctx context.Context, st *file.FileMetaRequest) (*file.FileMetaResp, error) { - bucket, err := loss.GetBucketName(st.FileName) + bucket, err := util.GetBucketName(st.FileName) if err != nil { - return nil, fmt.Errorf("minioOss stat file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("minio stat file[%s] fail,err: %s", st.FileName, err.Error()) } - key, err := loss.GetFileName(st.FileName) + key, err := util.GetFileName(st.FileName) if err != nil { - return nil, fmt.Errorf("minioOss stat file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("minio stat file[%s] fail,err: %s", st.FileName, err.Error()) } core, err := m.selectClient(st.Metadata) if err != nil { - return nil, fmt.Errorf("minioOss stat file[%s] fail,err: %s", st.FileName, err.Error()) + return nil, fmt.Errorf("minio stat file[%s] fail,err: %s", st.FileName, err.Error()) } info, err := core.Client.StatObject(ctx, bucket, key, minio.GetObjectOptions{}) @@ -206,7 +207,7 @@ func (m *MinioOss) Stat(ctx context.Context, st *file.FileMetaRequest) (*file.Fi resp.Metadata = make(map[string][]string) resp.LastModified = info.LastModified.String() resp.Size = info.Size - resp.Metadata[loss.ETag] = append(resp.Metadata[loss.ETag], info.ETag) + resp.Metadata[util.ETag] = append(resp.Metadata[util.ETag], info.ETag) for k, v := range info.Metadata { resp.Metadata[k] = v } diff --git a/components/file/s3/minio/oss_test.go b/components/file/minio/oss_test.go similarity index 97% rename 
from components/file/s3/minio/oss_test.go rename to components/file/minio/oss_test.go index 8f8d223f05..afdc13af90 100644 --- a/components/file/s3/minio/oss_test.go +++ b/components/file/minio/oss_test.go @@ -126,7 +126,7 @@ func TestMinioOss_Get(t *testing.T) { } _, err = oss.Get(context.TODO(), getReq) - assert.Equal(t, "minioOss get file[file] fail,err: invalid fileName format", err.Error()) + assert.Equal(t, "minio get file[file] fail,err: invalid fileName format", err.Error()) // client not exist getReq.FileName = "bucketName/file" diff --git a/components/file/s3/qiniu/qiniu_oss_client.go b/components/file/qiniu/qiniu_oss_client.go similarity index 100% rename from components/file/s3/qiniu/qiniu_oss_client.go rename to components/file/qiniu/qiniu_oss_client.go diff --git a/components/file/s3/qiniu/qiniu_oss_client_test.go b/components/file/qiniu/qiniu_oss_client_test.go similarity index 100% rename from components/file/s3/qiniu/qiniu_oss_client_test.go rename to components/file/qiniu/qiniu_oss_client_test.go diff --git a/components/file/s3/qiniu/qiniu_oss_test.go b/components/file/qiniu/qiniu_oss_test.go similarity index 100% rename from components/file/s3/qiniu/qiniu_oss_test.go rename to components/file/qiniu/qiniu_oss_test.go diff --git a/components/file/s3/qiniu/quniu_oss.go b/components/file/qiniu/quniu_oss.go similarity index 100% rename from components/file/s3/qiniu/quniu_oss.go rename to components/file/qiniu/quniu_oss.go diff --git a/components/file/s3/tencentcloud/oss.go b/components/file/tencentcloud/oss.go similarity index 100% rename from components/file/s3/tencentcloud/oss.go rename to components/file/tencentcloud/oss.go diff --git a/components/file/s3/tencentcloud/oss_test.go b/components/file/tencentcloud/oss_test.go similarity index 100% rename from components/file/s3/tencentcloud/oss_test.go rename to components/file/tencentcloud/oss_test.go diff --git a/components/file/types.go b/components/file/types.go index 746eb7ebbc..d9d20ae6cb 100644 
--- a/components/file/types.go +++ b/components/file/types.go @@ -23,8 +23,8 @@ import ( // FileConfig wraps configuration for a file implementation type FileConfig struct { - Type string `json:"type"` - Metadata json.RawMessage + Metadata json.RawMessage `json:"metadata"` + Type string `json:"type"` } type PutFileStu struct { diff --git a/components/file/s3/util.go b/components/file/util/util.go similarity index 99% rename from components/file/s3/util.go rename to components/file/util/util.go index 1676a51126..ac128a2477 100644 --- a/components/file/s3/util.go +++ b/components/file/util/util.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package s3 +package util import ( "fmt" diff --git a/components/file/s3/util_test.go b/components/file/util/util_test.go similarity index 99% rename from components/file/s3/util_test.go rename to components/file/util/util_test.go index b4ebd6645a..cefc783861 100644 --- a/components/file/s3/util_test.go +++ b/components/file/util/util_test.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package s3 +package util import ( "testing" diff --git a/components/go.mod b/components/go.mod index 47c0809429..8c21771414 100644 --- a/components/go.mod +++ b/components/go.mod @@ -5,13 +5,14 @@ go 1.14 require ( github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/alicebob/miniredis/v2 v2.16.0 - github.com/aliyun/aliyun-oss-go-sdk v2.1.8+incompatible + github.com/aliyun/aliyun-oss-go-sdk v2.2.0+incompatible github.com/apache/dubbo-go-hessian2 v1.10.2 github.com/apolloconfig/agollo/v4 v4.2.0 - github.com/aws/aws-sdk-go-v2 v1.9.1 - github.com/aws/aws-sdk-go-v2/config v1.8.2 - github.com/aws/aws-sdk-go-v2/credentials v1.4.2 - github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 + github.com/aws/aws-sdk-go-v2 v1.16.4 + github.com/aws/aws-sdk-go-v2/config v1.15.9 + github.com/aws/aws-sdk-go-v2/credentials v1.12.4 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14 + github.com/aws/aws-sdk-go-v2/service/s3 v1.26.10 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect github.com/go-redis/redis/v8 v8.8.0 github.com/go-zookeeper/zk v1.0.2 @@ -19,6 +20,7 @@ require ( github.com/google/uuid v1.3.0 github.com/hashicorp/consul/api v1.3.0 github.com/jarcoal/httpmock v1.2.0 + github.com/jinzhu/copier v0.3.6-0.20220506024824-3e39b055319a github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/minio/minio-go/v7 v7.0.15 github.com/mitchellh/mapstructure v1.4.1 // indirect diff --git a/components/go.sum b/components/go.sum index d5d2d6977f..15f5abde5f 100644 --- a/components/go.sum +++ b/components/go.sum @@ -71,8 +71,8 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGn github.com/alicebob/miniredis/v2 v2.16.0 h1:ALkyFg7bSTEd1Mkrb4ppq4fnwjklA59dVtIehXCUZkU= github.com/alicebob/miniredis/v2 v2.16.0/go.mod h1:gquAfGbzn92jvtrSC69+6zZnwSODVXVpYDRaGhWaL6I= github.com/aliyun/alibaba-cloud-sdk-go v1.61.18/go.mod h1:v8ESoHo4SyHmuB4b1tJqDHxfTGEciD+yhvOU/5s1Rfk= 
-github.com/aliyun/aliyun-oss-go-sdk v2.1.8+incompatible h1:hLUNPbx10wawWW7DeNExvTrlb90db3UnnNTFKHZEFhE= -github.com/aliyun/aliyun-oss-go-sdk v2.1.8+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-oss-go-sdk v2.2.0+incompatible h1:ht2+VfbXtNLGhCsnTMc6/N26nSTBK6qdhktjYyjJQkk= +github.com/aliyun/aliyun-oss-go-sdk v2.2.0+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.2 h1:JKnhI/XQ75uFBTiuzXpzFrUriDPiZjlOSzh6wXogP0E= github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= @@ -94,30 +94,42 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.9.1 h1:ZbovGV/qo40nrOJ4q8G33AGICzaPI45FHQWJ9650pF4= -github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config v1.8.2 h1:Dqy4ySXFmulRmZhfynm/5CD4Y6aXiTVhDtXLIuUe/r0= -github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= -github.com/aws/aws-sdk-go-v2/credentials v1.4.2 h1:8kVE4Og6wlhVrMGiORQ3p9gRj2exjzhFRB+QzWBUa5Q= -github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 h1:Nm+BxqBtT0r+AnD6byGMCGT4Km0QwHBy8mAYptNPXY4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 h1:NnXJXUz7oihrSlPKEM0yZ19b+7GQ47MX/LluLlEyE/Y= -github.com/aws/aws-sdk-go-v2/internal/ini 
v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 h1:APEjhKZLFlNVLATnA/TJyA+w1r/xd5r5ACWBDZ9aIvc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 h1:dt1JQFj/135ozwGIWeCM3aQ8N/kB3Xu3Uu4r9zuOIyc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0diUZ0W4R6uZtjYKguMLW2s= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 h1:RfgQyv3bFT2Js6XokcrNtTjQ6wAVBRpoCgTFsypihHA= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 h1:7ce9ugapSgBapwLhg7AJTqKW5U92VRX3vX65k2tsB+g= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w= -github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/aws-sdk-go-v2 v1.16.4 h1:swQTEQUyJF/UkEA94/Ga55miiKFoXmm/Zd67XHgmjSg= +github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= +github.com/aws/aws-sdk-go-v2/config v1.15.9 
h1:TK5yNEnFDQ9iaO04gJS/3Y+eW8BioQiCUafW75/Wc3Q= +github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs= +github.com/aws/aws-sdk-go-v2/credentials v1.12.4 h1:xggwS+qxCukXRVXJBJWQJGyUsvuxGC8+J1kKzv2cxuw= +github.com/aws/aws-sdk-go-v2/credentials v1.12.4/go.mod h1:7g+GGSp7xtR823o1jedxKmqRZGqLdoHQfI4eFasKKxs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5 h1:YPxclBeE07HsLQE8vtjC8T2emcTjM9nzqsnDi2fv5UM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5/go.mod h1:WAPnuhG5IQ/i6DETFl5NmX3kKqCzw7aau9NHAGcm4QE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14 h1:qpJmFbypCfwPok5PGTSnQy1NKbv4Hn8xGsee9l4xOPE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14/go.mod h1:IOYB+xOZik8YgdTlnDSwbvKmCkikA3nVue8/Qnfzs0c= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11 h1:gsqHplNh1DaQunEKZISK56wlpbCg0yKxNVvGWCFuF1k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5 h1:PLFj+M2PgIDHG//hw3T0O0KLI4itVtAjtxrZx4AHPLg= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12 h1:j0VqrjtgsY1Bx27tD0ysay36/K4kFMWRp9K3ieO9nLU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12/go.mod h1:00c7+ALdPh4YeEUPXJzyU0Yy01nPGOq2+9rUaz05z9g= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2 h1:1fs9WkbFcMawQjxEI0B5L0SqvBhJZebxWM6Z3x/qHWY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2/go.mod h1:0jDVeWUFPbI3sOfsXXAsIdiawXcn7VBLx/IlFVTRP64= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 h1:T4pFel53bkHjL2mMo+4DKE6r6AuoZnM0fg7k1/ratr4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6 h1:9mvDAsMiN+07wcfGM+hJ1J3dOKZ2YOpDiPZ6ufRJcgw= 
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6/go.mod h1:Eus+Z2iBIEfhOvhSdMTcscNOMy6n3X9/BJV0Zgax98w= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5 h1:gRW1ZisKc93EWEORNJRvy/ZydF3o6xLSveJHdi1Oa0U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5/go.mod h1:ZbkttHXaVn3bBo/wpJbQGiiIWR90eTBUVBrEHUEQlho= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5 h1:DyPYkrH4R2zn+Pdu6hM3VTuPsQYAE6x2WB24X85Sgw0= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5/go.mod h1:XtL92YWo0Yq80iN3AgYRERJqohg4TozrqRlxYhHGJ7g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.26.10 h1:GWdLZK0r1AK5sKb8rhB9bEXqXCK8WNuyv4TBAD6ZviQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.26.10/go.mod h1:+O7qJxF8nLorAhuIVhYTHse6okjHJJm4EwhhzvpnkT0= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.7 h1:suAGD+RyiHWPPihZzY+jw4mCZlOFWgmdjb2AeTenz7c= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.7/go.mod h1:TFVe6Rr2joVLsYQ1ABACXgOC6lXip/qpX2x5jWg/A9w= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.6 h1:aYToU0/iazkMY67/BYLt3r6/LT/mUtarLAF5mGof1Kg= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.6/go.mod h1:rP1rEOKAGZoXp4iGDxSXFvODAtXpm34Egf0lL0eshaQ= +github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= +github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= @@ -352,8 +364,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -467,8 +479,12 @@ github.com/jcmturner/rpc/v2 v2.0.2 h1:gMB4IwRXYsWw4Bc6o/az2HJgFUA1ffSh90i26ZJ6Xl github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro= +github.com/jinzhu/copier v0.3.6-0.20220506024824-3e39b055319a h1:1tB9lnwJFOtcMERtVVAad4aK5e3Q7sVbP0id87FxIS8= +github.com/jinzhu/copier v0.3.6-0.20220506024824-3e39b055319a/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 
h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= diff --git a/components/oss/aliyun/option.go b/components/oss/aliyun/option.go new file mode 100644 index 0000000000..2dd2f164be --- /dev/null +++ b/components/oss/aliyun/option.go @@ -0,0 +1,493 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ + +package aliyun + +import ( + "time" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" +) + +// Prefix is an option to set prefix parameter +func Prefix(value string) oss.Option { + if value == "" { + return nil + } + return oss.Prefix(value) +} + +// KeyMarker is an option to set key-marker parameter +func KeyMarker(value string) oss.Option { + if value == "" { + return nil + } + return oss.KeyMarker(value) +} + +// MaxUploads is an option to set max-uploads parameter +func MaxUploads(value int) oss.Option { + if value <= 0 { + return nil + } + return oss.MaxUploads(value) +} + +// Delimiter is an option to set delimiler parameter +func Delimiter(value string) oss.Option { + if value == "" { + return nil + } + + return oss.Delimiter(value) +} + +// UploadIDMarker is an option to set upload-id-marker parameter +func UploadIDMarker(value string) oss.Option { + if value == "" { + return nil + } + return oss.UploadIDMarker(value) +} + +// VersionId is an option to set versionId parameter +func VersionId(value string) oss.Option { + if value == "" { + return nil + } + return oss.VersionId(value) +} + +// ObjectACL is an option to set 
X-Oss-Object-Acl header +func ObjectACL(value string) oss.Option { + if value == "" { + return nil + } + return oss.ObjectACL(oss.ACLType(value)) +} + +// CacheControl is an option to set Cache-Control header +func CacheControl(value string) oss.Option { + if value == "" { + return nil + } + return oss.CacheControl(value) +} + +// ContentEncoding is an option to set Content-Encoding header +func ContentEncoding(value string) oss.Option { + if value == "" { + return nil + } + return oss.ContentEncoding(value) +} + +// ACL is an option to set X-Oss-Acl header +func ACL(acl string) oss.Option { + if acl == "" { + return nil + } + return oss.ACL(oss.ACLType(acl)) +} + +// ContentType is an option to set Content-Type header +func ContentType(value string) oss.Option { + if value == "" { + return nil + } + return oss.ContentType(value) +} + +// ContentLength is an option to set Content-Length header +func ContentLength(length int64) oss.Option { + if length == 0 { + return nil + } + return oss.ContentLength(length) +} + +// ContentDisposition is an option to set Content-Disposition header +func ContentDisposition(value string) oss.Option { + if value == "" { + return nil + } + return oss.ContentDisposition(value) +} + +// SetTagging is an option to set object tagging +func SetTagging(value map[string]string) oss.Option { + if value == nil { + return nil + } + tagging := oss.Tagging{} + for k, v := range value { + tag := oss.Tag{Key: k, Value: v} + tagging.Tags = append(tagging.Tags, tag) + } + return oss.SetTagging(tagging) +} + +// ContentLanguage is an option to set Content-Language header +func ContentLanguage(value string) oss.Option { + if value == "" { + return nil + } + return oss.ContentLanguage(value) +} + +// ContentMD5 is an option to set Content-MD5 header +func ContentMD5(value string) oss.Option { + if value == "" { + return nil + } + return oss.ContentMD5(value) +} + +// Expires is an option to set Expires header +func Expires(t int64) oss.Option { + if t 
== 0 { + return nil + } + ti := time.Unix(0, t) + return oss.Expires(ti) +} + +// AcceptEncoding is an option to set Accept-Encoding header +func AcceptEncoding(value string) oss.Option { + if value == "" { + return nil + } + return oss.AcceptEncoding(value) +} + +// IfModifiedSince is an option to set If-Modified-Since header +func IfModifiedSince(t int64) oss.Option { + if t == 0 { + return nil + } + ti := time.Unix(0, t) + return oss.IfModifiedSince(ti) +} + +// IfUnmodifiedSince is an option to set If-Unmodified-Since header +func IfUnmodifiedSince(t int64) oss.Option { + if t == 0 { + return nil + } + ti := time.Unix(0, t) + return oss.IfUnmodifiedSince(ti) +} + +// IfMatch is an option to set If-Match header +func IfMatch(value string) oss.Option { + if value == "" { + return nil + } + return oss.IfNoneMatch(value) +} + +// IfNoneMatch is an option to set IfNoneMatch header +func IfNoneMatch(value string) oss.Option { + if value == "" { + return nil + } + return oss.IfNoneMatch(value) +} + +// Range is an option to set Range header, [start, end] +func Range(start, end int64) oss.Option { + if start == 0 && end == 0 { + return nil + } + return oss.Range(start, end) +} + +// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header +func CopySourceIfMatch(value string) oss.Option { + if value == "" { + return nil + } + return oss.CopySourceIfMatch(value) +} + +// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header +func CopySourceIfNoneMatch(value string) oss.Option { + if value == "" { + return nil + } + return oss.CopySourceIfNoneMatch(value) +} + +// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header +func CopySourceIfModifiedSince(t int64) oss.Option { + if t == 0 { + return nil + } + tm := time.Unix(0, t) + return oss.CopySourceIfModifiedSince(tm) +} + +// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header +func 
CopySourceIfUnmodifiedSince(t int64) oss.Option { + if t == 0 { + return nil + } + tm := time.Unix(0, t) + return oss.CopySourceIfUnmodifiedSince(tm) +} + +// MetadataDirective is an option to set X-Oss-Metadata-Directive header +func MetadataDirective(value string) oss.Option { + if value == "" { + return nil + } + return oss.MetadataDirective(oss.MetadataDirectiveType(value)) +} + +// Meta is an option to set Meta header +func Meta(key, value string) oss.Option { + return oss.Meta(key, value) +} + +// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header +func ServerSideEncryption(value string) oss.Option { + if value == "" { + return nil + } + return oss.ServerSideEncryption(value) +} + +// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header +func ServerSideEncryptionKeyID(value string) oss.Option { + if value == "" { + return nil + } + return oss.ServerSideEncryptionKeyID(value) +} + +// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header +func ServerSideDataEncryption(value string) oss.Option { + if value == "" { + return nil + } + return oss.ServerSideDataEncryption(value) +} + +// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header +func SSECAlgorithm(value string) oss.Option { + if value == "" { + return nil + } + return oss.SSECAlgorithm(value) +} + +// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header +func SSECKey(value string) oss.Option { + if value == "" { + return nil + } + return oss.SSECKey(value) +} + +// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header +func SSECKeyMd5(value string) oss.Option { + if value == "" { + return nil + } + return oss.SSECKeyMd5(value) +} + +// Origin is an option to set Origin header +func Origin(value string) oss.Option { + if value == "" { + return nil + } + return oss.Origin(value) +} + +// RangeBehavior is an option to set 
Range value, such as "standard" +func RangeBehavior(value string) oss.Option { + if value == "" { + return nil + } + return oss.RangeBehavior(value) +} + +func PartHashCtxHeader(value string) oss.Option { + if value == "" { + return nil + } + return oss.PartHashCtxHeader(value) +} + +func PartMd5CtxHeader(value string) oss.Option { + if value == "" { + return nil + } + return oss.PartMd5CtxHeader(value) +} + +func PartHashCtxParam(value string) oss.Option { + if value == "" { + return nil + } + return oss.PartHashCtxParam(value) +} + +func PartMd5CtxParam(value string) oss.Option { + if value == "" { + return nil + } + return oss.PartMd5CtxParam(value) +} + +// Marker is an option to set marker parameter +func Marker(value string) oss.Option { + if value == "" { + return nil + } + return oss.Marker(value) +} + +// MaxKeys is an option to set maxkeys parameter +func MaxKeys(value int) oss.Option { + if value == 0 { + return nil + } + return oss.MaxKeys(value) +} + +// EncodingType is an option to set encoding-type parameter +func EncodingType(value string) oss.Option { + if value == "" { + return nil + } + return oss.EncodingType(value) +} + +// VersionIdMarker is an option to set version-id-marker parameter +func VersionIdMarker(value string) oss.Option { + if value == "" { + return nil + } + return oss.VersionIdMarker(value) +} + +// MaxParts is an option to set max-parts parameter +func MaxParts(value int) oss.Option { + if value == 0 { + return nil + } + return oss.MaxParts(value) +} + +// PartNumberMarker is an option to set part-number-marker parameter +func PartNumberMarker(value int) oss.Option { + if value == 0 { + return nil + } + return oss.PartNumberMarker(value) +} + +// StorageClass bucket storage class +func StorageClass(value string) oss.Option { + if value == "" { + return nil + } + return oss.StorageClass(oss.StorageClassType(value)) +} + +// ResponseContentType is an option to set response-content-type param +func ResponseContentType(value string) 
oss.Option { + if value == "" { + return nil + } + return oss.ResponseContentType(value) +} + +// ResponseContentLanguage is an option to set response-content-language param +func ResponseContentLanguage(value string) oss.Option { + if value == "" { + return nil + } + return oss.ResponseContentLanguage(value) +} + +// ResponseExpires is an option to set response-expires param +func ResponseExpires(value string) oss.Option { + if value == "" { + return nil + } + return oss.ResponseExpires(value) +} + +// ResponseCacheControl is an option to set response-cache-control param +func ResponseCacheControl(value string) oss.Option { + if value == "" { + return nil + } + return oss.ResponseCacheControl(value) +} + +// ResponseContentDisposition is an option to set response-content-disposition param +func ResponseContentDisposition(value string) oss.Option { + if value == "" { + return nil + } + return oss.ResponseContentDisposition(value) +} + +// ResponseContentEncoding is an option to set response-content-encoding param +func ResponseContentEncoding(value string) oss.Option { + if value == "" { + return nil + } + return oss.ResponseContentEncoding(value) +} + +// Process is an option to set x-oss-process param +func Process(value string) oss.Option { + if value == "" { + return nil + } + return oss.Process(value) +} + +// TrafficLimitParam is a option to set x-oss-traffic-limit +func TrafficLimitParam(value int64) oss.Option { + if value == 0 { + return nil + } + return oss.TrafficLimitParam(value) +} + +// SetHeader Allow users to set personalized http headers +func SetHeader(key string, value interface{}) oss.Option { + return oss.SetHeader(key, value) +} + +// AddParam Allow users to set personalized http params +func AddParam(key string, value interface{}) oss.Option { + return oss.AddParam(key, value) +} + +// RequestPayer is an option to set payer who pay for the request +func RequestPayer(value string) oss.Option { + if value == "" { + return nil + } + return 
oss.RequestPayer(oss.PayerType(value)) +} diff --git a/components/oss/aliyun/option_test.go b/components/oss/aliyun/option_test.go new file mode 100644 index 0000000000..0b78ed18bd --- /dev/null +++ b/components/oss/aliyun/option_test.go @@ -0,0 +1,146 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ + +package aliyun + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOptionNil(t *testing.T) { + assert.Nil(t, Prefix("")) + assert.Nil(t, KeyMarker("")) + assert.Nil(t, MaxUploads(0)) + assert.Nil(t, Delimiter("")) + assert.Nil(t, UploadIDMarker("")) + assert.Nil(t, VersionId("")) + assert.Nil(t, ObjectACL("")) + assert.Nil(t, CacheControl("")) + assert.Nil(t, ContentEncoding("")) + assert.Nil(t, ACL("")) + assert.Nil(t, ContentType("")) + assert.Nil(t, ContentLength(0)) + assert.Nil(t, ContentDisposition("")) + assert.Nil(t, ContentLanguage("")) + assert.Nil(t, ContentMD5("")) + assert.Nil(t, Expires(0)) + assert.Nil(t, AcceptEncoding("")) + assert.Nil(t, IfModifiedSince(0)) + assert.Nil(t, IfMatch("")) + assert.Nil(t, IfNoneMatch("")) + assert.Nil(t, Range(0, 0)) + assert.Nil(t, CopySourceIfMatch("")) + assert.Nil(t, CopySourceIfNoneMatch("")) + assert.Nil(t, CopySourceIfModifiedSince(0)) + assert.Nil(t, CopySourceIfUnmodifiedSince(0)) + assert.Nil(t, IfUnmodifiedSince(0)) + assert.Nil(t, MetadataDirective("")) + assert.Nil(t, ServerSideEncryption("")) + assert.Nil(t, 
ServerSideEncryptionKeyID("")) + assert.Nil(t, ServerSideDataEncryption("")) + assert.Nil(t, SSECAlgorithm("")) + assert.Nil(t, SSECKey("")) + assert.Nil(t, SSECKeyMd5("")) + assert.Nil(t, Origin("")) + assert.Nil(t, RangeBehavior("")) + assert.Nil(t, PartHashCtxHeader("")) + assert.Nil(t, PartNumberMarker(0)) + assert.Nil(t, PartHashCtxParam("")) + assert.Nil(t, PartMd5CtxHeader("")) + assert.Nil(t, PartMd5CtxParam("")) + assert.Nil(t, Marker("")) + assert.Nil(t, MaxKeys(0)) + assert.Nil(t, EncodingType("")) + assert.Nil(t, VersionId("")) + assert.Nil(t, VersionIdMarker("")) + assert.Nil(t, MaxParts(0)) + assert.Nil(t, StorageClass("")) + assert.Nil(t, ResponseContentDisposition("")) + assert.Nil(t, ResponseCacheControl("")) + assert.Nil(t, ResponseContentEncoding("")) + assert.Nil(t, ResponseContentLanguage("")) + assert.Nil(t, ResponseContentType("")) + assert.Nil(t, ResponseExpires("")) + assert.Nil(t, Process("")) + assert.Nil(t, TrafficLimitParam(0)) + assert.Nil(t, RequestPayer("")) + assert.Nil(t, SetTagging(nil)) +} + +func TestOptionNotNil(t *testing.T) { + assert.NotNil(t, Prefix(" ")) + assert.NotNil(t, KeyMarker(" ")) + assert.NotNil(t, MaxUploads(1)) + assert.NotNil(t, Delimiter(" ")) + assert.NotNil(t, UploadIDMarker(" ")) + assert.NotNil(t, VersionId(" ")) + assert.NotNil(t, ObjectACL(" ")) + assert.NotNil(t, CacheControl(" ")) + assert.NotNil(t, ContentEncoding(" ")) + assert.NotNil(t, ACL(" ")) + assert.NotNil(t, ContentType(" ")) + assert.NotNil(t, ContentLength(1)) + assert.NotNil(t, ContentDisposition(" ")) + assert.NotNil(t, ContentLanguage(" ")) + assert.NotNil(t, ContentMD5(" ")) + assert.NotNil(t, Expires(1)) + assert.NotNil(t, AcceptEncoding(" ")) + assert.NotNil(t, IfModifiedSince(1)) + assert.NotNil(t, IfMatch(" ")) + assert.NotNil(t, IfNoneMatch(" ")) + assert.NotNil(t, Range(1, 1)) + assert.NotNil(t, CopySourceIfMatch(" ")) + assert.NotNil(t, CopySourceIfNoneMatch(" ")) + assert.NotNil(t, CopySourceIfModifiedSince(1)) + 
assert.NotNil(t, CopySourceIfUnmodifiedSince(1)) + assert.NotNil(t, IfUnmodifiedSince(1)) + assert.NotNil(t, MetadataDirective(" ")) + assert.NotNil(t, Meta(" ", " ")) + assert.NotNil(t, ServerSideEncryption(" ")) + assert.NotNil(t, ServerSideEncryptionKeyID(" ")) + assert.NotNil(t, ServerSideDataEncryption(" ")) + assert.NotNil(t, SSECAlgorithm(" ")) + assert.NotNil(t, SSECKey(" ")) + assert.NotNil(t, SSECKeyMd5(" ")) + assert.NotNil(t, Origin(" ")) + assert.NotNil(t, RangeBehavior(" ")) + assert.NotNil(t, PartHashCtxHeader(" ")) + assert.NotNil(t, PartNumberMarker(1)) + assert.NotNil(t, PartHashCtxParam(" ")) + assert.NotNil(t, PartMd5CtxHeader(" ")) + assert.NotNil(t, PartMd5CtxParam(" ")) + assert.NotNil(t, Marker(" ")) + assert.NotNil(t, MaxKeys(1)) + assert.NotNil(t, EncodingType(" ")) + assert.NotNil(t, VersionId(" ")) + assert.NotNil(t, VersionIdMarker(" ")) + assert.NotNil(t, MaxParts(1)) + assert.NotNil(t, StorageClass(" ")) + assert.NotNil(t, ResponseContentDisposition(" ")) + assert.NotNil(t, ResponseCacheControl(" ")) + assert.NotNil(t, ResponseContentEncoding(" ")) + assert.NotNil(t, ResponseContentLanguage(" ")) + assert.NotNil(t, ResponseContentType(" ")) + assert.NotNil(t, ResponseExpires(" ")) + assert.NotNil(t, Process(" ")) + assert.NotNil(t, TrafficLimitParam(1)) + assert.NotNil(t, SetHeader(" ", " ")) + assert.NotNil(t, AddParam(" ", " ")) + assert.NotNil(t, RequestPayer(" ")) + assert.NotNil(t, SetTagging(map[string]string{"k": "v"})) +} diff --git a/components/oss/aliyun/oss.go b/components/oss/aliyun/oss.go new file mode 100644 index 0000000000..16022aa8d7 --- /dev/null +++ b/components/oss/aliyun/oss.go @@ -0,0 +1,643 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ + +package aliyun + +import ( + "context" + "encoding/json" + "net/http" + "strconv" + + "mosn.io/layotto/components/pkg/utils" + + l8oss "mosn.io/layotto/components/oss" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" +) + +const ( + connectTimeoutSec = "connectTimeoutSec" + readWriteTimeoutSec = "readWriteTimeout" +) + +type AliyunOSS struct { + client *oss.Client + basicConf json.RawMessage +} + +func NewAliyunOss() l8oss.Oss { + return &AliyunOSS{} +} + +func (a *AliyunOSS) Init(ctx context.Context, config *l8oss.Config) error { + var connectTimeout, readWriteTimeout int64 + a.basicConf = config.Metadata[l8oss.BasicConfiguration] + m := &utils.OssMetadata{} + err := json.Unmarshal(a.basicConf, &m) + if err != nil { + return l8oss.ErrInvalid + } + connectTimeout = 30 + readWriteTimeout = 60 + if t, ok := config.Metadata[connectTimeoutSec]; ok { + v, err := strconv.Atoi(string(t)) + if err == nil { + connectTimeout = int64(v) + } + } + if t, ok := config.Metadata[readWriteTimeoutSec]; ok { + v, err := strconv.Atoi(string(t)) + if err == nil { + readWriteTimeout = int64(v) + } + } + + client, err := oss.New(m.Endpoint, m.AccessKeyID, m.AccessKeySecret, oss.Timeout(connectTimeout, readWriteTimeout)) + if err != nil { + return err + } + a.client = client + return nil +} + +func (a *AliyunOSS) GetObject(ctx context.Context, req *l8oss.GetObjectInput) (*l8oss.GetObjectOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := client.Bucket(req.Bucket) + if err != nil { + return nil, err + } + //user can use 
SignedUrl to get file without ak、sk + if req.SignedUrl != "" { + body, err := bucket.GetObjectWithURL(req.SignedUrl) + return &l8oss.GetObjectOutput{DataStream: body}, err + } + body, err := bucket.GetObject(req.Key, + IfUnmodifiedSince(req.IfUnmodifiedSince), + IfModifiedSince(req.IfModifiedSince), + IfMatch(req.IfMatch), + IfNoneMatch(req.IfNoneMatch), + Range(req.Start, req.End), + AcceptEncoding(req.AcceptEncoding), + ) + + return &l8oss.GetObjectOutput{DataStream: body}, err +} + +func (a *AliyunOSS) PutObject(ctx context.Context, req *l8oss.PutObjectInput) (*l8oss.PutObjectOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + metaOption := []oss.Option{ + CacheControl(req.CacheControl), + ContentDisposition(req.ContentDisposition), + ContentEncoding(req.ContentEncoding), + Expires(req.Expires), + ServerSideEncryption(req.ServerSideEncryption), + ObjectACL(req.ACL), + SetTagging(req.Tagging), + } + for k, v := range req.Meta { + o := oss.Meta(k, v) + metaOption = append(metaOption, o) + } + //user can use SignedUrl to put file without ak、sk + if req.SignedUrl != "" { + err = bucket.PutObjectWithURL(req.SignedUrl, req.DataStream, + metaOption..., + ) + } else { + err = bucket.PutObject(req.Key, req.DataStream, + metaOption..., + ) + } + return &l8oss.PutObjectOutput{}, err +} + +func (a *AliyunOSS) DeleteObject(ctx context.Context, req *l8oss.DeleteObjectInput) (*l8oss.DeleteObjectOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + err = bucket.DeleteObject(req.Key, RequestPayer(req.RequestPayer), VersionId(req.VersionId)) + return &l8oss.DeleteObjectOutput{}, err +} +func (a *AliyunOSS) DeleteObjects(ctx context.Context, req *l8oss.DeleteObjectsInput) (*l8oss.DeleteObjectsOutput, error) { + cli, err := a.getClient() + if err != nil { + 
return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + var objects []oss.DeleteObject + for _, v := range req.Delete.Objects { + object := oss.DeleteObject{Key: v.Key, VersionId: v.VersionId} + objects = append(objects, object) + } + resp, err := bucket.DeleteObjectVersions(objects, oss.DeleteObjectsQuiet(req.Delete.Quiet)) + if err != nil { + return nil, err + } + out := &l8oss.DeleteObjectsOutput{} + for _, v := range resp.DeletedObjectsDetail { + object := &l8oss.DeletedObject{Key: v.Key, VersionId: v.VersionId, DeleteMarker: v.DeleteMarker, DeleteMarkerVersionId: v.DeleteMarkerVersionId} + out.Deleted = append(out.Deleted, object) + } + return out, err +} + +func (a *AliyunOSS) PutObjectTagging(ctx context.Context, req *l8oss.PutObjectTaggingInput) (*l8oss.PutObjectTaggingOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + tagging := oss.Tagging{} + for k, v := range req.Tags { + tag := oss.Tag{Key: k, Value: v} + tagging.Tags = append(tagging.Tags, tag) + } + err = bucket.PutObjectTagging(req.Key, tagging, VersionId(req.VersionId)) + return nil, err +} + +func (a *AliyunOSS) DeleteObjectTagging(ctx context.Context, req *l8oss.DeleteObjectTaggingInput) (*l8oss.DeleteObjectTaggingOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + err = bucket.DeleteObjectTagging(req.Key) + return nil, err +} + +func (a *AliyunOSS) GetObjectTagging(ctx context.Context, req *l8oss.GetObjectTaggingInput) (*l8oss.GetObjectTaggingOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.GetObjectTagging(req.Key) + if err != nil { + return nil, err + } + out := 
&l8oss.GetObjectTaggingOutput{Tags: map[string]string{}} + for _, v := range resp.Tags { + out.Tags[v.Key] = v.Value + } + return out, err +} + +func (a *AliyunOSS) GetObjectCannedAcl(ctx context.Context, req *l8oss.GetObjectCannedAclInput) (*l8oss.GetObjectCannedAclOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.GetObjectACL(req.Key) + if err != nil { + return nil, err + } + output := &l8oss.GetObjectCannedAclOutput{CannedAcl: resp.ACL, Owner: &l8oss.Owner{DisplayName: resp.Owner.DisplayName, ID: resp.Owner.ID}} + return output, err +} +func (a *AliyunOSS) PutObjectCannedAcl(ctx context.Context, req *l8oss.PutObjectCannedAclInput) (*l8oss.PutObjectCannedAclOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + err = bucket.SetObjectACL(req.Key, oss.ACLType(req.Acl)) + output := &l8oss.PutObjectCannedAclOutput{} + return output, err +} +func (a *AliyunOSS) ListObjects(ctx context.Context, req *l8oss.ListObjectsInput) (*l8oss.ListObjectsOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.ListObjects() + if err != nil { + return nil, err + } + out := &l8oss.ListObjectsOutput{ + CommonPrefixes: resp.CommonPrefixes, + Delimiter: resp.Delimiter, + IsTruncated: resp.IsTruncated, + Marker: resp.Marker, + MaxKeys: int32(resp.MaxKeys), + NextMarker: resp.NextMarker, + Prefix: resp.Prefix, + } + for _, v := range resp.Objects { + object := &l8oss.Object{ + ETag: v.ETag, + Key: v.Key, + LastModified: v.LastModified.Unix(), + Owner: &l8oss.Owner{ID: v.Owner.ID, DisplayName: v.Owner.DisplayName}, + Size: v.Size, + StorageClass: v.StorageClass, + } + out.Contents = append(out.Contents, object) + } + 
return out, nil +} +func (a *AliyunOSS) CopyObject(ctx context.Context, req *l8oss.CopyObjectInput) (*l8oss.CopyObjectOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + var options []oss.Option + for k, v := range req.Metadata { + option := Meta(k, v) + options = append(options, option) + } + options = append(options, MetadataDirective(req.MetadataDirective)) + options = append(options, VersionId(req.CopySource.CopySourceVersionId)) + resp, err := bucket.CopyObject(req.CopySource.CopySourceKey, req.Key, options...) + if err != nil { + return nil, err + } + out := &l8oss.CopyObjectOutput{CopyObjectResult: &l8oss.CopyObjectResult{ETag: resp.ETag, LastModified: resp.LastModified.Unix()}} + return out, err +} + +func (a *AliyunOSS) CreateMultipartUpload(ctx context.Context, req *l8oss.CreateMultipartUploadInput) (*l8oss.CreateMultipartUploadOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.InitiateMultipartUpload(req.Key) + output := &l8oss.CreateMultipartUploadOutput{Bucket: resp.Bucket, Key: resp.Key, UploadId: resp.UploadID} + return output, err +} +func (a *AliyunOSS) UploadPart(ctx context.Context, req *l8oss.UploadPartInput) (*l8oss.UploadPartOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.UploadPart( + oss.InitiateMultipartUploadResult{Bucket: req.Bucket, Key: req.Key, UploadID: req.UploadId}, + req.DataStream, + req.ContentLength, + int(req.PartNumber)) + output := &l8oss.UploadPartOutput{ETag: resp.ETag} + return output, err +} +func (a *AliyunOSS) UploadPartCopy(ctx context.Context, req *l8oss.UploadPartCopyInput) (*l8oss.UploadPartCopyOutput, error) { + cli, err := 
a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.UploadPartCopy( + oss.InitiateMultipartUploadResult{Bucket: req.Bucket, Key: req.Key, UploadID: req.UploadId}, + req.CopySource.CopySourceBucket, + req.CopySource.CopySourceKey, + req.StartPosition, + req.PartSize, + int(req.PartNumber), + VersionId(req.CopySource.CopySourceVersionId), + ) + output := &l8oss.UploadPartCopyOutput{CopyPartResult: &l8oss.CopyPartResult{ETag: resp.ETag}} + return output, err +} +func (a *AliyunOSS) CompleteMultipartUpload(ctx context.Context, req *l8oss.CompleteMultipartUploadInput) (*l8oss.CompleteMultipartUploadOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + + parts := make([]oss.UploadPart, 0) + if req.MultipartUpload != nil { + for _, v := range req.MultipartUpload.Parts { + part := oss.UploadPart{PartNumber: int(v.PartNumber), ETag: v.ETag} + parts = append(parts, part) + } + } + resp, err := bucket.CompleteMultipartUpload( + oss.InitiateMultipartUploadResult{Bucket: req.Bucket, Key: req.Key, UploadID: req.UploadId}, + parts, + ) + output := &l8oss.CompleteMultipartUploadOutput{Location: resp.Location, Bucket: resp.Bucket, Key: resp.Key, ETag: resp.ETag} + return output, err +} +func (a *AliyunOSS) AbortMultipartUpload(ctx context.Context, req *l8oss.AbortMultipartUploadInput) (*l8oss.AbortMultipartUploadOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + + err = bucket.AbortMultipartUpload( + oss.InitiateMultipartUploadResult{Bucket: req.Bucket, Key: req.Key, UploadID: req.UploadId}, + ) + output := &l8oss.AbortMultipartUploadOutput{} + return output, err +} +func (a *AliyunOSS) ListMultipartUploads(ctx context.Context, req 
*l8oss.ListMultipartUploadsInput) (*l8oss.ListMultipartUploadsOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.ListMultipartUploads(Prefix(req.Prefix), KeyMarker(req.KeyMarker), MaxUploads(int(req.MaxUploads)), Delimiter(req.Delimiter), UploadIDMarker(req.UploadIdMarker)) + output := &l8oss.ListMultipartUploadsOutput{ + Bucket: resp.Bucket, + Delimiter: resp.Delimiter, + Prefix: resp.Prefix, + KeyMarker: resp.KeyMarker, + UploadIDMarker: resp.UploadIDMarker, + NextKeyMarker: resp.NextKeyMarker, + NextUploadIDMarker: resp.NextUploadIDMarker, + MaxUploads: int32(resp.MaxUploads), + IsTruncated: resp.IsTruncated, + CommonPrefixes: resp.CommonPrefixes, + } + for _, v := range resp.Uploads { + upload := &l8oss.MultipartUpload{Initiated: v.Initiated.Unix(), UploadId: v.UploadID, Key: v.Key} + output.Uploads = append(output.Uploads, upload) + } + return output, err +} + +func (a *AliyunOSS) RestoreObject(ctx context.Context, req *l8oss.RestoreObjectInput) (*l8oss.RestoreObjectOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + err = bucket.RestoreObject(req.Key) + output := &l8oss.RestoreObjectOutput{} + return output, err +} + +func (a *AliyunOSS) ListObjectVersions(ctx context.Context, req *l8oss.ListObjectVersionsInput) (*l8oss.ListObjectVersionsOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.ListObjectVersions() + output := &l8oss.ListObjectVersionsOutput{ + Name: resp.Name, + Prefix: resp.Prefix, + KeyMarker: resp.KeyMarker, + VersionIdMarker: resp.VersionIdMarker, + MaxKeys: int32(resp.MaxKeys), + Delimiter: resp.Delimiter, + IsTruncated: resp.IsTruncated, + NextKeyMarker: 
resp.NextKeyMarker, + NextVersionIdMarker: resp.NextVersionIdMarker, + CommonPrefixes: resp.CommonPrefixes, + } + for _, v := range resp.ObjectDeleteMarkers { + marker := &l8oss.DeleteMarkerEntry{ + IsLatest: v.IsLatest, + Key: v.Key, + LastModified: v.LastModified.Unix(), + Owner: &l8oss.Owner{ + ID: v.Owner.ID, + DisplayName: v.Owner.DisplayName, + }, + VersionId: v.VersionId, + } + output.DeleteMarkers = append(output.DeleteMarkers, marker) + } + + for _, v := range resp.ObjectVersions { + version := &l8oss.ObjectVersion{ + ETag: v.ETag, + IsLatest: v.IsLatest, + Key: v.Key, + LastModified: v.LastModified.Unix(), + Owner: &l8oss.Owner{ + ID: v.Owner.ID, + DisplayName: v.Owner.DisplayName, + }, + Size: v.Size, + StorageClass: v.StorageClass, + VersionId: v.VersionId, + } + output.Versions = append(output.Versions, version) + } + + return output, err +} + +func (a *AliyunOSS) HeadObject(ctx context.Context, req *l8oss.HeadObjectInput) (*l8oss.HeadObjectOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + output := &l8oss.HeadObjectOutput{ResultMetadata: map[string]string{}} + var resp http.Header + if req.WithDetails { + resp, err = bucket.GetObjectDetailedMeta(req.Key) + } else { + resp, err = bucket.GetObjectMeta(req.Key) + } + if err != nil { + return nil, err + } + for k, v := range resp { + for _, t := range v { + //if key exist,concatenated with commas + if _, ok := output.ResultMetadata[k]; ok { + output.ResultMetadata[k] = output.ResultMetadata[k] + "," + t + } else { + output.ResultMetadata[k] = t + } + } + } + return output, err +} + +func (a *AliyunOSS) IsObjectExist(ctx context.Context, req *l8oss.IsObjectExistInput) (*l8oss.IsObjectExistOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := 
bucket.IsObjectExist(req.Key) + return &l8oss.IsObjectExistOutput{FileExist: resp}, err +} + +func (a *AliyunOSS) SignURL(ctx context.Context, req *l8oss.SignURLInput) (*l8oss.SignURLOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.SignURL(req.Key, oss.HTTPMethod(req.Method), req.ExpiredInSec) + return &l8oss.SignURLOutput{SignedUrl: resp}, err +} + +//UpdateDownloadBandwidthRateLimit update all client rate +func (a *AliyunOSS) UpdateDownloadBandwidthRateLimit(ctx context.Context, req *l8oss.UpdateBandwidthRateLimitInput) error { + cli, err := a.getClient() + if err != nil { + return err + } + err = cli.LimitDownloadSpeed(int(req.AverageRateLimitInBitsPerSec)) + return err +} + +//UpdateUploadBandwidthRateLimit update all client rate +func (a *AliyunOSS) UpdateUploadBandwidthRateLimit(ctx context.Context, req *l8oss.UpdateBandwidthRateLimitInput) error { + cli, err := a.getClient() + if err != nil { + return err + } + err = cli.LimitUploadSpeed(int(req.AverageRateLimitInBitsPerSec)) + return err +} + +func (a *AliyunOSS) AppendObject(ctx context.Context, req *l8oss.AppendObjectInput) (*l8oss.AppendObjectOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.AppendObject(req.Key, req.DataStream, req.Position, + CacheControl(req.CacheControl), + ContentDisposition(req.ContentDisposition), + ContentEncoding(req.ContentEncoding), + Expires(req.Expires), + ServerSideEncryption(req.ServerSideEncryption), + ObjectACL(req.ACL), + ) + if err != nil { + return nil, err + } + return &l8oss.AppendObjectOutput{AppendPosition: resp}, err +} + +func (a *AliyunOSS) ListParts(ctx context.Context, req *l8oss.ListPartsInput) (*l8oss.ListPartsOutput, error) { + cli, err := a.getClient() + if err != nil { + return nil, 
err + } + bucket, err := cli.Bucket(req.Bucket) + if err != nil { + return nil, err + } + resp, err := bucket.ListUploadedParts(oss.InitiateMultipartUploadResult{Bucket: req.Bucket, Key: req.Key, UploadID: req.UploadId}, + MaxParts(int(req.MaxParts)), + PartNumberMarker(int(req.PartNumberMarker)), + RequestPayer(req.RequestPayer), + ) + if err != nil { + return nil, err + } + out := &l8oss.ListPartsOutput{ + Bucket: resp.Bucket, + Key: resp.Key, + UploadId: resp.UploadID, + NextPartNumberMarker: resp.NextPartNumberMarker, + MaxParts: int64(resp.MaxParts), + IsTruncated: resp.IsTruncated, + } + for _, v := range resp.UploadedParts { + part := &l8oss.Part{Etag: v.ETag, LastModified: v.LastModified.Unix(), PartNumber: int64(v.PartNumber), Size: int64(v.Size)} + out.Parts = append(out.Parts, part) + } + return out, err +} + +func (a *AliyunOSS) getClient() (*oss.Client, error) { + if a.client == nil { + return nil, utils.ErrNotInitClient + } + return a.client, nil +} diff --git a/components/oss/aliyun/oss_test.go b/components/oss/aliyun/oss_test.go new file mode 100644 index 0000000000..8ccabb17ce --- /dev/null +++ b/components/oss/aliyun/oss_test.go @@ -0,0 +1,138 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ + +package aliyun + +import ( + "context" + "encoding/json" + "testing" + + "mosn.io/layotto/components/pkg/utils" + + "mosn.io/layotto/components/oss" + + "mosn.io/pkg/buffer" + + "github.com/stretchr/testify/assert" + + l8oss "mosn.io/layotto/components/oss" +) + +const ( + confWithoutUidAndBucket = ` + { + "endpoint": "endpoint_address", + "accessKeyID": "accessKey", + "accessKeySecret": "secret" + } + ` +) + +func TestInitAliyunOss(t *testing.T) { + a := &AliyunOSS{} + client, err := a.getClient() + assert.Equal(t, err, utils.ErrNotInitClient) + assert.Nil(t, client) + err = a.Init(context.TODO(), &l8oss.Config{Metadata: map[string]json.RawMessage{oss.BasicConfiguration: []byte("hello")}}) + assert.Equal(t, err, l8oss.ErrInvalid) + err = a.Init(context.TODO(), &l8oss.Config{Metadata: map[string]json.RawMessage{oss.BasicConfiguration: []byte(confWithoutUidAndBucket)}}) + assert.NotEqual(t, l8oss.ErrInvalid, err) + assert.NotNil(t, a.client) + +} + +func TestAliyunOss(t *testing.T) { + instance := NewAliyunOss() + instance.Init(context.TODO(), &l8oss.Config{Metadata: map[string]json.RawMessage{connectTimeoutSec: []byte("1"), readWriteTimeoutSec: []byte("1"), oss.BasicConfiguration: []byte(confWithoutUidAndBucket)}}) + appendObjectResp, err := instance.AppendObject(context.TODO(), &oss.AppendObjectInput{Bucket: "bucket"}) + assert.NotNil(t, err) + assert.Nil(t, appendObjectResp) + + _, err = instance.AbortMultipartUpload(context.TODO(), &oss.AbortMultipartUploadInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = instance.CompleteMultipartUpload(context.TODO(), &oss.CompleteMultipartUploadInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = instance.CopyObject(context.TODO(), &oss.CopyObjectInput{Bucket: "bucket", CopySource: &oss.CopySource{}}) + assert.NotNil(t, err) + + _, err = instance.CreateMultipartUpload(context.TODO(), &oss.CreateMultipartUploadInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = 
instance.DeleteObject(context.TODO(), &oss.DeleteObjectInput{Bucket: "bucket"}) + assert.NotNil(t, err) + _, err = instance.DeleteObjects(context.TODO(), &oss.DeleteObjectsInput{ + Bucket: "bucket", + Delete: &oss.Delete{}, + }) + assert.NotNil(t, err) + _, err = instance.DeleteObjectTagging(context.TODO(), &oss.DeleteObjectTaggingInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = instance.GetObject(context.TODO(), &oss.GetObjectInput{Bucket: "bucket"}) + assert.NotNil(t, err) + _, err = instance.GetObjectCannedAcl(context.TODO(), &oss.GetObjectCannedAclInput{Bucket: "bucket"}) + assert.NotNil(t, err) + _, err = instance.GetObjectTagging(context.TODO(), &oss.GetObjectTaggingInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = instance.HeadObject(context.TODO(), &oss.HeadObjectInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = instance.IsObjectExist(context.TODO(), &oss.IsObjectExistInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = instance.ListParts(context.TODO(), &oss.ListPartsInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = instance.ListMultipartUploads(context.TODO(), &oss.ListMultipartUploadsInput{Bucket: "bucket"}) + assert.NotNil(t, err) + _, err = instance.ListObjects(context.TODO(), &oss.ListObjectsInput{Bucket: "bucket"}) + assert.NotNil(t, err) + _, err = instance.ListObjectVersions(context.TODO(), &oss.ListObjectVersionsInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + stream := buffer.NewIoBufferString("hello") + _, err = instance.PutObject(context.TODO(), &oss.PutObjectInput{DataStream: stream, Bucket: "bucket"}) + assert.NotNil(t, err) + _, err = instance.PutObjectCannedAcl(context.TODO(), &oss.PutObjectCannedAclInput{Bucket: "bucket"}) + assert.NotNil(t, err) + _, err = instance.PutObjectTagging(context.TODO(), &oss.PutObjectTaggingInput{Bucket: "bucket"}) + assert.NotNil(t, err) + + _, err = instance.RestoreObject(context.TODO(), &oss.RestoreObjectInput{}) + assert.NotNil(t, err) + + _, err 
= instance.SignURL(context.TODO(), &oss.SignURLInput{}) + assert.NotNil(t, err) + + _, err = instance.UploadPartCopy(context.TODO(), &oss.UploadPartCopyInput{ + CopySource: &oss.CopySource{CopySourceBucket: "bucket", CopySourceKey: "key"}, + }) + assert.NotNil(t, err) + + _, err = instance.UploadPart(context.TODO(), &oss.UploadPartInput{}) + assert.NotNil(t, err) + + err = instance.UpdateDownloadBandwidthRateLimit(context.TODO(), &oss.UpdateBandwidthRateLimitInput{}) + assert.Nil(t, err) + + err = instance.UpdateUploadBandwidthRateLimit(context.TODO(), &oss.UpdateBandwidthRateLimitInput{}) + assert.Nil(t, err) + +} diff --git a/components/oss/aws/option.go b/components/oss/aws/option.go new file mode 100644 index 0000000000..7869a46acf --- /dev/null +++ b/components/oss/aws/option.go @@ -0,0 +1,43 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ + +package aws + +import ( + "time" + + "github.com/jinzhu/copier" +) + +var ( + int642time = copier.TypeConverter{ + SrcType: int64(0), + DstType: &time.Time{}, + Fn: func(src interface{}) (interface{}, error) { + s, _ := src.(int64) + t := time.Unix(s, 0) + return &t, nil + }, + } + time2int64 = copier.TypeConverter{ + SrcType: &time.Time{}, + DstType: int64(0), + Fn: func(src interface{}) (interface{}, error) { + s, _ := src.(*time.Time) + return s.Unix(), nil + }, + } +) diff --git a/components/oss/aws/option_test.go b/components/oss/aws/option_test.go new file mode 100644 index 0000000000..4ea5f0f766 --- /dev/null +++ b/components/oss/aws/option_test.go @@ -0,0 +1,52 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ + +package aws + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/jinzhu/copier" +) + +func TestCopierOption(t *testing.T) { + type ValueWithInt64 struct { + TestString string + TestInt64toTime int64 + } + + type ValueWithTimer struct { + TestString *string + TestInt64toTime *time.Time + } + timer := time.Now().Unix() + srcValue := &ValueWithInt64{TestInt64toTime: timer} + destValue := &ValueWithTimer{} + err := copier.CopyWithOption(destValue, srcValue, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{int642time}}) + assert.Nil(t, err) + assert.Nil(t, destValue.TestString) + assert.Equal(t, timer, destValue.TestInt64toTime.Unix()) + + ti := time.Now() + src := &ValueWithTimer{TestInt64toTime: &ti} + dst := &ValueWithInt64{} + err = copier.CopyWithOption(dst, src, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{time2int64}}) + assert.Nil(t, err) + assert.Equal(t, ti.Unix(), dst.TestInt64toTime) +} diff --git a/components/oss/aws/oss.go b/components/oss/aws/oss.go new file mode 100644 index 0000000000..7573e3bd7c --- /dev/null +++ b/components/oss/aws/oss.go @@ -0,0 +1,549 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ + +package aws + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + aws_config "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + + "mosn.io/layotto/components/pkg/utils" + + "mosn.io/layotto/components/oss" + + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/jinzhu/copier" + "mosn.io/pkg/log" +) + +type AwsOss struct { + client *s3.Client + basicConf json.RawMessage +} + +func NewAwsOss() oss.Oss { + return &AwsOss{} +} + +func (a *AwsOss) Init(ctx context.Context, config *oss.Config) error { + a.basicConf = config.Metadata[oss.BasicConfiguration] + m := &utils.OssMetadata{} + err := json.Unmarshal(a.basicConf, &m) + if err != nil { + return oss.ErrInvalid + } + optFunc := []func(options *aws_config.LoadOptions) error{ + aws_config.WithRegion(m.Region), + aws_config.WithCredentialsProvider(credentials.StaticCredentialsProvider{ + Value: aws.Credentials{ + AccessKeyID: m.AccessKeyID, SecretAccessKey: m.AccessKeySecret, + Source: "provider", + }, + }), + } + cfg, err := aws_config.LoadDefaultConfig(context.TODO(), optFunc...) 
+ if err != nil { + return err + } + client := s3.NewFromConfig(cfg) + a.client = client + return nil +} + +func (a *AwsOss) GetObject(ctx context.Context, req *oss.GetObjectInput) (*oss.GetObjectOutput, error) { + input := &s3.GetObjectInput{} + client, err := a.getClient() + if err != nil { + return nil, err + } + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + ob, err := client.GetObject(context.TODO(), input) + if err != nil { + return nil, err + } + out := &oss.GetObjectOutput{} + err = copier.Copy(out, ob) + if err != nil { + return nil, err + } + out.DataStream = ob.Body + return out, nil +} + +func (a *AwsOss) PutObject(ctx context.Context, req *oss.PutObjectInput) (*oss.PutObjectOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.PutObjectInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + input.Body = req.DataStream + uploader := manager.NewUploader(client) + resp, err := uploader.Upload(context.TODO(), input) + if err != nil { + return nil, err + } + out := &oss.PutObjectOutput{} + err = copier.Copy(out, resp) + if err != nil { + return nil, err + } + return out, err +} + +func (a *AwsOss) DeleteObject(ctx context.Context, req *oss.DeleteObjectInput) (*oss.DeleteObjectOutput, error) { + input := &s3.DeleteObjectInput{ + Bucket: &req.Bucket, + Key: &req.Key, + } + client, err := a.getClient() + if err != nil { + return nil, err + } + resp, err := client.DeleteObject(ctx, input) + if err != nil { + return nil, err + } + return &oss.DeleteObjectOutput{DeleteMarker: resp.DeleteMarker, RequestCharged: string(resp.RequestCharged), VersionId: *resp.VersionId}, err +} + +func (a *AwsOss) PutObjectTagging(ctx context.Context, req *oss.PutObjectTaggingInput) 
(*oss.PutObjectTaggingOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.PutObjectTaggingInput{Tagging: &types.Tagging{}} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + for k, v := range req.Tags { + k, v := k, v + input.Tagging.TagSet = append(input.Tagging.TagSet, types.Tag{Key: &k, Value: &v}) + } + _, err = client.PutObjectTagging(ctx, input) + return &oss.PutObjectTaggingOutput{}, err +} +func (a *AwsOss) DeleteObjectTagging(ctx context.Context, req *oss.DeleteObjectTaggingInput) (*oss.DeleteObjectTaggingOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.DeleteObjectTaggingInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + resp, err := client.DeleteObjectTagging(ctx, input) + if err != nil { + return nil, err + } + return &oss.DeleteObjectTaggingOutput{VersionId: *resp.VersionId}, err +} + +func (a *AwsOss) GetObjectTagging(ctx context.Context, req *oss.GetObjectTaggingInput) (*oss.GetObjectTaggingOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.GetObjectTaggingInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + resp, err := client.GetObjectTagging(ctx, input) + if err != nil { + return nil, err + } + + output := &oss.GetObjectTaggingOutput{Tags: map[string]string{}} + for _, tags := range resp.TagSet { + output.Tags[*tags.Key] = *tags.Value + } + return output, err +} + +func (a *AwsOss) CopyObject(ctx context.Context, req *oss.CopyObjectInput) (*oss.CopyObjectOutput, error) { + client, err := a.getClient() + if err != nil { + 
return nil, err + } + + if req.CopySource == nil { + return nil, errors.New("must specific copy_source") + } + + //TODO: should support objects accessed through access points + copySource := req.CopySource.CopySourceBucket + "/" + req.CopySource.CopySourceKey + if req.CopySource.CopySourceVersionId != "" { + copySource += "?versionId=" + req.CopySource.CopySourceVersionId + } + copySourceUrlEncode := url.QueryEscape(copySource) + input := &s3.CopyObjectInput{Bucket: &req.Bucket, Key: &req.Key, CopySource: ©SourceUrlEncode} + resp, err := client.CopyObject(ctx, input) + if err != nil { + return nil, err + } + return &oss.CopyObjectOutput{CopyObjectResult: &oss.CopyObjectResult{ETag: *resp.CopyObjectResult.ETag, LastModified: resp.CopyObjectResult.LastModified.Unix()}}, err +} +func (a *AwsOss) DeleteObjects(ctx context.Context, req *oss.DeleteObjectsInput) (*oss.DeleteObjectsOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.DeleteObjectsInput{ + Bucket: &req.Bucket, + Delete: &types.Delete{}, + } + if req.Delete != nil { + for _, v := range req.Delete.Objects { + object := &types.ObjectIdentifier{} + err = copier.CopyWithOption(object, v, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + input.Delete.Objects = append(input.Delete.Objects, *object) + } + } + resp, err := client.DeleteObjects(ctx, input) + if err != nil { + return nil, err + } + output := &oss.DeleteObjectsOutput{} + copier.Copy(output, resp) + return output, err +} +func (a *AwsOss) ListObjects(ctx context.Context, req *oss.ListObjectsInput) (*oss.ListObjectsOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + + input := &s3.ListObjectsInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + resp, err := 
client.ListObjects(ctx, input) + if err != nil { + return nil, err + } + output := &oss.ListObjectsOutput{} + err = copier.CopyWithOption(output, resp, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{time2int64}}) + // if not return NextMarker, use the value of the last Key in the response as the marker + if output.IsTruncated && output.NextMarker == "" { + index := len(output.Contents) - 1 + output.NextMarker = output.Contents[index].Key + } + return output, err +} +func (a *AwsOss) GetObjectCannedAcl(ctx context.Context, req *oss.GetObjectCannedAclInput) (*oss.GetObjectCannedAclOutput, error) { + return nil, errors.New("GetObjectCannedAcl method not supported on AWS") +} +func (a *AwsOss) PutObjectCannedAcl(ctx context.Context, req *oss.PutObjectCannedAclInput) (*oss.PutObjectCannedAclOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.PutObjectAclInput{Bucket: &req.Bucket, Key: &req.Key, ACL: types.ObjectCannedACL(req.Acl)} + resp, err := client.PutObjectAcl(ctx, input) + if err != nil { + return nil, err + } + return &oss.PutObjectCannedAclOutput{RequestCharged: string(resp.RequestCharged)}, err +} +func (a *AwsOss) RestoreObject(ctx context.Context, req *oss.RestoreObjectInput) (*oss.RestoreObjectOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.RestoreObjectInput{ + Bucket: &req.Bucket, + Key: &req.Key, + } + resp, err := client.RestoreObject(ctx, input) + if err != nil { + return nil, err + } + return &oss.RestoreObjectOutput{RequestCharged: string(resp.RequestCharged), RestoreOutputPath: *resp.RestoreOutputPath}, err +} +func (a *AwsOss) CreateMultipartUpload(ctx context.Context, req *oss.CreateMultipartUploadInput) (*oss.CreateMultipartUploadOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.CreateMultipartUploadInput{} + err = copier.CopyWithOption(input, req, 
copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{int642time}}) + if err != nil { + log.DefaultLogger.Errorf("copy CreateMultipartUploadInput fail, err: %+v", err) + return nil, err + } + resp, err := client.CreateMultipartUpload(ctx, input) + if err != nil { + return nil, err + } + output := &oss.CreateMultipartUploadOutput{} + copier.CopyWithOption(output, resp, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{time2int64}}) + return output, err +} +func (a *AwsOss) UploadPart(ctx context.Context, req *oss.UploadPartInput) (*oss.UploadPartOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.UploadPartInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + input.Body = req.DataStream + resp, err := client.UploadPart(ctx, input, s3.WithAPIOptions(v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware)) + if err != nil { + return nil, err + } + output := &oss.UploadPartOutput{} + err = copier.Copy(output, resp) + if err != nil { + return nil, err + } + return output, err +} +func (a *AwsOss) UploadPartCopy(ctx context.Context, req *oss.UploadPartCopyInput) (*oss.UploadPartCopyOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + + //TODO: should support objects accessed through access points + copySource := req.CopySource.CopySourceBucket + "/" + req.CopySource.CopySourceKey + if req.CopySource.CopySourceVersionId != "" { + copySource += "?versionId=" + req.CopySource.CopySourceVersionId + } + input := &s3.UploadPartCopyInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + input.CopySource = ©Source + resp, err := client.UploadPartCopy(ctx, input) + if err != nil { + 
return nil, err + } + output := &oss.UploadPartCopyOutput{} + err = copier.Copy(output, resp) + return output, err +} +func (a *AwsOss) CompleteMultipartUpload(ctx context.Context, req *oss.CompleteMultipartUploadInput) (*oss.CompleteMultipartUploadOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.CompleteMultipartUploadInput{MultipartUpload: &types.CompletedMultipartUpload{}} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + resp, err := client.CompleteMultipartUpload(ctx, input) + if err != nil { + return nil, err + } + output := &oss.CompleteMultipartUploadOutput{} + err = copier.Copy(output, resp) + return output, err +} +func (a *AwsOss) AbortMultipartUpload(ctx context.Context, req *oss.AbortMultipartUploadInput) (*oss.AbortMultipartUploadOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.AbortMultipartUploadInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + resp, err := client.AbortMultipartUpload(ctx, input) + if err != nil { + return nil, err + } + output := &oss.AbortMultipartUploadOutput{ + RequestCharged: string(resp.RequestCharged), + } + return output, err +} +func (a *AwsOss) ListMultipartUploads(ctx context.Context, req *oss.ListMultipartUploadsInput) (*oss.ListMultipartUploadsOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.ListMultipartUploadsInput{} + + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + + resp, err := client.ListMultipartUploads(ctx, input) + if err != nil { + return nil, err + } + output := 
&oss.ListMultipartUploadsOutput{CommonPrefixes: []string{}, Uploads: []*oss.MultipartUpload{}} + err = copier.Copy(output, resp) + if err != nil { + return nil, err + } + for _, v := range resp.CommonPrefixes { + output.CommonPrefixes = append(output.CommonPrefixes, *v.Prefix) + } + for _, v := range resp.Uploads { + upload := &oss.MultipartUpload{} + copier.CopyWithOption(upload, v, copier.Option{IgnoreEmpty: true, DeepCopy: true}) + output.Uploads = append(output.Uploads, upload) + } + return output, err +} +func (a *AwsOss) ListObjectVersions(ctx context.Context, req *oss.ListObjectVersionsInput) (*oss.ListObjectVersionsOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.ListObjectVersionsInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + resp, err := client.ListObjectVersions(ctx, input) + if err != nil { + return nil, err + } + output := &oss.ListObjectVersionsOutput{} + err = copier.Copy(output, resp) + if err != nil { + return nil, err + } + for _, v := range resp.CommonPrefixes { + output.CommonPrefixes = append(output.CommonPrefixes, *v.Prefix) + } + for _, v := range resp.DeleteMarkers { + entry := &oss.DeleteMarkerEntry{IsLatest: v.IsLatest, Key: *v.Key, Owner: &oss.Owner{DisplayName: *v.Owner.DisplayName, ID: *v.Owner.ID}, VersionId: *v.VersionId} + output.DeleteMarkers = append(output.DeleteMarkers, entry) + } + for _, v := range resp.Versions { + version := &oss.ObjectVersion{} + copier.CopyWithOption(version, v, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{time2int64}}) + output.Versions = append(output.Versions, version) + } + return output, err +} + +func (a *AwsOss) HeadObject(ctx context.Context, req *oss.HeadObjectInput) (*oss.HeadObjectOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input 
:= &s3.HeadObjectInput{} + err = copier.CopyWithOption(input, req, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{}}) + if err != nil { + return nil, err + } + resp, err := client.HeadObject(ctx, input) + if err != nil { + return nil, err + } + return &oss.HeadObjectOutput{ResultMetadata: resp.Metadata}, nil +} + +func (a *AwsOss) IsObjectExist(ctx context.Context, req *oss.IsObjectExistInput) (*oss.IsObjectExistOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + input := &s3.HeadObjectInput{Bucket: &req.Bucket, Key: &req.Key} + _, err = client.HeadObject(ctx, input) + if err != nil { + errorMsg := err.Error() + if strings.Contains(errorMsg, "StatusCode: 404") { + return &oss.IsObjectExistOutput{FileExist: false}, nil + } + return nil, err + } + return &oss.IsObjectExistOutput{FileExist: true}, nil +} + +func (a *AwsOss) SignURL(ctx context.Context, req *oss.SignURLInput) (*oss.SignURLOutput, error) { + client, err := a.getClient() + if err != nil { + return nil, err + } + resignClient := s3.NewPresignClient(client) + switch strings.ToUpper(req.Method) { + case "GET": + input := &s3.GetObjectInput{Bucket: &req.Bucket, Key: &req.Key} + resp, err := resignClient.PresignGetObject(ctx, input, s3.WithPresignExpires(time.Duration((req.ExpiredInSec)*int64(time.Second)))) + if err != nil { + return nil, err + } + return &oss.SignURLOutput{SignedUrl: resp.URL}, nil + case "PUT": + input := &s3.PutObjectInput{Bucket: &req.Bucket, Key: &req.Key} + resp, err := resignClient.PresignPutObject(ctx, input, s3.WithPresignExpires(time.Duration(req.ExpiredInSec*int64(time.Second)))) + if err != nil { + return nil, err + } + return &oss.SignURLOutput{SignedUrl: resp.URL}, nil + default: + return nil, fmt.Errorf("not supported method %+v now", req.Method) + } +} + +func (a *AwsOss) UpdateDownloadBandwidthRateLimit(ctx context.Context, req *oss.UpdateBandwidthRateLimitInput) error { + return 
errors.New("UpdateDownloadBandwidthRateLimit method not supported now") +} + +func (a *AwsOss) UpdateUploadBandwidthRateLimit(ctx context.Context, req *oss.UpdateBandwidthRateLimitInput) error { + return errors.New("UpdateUploadBandwidthRateLimit method not supported now") +} +func (a *AwsOss) AppendObject(ctx context.Context, req *oss.AppendObjectInput) (*oss.AppendObjectOutput, error) { + return nil, errors.New("AppendObject method not supported on AWS") +} + +func (a *AwsOss) ListParts(ctx context.Context, req *oss.ListPartsInput) (*oss.ListPartsOutput, error) { + return nil, errors.New("ListParts method not supported on AWS") +} + +func (a *AwsOss) getClient() (*s3.Client, error) { + if a.client == nil { + return nil, utils.ErrNotInitClient + } + return a.client, nil +} diff --git a/components/oss/aws/oss_test.go b/components/oss/aws/oss_test.go new file mode 100644 index 0000000000..78d893e65f --- /dev/null +++ b/components/oss/aws/oss_test.go @@ -0,0 +1,158 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ + +package aws + +import ( + "context" + "encoding/json" + "errors" + "testing" + "time" + + "github.com/jinzhu/copier" + + "github.com/aws/aws-sdk-go-v2/service/s3/types" + + "mosn.io/layotto/components/oss" + + "mosn.io/pkg/buffer" + + "github.com/stretchr/testify/assert" +) + +const ( + confWithoutUidAndBucket = ` + { + "endpoint": "endpoint_address", + "accessKeyID": "accessKey", + "accessKeySecret": "secret" + } + ` +) + +func TestAwsDefaultInitFunc(t *testing.T) { + a := &AwsOss{} + err := a.Init(context.TODO(), &oss.Config{Metadata: map[string]json.RawMessage{oss.BasicConfiguration: []byte("hello")}}) + assert.Equal(t, err, oss.ErrInvalid) + assert.Nil(t, a.client) + +} + +func TestAwsOss(t *testing.T) { + instance := &AwsOss{} + err := instance.Init(context.TODO(), &oss.Config{Metadata: map[string]json.RawMessage{oss.BasicConfiguration: []byte(confWithoutUidAndBucket)}}) + assert.Nil(t, err) + + appendObjectResp, err := instance.AppendObject(context.TODO(), &oss.AppendObjectInput{}) + assert.Equal(t, errors.New("AppendObject method not supported on AWS"), err) + assert.Nil(t, appendObjectResp) + + _, err = instance.AbortMultipartUpload(context.TODO(), &oss.AbortMultipartUploadInput{}) + assert.NotNil(t, err) + + _, err = instance.CompleteMultipartUpload(context.TODO(), &oss.CompleteMultipartUploadInput{}) + assert.NotNil(t, err) + + _, err = instance.CopyObject(context.TODO(), &oss.CopyObjectInput{}) + assert.Equal(t, errors.New("must specific copy_source"), err) + + _, err = instance.CopyObject(context.TODO(), &oss.CopyObjectInput{ + CopySource: &oss.CopySource{CopySourceBucket: "bucket", CopySourceKey: "key"}, + }) + assert.NotEqual(t, errors.New("must specific copy_source"), err) + _, err = instance.CreateMultipartUpload(context.TODO(), &oss.CreateMultipartUploadInput{}) + assert.NotNil(t, err) + + _, err = instance.DeleteObject(context.TODO(), &oss.DeleteObjectInput{}) + assert.NotNil(t, err) + _, err = instance.DeleteObjects(context.TODO(), 
&oss.DeleteObjectsInput{ + Delete: &oss.Delete{Objects: []*oss.ObjectIdentifier{{Key: "object", VersionId: "version"}}}, + }) + assert.NotNil(t, err) + _, err = instance.DeleteObjectTagging(context.TODO(), &oss.DeleteObjectTaggingInput{}) + assert.NotNil(t, err) + + _, err = instance.GetObject(context.TODO(), &oss.GetObjectInput{}) + assert.NotNil(t, err) + _, err = instance.GetObjectCannedAcl(context.TODO(), &oss.GetObjectCannedAclInput{}) + assert.NotNil(t, err) + _, err = instance.GetObjectTagging(context.TODO(), &oss.GetObjectTaggingInput{}) + assert.NotNil(t, err) + + _, err = instance.HeadObject(context.TODO(), &oss.HeadObjectInput{}) + assert.NotNil(t, err) + + _, err = instance.IsObjectExist(context.TODO(), &oss.IsObjectExistInput{}) + assert.NotNil(t, err) + + _, err = instance.ListParts(context.TODO(), &oss.ListPartsInput{}) + assert.NotNil(t, err) + + _, err = instance.ListMultipartUploads(context.TODO(), &oss.ListMultipartUploadsInput{}) + assert.NotNil(t, err) + _, err = instance.ListObjects(context.TODO(), &oss.ListObjectsInput{}) + assert.NotNil(t, err) + _, err = instance.ListObjectVersions(context.TODO(), &oss.ListObjectVersionsInput{}) + assert.NotNil(t, err) + + stream := buffer.NewIoBufferString("hello") + _, err = instance.PutObject(context.TODO(), &oss.PutObjectInput{DataStream: stream}) + assert.NotNil(t, err) + _, err = instance.PutObjectCannedAcl(context.TODO(), &oss.PutObjectCannedAclInput{}) + assert.NotNil(t, err) + _, err = instance.PutObjectTagging(context.TODO(), &oss.PutObjectTaggingInput{}) + assert.NotNil(t, err) + + _, err = instance.RestoreObject(context.TODO(), &oss.RestoreObjectInput{}) + assert.NotNil(t, err) + + _, err = instance.SignURL(context.TODO(), &oss.SignURLInput{}) + assert.NotNil(t, err) + + _, err = instance.UploadPartCopy(context.TODO(), &oss.UploadPartCopyInput{ + CopySource: &oss.CopySource{CopySourceBucket: "bucket", CopySourceKey: "key"}, + }) + assert.NotNil(t, err) + + _, err = 
instance.UploadPart(context.TODO(), &oss.UploadPartInput{}) + assert.NotNil(t, err) + + err = instance.UpdateDownloadBandwidthRateLimit(context.TODO(), &oss.UpdateBandwidthRateLimitInput{}) + assert.NotNil(t, err) + + err = instance.UpdateUploadBandwidthRateLimit(context.TODO(), &oss.UpdateBandwidthRateLimitInput{}) + assert.NotNil(t, err) +} + +func TestDeepCopy(t *testing.T) { + value := "hello" + t1 := time.Now() + fromValue := &types.ObjectVersion{ + ETag: &value, + IsLatest: true, + Key: &value, + LastModified: &t1, + Owner: &types.Owner{DisplayName: &value, ID: &value}, + Size: 10, + StorageClass: "hello", + VersionId: &value, + } + tovalue := &oss.ObjectVersion{} + err := copier.CopyWithOption(tovalue, fromValue, copier.Option{IgnoreEmpty: true, DeepCopy: true, Converters: []copier.TypeConverter{time2int64}}) + assert.Nil(t, err) + assert.Equal(t, tovalue.Owner.DisplayName, value) +} diff --git a/components/oss/oss.go b/components/oss/oss.go new file mode 100644 index 0000000000..39c42d35ba --- /dev/null +++ b/components/oss/oss.go @@ -0,0 +1,592 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ + +package oss + +import ( + "context" + "io" +) + +const ( + ServiceName = "oss" +) + +// Oss is the interface for ObjectStorageService components. +// For more details about the fields explanation, please refer to the `.proto` file. 
+type Oss interface { + Init(context.Context, *Config) error + GetObject(context.Context, *GetObjectInput) (*GetObjectOutput, error) + PutObject(context.Context, *PutObjectInput) (*PutObjectOutput, error) + DeleteObject(context.Context, *DeleteObjectInput) (*DeleteObjectOutput, error) + PutObjectTagging(context.Context, *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) + DeleteObjectTagging(context.Context, *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) + GetObjectTagging(context.Context, *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) + CopyObject(context.Context, *CopyObjectInput) (*CopyObjectOutput, error) + DeleteObjects(context.Context, *DeleteObjectsInput) (*DeleteObjectsOutput, error) + ListObjects(context.Context, *ListObjectsInput) (*ListObjectsOutput, error) + GetObjectCannedAcl(context.Context, *GetObjectCannedAclInput) (*GetObjectCannedAclOutput, error) + PutObjectCannedAcl(context.Context, *PutObjectCannedAclInput) (*PutObjectCannedAclOutput, error) + RestoreObject(context.Context, *RestoreObjectInput) (*RestoreObjectOutput, error) + CreateMultipartUpload(context.Context, *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) + UploadPart(context.Context, *UploadPartInput) (*UploadPartOutput, error) + UploadPartCopy(context.Context, *UploadPartCopyInput) (*UploadPartCopyOutput, error) + CompleteMultipartUpload(context.Context, *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) + AbortMultipartUpload(context.Context, *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) + ListMultipartUploads(context.Context, *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) + ListObjectVersions(context.Context, *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) + HeadObject(context.Context, *HeadObjectInput) (*HeadObjectOutput, error) + IsObjectExist(context.Context, *IsObjectExistInput) (*IsObjectExistOutput, error) + SignURL(context.Context, *SignURLInput) 
(*SignURLOutput, error) + UpdateDownloadBandwidthRateLimit(context.Context, *UpdateBandwidthRateLimitInput) error + UpdateUploadBandwidthRateLimit(context.Context, *UpdateBandwidthRateLimitInput) error + AppendObject(context.Context, *AppendObjectInput) (*AppendObjectOutput, error) + ListParts(context.Context, *ListPartsInput) (*ListPartsOutput, error) +} + +type GetObjectInput struct { + Bucket string `json:"bucket,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + IfMatch string `json:"if_match,omitempty"` + IfModifiedSince int64 `json:"if_modified_since,omitempty"` + IfNoneMatch string `json:"if_none_match,omitempty"` + IfUnmodifiedSince int64 `json:"if_unmodified_since,omitempty"` + Key string `json:"key,omitempty"` + PartNumber int64 `json:"part_number,omitempty"` + Start int64 `json:"start,omitempty"` + End int64 `json:"end,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` + ResponseCacheControl string `json:"response_cache_control,omitempty"` + ResponseContentDisposition string `json:"response_content_disposition,omitempty"` + ResponseContentEncoding string `json:"response_content_encoding,omitempty"` + ResponseContentLanguage string `json:"response_content_language,omitempty"` + ResponseContentType string `json:"response_content_type,omitempty"` + ResponseExpires string `json:"response_expires,omitempty"` + SseCustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` + SseCustomerKey string `json:"sse_customer_key,omitempty"` + SseCustomerKeyMd5 string `json:"sse_customer_key_md5,omitempty"` + VersionId string `json:"version_id,omitempty"` + AcceptEncoding string `json:"accept_encoding,omitempty"` + SignedUrl string `json:"signed_url,omitempty"` +} + +type GetObjectOutput struct { + DataStream io.ReadCloser + CacheControl string `json:"cache_control,omitempty"` + ContentDisposition string `json:"content_disposition,omitempty"` + ContentEncoding string `json:"content_encoding,omitempty"` + 
ContentLanguage string `json:"content_language,omitempty"` + ContentLength int64 `json:"content_length,omitempty"` + ContentRange string `json:"content_range,omitempty"` + ContentType string `json:"content_type,omitempty"` + DeleteMarker bool `json:"delete_marker,omitempty"` + Etag string `json:"etag,omitempty"` + Expiration string `json:"expiration,omitempty"` + Expires string `json:"expires,omitempty"` + LastModified int64 `json:"last_modified,omitempty"` + VersionId string `json:"version_id,omitempty"` + TagCount int64 `json:"tag_count,omitempty"` + StorageClass string `json:"storage_class,omitempty"` + PartsCount int64 `json:"parts_count,omitempty"` + Metadata map[string]string `json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type PutObjectInput struct { + DataStream io.Reader + ACL string `json:"acl,omitempty"` + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + BucketKeyEnabled bool `json:"bucket_key_enabled,omitempty"` + CacheControl string `json:"cache_control,omitempty"` + ContentDisposition string `json:"content_disposition,omitempty"` + ContentEncoding string `json:"content_encoding,omitempty"` + Expires int64 `json:"expires,omitempty"` + ServerSideEncryption string `json:"server_side_encryption,omitempty"` + SignedUrl string `json:"signed_url,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Tagging map[string]string `json:"tagging,omitempty"` +} + +type PutObjectOutput struct { + BucketKeyEnabled bool `json:"bucket_key_enabled,omitempty"` + ETag string `json:"etag,omitempty"` +} + +type DeleteObjectInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` + VersionId string `json:"version_id,omitempty"` +} +type DeleteObjectOutput struct { + DeleteMarker bool `json:"delete_marker,omitempty"` + RequestCharged string `json:"request_charged,omitempty"` + 
VersionId string `json:"version_id,omitempty"` +} + +type PutObjectTaggingInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + Tags map[string]string `json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + VersionId string `json:"version_id,omitempty"` +} +type PutObjectTaggingOutput struct { +} + +type DeleteObjectTaggingInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + VersionId string `json:"version_id,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` +} +type DeleteObjectTaggingOutput struct { + VersionId string `json:"version_id,omitempty"` +} + +type GetObjectTaggingInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + VersionId string ` json:"version_id,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + RequestPayer string ` json:"request_payer,omitempty"` +} +type GetObjectTaggingOutput struct { + Tags map[string]string `json:"tags,omitempty"` + VersionId string `json:"version_id,omitempty"` + ResultMetadata map[string]string `json:"result_metadata,omitempty"` +} + +type CopySource struct { + CopySourceBucket string `json:"copy_source_bucket,omitempty"` + CopySourceKey string `json:"copy_source_key,omitempty"` + CopySourceVersionId string `json:"copy_source_version_id,omitempty"` +} + +type CopyObjectInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + CopySource *CopySource `json:"copy_source,omitempty"` + Tagging map[string]string `json:"tagging,omitempty"` + Expires int64 `json:"expires,omitempty"` + // Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. + MetadataDirective string `json:"metadata_directive,omitempty"` + // A map of metadata to store with the object in S3. 
+ Metadata map[string]string `json:"metadata,omitempty"` +} +type CopyObjectOutput struct { + CopyObjectResult *CopyObjectResult `json:"copy_object_result,omitempty"` +} +type CopyObjectResult struct { + ETag string `json:"etag,omitempty"` + LastModified int64 `json:"LastModified,omitempty"` +} + +type DeleteObjectsInput struct { + Bucket string `json:"bucket,omitempty"` + Delete *Delete `json:"delete,omitempty"` +} +type Delete struct { + Objects []*ObjectIdentifier `json:"objects,omitempty"` + Quiet bool `json:"quiet,omitempty"` +} +type ObjectIdentifier struct { + Key string `json:"key,omitempty"` + VersionId string `json:"version_id,omitempty"` +} + +type DeleteObjectsOutput struct { + Deleted []*DeletedObject `json:"deleted,omitempty"` +} + +type DeletedObject struct { + DeleteMarker bool `json:"delete_marker,omitempty"` + DeleteMarkerVersionId string `json:"delete_marker_version_id,omitempty"` + Key string `json:"key,omitempty"` + VersionId string `json:"version_id,omitempty"` +} + +type ListObjectsInput struct { + Bucket string `json:"bucket,omitempty"` + Delimiter string `json:"delimiter,omitempty"` + EncodingType string `json:"encoding_type,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + Marker string `json:"marker,omitempty"` + MaxKeys int32 `json:"maxKeys,omitempty"` + Prefix string `json:"prefix,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` +} +type ListObjectsOutput struct { + CommonPrefixes []string `json:"common_prefixes,omitempty"` + Contents []*Object `json:"contents,omitempty"` + Delimiter string `json:"delimiter,omitempty"` + EncodingType string `json:"encoding_type,omitempty"` + IsTruncated bool `json:"is_truncated,omitempty"` + Marker string `json:"marker,omitempty"` + MaxKeys int32 `json:"max_keys,omitempty"` + Name string `json:"name,omitempty"` + NextMarker string `json:"next_marker,omitempty"` + Prefix string `json:"prefix,omitempty"` +} +type Object struct { + ETag string 
`json:"etag,omitempty"` + Key string `json:"key,omitempty"` + LastModified int64 `json:"last_modified,omitempty"` + Owner *Owner `json:"owner,omitempty"` + Size int64 `json:"size,omitempty"` + StorageClass string `json:"storage_class,omitempty"` +} +type Owner struct { + DisplayName string `json:"display_name,omitempty"` + ID string `json:"id,omitempty"` +} + +type GetObjectCannedAclInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + VersionId string `json:"version_id,omitempty"` +} +type GetObjectCannedAclOutput struct { + CannedAcl string `json:"canned_acl,omitempty"` + Owner *Owner `json:"owner,omitempty"` + RequestCharged string `json:"request_charged,omitempty"` +} + +type PutObjectCannedAclInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + Acl string `json:"acl,omitempty"` + VersionId string `json:"version_id,omitempty"` +} +type PutObjectCannedAclOutput struct { + RequestCharged string `json:"request_charged,omitempty"` +} + +type RestoreObjectInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + VersionId string `json:"version_id,omitempty"` +} +type RestoreObjectOutput struct { + RequestCharged string `json:"request_charged,omitempty"` + RestoreOutputPath string `json:"restore_output_path,omitempty"` +} + +type CreateMultipartUploadInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + ACL string `json:"acl,omitempty"` + BucketKeyEnabled bool `json:"bucket_key_enabled,omitempty"` + CacheControl string `json:"cache_control,omitempty"` + ContentDisposition string `json:"content_disposition,omitempty"` + ContentEncoding string `json:"content_encoding,omitempty"` + ContentLanguage string `json:"content_language,omitempty"` + ContentType string `json:"content_type,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + Expires int64 `json:"expires,omitempty"` + 
GrantFullControl string `json:"grant_full_control,omitempty"` + GrantRead string `json:"grant_read,omitempty"` + GrantReadACP string `json:"grant_read_acp,omitempty"` + GrantWriteACP string `json:"grant_write_acp,omitempty"` + MetaData map[string]string `json:"meta_data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ObjectLockLegalHoldStatus string `json:"object_lock_legal_hold_status,omitempty"` + ObjectLockMode string `json:"object_lock_mode,omitempty"` + ObjectLockRetainUntilDate int64 `json:"object_lock_retain_until_date,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` + SSECustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` + SSECustomerKey string `json:"sse_customer_key,omitempty"` + SSECustomerKeyMD5 string `json:"sse_customer_key_md5,omitempty"` + SSEKMSEncryptionContext string `json:"sse_kms_encryption_context,omitempty"` + SSEKMSKeyId string `json:"sse_kms_key_id,omitempty"` + ServerSideEncryption string `json:"server_side_encryption,omitempty"` + StorageClass string `json:"storage_class,omitempty"` + Tagging map[string]string `json:"tagging,omitempty"` + WebsiteRedirectLocation string `json:"website_redirect_location,omitempty"` +} +type CreateMultipartUploadOutput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + AbortDate int64 `json:"abort_date,omitempty"` + AbortRuleId string `json:"abort_rule_id,omitempty"` + BucketKeyEnabled bool `json:"bucket_key_enabled,omitempty"` + RequestCharged string `json:"request_charged,omitempty"` + SSECustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` + SSECustomerKeyMD5 string `json:"sse_customer_key_md5,omitempty"` + SSEKMSEncryptionContext string `json:"sse_kms_encryption_context,omitempty"` + SSEKMSKeyId string `json:"sse_kms_key_id,omitempty"` + ServerSideEncryption string `json:"server_side_encryption,omitempty"` + UploadId string `json:"upload_id,omitempty"` +} + +type 
UploadPartInput struct { + DataStream io.Reader + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + //Body []byte `json:"body,omitempty"` + ContentLength int64 `json:"content_length,omitempty"` + ContentMd5 string `json:"content_md5,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + PartNumber int32 `json:"part_number,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` + SseCustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` + SseCustomerKey string `json:"sse_customer_key,omitempty"` + SseCustomerKeyMd5 string `json:"sse_customer_key_md5,omitempty"` + UploadId string `json:"upload_id,omitempty"` +} +type UploadPartOutput struct { + BucketKeyEnabled bool `json:"bucket_key_enabled,omitempty"` + ETag string `json:"etag,omitempty"` + RequestCharged string `json:"request_charged,omitempty"` + SSECustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` + SSECustomerKeyMD5 string `json:"sse_customer_key_md5,omitempty"` + SSEKMSKeyId string `json:"sse_kms_key_id,omitempty"` + ServerSideEncryption string `json:"server_side_encryption,omitempty"` +} + +type UploadPartCopyInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + CopySource *CopySource `json:"copy_source,omitempty"` + PartNumber int32 `json:"part_number,omitempty"` + UploadId string `json:"upload_id,omitempty"` + StartPosition int64 `json:"start_position,omitempty"` + PartSize int64 `json:"part_size,omitempty"` +} +type UploadPartCopyOutput struct { + BucketKeyEnabled bool `json:"bucket_key_enabled,omitempty"` + CopyPartResult *CopyPartResult `json:"copy_part_result,omitempty"` + CopySourceVersionId string `json:"copy_source_version_id,omitempty"` + RequestCharged string `json:"request_charged,omitempty"` + SSECustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` + SSECustomerKeyMD5 string `json:"sse_customer_key_md5,omitempty"` + SSEKMSKeyId string 
`json:"sse_kms_key_id,omitempty"` + ServerSideEncryption string `json:"server_side_encryption,omitempty"` +} +type CopyPartResult struct { + ETag string `json:"etag,omitempty"` + LastModified int64 `json:"last_modified,omitempty"` +} + +type CompleteMultipartUploadInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + UploadId string `json:"upload_id,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + MultipartUpload *CompletedMultipartUpload `json:"multipart_upload,omitempty"` +} +type CompletedMultipartUpload struct { + Parts []*CompletedPart `json:"parts,omitempty"` +} +type CompletedPart struct { + ETag string `json:"etag,omitempty"` + PartNumber int32 `json:"part_number,omitempty"` +} +type CompleteMultipartUploadOutput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + BucketKeyEnabled bool `json:"bucket_key_enabled,omitempty"` + ETag string `json:"etag,omitempty"` + Expiration string `json:"expiration,omitempty"` + Location string `json:"location,omitempty"` + RequestCharged string `json:"request_charged,omitempty"` + SSEKMSKeyId string `json:"sse_kms_key_id,omitempty"` + ServerSideEncryption string `json:"server_side_encryption,omitempty"` + VersionId string `json:"version_id,omitempty"` +} + +type AbortMultipartUploadInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` + UploadId string `json:"upload_id,omitempty"` +} +type AbortMultipartUploadOutput struct { + RequestCharged string `json:"request_charged,omitempty"` +} + +type ListMultipartUploadsInput struct { + Bucket string `json:"bucket,omitempty"` + Delimiter string `json:"delimiter,omitempty"` + EncodingType string `json:"encoding_type,omitempty"` + ExpectedBucketOwner string 
`json:"expected_bucket_owner,omitempty"` + KeyMarker string `json:"key_marker,omitempty"` + MaxUploads int64 `json:"max_uploads,omitempty"` + Prefix string `json:"prefix,omitempty"` + UploadIdMarker string `json:"upload_id_marker,omitempty"` +} +type ListMultipartUploadsOutput struct { + Bucket string `json:"bucket,omitempty"` + CommonPrefixes []string `json:"common_prefixes,omitempty"` + Delimiter string `json:"delimiter,omitempty"` + EncodingType string `json:"encoding_type,omitempty"` + IsTruncated bool `json:"is_truncated,omitempty"` + KeyMarker string `json:"key_marker,omitempty"` + MaxUploads int32 `json:"max_uploads,omitempty"` + NextKeyMarker string `json:"next_key_marker,omitempty"` + NextUploadIDMarker string `json:"next_upload_id_marker,omitempty"` + Prefix string `json:"prefix,omitempty"` + UploadIDMarker string `json:"upload_id_marker,omitempty"` + Uploads []*MultipartUpload `json:"uploads,omitempty"` +} +type MultipartUpload struct { + Initiated int64 `json:"initiated,omitempty"` + Initiator *Initiator `json:"initiator,omitempty"` + Key string `json:"key,omitempty"` + Owner *Owner `json:"owner,omitempty"` + StorageClass string `json:"storage_class,omitempty"` + UploadId string `json:"upload_id,omitempty"` +} +type Initiator struct { + DisplayName string `json:"display_name,omitempty"` + ID string `json:"id,omitempty"` +} + +type ListObjectVersionsInput struct { + Bucket string `json:"bucket,omitempty"` + Delimiter string `json:"delimiter,omitempty"` + EncodingType string `json:"encoding_type,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + KeyMarker string `json:"key_marker,omitempty"` + MaxKeys int32 `json:"max_keys,omitempty"` + Prefix string `json:"prefix,omitempty"` + VersionIdMarker string `json:"version_id_marker,omitempty"` +} +type ListObjectVersionsOutput struct { + CommonPrefixes []string `json:"common_prefixes,omitempty"` + DeleteMarkers []*DeleteMarkerEntry `json:"delete_markers,omitempty"` + Delimiter 
string `json:"delimiter,omitempty"` + EncodingType string `json:"encoding_type,omitempty"` + IsTruncated bool `json:"is_truncated,omitempty"` + KeyMarker string `json:"key_marker,omitempty"` + MaxKeys int32 `json:"max_keys,omitempty"` + Name string `json:"name,omitempty"` + NextKeyMarker string `json:"next_key_marker,omitempty"` + NextVersionIdMarker string `json:"next_version_id_marker,omitempty"` + Prefix string `json:"prefix,omitempty"` + VersionIdMarker string `json:"version_id_marker,omitempty"` + Versions []*ObjectVersion `json:"versions,omitempty"` +} +type DeleteMarkerEntry struct { + IsLatest bool `json:"is_latest,omitempty"` + Key string `json:"key,omitempty"` + LastModified int64 `json:"last_modified,omitempty"` + Owner *Owner `json:"owner,omitempty"` + VersionId string `json:"version_id,omitempty"` +} +type ObjectVersion struct { + ETag string `json:"etag,omitempty"` + IsLatest bool `json:"is_latest,omitempty"` + Key string `json:"key,omitempty"` + LastModified int64 `json:"last_modified,omitempty"` + Owner *Owner `json:"owner,omitempty"` + Size int64 `json:"size,omitempty"` + StorageClass string `json:"storage_class,omitempty"` + VersionId string `json:"version_id,omitempty"` +} + +type HeadObjectInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + ChecksumMode string `json:"checksum_mode,omitempty"` + ExpectedBucketOwner string `json:"expected_bucket_owner,omitempty"` + IfMatch string `json:"if_match,omitempty"` + IfModifiedSince int64 `json:"if_modified_since,omitempty"` + IfNoneMatch string `json:"if_none_match,omitempty"` + IfUnmodifiedSince int64 `json:"if_unmodified_since,omitempty"` + PartNumber int32 `json:"part_number,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` + SSECustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` + SSECustomerKey string `json:"sse_customer_key,omitempty"` + SSECustomerKeyMD5 string `json:"sse_customer_key_md5,omitempty"` + VersionId string 
`json:"version_id,omitempty"` + WithDetails bool `json:"with_details,omitempty"` +} +type HeadObjectOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata map[string]string `json:"result_metadata,omitempty"` +} + +type IsObjectExistInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` +} +type IsObjectExistOutput struct { + FileExist bool `json:"file_exist,omitempty"` +} + +type SignURLInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + Method string `json:"method,omitempty"` + ExpiredInSec int64 `json:"expired_in_sec,omitempty"` +} +type SignURLOutput struct { + SignedUrl string `json:"signed_url,omitempty"` +} + +type UpdateBandwidthRateLimitInput struct { + // The average upload/download bandwidth rate limit in bits per second. + AverageRateLimitInBitsPerSec int64 `json:"average_rate_limit_in_bits_per_sec,omitempty"` + //Resource name of gateway + GatewayResourceName string `json:"gateway_resource_name,omitempty"` +} + +type AppendObjectInput struct { + DataStream io.Reader + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + Position int64 `json:"position,omitempty"` + ACL string `json:"acl,omitempty"` + CacheControl string `json:"cache_control,omitempty"` + ContentDisposition string `json:"content_disposition,omitempty"` + ContentEncoding string `json:"content_encoding,omitempty"` + ContentMd5 string `json:"content_md5,omitempty"` + Expires int64 `json:"expires,omitempty"` + StorageClass string `json:"storage_class,omitempty"` + ServerSideEncryption string `json:"server_side_encryption,omitempty"` + Meta string `json:"meta,omitempty"` + Tags map[string]string `json:"tags,omitempty"` +} +type AppendObjectOutput struct { + AppendPosition int64 `json:"append_position,omitempty"` +} + +type ListPartsInput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + ExpectedBucketOwner string 
`json:"expected_bucket_owner,omitempty"` + MaxParts int64 `json:"max_parts,omitempty"` + PartNumberMarker int64 `json:"part_number_marker,omitempty"` + RequestPayer string `json:"request_payer,omitempty"` + UploadId string `json:"upload_id,omitempty"` +} +type ListPartsOutput struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + UploadId string `json:"upload_id,omitempty"` + NextPartNumberMarker string `json:"next_part_number_marker,omitempty"` + MaxParts int64 `json:"max_parts,omitempty"` + IsTruncated bool `json:"is_truncated,omitempty"` + Parts []*Part `json:"parts,omitempty"` +} + +type Part struct { + Etag string `json:"etag,omitempty"` + LastModified int64 `json:"last_modified,omitempty"` + PartNumber int64 `json:"part_number,omitempty"` + Size int64 `json:"size,omitempty"` +} diff --git a/components/oss/registry.go b/components/oss/registry.go new file mode 100644 index 0000000000..3b6e58e2bb --- /dev/null +++ b/components/oss/registry.go @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Layotto Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package oss + +import ( + "fmt" + + "mosn.io/layotto/components/pkg/info" +) + +type Registry interface { + Register(fs ...*Factory) + Create(compType string) (Oss, error) +} + +type Factory struct { + CompType string + FactoryMethod func() Oss +} + +func NewFactory(compType string, f func() Oss) *Factory { + return &Factory{ + CompType: compType, + FactoryMethod: f, + } +} + +type registry struct { + oss map[string]func() Oss + info *info.RuntimeInfo +} + +func NewRegistry(info *info.RuntimeInfo) Registry { + info.AddService(ServiceName) + return &registry{ + oss: make(map[string]func() Oss), + info: info, + } +} + +func (r *registry) Register(fs ...*Factory) { + for _, f := range fs { + r.oss[f.CompType] = f.FactoryMethod + r.info.RegisterComponent(ServiceName, f.CompType) + } +} + +func (r *registry) Create(compType string) (Oss, error) { + if f, ok := r.oss[compType]; ok { + r.info.LoadComponent(ServiceName, compType) + return f(), nil + } + return nil, fmt.Errorf("service component %s is not registered", compType) +} diff --git a/components/oss/types.go b/components/oss/types.go new file mode 100644 index 0000000000..f70149b0b6 --- /dev/null +++ b/components/oss/types.go @@ -0,0 +1,36 @@ +/* + * Copyright 2021 Layotto Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package oss + +import ( + "encoding/json" + "errors" +) + +const ( + BasicConfiguration = "basic_config" +) + +var ( + ErrInvalid = errors.New("invalid argument") +) + +// Config wraps configuration for a oss implementation +type Config struct { + Metadata map[string]json.RawMessage `json:"metadata"` + Type string `json:"type"` +} diff --git a/components/pkg/utils/oss.go b/components/pkg/utils/oss.go new file mode 100644 index 0000000000..aa6959ba34 --- /dev/null +++ b/components/pkg/utils/oss.go @@ -0,0 +1,30 @@ +// +// Copyright 2021 Layotto Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package utils + +import ( + "errors" +) + +var ( + ErrNotInitClient = errors.New("oss client not init") +) + +// OssMetadata wraps the configuration of oss implementation +type OssMetadata struct { + Endpoint string `json:"endpoint"` + AccessKeyID string `json:"accessKeyID"` + AccessKeySecret string `json:"accessKeySecret"` + Region string `json:"region"` +} diff --git a/components/rpc/callback/dubbo_json_rpc_test.go b/components/rpc/callback/dubbo_json_rpc_test.go index 4410d11118..7d2e7b93ee 100644 --- a/components/rpc/callback/dubbo_json_rpc_test.go +++ b/components/rpc/callback/dubbo_json_rpc_test.go @@ -33,7 +33,7 @@ func Test_beforeFactory_Create(t *testing.T) { Id: "1", Timeout: 300, Method: "Hello", - Header: make(map[string][]string, 0), + Header: make(map[string][]string), } newReq, err := f(req) assert.Nil(t, err) diff --git a/components/rpc/invoker/mosn/channel/channel.go b/components/rpc/invoker/mosn/channel/channel.go index baa0116ae4..12602ab128 100644 --- a/components/rpc/invoker/mosn/channel/channel.go +++ b/components/rpc/invoker/mosn/channel/channel.go @@ -52,7 +52,7 @@ type ChannelConfig struct { Ext map[string]interface{} `json:"ext"` } -// GetChannel is get rpc.Channel by config.Protocol +// GetChannel creates a rpc.Channel according to config.Protocol func GetChannel(config ChannelConfig) (rpc.Channel, error) { c, ok := registry[config.Protocol] if !ok { @@ -66,7 +66,7 @@ func RegistChannel(proto string, f func(config ChannelConfig) (rpc.Channel, erro registry[proto] = f } -// simulate tcp connect +// fakeTcpConn simulates tcp connection. 
It implements net.Conn type fakeTcpConn struct { c net.Conn } diff --git a/components/sequencer/zookeeper/zookeeper_sequencer.go b/components/sequencer/zookeeper/zookeeper_sequencer.go index 2291330235..7f7959d717 100644 --- a/components/sequencer/zookeeper/zookeeper_sequencer.go +++ b/components/sequencer/zookeeper/zookeeper_sequencer.go @@ -95,6 +95,13 @@ func (s *ZookeeperSequencer) GetNextId(req *sequencer.GetNextIdRequest) (*sequen stat, err := s.client.Set("/"+req.Key, []byte(""), -1) if err != nil { + if err == zk.ErrNoNode { + _, errCreate := s.client.Create("/"+req.Key, []byte(""), zk.FlagEphemeral, zk.WorldACL(zk.PermAll)) + if errCreate != nil { + return nil, errCreate + } + return s.GetNextId(req) + } return nil, err } // create node version=0, every time we set node will result in version+1 diff --git a/configs/config_file.json b/configs/config_file.json index a249163fc8..8cf2cb13eb 100644 --- a/configs/config_file.json +++ b/configs/config_file.json @@ -24,7 +24,7 @@ }, "file": { "file_demo": { - "type": "minioOSS", + "type": "minio", "metadata": [ { "endpoint": "127.0.0.1:9000", diff --git a/configs/config_file_qiniu_oss.json b/configs/config_file_qiniu_oss.json index b9bf5df66d..4e1d7ea38e 100644 --- a/configs/config_file_qiniu_oss.json +++ b/configs/config_file_qiniu_oss.json @@ -24,7 +24,7 @@ }, "file": { "file_demo": { - "type": "qiniuOSS", + "type": "qiniu.oss", "metadata": [ { "endpoint": "", diff --git a/configs/config_file_tencentcloud_oss.json b/configs/config_file_tencentcloud_oss.json index 75bb97de55..c57c8dafae 100644 --- a/configs/config_file_tencentcloud_oss.json +++ b/configs/config_file_tencentcloud_oss.json @@ -24,7 +24,7 @@ }, "files": { "file_demo": { - "type": "tencentCloudOSS", + "type": "tencent.oss", "metadata": [ { "endpoint": "", diff --git a/configs/config_oss.json b/configs/config_oss.json new file mode 100644 index 0000000000..702b32ba4f --- /dev/null +++ b/configs/config_oss.json @@ -0,0 +1,43 @@ +{ + "servers": [ + { + 
"default_log_path": "stdout", + "default_log_level": "DEBUG", + "listeners": [ + { + "name": "grpc", + "address": "127.0.0.1:34904", + "bind_port": true, + "filter_chains": [ + { + "filters": [ + { + "type": "grpc", + "config": { + "server_name": "runtime", + "grpc_config": { + "oss": { + "oss_demo": { + "type": "aws.oss", + "metadata": + { + "basic_config":{ + "region": "your-oss-resource-region", + "endpoint": "your-oss-resource-endpoint", + "accessKeyID": "your-oss-resource-accessKeyID", + "accessKeySecret": "your-oss-resource-accessKeySecret" + } + } + } + } + } + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/demo/go.mod b/demo/go.mod index 95562f2a67..a08839907c 100644 --- a/demo/go.mod +++ b/demo/go.mod @@ -8,6 +8,7 @@ require ( github.com/minio/minio-go/v7 v7.0.15 google.golang.org/grpc v1.37.0 google.golang.org/protobuf v1.26.0 + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect mosn.io/layotto/sdk/go-sdk v0.0.0-20211020084508-6f5ee3cfeba0 mosn.io/layotto/spec v0.0.0-20211020084508-6f5ee3cfeba0 ) diff --git a/demo/go.sum b/demo/go.sum index 49d9a91302..eba55c0d20 100644 --- a/demo/go.sum +++ b/demo/go.sum @@ -157,7 +157,8 @@ gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/demo/oss/client.go b/demo/oss/client.go new file mode 100644 index 0000000000..757895c4e6 --- /dev/null +++ b/demo/oss/client.go @@ -0,0 +1,544 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ + +package main + +import ( + "bufio" + "context" + "fmt" + "os" + "strconv" + + "mosn.io/layotto/spec/proto/extension/v1/s3" + + "google.golang.org/grpc" +) + +const ( + storeName = "oss_demo" +) + +func TestGetObjectInput(bucket, fileName string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.GetObjectInput{StoreName: storeName, Bucket: bucket, Key: fileName} + cli, err := c.GetObject(context.Background(), req) + if err != nil { + fmt.Printf("get file error: %+v", err) + return + } + pic := make([]byte, 0) + for { + resp, err := cli.Recv() + if err != nil { + if err.Error() != "EOF" { + panic(err) + } + break + } + pic = append(pic, resp.Body...) 
+ } + fmt.Println(string(pic)) +} + +func TestPutObject(bucket, fileName string, value string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.PutObjectInput{StoreName: storeName, Bucket: bucket, Key: fileName} + stream, err := c.PutObject(context.TODO()) + if err != nil { + fmt.Printf("put file failed:%+v", err) + return + } + req.Body = []byte(value) + stream.Send(req) + _, err = stream.CloseAndRecv() + if err != nil { + fmt.Printf("cannot receive response: %+v", err) + } +} + +func TestListObjects(bucket string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + marker := "" + for { + req := &s3.ListObjectsInput{StoreName: storeName, Bucket: bucket, MaxKeys: 2, Marker: marker} + resp, err := c.ListObjects(context.Background(), req) + if err != nil { + fmt.Printf("list file fail, err: %+v", err) + return + } + marker = resp.NextMarker + if !resp.IsTruncated { + fmt.Printf("files under bucket is: %+v, %+v \n", resp.Contents, marker) + fmt.Printf("finish list \n") + return + } + fmt.Printf("files under bucket is: %+v, %+v \n", resp.Contents, marker) + } + +} + +func TestDeleteObject(bucket, fileName string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.DeleteObjectInput{StoreName: storeName, Bucket: bucket, Key: fileName} + resp, err := c.DeleteObject(context.Background(), req) + if err != nil { + fmt.Printf("DeleteObject fail, err: %+v \n", err) + return + } + fmt.Printf("delete file success with resp: %+v \n", resp) +} + +func TestDeleteObjects(bucket, fileName1, fileName2 string) { + conn, err := 
grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req2 := &s3.DeleteObjectsInput{StoreName: storeName, Bucket: bucket, Delete: &s3.Delete{}} + object1 := &s3.ObjectIdentifier{Key: fileName1} + object2 := &s3.ObjectIdentifier{Key: fileName2} + req2.Delete.Objects = append(req2.Delete.Objects, object1) + req2.Delete.Objects = append(req2.Delete.Objects, object2) + resp2, err := c.DeleteObjects(context.Background(), req2) + if err != nil { + fmt.Printf("DeleteObjects fail, err: %+v \n", err) + return + } + fmt.Printf("DeleteObjects success with resp: %+v \n", resp2) +} + +func TestTagging(bucket, name string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.PutObjectTaggingInput{StoreName: storeName, Bucket: bucket, Key: name, Tags: map[string]string{"Abc": "123", "Def": "456"}} + _, err = c.PutObjectTagging(context.Background(), req) + if err != nil { + fmt.Printf("PutObjectTagging fail, err: %+v \n", err) + return + } + fmt.Printf("PutObjectTagging success, try get tagging\n") + + req2 := &s3.GetObjectTaggingInput{StoreName: storeName, Bucket: bucket, Key: name} + getResp, err := c.GetObjectTagging(context.Background(), req2) + if err != nil { + fmt.Printf("GetObjectTagging fail, err: %+v \n", err) + return + } + fmt.Printf("GetObjectTagging: %+v \n", getResp.Tags) + + req3 := &s3.DeleteObjectTaggingInput{StoreName: storeName, Bucket: bucket, Key: name} + delResp, err := c.DeleteObjectTagging(context.Background(), req3) + if err != nil { + fmt.Printf("DeleteObjectTagging fail, err: %+v \n", err) + return + } + fmt.Printf("DeleteObjectTagging success: %+v \n", delResp.VersionId) + + req4 := &s3.GetObjectTaggingInput{StoreName: storeName, Bucket: bucket, Key: name} + getResp4, err := 
c.GetObjectTagging(context.Background(), req4) + if err != nil { + fmt.Printf("GetObjectTagging fail, err: %+v \n", err) + return + } + fmt.Printf("GetObjectTagging after delete tag: %+v \n", getResp4.Tags) +} + +func TestAcl(bucket, name string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.GetObjectCannedAclInput{StoreName: storeName, Bucket: bucket, Key: name} + resp, err := c.GetObjectCannedAcl(context.Background(), req) + if err != nil { + fmt.Printf("GetObjectAcl fail, err: %+v \n", err) + } else { + fmt.Printf("get acl success, resp: %+v\n", resp) + } + + putRequest := &s3.PutObjectCannedAclInput{StoreName: storeName, Bucket: bucket, Key: name, Acl: "public-read-write"} + resp2, err := c.PutObjectCannedAcl(context.Background(), putRequest) + if err != nil { + fmt.Printf("TestAcl fail, err: %+v \n", err) + return + } + fmt.Printf("put acl public-read-write success with resp: %+v \n", resp2) + +} + +func TestCopyObject(bucket, name string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.CopyObjectInput{StoreName: storeName, Bucket: bucket, Key: name + ".copy", CopySource: &s3.CopySource{CopySourceBucket: bucket, CopySourceKey: name}} + resp, err := c.CopyObject(context.Background(), req) + if err != nil { + fmt.Printf("CopyObject fail, err: %+v \n", err) + return + } + fmt.Printf("CopyObject success, resp: %+v\n", resp) + +} + +func TestPart(bucket, name string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.CreateMultipartUploadInput{StoreName: storeName, Bucket: bucket, Key: "multicopy.jpg"} + 
resp, err := c.CreateMultipartUpload(context.Background(), req) + if err != nil { + fmt.Printf("CreateMultipartUpload fail, err: %+v \n", err) + return + } + fmt.Printf("CreateMultipartUpload success, resp: %+v\n", resp) + + req1 := &s3.ListMultipartUploadsInput{StoreName: storeName, Bucket: bucket, MaxUploads: 1000, KeyMarker: "multicopy.jpg", UploadIdMarker: resp.UploadId} + resp1, err := c.ListMultipartUploads(context.Background(), req1) + if err != nil { + fmt.Printf("ListMultipartUploads fail, err: %+v \n", err) + return + } + fmt.Printf("ListMultipartUploads success, resp: %+v \n", resp1) + + req2 := &s3.UploadPartCopyInput{StoreName: storeName, Bucket: bucket, PartNumber: 1, UploadId: resp.UploadId, Key: "multicopy.jpg", StartPosition: 0, PartSize: 1000, CopySource: &s3.CopySource{CopySourceBucket: bucket, CopySourceKey: name}} + resp2, err := c.UploadPartCopy(context.Background(), req2) + if err != nil { + fmt.Printf("UploadPartCopy fail, err: %+v \n", err) + return + } + fmt.Printf("UploadPartCopy success, resp: %+v \n", resp2) + + req3 := &s3.CompleteMultipartUploadInput{StoreName: storeName, Bucket: bucket, Key: "multicopy.jpg", UploadId: resp.UploadId, MultipartUpload: &s3.CompletedMultipartUpload{Parts: []*s3.CompletedPart{{Etag: resp2.CopyPartResult.Etag, PartNumber: req2.PartNumber}}}} + resp3, err := c.CompleteMultipartUpload(context.Background(), req3) + if err != nil { + fmt.Printf("CompleteMultipartUpload fail, err: %+v \n", err) + return + } + fmt.Printf("CompleteMultipartUpload success, resp: %+v \n", resp3) + + //req4 := &s3.AbortMultipartUploadInput{StoreName: storeName, Bucket: bucket, Key: "海贼王.jpeg", UploadId: "EEE5317D0EB841AC9B80D0B6A2F811AA"} + //resp4, err := c.AbortMultipartUpload(context.Background(), req4) + //if err != nil { + // fmt.Printf("AbortMultipartUpload fail, err: %+v \n", err) + // return + //} + //fmt.Printf("AbortMultipartUpload success, resp: %+v \n", resp4) + + req5 := &s3.CreateMultipartUploadInput{StoreName: 
storeName, Bucket: bucket, Key: "海贼王.jpg"} + resp5, err := c.CreateMultipartUpload(context.Background(), req5) + if err != nil { + fmt.Printf("CreateMultipartUpload fail, err: %+v \n", err) + return + } + fmt.Printf("CreateMultipartUpload success, resp: %+v\n", resp5) + + req6 := &s3.UploadPartInput{ + StoreName: storeName, + Bucket: bucket, + Key: "海贼王.jpg", + UploadId: resp5.UploadId, + PartNumber: 0, + } + f, err := os.Open("海贼王.jpg") + if err != nil { + fmt.Printf("open file fail, err: %+v\n", err) + return + } + defer f.Close() + reader := bufio.NewReader(f) + var parts []*s3.CompletedPart + for { + dataByte := make([]byte, 120*1024) + var n int + n, err = reader.Read(dataByte) + if err != nil || 0 == n { + break + } + req6.Body = dataByte[:n] + req6.ContentLength = int64(n) + req6.PartNumber = req6.PartNumber + 1 + stream, err := c.UploadPart(context.TODO()) + if err != nil { + fmt.Printf("UploadPart fail, err: %+v \n", err) + return + } + err = stream.Send(req6) + if err != nil { + fmt.Printf("UploadPart send fail, err: %+v \n", err) + return + } + resp6, err := stream.CloseAndRecv() + if err != nil { + fmt.Printf("UploadPart CloseAndRecv fail, err: %+v \n", err) + return + } + part := &s3.CompletedPart{Etag: resp6.Etag, PartNumber: req6.PartNumber} + parts = append(parts, part) + } + fmt.Printf("UploadPart success, parts: %+v \n", parts) + req8 := &s3.ListPartsInput{StoreName: storeName, Bucket: bucket, Key: "海贼王.jpg", UploadId: resp5.UploadId} + resp8, err := c.ListParts(context.Background(), req8) + if err != nil { + fmt.Printf("ListPartsInput fail, err: %+v \n", err) + } else { + fmt.Printf("ListPartsInput success, resp: %+v \n", resp8) + } + req7 := &s3.CompleteMultipartUploadInput{StoreName: storeName, Bucket: bucket, Key: "海贼王.jpg", UploadId: resp5.UploadId, MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts}} + resp7, err := c.CompleteMultipartUpload(context.Background(), req7) + if err != nil { + fmt.Printf("CompleteMultipartUpload fail, 
err: %+v \n", err) + return + } + fmt.Printf("CompleteMultipartUpload success, resp: %+v \n", resp7) +} + +func TestListVersion(bucket string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.ListObjectVersionsInput{StoreName: storeName, Bucket: bucket} + resp, err := c.ListObjectVersions(context.Background(), req) + if err != nil { + fmt.Printf("ListObjectVersions fail, err: %+v \n", err) + return + } + fmt.Printf("ListObjectVersions success, resp: %+v\n", resp) + +} + +func TestRestore(bucket, name string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.RestoreObjectInput{StoreName: storeName, Bucket: bucket, Key: name} + resp, err := c.RestoreObject(context.Background(), req) + if err != nil { + fmt.Printf("RestoreObject fail, err: %+v \n", err) + return + } + fmt.Printf("RestoreObject success, resp: %+v\n", resp) + +} + +func TestObjectExist(bucket, name string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.IsObjectExistInput{StoreName: storeName, Bucket: bucket, Key: name} + resp, err := c.IsObjectExist(context.Background(), req) + if err != nil { + fmt.Printf("TestObjectExist fail, err: %+v \n", err) + return + } + fmt.Printf("TestObjectExist success, resp: %+v\n", resp.FileExist) + +} + +func TestAbortMultipartUpload(bucket string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + + req := &s3.ListMultipartUploadsInput{StoreName: storeName, Bucket: bucket, 
MaxUploads: 1000} + resp, err := c.ListMultipartUploads(context.Background(), req) + if err != nil { + fmt.Printf("ListMultipartUploads fail, err: %+v \n", err) + return + } + fmt.Printf("ListMultipartUploads success, resp: %+v \n", resp) + + for _, v := range resp.Uploads { + req4 := &s3.AbortMultipartUploadInput{StoreName: storeName, Bucket: bucket, Key: v.Key, UploadId: v.UploadId} + resp4, err := c.AbortMultipartUpload(context.Background(), req4) + if err != nil { + fmt.Printf("AbortMultipartUpload fail, err: %+v \n", err) + return + } + fmt.Printf("AbortMultipartUpload success, resp: %+v \n", resp4) + } + + fmt.Printf("AbortMultipartUpload test success") +} + +func TestSign(bucket, name, method string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.SignURLInput{StoreName: storeName, Bucket: bucket, Key: name, ExpiredInSec: int64(10), Method: method} + resp, err := c.SignURL(context.Background(), req) + if err != nil { + fmt.Printf("SignURLInput fail, err: %+v \n", err) + return + } + fmt.Printf("SignURLInput success, resp: %+v\n", resp.SignedUrl) + +} + +func TestAppend(bucket, fileName, data, position string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + ps, _ := strconv.Atoi(position) + req := &s3.AppendObjectInput{StoreName: storeName, Bucket: bucket, Key: fileName, Body: []byte(data), Position: int64(ps)} + stream, err := c.AppendObject(context.Background()) + if err != nil { + fmt.Printf("AppendObject fail,err:%+v", err) + return + } + err = stream.Send(req) + if err != nil { + fmt.Printf("AppendObject fail,err:%+v", err) + return + } + resp, err := stream.CloseAndRecv() + if err != nil { + fmt.Printf("AppendObject fail,err:%+v", err) + return + } + 
fmt.Printf("AppendObject success,resp: %+v \n", resp) +} + +func TestHeadObject(bucket, fileName string) { + conn, err := grpc.Dial("127.0.0.1:34904", grpc.WithInsecure()) + if err != nil { + fmt.Printf("conn build failed,err:%+v", err) + return + } + c := s3.NewObjectStorageServiceClient(conn) + req := &s3.HeadObjectInput{StoreName: storeName, Bucket: bucket, Key: fileName} + resp, err := c.HeadObject(context.Background(), req) + if err != nil { + fmt.Printf("HeadObjectInput fail,err:%+v", err) + return + } + + fmt.Printf("HeadObjectInput success,resp: %+v \n", resp) +} + +func main() { + + if os.Args[1] == "put" { + TestPutObject(os.Args[2], os.Args[3], os.Args[4]) + } + if os.Args[1] == "get" { + TestGetObjectInput(os.Args[2], os.Args[3]) + } + if os.Args[1] == "del" { + TestDeleteObject(os.Args[2], os.Args[3]) + } + if os.Args[1] == "dels" { + TestDeleteObjects(os.Args[2], os.Args[3], os.Args[4]) + } + + if os.Args[1] == "list" { + TestListObjects(os.Args[2]) + } + + if os.Args[1] == "tag" { + TestTagging(os.Args[2], os.Args[3]) + } + + if os.Args[1] == "acl" { + TestAcl(os.Args[2], os.Args[3]) + } + + if os.Args[1] == "copy" { + TestCopyObject(os.Args[2], os.Args[3]) + } + + if os.Args[1] == "part" { + TestPart(os.Args[2], os.Args[3]) + } + + if os.Args[1] == "version" { + TestListVersion(os.Args[2]) + } + + if os.Args[1] == "restore" { + TestRestore(os.Args[2], os.Args[3]) + } + if os.Args[1] == "exist" { + TestObjectExist(os.Args[2], os.Args[3]) + } + if os.Args[1] == "abort" { + TestAbortMultipartUpload(os.Args[2]) + } + + if os.Args[1] == "sign" { + TestSign(os.Args[2], os.Args[3], os.Args[4]) + } + + if os.Args[1] == "append" { + TestAppend(os.Args[2], os.Args[3], os.Args[4], os.Args[5]) + } + + if os.Args[1] == "head" { + TestHeadObject(os.Args[2], os.Args[3]) + } +} diff --git a/demo/rpc/http/example.json b/demo/rpc/http/example.json index 1464b9e055..defe3b0dfa 100644 --- a/demo/rpc/http/example.json +++ b/demo/rpc/http/example.json @@ -1,98 +1,109 @@ 
{ - "servers":[ - { - "default_log_path": "stdout", - "default_log_level": "DEBUG", - "routers": [ - { - "router_config_name":"http_router", - "virtual_hosts":[{ - "name":"http_host", - "domains": ["*"], - "routers": [ - { - "match":{ - "headers": [ - { - "name":"id", - "value":"HelloService:1.0" - } - ] - }, - "route":{"cluster_name":"http_server"} - } - ] - }] - }], - "listeners":[ - { - "name":"grpc", - "address": "127.0.0.1:34904", - "bind_port": true, - "filter_chains": [{ - "filters": [ - { - "type": "grpc", - "config": { - "server_name":"runtime", - "grpc_config": { - "rpcs": { - "mosn": { - "config": { - "channel": [{ - "size": 16, - "protocol": "http", - "listener": "egress_runtime_http" - }] - } - } - } - } - } - } - ] - }] - }, - { - "name": "egress_runtime_http", - "type": "egress", - "address": "0.0.0.0:12221", - "bind_port": true, - "network": "tcp", - "filter_chains": [ - { - "filters": [ - { - "type": "proxy", - "config": { - "downstream_protocol": "Http1", - "name": "proxy_config", - "router_config_name": "http_router", - "upstream_protocol": "Http1" - } - } - ] - } - ] - } - ] - } - ], - "cluster_manager": { - "tls_context": {}, - "clusters": [ - { - "name": "http_server", - "type": "SIMPLE", - "lb_type": "LB_RANDOM", - "hosts": [ - { - "address": "127.0.0.1:8889", - "hostname": "downstream_machine1", - "weight": 1 - } - ] - } - ] - } + "servers": [ + { + "default_log_path": "stdout", + "default_log_level": "DEBUG", + "routers": [ + { + "router_config_name": "http_router", + "virtual_hosts": [ + { + "name": "http_host", + "domains": [ + "*" + ], + "routers": [ + { + "match": { + "headers": [ + { + "name": "id", + "value": "HelloService:1.0" + } + ] + }, + "route": { + "cluster_name": "http_server" + } + } + ] + } + ] + } + ], + "listeners": [ + { + "name": "grpc", + "address": "127.0.0.1:34904", + "bind_port": true, + "filter_chains": [ + { + "filters": [ + { + "type": "grpc", + "config": { + "server_name": "runtime", + "grpc_config": { + "rpcs": 
{ + "mosn": { + "config": { + "channel": [ + { + "size": 16, + "protocol": "http", + "listener": "egress_runtime_http" + } + ] + } + } + } + } + } + } + ] + } + ] + }, + { + "name": "egress_runtime_http", + "type": "egress", + "address": "0.0.0.0:12221", + "bind_port": true, + "network": "tcp", + "filter_chains": [ + { + "filters": [ + { + "type": "proxy", + "config": { + "downstream_protocol": "Http1", + "name": "proxy_config", + "router_config_name": "http_router", + "upstream_protocol": "Http1" + } + } + ] + } + ] + } + ] + } + ], + "cluster_manager": { + "tls_context": {}, + "clusters": [ + { + "name": "http_server", + "type": "SIMPLE", + "lb_type": "LB_RANDOM", + "hosts": [ + { + "address": "127.0.0.1:8889", + "hostname": "downstream_machine1", + "weight": 1 + } + ] + } + ] + } } diff --git a/deploy/k8s/bookinfo/bookinfo-gateway.yaml b/deploy/k8s/bookinfo/bookinfo-gateway.yaml new file mode 100644 index 0000000000..951f069f35 --- /dev/null +++ b/deploy/k8s/bookinfo/bookinfo-gateway.yaml @@ -0,0 +1,41 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway +spec: + selector: + istio: ingressgateway # use istio default controller + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage + port: + number: 9080 diff --git a/deploy/k8s/bookinfo/bookinfo.yaml b/deploy/k8s/bookinfo/bookinfo.yaml new file mode 100644 index 0000000000..d9e8b4fcaa --- /dev/null +++ b/deploy/k8s/bookinfo/bookinfo.yaml @@ -0,0 +1,344 @@ +# Copyright Istio Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +################################################################################################## +# This file defines the services, service accounts, and deployments for the Bookinfo sample. +# +# To apply all 4 Bookinfo services, their corresponding service accounts, and deployments: +# +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml +# +# Alternatively, you can deploy any resource separately: +# +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l service=reviews # reviews Service +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l account=reviews # reviews ServiceAccount +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l app=reviews,version=v3 # reviews-v3 Deployment +################################################################################################## + +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details + labels: + account: details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + 
metadata: + labels: + app: details + version: v1 + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + securityContext: + runAsUser: 1000 +--- +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings + labels: + account: ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + securityContext: + runAsUser: 1000 +--- +################################################################################################## +# Reviews service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews + labels: + account: reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v1 + labels: + app: reviews + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v1 + template: + 
metadata: + labels: + app: reviews + version: v1 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v1:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + securityContext: + runAsUser: 1000 + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v2 + labels: + app: reviews + version: v2 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v2 + template: + metadata: + labels: + app: reviews + version: v2 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v2:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + securityContext: + runAsUser: 1000 + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + template: + metadata: + labels: + app: reviews + version: v3 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + securityContext: + runAsUser: 1000 + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- 
+################################################################################################## +# Productpage services +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage + labels: + account: productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + # docker.io/istio/examples-bookinfo-productpage-v1:1.16.2 + image: layotto/productpage:latest + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + securityContext: + runAsUser: 1000 + volumes: + - name: tmp + emptyDir: {} +--- diff --git a/deploy/k8s/bookinfo/destination-rule-all.yaml b/deploy/k8s/bookinfo/destination-rule-all.yaml new file mode 100644 index 0000000000..96be6993aa --- /dev/null +++ b/deploy/k8s/bookinfo/destination-rule-all.yaml @@ -0,0 +1,62 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: productpage +spec: + host: productpage + subsets: + - name: v1 + labels: + version: v1 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: reviews +spec: + host: reviews + subsets: + - name: v1 + labels: + version: v1 + - name: v2 + labels: + version: v2 + - name: v3 + labels: + version: v3 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: ratings +spec: + host: ratings + subsets: + - name: v1 + labels: + version: v1 + 
- name: v2 + labels: + version: v2 + - name: v2-mysql + labels: + version: v2-mysql + - name: v2-mysql-vm + labels: + version: v2-mysql-vm +--- +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: details +spec: + host: details + subsets: + - name: v1 + labels: + version: v1 + - name: v2 + labels: + version: v2 +--- diff --git a/deploy/k8s/bookinfo/virtual-service-all-v1.yaml b/deploy/k8s/bookinfo/virtual-service-all-v1.yaml new file mode 100644 index 0000000000..6811e31d98 --- /dev/null +++ b/deploy/k8s/bookinfo/virtual-service-all-v1.yaml @@ -0,0 +1,52 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: productpage +spec: + hosts: + - productpage + http: + - route: + - destination: + host: productpage + subset: v1 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: reviews +spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + subset: v1 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: ratings +spec: + hosts: + - ratings + http: + - route: + - destination: + host: ratings + subset: v1 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: details +spec: + hosts: + - details + http: + - route: + - destination: + host: details + subset: v1 +--- diff --git a/deploy/k8s/sidecar/default_quickstart_counter.yaml b/deploy/k8s/sidecar/default_quickstart_counter.yaml new file mode 100644 index 0000000000..45dea2bcee --- /dev/null +++ b/deploy/k8s/sidecar/default_quickstart_counter.yaml @@ -0,0 +1,188 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: layotto-runtime-config +data: + config.json: | + { + "servers": [ + { + "default_log_path": "stdout", + "default_log_level": "DEBUG", + "routers": [ + { + "router_config_name": "actuator_dont_need_router" + } + ], + "listeners": [ + { + "name": "grpc", + "address": "0.0.0.0:34904", + "bind_port": true, + "filter_chains": [ + { 
+ "filters": [ + { + "type": "grpc", + "config": { + "server_name": "runtime", + "grpc_config": { + "hellos": { + "quick_start_demo": { + "type": "helloworld", + "hello": "greeting" + } + }, + "state": { + "state_demo": { + "type": "in-memory", + "metadata": { + } + } + }, + "lock": { + "lock_demo": { + "type": "in-memory", + "metadata": { + } + } + }, + "pub_subs": { + "pub_subs_demo": { + "type": "in-memory", + "metadata": { + "consumerID": "1" + } + } + }, + "sequencer": { + "sequencer_demo": { + "type": "in-memory", + "metadata": {} + } + }, + "secret_store": { + "secret_demo": { + "type": "local.env", + "metadata": { + } + } + }, + "bindings": { + "bindings_demo": { + "type": "http", + "metadata": { + "url": "https://mosn.io/layotto" + } + } + }, + "custom_component": { + "helloworld": { + "demo": { + "type": "in-memory", + "metadata": {} + } + } + }, + "app": { + "app_id": "app1", + "grpc_callback_port": 9999 + } + } + } + } + ] + } + ] + }, + { + "name": "actuator", + "address": "127.0.0.1:34999", + "bind_port": true, + "filter_chains": [ + { + "filters": [ + { + "type": "proxy", + "config": { + "downstream_protocol": "Http1", + "upstream_protocol": "Http1", + "router_config_name": "actuator_dont_need_router" + } + } + ] + } + ], + "stream_filters": [ + { + "type": "actuator_filter" + } + ] + } + ] + } + ], + "tracing": { + "enable": true, + "driver": "SOFATracer", + "config": { + "generator": "mosntracing", + "exporter": [ + "stdout" + ] + } + }, + "metrics": { + "sinks": [ + { + "type": "prometheus", + "config": { + "port": 34903 + } + } + ] + } + } + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: counter-deployment + labels: + app: counter-deployment +spec: + selector: + matchLabels: + app: counter + replicas: 1 + template: + metadata: + labels: + app: counter + spec: + containers: + # Your app + - name: count + image: busybox:1.28 + args: [ /bin/sh, -c, + 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done' ] + # Layotto 
+ - name: layotto-runtime + image: layotto/layotto:latest + command: [ "/runtime/layotto", "start" ] + args: [ "-c", "/runtime/configs/config.json" ] + ports: + - containerPort: 34904 + volumeMounts: + - name: runtime-config + mountPath: /runtime/configs + readOnly: false + volumes: + - name: runtime-config + configMap: + name: layotto-runtime-config + items: + - key: config.json + path: config.json \ No newline at end of file diff --git a/demo/deploy/k8s/standalone/default_configmap.yaml b/deploy/k8s/standalone/default_configmap.yaml similarity index 100% rename from demo/deploy/k8s/standalone/default_configmap.yaml rename to deploy/k8s/standalone/default_configmap.yaml diff --git a/demo/deploy/k8s/standalone/default_deployment.yaml b/deploy/k8s/standalone/default_deployment.yaml similarity index 100% rename from demo/deploy/k8s/standalone/default_deployment.yaml rename to deploy/k8s/standalone/default_deployment.yaml diff --git a/demo/deploy/k8s/standalone/default_quickstart.yaml b/deploy/k8s/standalone/default_quickstart.yaml similarity index 100% rename from demo/deploy/k8s/standalone/default_quickstart.yaml rename to deploy/k8s/standalone/default_quickstart.yaml diff --git a/docker/layotto-minio/config_minio.json b/docker/layotto-minio/config_minio.json index c48302521b..33dae046b2 100644 --- a/docker/layotto-minio/config_minio.json +++ b/docker/layotto-minio/config_minio.json @@ -24,7 +24,7 @@ }, "file": { "file_demo": { - "type": "minioOSS", + "type": "minio", "metadata": [ { "endpoint": "minio:9000", diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 0c81dc3d4a..47ad05e71b 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -39,9 +39,7 @@ - [Pub/Sub API](en/building_blocks/pubsub/reference.md) - [RPC API](en/building_blocks/rpc/reference.md) - [Configuration API](en/building_blocks/configuration/reference.md) - - API reference - - [spec/proto/runtime/v1/runtime.proto](https://github.com/mosn/layotto/blob/main/docs/en/api_reference/runtime_v1.md) - - 
[spec/proto/runtime/v1/appcallback.proto](https://github.com/mosn/layotto/blob/main/docs/en/api_reference/appcallback_v1.md) + - [API reference](en/api_reference/README) - SDK reference - [java sdk](https://github.com/layotto/java-sdk) - [.net sdk](https://github.com/layotto/dotnet-sdk) diff --git a/docs/api/v1/runtime.html b/docs/api/v1/runtime.html new file mode 100644 index 0000000000..aadb347130 --- /dev/null +++ b/docs/api/v1/runtime.html @@ -0,0 +1,4289 @@ + + + + + Protocol Documentation + + + + + + + + + + +

Protocol Documentation

+ +

Table of Contents

+ +
+ +
+ + + +
+

appcallback.proto

Top +
+

+ + +

[gRPC Service] AppCallback

+

AppCallback V1 allows user application to interact with runtime.

User application needs to implement AppCallback service if it needs to

receive message from runtime.

+ + + + + + + + + + + + + + + + + + + + + +
Method NameRequest TypeResponse TypeDescription
ListTopicSubscriptions.google.protobuf.EmptyListTopicSubscriptionsResponse

Lists all topics subscribed by this app.

OnTopicEventTopicEventRequestTopicEventResponse

Subscribes events from Pubsub

+ + + + +

ListTopicSubscriptionsResponse

+

ListTopicSubscriptionsResponse is the message including the list of the subscribing topics.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
subscriptionsTopicSubscriptionrepeated

The list of topics.

+ + + + + +

TopicEventRequest

+

TopicEventRequest message is compatible with CloudEvent spec v1.0

https://github.com/cloudevents/spec/blob/v1.0/spec.md

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
idstring

id identifies the event. Producers MUST ensure that source + id +is unique for each distinct event. If a duplicate event is re-sent +(e.g. due to a network error) it MAY have the same id.

sourcestring

source identifies the context in which an event happened. +Often this will include information such as the type of the +event source, the organization publishing the event or the process +that produced the event. The exact syntax and semantics behind +the data encoded in the URI is defined by the event producer.

typestring

The type of event related to the originating occurrence.

spec_versionstring

The version of the CloudEvents specification.

data_content_typestring

The content type of data value.

databytes

The content of the event.

topicstring

The pubsub topic which publisher sent to.

pubsub_namestring

The name of the pubsub the publisher sent to.

metadataTopicEventRequest.MetadataEntryrepeated

add a map to pass some extra properties.

+ + + + + +

TopicEventRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

TopicEventResponse

+

TopicEventResponse is response from app on published message

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
statusTopicEventResponse.TopicEventResponseStatus

The status of processing the topic event.

+ + + + + +

TopicSubscription

+

TopicSubscription represents topic and metadata.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
pubsub_namestring

Required. The name of the pubsub containing the topic below to subscribe to.

topicstring

Required. The name of topic which will be subscribed

metadataTopicSubscription.MetadataEntryrepeated

The optional properties used for this topic's subscription e.g. session id

+ + + + + +

TopicSubscription.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + + + +

TopicEventResponse.TopicEventResponseStatus

+

TopicEventResponseStatus allows apps to have finer control over handling of the message.

+ + + + + + + + + + + + + + + + + + + + + + + + + +
NameNumberDescription
SUCCESS0

SUCCESS is the default behavior: message is acknowledged and not retried or logged.

RETRY1

RETRY status signals runtime to retry the message as part of an expected scenario (no warning is logged).

DROP2

DROP status signals runtime to drop the message as part of an unexpected scenario (warning is logged).

+ + + + + + +
+

runtime.proto

Top +
+

+ + +

[gRPC Service] Runtime

+

Runtime encapsulates various Runtime APIs (such as Configuration API, Pub/Sub API, etc)

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Method NameRequest TypeResponse TypeDescription
SayHelloSayHelloRequestSayHelloResponse

SayHello used for test

InvokeServiceInvokeServiceRequestInvokeResponse

InvokeService do rpc calls

GetConfigurationGetConfigurationRequestGetConfigurationResponse

GetConfiguration gets configuration from configuration store.

SaveConfigurationSaveConfigurationRequest.google.protobuf.Empty

SaveConfiguration saves configuration into configuration store.

DeleteConfigurationDeleteConfigurationRequest.google.protobuf.Empty

DeleteConfiguration deletes configuration from configuration store.

SubscribeConfigurationSubscribeConfigurationRequest streamSubscribeConfigurationResponse stream

SubscribeConfiguration gets configuration from configuration store and subscribe the updates.

TryLockTryLockRequestTryLockResponse

Distributed Lock API +A non-blocking method trying to get a lock with ttl.

UnlockUnlockRequestUnlockResponse

A method trying to unlock.

GetNextIdGetNextIdRequestGetNextIdResponse

Sequencer API +Get next unique id with some auto-increment guarantee

GetStateGetStateRequestGetStateResponse

Gets the state for a specific key.

GetBulkStateGetBulkStateRequestGetBulkStateResponse

Gets a bulk of state items for a list of keys

SaveStateSaveStateRequest.google.protobuf.Empty

Saves an array of state objects

DeleteStateDeleteStateRequest.google.protobuf.Empty

Deletes the state for a specific key.

DeleteBulkStateDeleteBulkStateRequest.google.protobuf.Empty

Deletes a bulk of state items for a list of keys

ExecuteStateTransactionExecuteStateTransactionRequest.google.protobuf.Empty

Executes transactions for a specified store

PublishEventPublishEventRequest.google.protobuf.Empty

Publishes events to the specific topic

GetFileGetFileRequestGetFileResponse stream

Get file with stream

PutFilePutFileRequest stream.google.protobuf.Empty

Put file with stream

ListFileListFileRequestListFileResp

List all files

DelFileDelFileRequest.google.protobuf.Empty

Delete specific file

GetFileMetaGetFileMetaRequestGetFileMetaResponse

Get file meta data; if the file does not exist, returns a code.NotFound error

InvokeBindingInvokeBindingRequestInvokeBindingResponse

Invokes binding data to specific output bindings

GetSecretGetSecretRequestGetSecretResponse

Gets secrets from secret stores.

GetBulkSecretGetBulkSecretRequestGetBulkSecretResponse

Gets a bulk of secrets

+ + + + +

BulkStateItem

+

BulkStateItem is the response item for a bulk get operation.

Return values include the item key, data and etag.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

state item key

databytes

The byte array data

etagstring

The entity tag which represents the specific version of data. +ETag format is defined by the corresponding data store.

errorstring

The error that was returned from the state store in case of a failed get operation.

metadataBulkStateItem.MetadataEntryrepeated

The metadata which will be sent to app.

+ + + + + +

BulkStateItem.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

CommonInvokeRequest

+

Common invoke request message which includes invoke method and data

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
methodstring

The method of the request

datagoogle.protobuf.Any

The request data

content_typestring

The content type of request data

http_extensionHTTPExtension

The extra information of http

+ + + + + +

ConfigurationItem

+

ConfigurationItem represents a configuration item with key, content and other information.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

Required. The key of configuration item

contentstring

The content of configuration item +Empty if the configuration is not set, including the case that the configuration is changed from value-set to value-not-set.

groupstring

The group of configuration item.

labelstring

The label of configuration item.

tagsConfigurationItem.TagsEntryrepeated

The tag list of configuration item.

metadataConfigurationItem.MetadataEntryrepeated

The metadata which will be passed to configuration store component.

+ + + + + +

ConfigurationItem.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

ConfigurationItem.TagsEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

DelFileRequest

+

Delete file request message

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
requestFileRequest

File request

+ + + + + +

DeleteBulkStateRequest

+

DeleteBulkStateRequest is the message to delete a list of key-value states from specific state store.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of state store.

statesStateItemrepeated

Required. The array of the state key values.

+ + + + + +

DeleteConfigurationRequest

+

DeleteConfigurationRequest is the message to delete a list of key-value configuration from specified configuration store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of configuration store.

app_idstring

The application id which +Only used for admin, Ignored and reset for normal client

groupstring

The group of keys.

labelstring

The label for keys.

keysstringrepeated

The keys to get.

metadataDeleteConfigurationRequest.MetadataEntryrepeated

The metadata which will be sent to configuration store components.

+ + + + + +

DeleteConfigurationRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

DeleteStateRequest

+

DeleteStateRequest is the message to delete key-value states in the specific state store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of state store.

keystring

Required. The key of the desired state

etagEtag

(optional) The entity tag which represents the specific version of data. +The exact ETag format is defined by the corresponding data store.

optionsStateOptions

(optional) State operation options which includes concurrency/ +consistency/retry_policy.

metadataDeleteStateRequest.MetadataEntryrepeated

(optional) The metadata which will be sent to state store components.

+ + + + + +

DeleteStateRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

Etag

+

Etag represents a state item version

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
valuestring

value sets the etag value

+ + + + + +

ExecuteStateTransactionRequest

+

ExecuteStateTransactionRequest is the message to execute multiple operations on a specified store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
storeNamestring

Required. name of state store.

operationsTransactionalStateOperationrepeated

Required. transactional operation list.

metadataExecuteStateTransactionRequest.MetadataEntryrepeated

(optional) The metadata used for transactional operations.

+ + + + + +

ExecuteStateTransactionRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

FileInfo

+

File info message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
file_namestring

The name of file

sizeint64

The size of file

last_modifiedstring

The modified time of file

metadataFileInfo.MetadataEntryrepeated

The metadata for user extension.

+ + + + + +

FileInfo.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

FileMeta

+

A map that store FileMetaValue

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
metadataFileMeta.MetadataEntryrepeated

A data structure to store metadata

+ + + + + +

FileMeta.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valueFileMetaValue

+ + + + + +

FileMetaValue

+

FileMeta value

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
valuestringrepeated

File meta value

+ + + + + +

FileRequest

+

File request message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of store

namestring

The name of the directory

metadataFileRequest.MetadataEntryrepeated

The metadata for user extension.

+ + + + + +

FileRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetBulkSecretRequest

+

GetBulkSecretRequest is the message to get the secrets from secret store.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of secret store.

metadataGetBulkSecretRequest.MetadataEntryrepeated

The metadata which will be sent to secret store components.

+ + + + + +

GetBulkSecretRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetBulkSecretResponse

+

GetBulkSecretResponse is the response message to convey the requested secrets.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
dataGetBulkSecretResponse.DataEntryrepeated

data hold the secret values. Some secret store, such as kubernetes secret +store, can save multiple secrets for single secret key.

+ + + + + +

GetBulkSecretResponse.DataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valueSecretResponse

+ + + + + +

GetBulkStateRequest

+

GetBulkStateRequest is the message to get a list of key-value states from specific state store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of state store.

keysstringrepeated

Required. The keys to get.

parallelismint32

(optional) The number of parallel operations executed on the state store for a get operation.

metadataGetBulkStateRequest.MetadataEntryrepeated

(optional) The metadata which will be sent to state store components.

+ + + + + +

GetBulkStateRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetBulkStateResponse

+

GetBulkStateResponse is the response conveying the list of state values.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
itemsBulkStateItemrepeated

The list of items containing the keys to get values for.

+ + + + + +

GetConfigurationRequest

+

GetConfigurationRequest is the message to get a list of key-value configuration from specified configuration store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of configuration store.

app_idstring

The application id which +Only used for admin, Ignored and reset for normal client

groupstring

The group of keys.

labelstring

The label for keys.

keysstringrepeated

The keys to get.

metadataGetConfigurationRequest.MetadataEntryrepeated

The metadata which will be sent to configuration store components.

subscribe_updatebool

Subscribes update event for given keys. +If true, when any configuration item in this request is updated, app will receive event by OnConfigurationEvent() of app callback

+ + + + + +

GetConfigurationRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetConfigurationResponse

+

GetConfigurationResponse is the response conveying the list of configuration values.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
itemsConfigurationItemrepeated

The list of items containing configuration values.

+ + + + + +

GetFileMetaRequest

+

Get fileMeta request message

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
requestFileRequest

File meta request

+ + + + + +

GetFileMetaResponse

+

Get fileMeta response message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
sizeint64

The size of file

last_modifiedstring

The modified time of file

responseFileMeta

File meta response

+ + + + + +

GetFileRequest

+

Get file request message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of store

namestring

The name of the file or object want to get.

metadataGetFileRequest.MetadataEntryrepeated

The metadata for user extension.

+ + + + + +

GetFileRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetFileResponse

+

Get file response message

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
databytes

The data of file

+ + + + + +

GetNextIdRequest

+

Get next id request message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. Name of sequencer storage

keystring

Required. key is the identifier of a sequencer namespace,e.g. "order_table".

optionsSequencerOptions

(optional) SequencerOptions configures requirements for auto-increment guarantee

metadataGetNextIdRequest.MetadataEntryrepeated

(optional) The metadata which will be sent to the component.

+ + + + + +

GetNextIdRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetNextIdResponse

+

Get next id response message

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
next_idint64

The next unique id +Fixed int64 overflow problems on JavaScript https://github.com/improbable-eng/ts-protoc-gen#gotchas

+ + + + + +

GetSecretRequest

+

GetSecretRequest is the message to get secret from secret store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of secret store.

keystring

The name of secret key.

metadataGetSecretRequest.MetadataEntryrepeated

The metadata which will be sent to secret store components. +Contains version, status, and so on...

+ + + + + +

GetSecretRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetSecretResponse

+

GetSecretResponse is the response message to convey the requested secret.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
dataGetSecretResponse.DataEntryrepeated

data is the secret value. Some secret store, such as kubernetes secret +store, can save multiple secrets for single secret key.

+ + + + + +

GetSecretResponse.DataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetStateRequest

+

GetStateRequest is the message to get key-value states from specific state store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of state store.

keystring

Required. The key of the desired state

consistencyStateOptions.StateConsistency

(optional) read consistency mode

metadataGetStateRequest.MetadataEntryrepeated

(optional) The metadata which will be sent to state store components.

+ + + + + +

GetStateRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetStateResponse

+

GetStateResponse is the response conveying the state value and etag.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
databytes

The byte array data

etagstring

The entity tag which represents the specific version of data. +ETag format is defined by the corresponding data store.

metadataGetStateResponse.MetadataEntryrepeated

The metadata which will be sent to app.

+ + + + + +

GetStateResponse.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

HTTPExtension

+

Http extension message is about invoke http information

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
verbHTTPExtension.Verb

The method of http request

querystringstring

The query information of http

+ + + + + +

InvokeBindingRequest

+

InvokeBindingRequest is the message to send data to output bindings

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
namestring

The name of the output binding to invoke.

databytes

The data which will be sent to output binding.

metadataInvokeBindingRequest.MetadataEntryrepeated

The metadata passing to output binding components +Common metadata property: +- ttlInSeconds : the time to live in seconds for the message. +If set in the binding definition will cause all messages to +have a default time to live. The message ttl overrides any value +in the binding definition.

operationstring

The name of the operation type for the binding to invoke

+ + + + + +

InvokeBindingRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

InvokeBindingResponse

+

InvokeBindingResponse is the message returned from an output binding invocation

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
databytes

The data which will be sent to output binding.

metadataInvokeBindingResponse.MetadataEntryrepeated

The metadata returned from an external system

+ + + + + +

InvokeBindingResponse.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

InvokeResponse

+

Invoke service response message is the result of an invoke service request

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
datagoogle.protobuf.Any

The response data

content_typestring

The content type of response data

+ + + + + +

InvokeServiceRequest

+

Invoke service request message

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
idstring

The identity of InvokeServiceRequest

messageCommonInvokeRequest

InvokeServiceRequest message

+ + + + + +

ListFileRequest

+

List file request message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
requestFileRequest

File request

page_sizeint32

Page size

markerstring

Marker

+ + + + + +

ListFileResp

+

List file response message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
filesFileInforepeated

File info

markerstring

Marker

is_truncatedbool

Is truncated

+ + + + + +

PublishEventRequest

+

PublishEventRequest is the message to publish event data to pubsub topic

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
pubsub_namestring

The name of the pubsub component

topicstring

The pubsub topic

databytes

The data which will be published to topic.

data_content_typestring

The content type for the data (optional).

metadataPublishEventRequest.MetadataEntryrepeated

The metadata passing to pub components + +metadata property: +- key : the key of the message.

+ + + + + +

PublishEventRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

PutFileRequest

+

Put file request message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of store

namestring

The name of the file or object want to put.

databytes

The data to be stored.

metadataPutFileRequest.MetadataEntryrepeated

The metadata for user extension.

+ + + + + +

PutFileRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

SaveConfigurationRequest

+

SaveConfigurationRequest is the message to save a list of key-value configuration into specified configuration store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of configuration store.

app_idstring

The application id which +Only used for admin, ignored and reset for normal client

itemsConfigurationItemrepeated

The list of configuration items to save. +To delete an existing item, set the key (also label) and leave the content empty

metadataSaveConfigurationRequest.MetadataEntryrepeated

The metadata which will be sent to configuration store components.

+ + + + + +

SaveConfigurationRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

SaveStateRequest

+

SaveStateRequest is the message to save multiple states into state store.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of state store.

statesStateItemrepeated

Required. The array of the state key values.

+ + + + + +

SayHelloRequest

+

Hello request message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
service_namestring

The name of service

namestring

Request name

datagoogle.protobuf.Any

Optional. This field is used to control the packet size during load tests.

+ + + + + +

SayHelloResponse

+

Hello response message

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
hellostring

Hello

datagoogle.protobuf.Any

Hello message of data

+ + + + + +

SecretResponse

+

SecretResponse is a map of decrypted string/string values

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
secretsSecretResponse.SecretsEntryrepeated

The data struct of secrets

+ + + + + +

SecretResponse.SecretsEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

SequencerOptions

+

SequencerOptions configures requirements for auto-increment guarantee

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
incrementSequencerOptions.AutoIncrement

Default STRONG auto-increment

+ + + + + +

StateItem

+

StateItem represents state key, value, and additional options to save state.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

Required. The state key

valuebytes

Required. The state data for key

etagEtag

(optional) The entity tag which represents the specific version of data. +The exact ETag format is defined by the corresponding data store. Layotto runtime only treats ETags as opaque strings.

metadataStateItem.MetadataEntryrepeated

(optional) additional key-value pairs to be passed to the state store.

optionsStateOptions

(optional) Options for concurrency and consistency to save the state.

+ + + + + +

StateItem.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

StateOptions

+

StateOptions configures concurrency and consistency for state operations

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
concurrencyStateOptions.StateConcurrency

The state operation of concurrency

consistencyStateOptions.StateConsistency

The state operation of consistency

+ + + + + +

SubscribeConfigurationRequest

+

SubscribeConfigurationRequest is the message to get a list of key-value configuration from specified configuration store.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of configuration store.

app_idstring

The application id which +Only used for admin, ignored and reset for normal client

groupstring

The group of keys.

labelstring

The label for keys.

keysstringrepeated

The keys to get.

metadataSubscribeConfigurationRequest.MetadataEntryrepeated

The metadata which will be sent to configuration store components.

+ + + + + +

SubscribeConfigurationRequest.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

SubscribeConfigurationResponse

+

SubscribeConfigurationResponse is the response conveying the list of configuration values.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of configuration store.

app_idstring

The application id. +Only used for admin client.

itemsConfigurationItemrepeated

The list of items containing configuration values.

+ + + + + +

TransactionalStateOperation

+

TransactionalStateOperation is the message to execute a specified operation with a key-value pair.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
operationTypestring

Required. The type of operation to be executed. +Legal values include: +"upsert" represents an update or create operation +"delete" represents a delete operation

requestStateItem

Required. State values to be operated on

+ + + + + +

TryLockRequest

+

Lock request message for the distributed lock API, a non-blocking method trying to get a lock with ttl

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The lock store name,e.g. `redis`.

resource_idstring

Required. resource_id is the lock key. e.g. `order_id_111` +It stands for "which resource I want to protect"

lock_ownerstring

Required. lock_owner indicate the identifier of lock owner. +You can generate a uuid as lock_owner.For example,in golang: +req.LockOwner = uuid.New().String() +This field is per request,not per process,so it is different for each request, +which aims to prevent multi-thread in the same process trying the same lock concurrently. +The reason why we don't make it automatically generated is: +1. If it is automatically generated,there must be a 'my_lock_owner_id' field in the response. +This name is so weird that we think it is inappropriate to put it into the api spec +2. If we change the field 'my_lock_owner_id' in the response to 'lock_owner',which means the current lock owner of this lock, +we find that in some lock services users can't get the current lock owner.Actually users don't need it at all. +3. When reentrant lock is needed,the existing lock_owner is required to identify client and check "whether this client can reenter this lock". +So this field in the request shouldn't be removed.

expireint32

Required. expire is the time before expire.The time unit is second.

+ + + + + +

TryLockResponse

+

Lock response message returns whether the lock was obtained.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
successbool

Is lock success

+ + + + + +

UnlockRequest

+

UnLock request message

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

The name of store

resource_idstring

resource_id is the lock key.

lock_ownerstring

The owner of the lock

+ + + + + +

UnlockResponse

+

UnLock response message

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
statusUnlockResponse.Status

The status of unlock

+ + + + + + + +

HTTPExtension.Verb

+

The enum of http request method

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameNumberDescription
NONE0

NONE

GET1

GET method

HEAD2

HEAD method

POST3

POST method

PUT4

PUT method

DELETE5

DELETE method

CONNECT6

CONNECT method

OPTIONS7

OPTIONS method

TRACE8

TRACE method

PATCH9

PATCH method

+ +

SequencerOptions.AutoIncrement

+

requirements for auto-increment guarantee

+ + + + + + + + + + + + + + + + + + + +
NameNumberDescription
WEAK0

(default) WEAK means a "best effort" incrementing service.But there is no strict guarantee of global monotonically increasing. +The next id is "probably" greater than current id.

STRONG1

STRONG means a strict guarantee of global monotonically increasing. +The next id "must" be greater than current id.

+ +

StateOptions.StateConcurrency

+

Enum describing the supported concurrency for state.

The API server uses Optimistic Concurrency Control (OCC) with ETags.

When an ETag is associated with a save or delete request, the store shall allow the update only if the attached ETag matches the latest ETag in the database.

But when ETag is missing in the write requests, the state store shall handle the requests in the specified strategy(e.g. a last-write-wins fashion).

+ + + + + + + + + + + + + + + + + + + + + + + + + +
NameNumberDescription
CONCURRENCY_UNSPECIFIED0

Concurrency state is unspecified

CONCURRENCY_FIRST_WRITE1

First write wins

CONCURRENCY_LAST_WRITE2

Last write wins

+ +

StateOptions.StateConsistency

+

Enum describing the supported consistency for state.

+ + + + + + + + + + + + + + + + + + + + + + + + + +
NameNumberDescription
CONSISTENCY_UNSPECIFIED0

Consistency state is unspecified

CONSISTENCY_EVENTUAL1

The API server assumes data stores are eventually consistent by default.A state store should: +- For read requests, the state store can return data from any of the replicas +- For write request, the state store should asynchronously replicate updates to configured quorum after acknowledging the update request.

CONSISTENCY_STRONG2

When a strong consistency hint is attached, a state store should: +- For read requests, the state store should return the most up-to-date data consistently across replicas. +- For write/delete requests, the state store should synchronously replicate updated data to configured quorum before completing the write request.

+ +

UnlockResponse.Status

+

The enum of unlock status

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameNumberDescription
SUCCESS0

Unlock is success

LOCK_UNEXIST1

The lock does not exist

LOCK_BELONG_TO_OTHERS2

The lock belongs to others

INTERNAL_ERROR3

Internal error

+ + + + + + +

Scalar Value Types

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
.proto TypeNotesC++JavaPythonGoC#PHPRuby
doubledoubledoublefloatfloat64doublefloatFloat
floatfloatfloatfloatfloat32floatfloatFloat
int32Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead.int32intintint32intintegerBignum or Fixnum (as required)
int64Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead.int64longint/longint64longinteger/stringBignum
uint32Uses variable-length encoding.uint32intint/longuint32uintintegerBignum or Fixnum (as required)
uint64Uses variable-length encoding.uint64longint/longuint64ulonginteger/stringBignum or Fixnum (as required)
sint32Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s.int32intintint32intintegerBignum or Fixnum (as required)
sint64Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s.int64longint/longint64longinteger/stringBignum
fixed32Always four bytes. More efficient than uint32 if values are often greater than 2^28.uint32intintuint32uintintegerBignum or Fixnum (as required)
fixed64Always eight bytes. More efficient than uint64 if values are often greater than 2^56.uint64longint/longuint64ulonginteger/stringBignum
sfixed32Always four bytes.int32intintint32intintegerBignum or Fixnum (as required)
sfixed64Always eight bytes.int64longint/longint64longinteger/stringBignum
boolboolbooleanbooleanboolboolbooleanTrueClass/FalseClass
stringA string must always contain UTF-8 encoded or 7-bit ASCII text.stringStringstr/unicodestringstringstringString (UTF-8)
bytesMay contain any arbitrary sequence of bytes.stringByteStringstr[]byteByteStringstringString (ASCII-8BIT)
+ + diff --git a/docs/api/v1/s3.html b/docs/api/v1/s3.html new file mode 100644 index 0000000000..d56e57834c --- /dev/null +++ b/docs/api/v1/s3.html @@ -0,0 +1,5533 @@ + + + + + Protocol Documentation + + + + + + + + + + +

Protocol Documentation

+ +

Table of Contents

+ +
+ +
+ + + +
+

oss.proto

Top +
+

The file defined base on s3 protocol, to get an in-depth walkthrough of this file, see:

https://docs.aws.amazon.com/s3/index.html

https://github.com/aws/aws-sdk-go-v2

+ + +

[gRPC Service] ObjectStorageService

+

ObjectStorageService

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Method NameRequest TypeResponse TypeDescription
PutObjectPutObjectInput streamPutObjectOutput

Object CRUD API +Adds an object to a bucket. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html

GetObjectGetObjectInputGetObjectOutput stream

Retrieves objects. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html

DeleteObjectDeleteObjectInputDeleteObjectOutput

Delete objects. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html

CopyObjectCopyObjectInputCopyObjectOutput

Creates a copy of an object that is already stored in oss server. +Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CopyObject.html

DeleteObjectsDeleteObjectsInputDeleteObjectsOutput

Delete multiple objects from a bucket. +Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_DeleteObjects.html

ListObjectsListObjectsInputListObjectsOutput

Returns some or all (up to 1,000) of the objects in a bucket. +Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_ListObjects.html

HeadObjectHeadObjectInputHeadObjectOutput

The HEAD action retrieves metadata from an object without returning the object itself. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html

IsObjectExistIsObjectExistInputIsObjectExistOutput

This action used to check if the file exists.

PutObjectTaggingPutObjectTaggingInputPutObjectTaggingOutput

Object Tagging API +Sets the supplied tag-set to an object that already exists in a bucket. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html

DeleteObjectTaggingDeleteObjectTaggingInputDeleteObjectTaggingOutput

Removes the entire tag set from the specified object. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html

GetObjectTaggingGetObjectTaggingInputGetObjectTaggingOutput

Returns the tag-set of an object. +Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_GetObjectTagging.html

GetObjectCannedAclGetObjectCannedAclInputGetObjectCannedAclOutput

Returns object canned acl. +Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL

PutObjectCannedAclPutObjectCannedAclInputPutObjectCannedAclOutput

Set object canned acl. +Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL

CreateMultipartUploadCreateMultipartUploadInputCreateMultipartUploadOutput

Object Multipart Operation API +Initiates a multipart upload and returns an upload ID. +Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CreateMultipartUpload.html

UploadPartUploadPartInput streamUploadPartOutput

Uploads a part in a multipart upload. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html

UploadPartCopyUploadPartCopyInputUploadPartCopyOutput

Uploads a part by copying data from an existing object as data source. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html

CompleteMultipartUploadCompleteMultipartUploadInputCompleteMultipartUploadOutput

Completes a multipart upload by assembling previously uploaded parts. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html

AbortMultipartUploadAbortMultipartUploadInputAbortMultipartUploadOutput

This action aborts a multipart upload. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html

ListMultipartUploadsListMultipartUploadsInputListMultipartUploadsOutput

This action lists in-progress multipart uploads. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html

ListPartsListPartsInputListPartsOutput

Lists the parts that have been uploaded for a specific multipart upload. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html

ListObjectVersionsListObjectVersionsInputListObjectVersionsOutput

Returns metadata about all versions of the objects in a bucket. +Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html

SignURLSignURLInputSignURLOutput

A presigned URL gives you access to the object identified in the URL, provided that the creator of the presigned URL has permissions to access that object. +Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/PresignedUrlUploadObject.html

UpdateDownloadBandwidthRateLimitUpdateBandwidthRateLimitInput.google.protobuf.Empty

This action is used to set the download bandwidth limit speed. Refer to https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2106

UpdateUploadBandwidthRateLimitUpdateBandwidthRateLimitInput.google.protobuf.Empty

This action is used to set the upload bandwidth limit speed. Refer to https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2096

AppendObjectAppendObjectInput streamAppendObjectOutput

This action is used to append an object. Refer to https://help.aliyun.com/document_detail/31981.html or https://github.com/minio/minio-java/issues/980

RestoreObjectRestoreObjectInputRestoreObjectOutput

Restores an archived copy of an object. Refer to https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_RestoreObject.html

+ + + + +

AbortMultipartUploadInput

+

AbortMultipartUploadInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

expected_bucket_ownerstring

The account ID of the expected bucket owner

request_payerstring

Confirms that the requester knows that they will be charged for the request.

upload_idstring

Upload ID that identifies the multipart upload. +This member is required.

+ + + + + +

AbortMultipartUploadOutput

+

AbortMultipartUploadOutput

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
request_chargedstring

If present, indicates that the requester was successfully charged for the request.

+ + + + + +

AppendObjectInput

+

AppendObjectInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

bodybytes

Object content

positionint64

Append start position

aclstring

Object ACL

cache_controlstring

Sets the Cache-Control header of the response.

content_dispositionstring

Sets the Content-Disposition header of the response

content_encodingstring

Sets the Content-Encoding header of the response

content_md5string

The base64-encoded 128-bit MD5 digest of the part data.

expiresint64

Sets the Expires header of the response

storage_classstring

Provides storage class information of the object. Amazon S3 returns this header +for all objects except for S3 Standard storage class objects.

server_side_encryptionstring

The server-side encryption algorithm used when storing this object in Amazon S3 +(for example, AES256, aws:kms).

metastring

Object metadata

tagsAppendObjectInput.TagsEntryrepeated

Object tags

+ + + + + +

AppendObjectInput.TagsEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

AppendObjectOutput

+

AppendObjectOutput

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
append_positionint64

Next append position

+ + + + + +

CompleteMultipartUploadInput

+

CompleteMultipartUploadInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

upload_idstring

ID for the initiated multipart upload. +This member is required.

request_payerstring

Confirms that the requester knows that they will be charged for the request.

expected_bucket_ownerstring

Expected bucket owner

multipart_uploadCompletedMultipartUpload

The container for the multipart upload request information.

+ + + + + +

CompleteMultipartUploadOutput

+

CompleteMultipartUploadOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

bucket_key_enabledbool

Indicates whether the multipart upload uses an S3 Bucket Key for server-side +encryption with Amazon Web Services KMS (SSE-KMS).

etagstring

Entity tag that identifies the newly created object's data

expirationstring

If the object expiration is configured, this will contain the expiration date +(expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded.

locationstring

The URI that identifies the newly created object.

request_chargedstring

If present, indicates that the requester was successfully charged for the +request.

sse_kms_keyIdstring

If present, specifies the ID of the Amazon Web Services Key Management Service +(Amazon Web Services KMS) symmetric customer managed key that was used for the +object.

server_side_encryptionstring

The server-side encryption algorithm used when storing this object in Amazon S3 +(for example, AES256, aws:kms).

version_idstring

Version ID of the newly created object, in case the bucket has versioning turned +on.

+ + + + + +

CompletedMultipartUpload

+

CompletedMultipartUpload

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
partsCompletedPartrepeated

Array of CompletedPart data types.

+ + + + + +

CompletedPart

+

CompletedPart

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
etagstring

Entity tag returned when the part was uploaded.

part_numberint32

Part number that identifies the part. This is a positive integer between 1 and +10,000.

+ + + + + +

CopyObjectInput

+

CopyObjectInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The name of the destination bucket. When using this action with an access point +This member is required.

keystring

The key of the destination object. +This member is required.

copy_sourceCopySource

CopySource

taggingCopyObjectInput.TaggingEntryrepeated

The tag-set for the object destination object this value must be used in +conjunction with the TaggingDirective. The tag-set must be encoded as URL Query +parameters.

expiresint64

The date and time at which the object is no longer cacheable.

metadata_directivestring

Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.

metadataCopyObjectInput.MetadataEntryrepeated

A map of metadata to store with the object in S3.

+ + + + + +

CopyObjectInput.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

CopyObjectInput.TaggingEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

CopyObjectOutput

+

CopyObjectOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
copy_object_resultCopyObjectResult

Container for all response elements.

version_idstring

Version ID of the newly created copy.

expirationstring

If the object expiration is configured, the response includes this header.

+ + + + + +

CopyObjectResult

+

CopyObjectResult

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
etagstring

Returns the ETag of the new object. The ETag reflects only changes to the +contents of an object, not its metadata.

last_modifiedint64

Creation date of the object.

+ + + + + +

CopyPartResult

+

CopyPartResult

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
etagstring

Entity tag of the object.

last_modifiedint64

Last modified time

+ + + + + +

CopySource

+

CopySource

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
copy_source_bucketstring

source object bucket name

copy_source_keystring

source object name

copy_source_version_idstring

source object version

+ + + + + +

CreateMultipartUploadInput

+

CreateMultipartUploadInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

aclstring

The canned ACL to apply to the object. This action is not supported by Amazon S3 +on Outposts.

bucket_key_enabledbool

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption +with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true +causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. +Specifying this header with a PUT action doesn’t affect bucket-level settings +for S3 Bucket Key.

cache_controlstring

Specifies caching behavior along the request/reply chain

content_dispositionstring

Specifies presentational information for the object

content_encodingstring

Specifies what content encodings have been applied to the object and thus what +decoding mechanisms must be applied to obtain the media-type referenced by the +Content-Type header field.

content_languagestring

The language the content is in.

content_typestring

A standard MIME type describing the format of the object data.

expected_bucket_ownerstring

The account ID of the expected bucket owner. If the bucket is owned by a +different account, the request fails with the HTTP status code 403 Forbidden +(access denied).

expiresint64

The date and time at which the object is no longer cacheable.

grant_full_controlstring

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This +action is not supported by Amazon S3 on Outposts.

grant_readstring

Allows grantee to read the object data and its metadata. This action is not +supported by Amazon S3 on Outposts.

grant_read_acpstring

Allows grantee to read the object ACL. This action is not supported by Amazon S3 +on Outposts.

grant_write_acpstring

Allows grantee to write the ACL for the applicable object. This action is not +supported by Amazon S3 on Outposts.

meta_dataCreateMultipartUploadInput.MetaDataEntryrepeated

A map of metadata to store with the object

object_lock_legal_hold_statusstring

Specifies whether you want to apply a legal hold to the uploaded object

object_lock_modestring

Specifies the Object Lock mode that you want to apply to the uploaded object

object_lock_retain_until_dateint64

Specifies the date and time when you want the Object Lock to expire

request_payerstring

Confirms that the requester knows that they will be charged for the request

sse_customer_algorithmstring

Specifies the algorithm to use when encrypting the object (for example, AES256).

sse_customer_keystring

Specifies the customer-provided encryption key to use in encrypting data

sse_customer_key_md5string

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321

sse_kms_encryption_contextstring

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption

sse_kms_key_idstring

Specifies the ID of the symmetric customer managed key to use for object encryption

server_side_encryptionstring

The server-side encryption algorithm used when storing this object

storage_classstring

By default, oss store uses the STANDARD Storage Class to store newly created objects

taggingCreateMultipartUploadInput.TaggingEntryrepeated

The tag-set for the object. The tag-set must be encoded as URL Query parameters.

website_redirect_locationstring

If the bucket is configured as a website, redirects requests for this object to +another object in the same bucket or to an external URL.

+ + + + + +

CreateMultipartUploadInput.MetaDataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

CreateMultipartUploadInput.TaggingEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

CreateMultipartUploadOutput

+

CreateMultipartUploadOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

abort_dateint64

If the bucket has a lifecycle rule configured with an action to abort incomplete +multipart uploads and the prefix in the lifecycle rule matches the object name +in the request, the response includes this header

abort_rule_idstring

It identifies the applicable lifecycle configuration rule that defines the action to abort +incomplete multipart uploads.

bucket_key_enabledbool

Indicates whether the multipart upload uses an S3 Bucket Key for server-side +encryption with Amazon Web Services KMS (SSE-KMS).

request_chargedstring

If present, indicates that the requester was successfully charged for the +request.

sse_customer_algorithmstring

If server-side encryption with a customer-provided encryption key was requested, +the response will include this header confirming the encryption algorithm used.

sse_customer_key_md5string

If server-side encryption with a customer-provided encryption key was requested, +the response will include this header to provide round-trip message integrity +verification of the customer-provided encryption key.

sse_kms_encryption_contextstring

If present, specifies the Amazon Web Services KMS Encryption Context to use for +object encryption. The value of this header is a base64-encoded UTF-8 string +holding JSON with the encryption context key-value pairs.

sse_kms_key_idstring

If present, specifies the ID of the Amazon Web Services Key Management Service +(Amazon Web Services KMS) symmetric customer managed key that was used for the +object.

server_side_encryptionstring

The server-side encryption algorithm used when storing this object in Amazon S3 +(for example, AES256, aws:kms).

upload_idstring

ID for the initiated multipart upload.

+ + + + + +

Delete

+

Delete

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
objectsObjectIdentifierrepeated

ObjectIdentifier

quietbool

Element to enable quiet mode for the request. When you add this element, you +must set its value to true.

+ + + + + +

DeleteMarkerEntry

+

DeleteMarkerEntry

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
is_latestbool

Specifies whether the object is (true) or is not (false) the latest version of +an object.

keystring

Name of the object key. +This member is required.

last_modifiedint64

Date and time the object was last modified.

ownerOwner

Owner

version_idstring

Version ID of an object.

+ + + + + +

DeleteObjectInput

+

DeleteObjectInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name to which the DEL action was initiated +This member is required.

keystring

Object key for which the DEL action was initiated. +This member is required.

request_payerstring

Confirms that the requester knows that they will be charged for the request.

version_idstring

VersionId used to reference a specific version of the object.

+ + + + + +

DeleteObjectOutput

+

DeleteObjectOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
delete_markerbool

Specifies whether the versioned object that was permanently deleted was (true) +or was not (false) a delete marker.

request_chargedstring

If present, indicates that the requester was successfully charged for the +request.

version_idstring

Returns the version ID of the delete marker created as a result of the DELETE +operation.

+ + + + + +

DeleteObjectTaggingInput

+

DeleteObjectTaggingInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the objects from which to remove the tags.

keystring

The key that identifies the object in the bucket from which to remove all tags. +This member is required.

version_idstring

The versionId of the object that the tag-set will be removed from.

expected_bucket_ownerstring

The account ID of the expected bucket owner. If the bucket is owned by a +different account, the request fails with the HTTP status code 403 Forbidden +(access denied).

+ + + + + +

DeleteObjectTaggingOutput

+

DeleteObjectTaggingOutput

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
version_idstring

The versionId of the object the tag-set was removed from.

result_metadataDeleteObjectTaggingOutput.ResultMetadataEntryrepeated

Metadata pertaining to the operation's result.

+ + + + + +

DeleteObjectTaggingOutput.ResultMetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

DeleteObjectsInput

+

DeleteObjectsInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

deleteDelete

Delete objects

request_payerstring

Confirms that the requester knows that they will be charged for the request.

+ + + + + +

DeleteObjectsOutput

+

DeleteObjectsOutput

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
deletedDeletedObjectrepeated

DeletedObject

+ + + + + +

DeletedObject

+

DeletedObject

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
delete_markerbool

Specifies whether the versioned object that was permanently deleted was (true) +or was not (false) a delete marker. In a simple DELETE, this header indicates +whether (true) or not (false) a delete marker was created.

delete_marker_version_idstring

The version ID of the delete marker created as a result of the DELETE operation. +If you delete a specific object version, the value returned by this header is +the version ID of the object version deleted.

keystring

The name of the deleted object.

version_idstring

The version ID of the deleted object.

+ + + + + +

GetObjectCannedAclInput

+

GetObjectCannedAclInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

version_idstring

VersionId used to reference a specific version of the object

+ + + + + +

GetObjectCannedAclOutput

+

GetObjectCannedAclOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
canned_aclstring

Object CannedACL

ownerOwner

Owner

request_chargedstring

If present, indicates that the requester was successfully charged for the +request.

+ + + + + +

GetObjectInput

+

GetObjectInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Key of the object to get +This member is required

expected_bucket_ownerstring

The account ID of the expected bucket owner

if_matchstring

Return the object only if its entity tag (ETag) is the same as the one specified

if_modified_sinceint64

Return the object only if it has been modified since the specified time

if_none_matchstring

Return the object only if its entity tag (ETag) is different from the one specified

if_unmodified_sinceint64

Return the object only if it has not been modified since the specified time

part_numberint64

Part number of the object being read. This is a positive integer between 1 and +10,000. Effectively performs a 'ranged' GET request for the part specified. +Useful for downloading just a part of an object.

startint64

Downloads the specified range of bytes of an object. start is used to specify the position where the range starts

endint64

end is used to specify the position where the range ends

request_payerstring

Confirms that the requester knows that they will be charged for the request.

response_cache_controlstring

Sets the Cache-Control header of the response.

response_content_dispositionstring

Sets the Content-Disposition header of the response

response_content_encodingstring

Sets the Content-Encoding header of the response

response_content_languagestring

Sets the Content-Language header of the response

response_content_typestring

Sets the Content-Type header of the response

response_expiresstring

Sets the Expires header of the response

sse_customer_algorithmstring

Specifies the algorithm to use when decrypting the object (for example, AES256)

sse_customer_keystring

Specifies the customer-provided encryption key for Amazon S3 used to encrypt the +data. This value is used to decrypt the object when recovering it and must match +the one used when storing the data. The key must be appropriate for use with the +algorithm specified in the x-amz-server-side-encryption-customer-algorithm header

sse_customer_key_md5string

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 +Amazon S3 uses this header for a message integrity check to ensure that the +encryption key was transmitted without error.

version_idstring

VersionId used to reference a specific version of the object

accept_encodingstring

Specifies the Accept-Encoding header; currently not supported by AWS

signed_urlstring

Specifies the signed URL of the object; a user can get the object with a signed URL without providing an access key (AK) or secret key (SK)

+ + + + + +

GetObjectOutput

+

GetObjectOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
bodybytes

Object data.

cache_controlstring

Specifies caching behavior along the request/reply chain.

content_dispositionstring

Specifies presentational information for the object.

content_encodingstring

Specifies what content encodings have been applied to the object and thus what +decoding mechanisms must be applied to obtain the media-type referenced by the +Content-Type header field.

content_languagestring

The language the content is in.

content_lengthint64

Size of the body in bytes.

content_rangestring

The portion of the object returned in the response.

content_typestring

A standard MIME type describing the format of the object data.

delete_markerbool

Specifies whether the object retrieved was (true) or was not (false) a Delete +Marker. If false, this response header does not appear in the response.

etagstring

An entity tag (ETag) is an opaque identifier assigned by a web server to a +specific version of a resource found at a URL.

expirationstring

If the object expiration is configured (see PUT Bucket lifecycle), the response +includes this header. It includes the expiry-date and rule-id key-value pairs +providing object expiration information. The value of the rule-id is +URL-encoded.

expiresstring

The date and time at which the object is no longer cacheable.

last_modifiedint64

Creation date of the object.

version_idstring

Version of the object.

tag_countint64

The number of tags, if any, on the object.

storage_classstring

Provides storage class information of the object. Amazon S3 returns this header +for all objects except for S3 Standard storage class objects.

parts_countint64

The count of parts this object has. This value is only returned if you specify +partNumber in your request and the object was uploaded as a multipart upload.

metadataGetObjectOutput.MetadataEntryrepeated

A map of metadata to store with the object in S3. +Map keys will be normalized to lower-case.

+ + + + + +

GetObjectOutput.MetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetObjectTaggingInput

+

GetObjectTaggingInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object for which to get the tagging information. +This member is required.

keystring

Object key for which to get the tagging information. +This member is required.

version_idstring

The versionId of the object for which to get the tagging information.

expected_bucket_ownerstring

The account ID of the expected bucket owner. If the bucket is owned by a +different account, the request fails with the HTTP status code 403 Forbidden +(access denied).

request_payerstring

Confirms that the requester knows that they will be charged for the request.

+ + + + + +

GetObjectTaggingOutput

+

GetObjectTaggingOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
tagsGetObjectTaggingOutput.TagsEntryrepeated

Contains the tag set. +This member is required.

version_idstring

The versionId of the object for which you got the tagging information.

result_metadataGetObjectTaggingOutput.ResultMetadataEntryrepeated

Metadata pertaining to the operation's result.

+ + + + + +

GetObjectTaggingOutput.ResultMetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

GetObjectTaggingOutput.TagsEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

HeadObjectInput

+

HeadObjectInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

checksum_modestring

To retrieve the checksum, this parameter must be enabled

expected_bucket_ownerstring

The account ID of the expected bucket owner

if_matchstring

Return the object only if its entity tag (ETag) is the same as the one +specified; otherwise, return a 412 (precondition failed) error.

if_modified_sinceint64

Return the object only if it has been modified since the specified time; +otherwise, return a 304 (not modified) error.

if_none_matchstring

Return the object only if its entity tag (ETag) is different from the one +specified

if_unmodified_sinceint64

Return the object only if it has not been modified since the specified time;

part_numberint32

Part number of the object being read. This is a positive integer between 1 and +10,000. Effectively performs a 'ranged' HEAD request for the part specified. +Useful querying about the size of the part and the number of parts in this +object.

request_payerstring

Confirms that the requester knows that they will be charged for the request.

sse_customer_algorithmstring

Specifies the algorithm to use when encrypting the object (for example, AES256).

sse_customer_keystring

Specifies the customer-provided encryption key for Amazon S3 to use in +encrypting data

sse_customer_key_md5string

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.

version_idstring

VersionId used to reference a specific version of the object.

with_detailsbool

Return object details meta

+ + + + + +

HeadObjectOutput

+

HeadObjectOutput

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
result_metadataHeadObjectOutput.ResultMetadataEntryrepeated

Metadata pertaining to the operation's result.

+ + + + + +

HeadObjectOutput.ResultMetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

Initiator

+

Initiator

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
display_namestring

Initiator name

idstring

Initiator id

+ + + + + +

IsObjectExistInput

+

IsObjectExistInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

version_idstring

Object version id

+ + + + + +

IsObjectExistOutput

+

IsObjectExistOutput

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
file_existbool

Object exist or not

+ + + + + +

ListMultipartUploadsInput

+

ListMultipartUploadsInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

delimiterstring

Character you use to group keys. All keys that contain the same string between +the prefix, if specified, and the first occurrence of the delimiter after the +prefix are grouped under a single result element, CommonPrefixes. If you don't +specify the prefix parameter, then the substring starts at the beginning of the +key. The keys that are grouped under CommonPrefixes result element are not +returned elsewhere in the response.

encoding_typestring

Requests Amazon S3 to encode the object keys in the response and specifies the +encoding method to use. An object key may contain any Unicode character;

expected_bucket_ownerstring

The account ID of the expected bucket owner

key_markerstring

Together with upload-id-marker, this parameter specifies the multipart upload +after which listing should begin. If upload-id-marker is not specified, only the +keys lexicographically greater than the specified key-marker will be included in +the list. If upload-id-marker is specified, any multipart uploads for a key +equal to the key-marker might also be included, provided those multipart uploads +have upload IDs lexicographically greater than the specified upload-id-marker.

max_uploadsint64

Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the +response body. 1,000 is the maximum number of uploads that can be returned in a +response.

prefixstring

Lists in-progress uploads only for those keys that begin with the specified +prefix. You can use prefixes to separate a bucket into different grouping of +keys. (You can think of using prefix to make groups in the same way you'd use a +folder in a file system.)

upload_id_markerstring

Together with key-marker, specifies the multipart upload after which listing +should begin. If key-marker is not specified, the upload-id-marker parameter is +ignored. Otherwise, any multipart uploads for a key equal to the key-marker +might be included in the list only if they have an upload ID lexicographically +greater than the specified upload-id-marker.

+ + + + + +

ListMultipartUploadsOutput

+

ListMultipartUploadsOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
bucketstring

The bucket name containing the object +This member is required

common_prefixesstringrepeated

If you specify a delimiter in the request, then the result returns each distinct +key prefix containing the delimiter in a CommonPrefixes element.

delimiterstring

Contains the delimiter you specified in the request. If you don't specify a +delimiter in your request, this element is absent from the response.

encoding_typestring

Encoding type used by Amazon S3 to encode object keys in the response.

is_truncatedbool

Indicates whether the returned list of multipart uploads is truncated. A value +of true indicates that the list was truncated. The list can be truncated if the +number of multipart uploads exceeds the limit allowed or specified by max +uploads.

key_markerstring

The key at or after which the listing began.

max_uploadsint32

Maximum number of multipart uploads that could have been included in the +response.

next_key_markerstring

When a list is truncated, this element specifies the value that should be used +for the key-marker request parameter in a subsequent request.

next_upload_id_markerstring

When a list is truncated, this element specifies the value that should be used +for the upload-id-marker request parameter in a subsequent request.

prefixstring

When a prefix is provided in the request, this field contains the specified +prefix. The result contains only keys starting with the specified prefix.

upload_id_markerstring

Upload ID after which listing began.

uploadsMultipartUploadrepeated

Container for elements related to a particular multipart upload. A response can +contain zero or more Upload elements.

+ + + + + +

ListObjectVersionsInput

+

ListObjectVersionsInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

delimiterstring

A delimiter is a character that you specify to group keys. All keys that contain +the same string between the prefix and the first occurrence of the delimiter are +grouped under a single result element in CommonPrefixes. These groups are +counted as one result against the max-keys limitation. These keys are not +returned elsewhere in the response.

encoding_typestring

Requests Amazon S3 to encode the object keys in the response and specifies the +encoding method to use. An object key may contain any Unicode character;

expected_bucket_ownerstring

The account ID of the expected bucket owner

key_markerstring

Specifies the key to start with when listing objects in a bucket.

max_keysint64

Sets the maximum number of keys returned in the response. By default the action +returns up to 1,000 key names. The response might contain fewer keys but will +never contain more. If additional keys satisfy the search criteria, but were not +returned because max-keys was exceeded, the response contains true. To return +the additional keys, see key-marker and version-id-marker.

prefixstring

Use this parameter to select only those keys that begin with the specified +prefix. You can use prefixes to separate a bucket into different groupings of +keys. (You can think of using prefix to make groups in the same way you'd use a +folder in a file system.) You can use prefix with delimiter to roll up numerous +objects into a single result under CommonPrefixes.

version_id_markerstring

Specifies the object version you want to start listing from.

+ + + + + +

ListObjectVersionsOutput

+

ListObjectVersionsOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
common_prefixesstringrepeated

All of the keys rolled up into a common prefix count as a single return when +calculating the number of returns.

delete_markersDeleteMarkerEntryrepeated

Container for an object that is a delete marker.

delimiterstring

The delimiter grouping the included keys.

encoding_typestring

Encoding type used by Amazon S3 to encode object key names in the XML response.

is_truncatedbool

A flag that indicates whether Amazon S3 returned all of the results that +satisfied the search criteria

key_markerstring

Marks the last key returned in a truncated response.

max_keysint64

Specifies the maximum number of objects to return

namestring

The bucket name.

next_key_markerstring

When the number of responses exceeds the value of MaxKeys, NextKeyMarker +specifies the first key not returned that satisfies the search criteria

next_version_id_markerstring

When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker +specifies the first object version not returned that satisfies the search +criteria.

prefixstring

Selects objects that start with the value supplied by this parameter.

version_id_markerstring

Marks the last version of the key returned in a truncated response.

versionsObjectVersionrepeated

Container for version information.

+ + + + + +

ListObjectsInput

+

ListObjectsInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

delimiterstring

A delimiter is a character you use to group keys.

encoding_typestring

Requests Amazon S3 to encode the object keys in the response and specifies the +encoding method to use. An object key may contain any Unicode character; +however, XML 1.0 parser cannot parse some characters, such as characters with an +ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you +can add this parameter to request that Amazon S3 encode the keys in the +response.

expected_bucket_ownerstring

The account ID of the expected bucket owner. If the bucket is owned by a +different account, the request fails with the HTTP status code 403 Forbidden +(access denied).

markerstring

Marker is where you want Amazon S3 to start listing from. Amazon S3 starts +listing after this specified key. Marker can be any key in the bucket.

maxKeysint32

Sets the maximum number of keys returned in the response. By default the action +returns up to 1,000 key names. The response might contain fewer keys but will +never contain more.

prefixstring

Limits the response to keys that begin with the specified prefix.

request_payerstring

Confirms that the requester knows that they will be charged for the request.

+ + + + + +

ListObjectsOutput

+

ListObjectsOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
common_prefixesstringrepeated

CommonPrefixes

contentsObjectrepeated

Objects contents

delimiterstring

Causes keys that contain the same string between the prefix and the first +occurrence of the delimiter to be rolled up into a single result element in the +CommonPrefixes collection. These rolled-up keys are not returned elsewhere in +the response. Each rolled-up result counts as only one return against the +MaxKeys value.

encoding_typestring

Encoding type used by Amazon S3 to encode object keys in the response.

is_truncatedbool

A flag that indicates whether Amazon S3 returned all of the results that +satisfied the search criteria.

markerstring

Indicates where in the bucket listing begins. Marker is included in the response +if it was sent with the request.

max_keysint32

The maximum number of keys returned in the response body.

namestring

The bucket name.

next_markerstring

When response is truncated (the IsTruncated element value in the response is +true), you can use the key name in this field as marker in the subsequent +request to get next set of objects.

prefixstring

Keys that begin with the indicated prefix.

+ + + + + +

ListPartsInput

+

ListPartsInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

expected_bucket_ownerstring

The account ID of the expected bucket owner

max_partsint64

Sets the maximum number of parts to return

part_number_markerint64

Specifies the part after which listing should begin. Only parts with higher part +numbers will be listed.

request_payerstring

Confirms that the requester knows that they will be charged for the request.

upload_idstring

Upload ID identifying the multipart upload whose parts are being listed.

+ + + + + +

ListPartsOutput

+

ListPartsOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

upload_idstring

Upload ID identifying the multipart upload whose parts are being listed.

next_part_number_markerstring

When a list is truncated, this element specifies the last part in the list, as +well as the value to use for the part-number-marker request parameter in a +subsequent request.

max_partsint64

Maximum number of parts that were allowed in the response.

is_truncatedbool

Indicates whether the returned list of parts is truncated. A true value +indicates that the list was truncated. A list can be truncated if the number of +parts exceeds the limit returned in the MaxParts element.

partsPartrepeated

Container for elements related to a particular part. A response can contain zero +or more Part elements.

+ + + + + +

MultipartUpload

+

MultipartUpload

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
initiatedint64

Date and time at which the multipart upload was initiated.

initiatorInitiator

Identifies who initiated the multipart upload.

keystring

Name of the object key. +This member is required.

ownerOwner

Specifies the owner of the object that is part of the multipart upload.

storage_classstring

The class of storage used to store the object.

upload_idstring

Upload ID that identifies the multipart upload.

+ + + + + +

Object

+

Object

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
etagstring

The entity tag is a hash of the object

keystring

The name that you assign to an object. You use the object key to retrieve the +object.

last_modifiedint64

Creation date of the object.

ownerOwner

The owner of the object

sizeint64

Size in bytes of the object

storage_classstring

The class of storage used to store the object.

+ + + + + +

ObjectIdentifier

+

ObjectIdentifier

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

Key name of the object. +This member is required.

version_idstring

VersionId for the specific version of the object to delete.

+ + + + + +

ObjectVersion

+

ObjectVersion

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
etagstring

The entity tag is an MD5 hash of that version of the object.

is_latestbool

Specifies whether the object is (true) or is not (false) the latest version of +an object.

keystring

Name of the object key. +This member is required.

last_modifiedint64

Date and time the object was last modified.

ownerOwner

Specifies the owner of the object.

sizeint64

Size in bytes of the object.

storage_classstring

The class of storage used to store the object.

version_idstring

Version ID of an object.

+ + + + + +

Owner

+

Owner

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
display_namestring

Owner display name

idstring

Owner id

+ + + + + +

Part

+

Part

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
etagstring

Part Etag

last_modifiedint64

Last modified time

part_numberint64

Part number

sizeint64

Part size

+ + + + + +

PutObjectCannedAclInput

+

PutObjectCannedAclInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

aclstring

The canned ACL to apply to the object

version_idstring

VersionId used to reference a specific version of the object.

+ + + + + +

PutObjectCannedAclOutput

+

PutObjectCannedAclOutput

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
request_chargedstring

Request charged

+ + + + + +

PutObjectInput

+

PutObjectInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bodybytes

Object data.

bucketstring

The bucket name to which the PUT action was initiated +This member is required.

keystring

Object key for which the PUT action was initiated. +This member is required.

aclstring

The canned ACL to apply to the object. Different OSS providers have different ACL types.

bucket_key_enabledbool

Indicates whether the multipart upload uses an S3 Bucket Key for server-side +encryption with Amazon Web Services KMS (SSE-KMS).

cache_controlstring

Can be used to specify caching behavior along the request/reply chain.

content_dispositionstring

Specifies presentational information for the object. For more information, see +http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 +(http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1).

content_encodingstring

Specifies what content encodings have been applied to the object and thus what +decoding mechanisms must be applied to obtain the media-type referenced by the +Content-Type header field. For more information, see +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 +(http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).

expiresint64

The date and time at which the object is no longer cacheable. For more +information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 +(http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).

server_side_encryptionstring

The server-side encryption algorithm used when storing this object in Amazon S3 +(for example, AES256, aws:kms).

signed_urlstring

Specify the signed url of object, user can put object with signed url without ak/sk

metaPutObjectInput.MetaEntryrepeated

A map of metadata to store with the object in S3.

taggingPutObjectInput.TaggingEntryrepeated

The tag-set for the object. The tag-set must be encoded as URL Query parameters.

+ + + + + +

PutObjectInput.MetaEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

PutObjectInput.TaggingEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

PutObjectOutput

+

PutObjectOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
bucket_key_enabledbool

Indicates whether the uploaded object uses an S3 Bucket Key for server-side +encryption with Amazon Web Services KMS (SSE-KMS).

etagstring

Entity tag for the uploaded object.

expirationstring

If the expiration is configured for the object

request_chargedstring

If present, indicates that the requester was successfully charged for the request.

version_idstring

Version of the object.

+ + + + + +

PutObjectTaggingInput

+

PutObjectTaggingInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required.

keystring

Name of the object key. +This member is required.

tagsPutObjectTaggingInput.TagsEntryrepeated

Container for the TagSet and Tag elements

version_idstring

The versionId of the object that the tag-set will be added to.

+ + + + + +

PutObjectTaggingInput.TagsEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

PutObjectTaggingOutput

+

PutObjectTaggingOutput

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
version_idstring

The versionId of the object the tag-set was added to.

result_metadataPutObjectTaggingOutput.ResultMetadataEntryrepeated

Metadata pertaining to the operation's result.

+ + + + + +

PutObjectTaggingOutput.ResultMetadataEntry

+

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
keystring

valuestring

+ + + + + +

RestoreObjectInput

+

RestoreObjectInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

version_idstring

VersionId used to reference a specific version of the object.

+ + + + + +

RestoreObjectOutput

+

RestoreObjectOutput

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
request_chargedstring

If present, indicates that the requester was successfully charged for the +request.

restore_output_pathstring

Indicates the path in the provided S3 output location where Select results will +be restored to.

+ + + + + +

SignURLInput

+

SignURLInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

methodstring

the method for sign url, e.g. GET, POST

expired_in_secint64

expire time of the sign url

+ + + + + +

SignURLOutput

+

SignURLOutput

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
signed_urlstring

Object signed url

+ + + + + +

UpdateBandwidthRateLimitInput

+

UpdateBandwidthRateLimitInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

average_rate_limit_in_bits_per_secint64

The average upload/download bandwidth rate limit in bits per second.

gateway_resource_namestring

Resource name of gateway

+ + + + + +

UploadPartCopyInput

+

UploadPartCopyInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

copy_sourceCopySource

CopySource

part_numberint32

Part number of part being copied. This is a positive integer between 1 and 10,000. +This member is required.

upload_idstring

Upload ID identifying the multipart upload whose part is being copied. +This member is required.

start_positionint64

The range of bytes to copy from the source object. bytes=start_position-part_size

part_sizeint64

Part size

+ + + + + +

UploadPartCopyOutput

+

UploadPartCopyOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
bucket_key_enabledbool

Indicates whether the multipart upload uses an S3 Bucket Key for server-side +encryption with Amazon Web Services KMS (SSE-KMS).

copy_part_resultCopyPartResult

Container for all response elements.

copy_source_version_idstring

The version of the source object that was copied, if you have enabled versioning +on the source bucket.

request_chargedstring

If present, indicates that the requester was successfully charged for the +request.

sse_customer_algorithmstring

If server-side encryption with a customer-provided encryption key was requested, +the response will include this header confirming the encryption algorithm used.

sse_customer_key_md5string

If server-side encryption with a customer-provided encryption key was requested, +the response will include this header to provide round-trip message integrity +verification of the customer-provided encryption key.

sse_kms_key_idstring

If present, specifies the ID of the Amazon Web Services Key Management Service +(Amazon Web Services KMS) symmetric customer managed key that was used for the +object.

server_side_encryptionstring

The server-side encryption algorithm used when storing this object in Amazon S3 +(for example, AES256, aws:kms).

+ + + + + +

UploadPartInput

+

UploadPartInput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
store_namestring

Required. The name of oss store.

bucketstring

The bucket name containing the object +This member is required

keystring

Name of the object key. +This member is required.

bodybytes

Object data.

content_lengthint64

Size of the body in bytes. This parameter is useful when the size of the body +cannot be determined automatically.

content_md5string

The base64-encoded 128-bit MD5 digest of the part data.

expected_bucket_ownerstring

The account ID of the expected bucket owner

part_numberint32

Part number of part being uploaded. This is a positive integer between 1 and 10,000. +This member is required.

request_payerstring

Confirms that the requester knows that they will be charged for the request.

sse_customer_algorithmstring

Specifies the algorithm to use to when encrypting the object (for example, +AES256).

sse_customer_keystring

Specifies the customer-provided encryption key for Amazon S3 to use in +encrypting data

sse_customer_key_md5string

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.

upload_idstring

Upload ID identifying the multipart upload whose part is being uploaded. +This member is required.

+ + + + + +

UploadPartOutput

+

UploadPartOutput

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
bucket_key_enabledbool

Indicates whether the multipart upload uses an S3 Bucket Key for server-side +encryption with Amazon Web Services KMS (SSE-KMS).

etagstring

Entity tag for the uploaded object.

request_chargedstring

If present, indicates that the requester was successfully charged for the +request.

sse_customer_algorithmstring

Specifies the algorithm to use to when encrypting the object (for example, +AES256).

sse_customer_key_md5string

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.

sse_kms_key_idstring

Specifies the ID of the symmetric customer managed key to use for object encryption

server_side_encryptionstring

The server-side encryption algorithm used when storing this object in Amazon S3 +(for example, AES256, aws:kms).

+ + + + + + + + + + + + +

Scalar Value Types

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
.proto TypeNotesC++JavaPythonGoC#PHPRuby
doubledoubledoublefloatfloat64doublefloatFloat
floatfloatfloatfloatfloat32floatfloatFloat
int32Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead.int32intintint32intintegerBignum or Fixnum (as required)
int64Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead.int64longint/longint64longinteger/stringBignum
uint32Uses variable-length encoding.uint32intint/longuint32uintintegerBignum or Fixnum (as required)
uint64Uses variable-length encoding.uint64longint/longuint64ulonginteger/stringBignum or Fixnum (as required)
sint32Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s.int32intintint32intintegerBignum or Fixnum (as required)
sint64Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s.int64longint/longint64longinteger/stringBignum
fixed32Always four bytes. More efficient than uint32 if values are often greater than 2^28.uint32intintuint32uintintegerBignum or Fixnum (as required)
fixed64Always eight bytes. More efficient than uint64 if values are often greater than 2^56.uint64longint/longuint64ulonginteger/stringBignum
sfixed32Always four bytes.int32intintint32intintegerBignum or Fixnum (as required)
sfixed64Always eight bytes.int64longint/longint64longinteger/stringBignum
boolboolbooleanbooleanboolboolbooleanTrueClass/FalseClass
stringA string must always contain UTF-8 encoded or 7-bit ASCII text.stringStringstr/unicodestringstringstringString (UTF-8)
bytesMay contain any arbitrary sequence of bytes.stringByteStringstr[]byteByteStringstringString (ASCII-8BIT)
+ + diff --git a/docs/en/README.md b/docs/en/README.md index e8ddfcf536..dc89f76925 100644 --- a/docs/en/README.md +++ b/docs/en/README.md @@ -124,7 +124,7 @@ Layotto enriches the CNCF CLOUD N | Platform | Link | | :----------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 💬 [DingTalk](https://www.dingtalk.com/en) (preferred) | Search the group number: 31912621 or scan the QR code below
| +| 💬 [DingTalk](https://www.dingtalk.com/en) (preferred) | Search the group number: 31912621 or scan the QR code below
| [comment]: <> (| 💬 [Wechat](https://www.wechat.com/en/) | Scan the QR code below and she will invite you into the wechat group
) diff --git a/docs/en/api_reference/README.md b/docs/en/api_reference/README.md new file mode 100644 index 0000000000..7d98ef9fcc --- /dev/null +++ b/docs/en/api_reference/README.md @@ -0,0 +1,14 @@ +# gRPC API reference + +Layotto has multiple gRPC proto files, and the corresponding API reference are at: + +[https://mosn.io/layotto/api/v1/runtime.html](https://mosn.io/layotto/api/v1/runtime.html) + +These protos define Layotto's runtime API, including: + +- API provided by Layotto for App +- The callback API that needs to be implemented by App. Layotto will call back the App and get the pubsub subscription message + +In addition to this, Layotto also provides some extension APIs, including: + +s3: [spec/proto/extension/v1/s3](https://mosn.io/layotto/api/v1/s3.html) diff --git a/docs/en/api_reference/comment_spec_of_proto.md b/docs/en/api_reference/comment_spec_of_proto.md index 42e87c1a95..0abe908bac 100644 --- a/docs/en/api_reference/comment_spec_of_proto.md +++ b/docs/en/api_reference/comment_spec_of_proto.md @@ -5,7 +5,8 @@ Avoid adding empty lines between comments symbols `//`.If there is a blank line bad case: ``` -message BadCase{ +// XXXXXXXX +message BadCase{ // XXXXXXXX // // XX @@ -18,7 +19,8 @@ message BadCase{ good case: ``` -message GoodCase{ +// XXXXXXXX +message GoodCase{ // XXXXXXXX // XX // XXXXXX @@ -32,6 +34,8 @@ Or you can use another annotation symbol directly `/* */` If you want to have some comment in your proto files, but don't want them to be part of the docs, you can simply prefix the comment with `@exclude`. Example: include only the comment for the id field +Attention: `/* */` comments do not count towards passing ci `Proto Validation`. [refence](https://docs.buf.build/lint/rules#comments) + ``` /** * @exclude @@ -40,8 +44,7 @@ Example: include only the comment for the id field message ExcludedMessage { string id = 1; // the id of this message. 
string name = 2; // @exclude the name of this message - /* @exclude the value of this message. */ int32 value = 3; } -``` +``` \ No newline at end of file diff --git a/docs/en/api_reference/how_to_generate_api_doc.md b/docs/en/api_reference/how_to_generate_api_doc.md index c5c201d8f6..cfe837c65e 100644 --- a/docs/en/api_reference/how_to_generate_api_doc.md +++ b/docs/en/api_reference/how_to_generate_api_doc.md @@ -1,6 +1,17 @@ # How to generate `.pb.go` code and API reference Note: the commands below should be executed under layotto directory +```shell +make proto +``` + +Then you get: +- `.pb.go` code +- API reference docs +- updated sidebar in the doc site + +That's all :) + ## How to compile the proto files into `.pb.go` code ### **Make cmmand(recommended)** diff --git a/docs/en/api_reference/oss_v1.md b/docs/en/api_reference/oss_v1.md new file mode 100644 index 0000000000..98da1ebce9 --- /dev/null +++ b/docs/en/api_reference/oss_v1.md @@ -0,0 +1,1658 @@ + + +
+ +# oss.proto + + +This document is automaticallly generated from the [`.proto`](https://github.com/mosn/layotto/tree/main/spec/proto/runtime/v1) files. + +The file defined base on s3 protocol, to get an in-depth walkthrough of this file, see: +https://docs.aws.amazon.com/s3/index.html +https://github.com/aws/aws-sdk-go-v2 + + + + +## [gRPC Service] ObjectStorageService +ObjectStorageService + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| PutObject | [PutObjectInput](#spec.proto.extension.v1.PutObjectInput) stream | [PutObjectOutput](#spec.proto.extension.v1.PutObjectOutput) | Object CRUD API Adds an object to a bucket. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html | +| GetObject | [GetObjectInput](#spec.proto.extension.v1.GetObjectInput) | [GetObjectOutput](#spec.proto.extension.v1.GetObjectOutput) stream | Retrieves objects. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html | +| DeleteObject | [DeleteObjectInput](#spec.proto.extension.v1.DeleteObjectInput) | [DeleteObjectOutput](#spec.proto.extension.v1.DeleteObjectOutput) | Delete objects. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html | +| CopyObject | [CopyObjectInput](#spec.proto.extension.v1.CopyObjectInput) | [CopyObjectOutput](#spec.proto.extension.v1.CopyObjectOutput) | Creates a copy of an object that is already stored in oss server. Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CopyObject.html | +| DeleteObjects | [DeleteObjectsInput](#spec.proto.extension.v1.DeleteObjectsInput) | [DeleteObjectsOutput](#spec.proto.extension.v1.DeleteObjectsOutput) | Delete multiple objects from a bucket. 
Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_DeleteObjects.html | +| ListObjects | [ListObjectsInput](#spec.proto.extension.v1.ListObjectsInput) | [ListObjectsOutput](#spec.proto.extension.v1.ListObjectsOutput) | Returns some or all (up to 1,000) of the objects in a bucket. Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_ListObjects.html | +| HeadObject | [HeadObjectInput](#spec.proto.extension.v1.HeadObjectInput) | [HeadObjectOutput](#spec.proto.extension.v1.HeadObjectOutput) | The HEAD action retrieves metadata from an object without returning the object itself. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html | +| IsObjectExist | [IsObjectExistInput](#spec.proto.extension.v1.IsObjectExistInput) | [IsObjectExistOutput](#spec.proto.extension.v1.IsObjectExistOutput) | This action used to check if the file exists. | +| PutObjectTagging | [PutObjectTaggingInput](#spec.proto.extension.v1.PutObjectTaggingInput) | [PutObjectTaggingOutput](#spec.proto.extension.v1.PutObjectTaggingOutput) | Object Tagging API Sets the supplied tag-set to an object that already exists in a bucket. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html | +| DeleteObjectTagging | [DeleteObjectTaggingInput](#spec.proto.extension.v1.DeleteObjectTaggingInput) | [DeleteObjectTaggingOutput](#spec.proto.extension.v1.DeleteObjectTaggingOutput) | Removes the entire tag set from the specified object. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html | +| GetObjectTagging | [GetObjectTaggingInput](#spec.proto.extension.v1.GetObjectTaggingInput) | [GetObjectTaggingOutput](#spec.proto.extension.v1.GetObjectTaggingOutput) | Returns the tag-set of an object. 
Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_GetObjectTagging.html | +| GetObjectCannedAcl | [GetObjectCannedAclInput](#spec.proto.extension.v1.GetObjectCannedAclInput) | [GetObjectCannedAclOutput](#spec.proto.extension.v1.GetObjectCannedAclOutput) | Returns object canned acl. Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL | +| PutObjectCannedAcl | [PutObjectCannedAclInput](#spec.proto.extension.v1.PutObjectCannedAclInput) | [PutObjectCannedAclOutput](#spec.proto.extension.v1.PutObjectCannedAclOutput) | Set object canned acl. Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL | +| CreateMultipartUpload | [CreateMultipartUploadInput](#spec.proto.extension.v1.CreateMultipartUploadInput) | [CreateMultipartUploadOutput](#spec.proto.extension.v1.CreateMultipartUploadOutput) | Object Multipart Operation API Initiates a multipart upload and returns an upload ID. Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CreateMultipartUpload.html | +| UploadPart | [UploadPartInput](#spec.proto.extension.v1.UploadPartInput) stream | [UploadPartOutput](#spec.proto.extension.v1.UploadPartOutput) | Uploads a part in a multipart upload. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html | +| UploadPartCopy | [UploadPartCopyInput](#spec.proto.extension.v1.UploadPartCopyInput) | [UploadPartCopyOutput](#spec.proto.extension.v1.UploadPartCopyOutput) | Uploads a part by copying data from an existing object as data source. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html | +| CompleteMultipartUpload | [CompleteMultipartUploadInput](#spec.proto.extension.v1.CompleteMultipartUploadInput) | [CompleteMultipartUploadOutput](#spec.proto.extension.v1.CompleteMultipartUploadOutput) | Completes a multipart upload by assembling previously uploaded parts. 
Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html | +| AbortMultipartUpload | [AbortMultipartUploadInput](#spec.proto.extension.v1.AbortMultipartUploadInput) | [AbortMultipartUploadOutput](#spec.proto.extension.v1.AbortMultipartUploadOutput) | This action aborts a multipart upload. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html | +| ListMultipartUploads | [ListMultipartUploadsInput](#spec.proto.extension.v1.ListMultipartUploadsInput) | [ListMultipartUploadsOutput](#spec.proto.extension.v1.ListMultipartUploadsOutput) | This action lists in-progress multipart uploads. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html | +| ListParts | [ListPartsInput](#spec.proto.extension.v1.ListPartsInput) | [ListPartsOutput](#spec.proto.extension.v1.ListPartsOutput) | Lists the parts that have been uploaded for a specific multipart upload. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html | +| ListObjectVersions | [ListObjectVersionsInput](#spec.proto.extension.v1.ListObjectVersionsInput) | [ListObjectVersionsOutput](#spec.proto.extension.v1.ListObjectVersionsOutput) | Returns metadata about all versions of the objects in a bucket. Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html | +| SignURL | [SignURLInput](#spec.proto.extension.v1.SignURLInput) | [SignURLOutput](#spec.proto.extension.v1.SignURLOutput) | A presigned URL gives you access to the object identified in the URL, provided that the creator of the presigned URL has permissions to access that object. Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/PresignedUrlUploadObject.html | +| UpdateDownloadBandwidthRateLimit | [UpdateBandwidthRateLimitInput](#spec.proto.extension.v1.UpdateBandwidthRateLimitInput) | [.google.protobuf.Empty](#google.protobuf.Empty) | This action used to set download bandwidth limit speed. 
Refer https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2106 | +| UpdateUploadBandwidthRateLimit | [UpdateBandwidthRateLimitInput](#spec.proto.extension.v1.UpdateBandwidthRateLimitInput) | [.google.protobuf.Empty](#google.protobuf.Empty) | This action used to set upload bandwidth limit speed. Refer https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2096 | +| AppendObject | [AppendObjectInput](#spec.proto.extension.v1.AppendObjectInput) stream | [AppendObjectOutput](#spec.proto.extension.v1.AppendObjectOutput) | This action is used to append object. Refer https://help.aliyun.com/document_detail/31981.html or https://github.com/minio/minio-java/issues/980 | +| RestoreObject | [RestoreObjectInput](#spec.proto.extension.v1.RestoreObjectInput) | [RestoreObjectOutput](#spec.proto.extension.v1.RestoreObjectOutput) | Restores an archived copy of an object back. Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_RestoreObject.html | + + + + + +

Top

+ +## AbortMultipartUploadInput +AbortMultipartUploadInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | +| upload_id | [string](#string) | | Upload ID that identifies the multipart upload. This member is required. | + + + + + + + +

Top

+ +## AbortMultipartUploadOutput +AbortMultipartUploadOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | + + + + + + + +

Top

+ +## AppendObjectInput +AppendObjectInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| body | [bytes](#bytes) | | Object content | +| position | [int64](#int64) | | Append start position | +| acl | [string](#string) | | Object ACL | +| cache_control | [string](#string) | | Sets the Cache-Control header of the response. | +| content_disposition | [string](#string) | | Sets the Content-Disposition header of the response | +| content_encoding | [string](#string) | | Sets the Content-Encoding header of the response | +| content_md5 | [string](#string) | | The base64-encoded 128-bit MD5 digest of the part data. | +| expires | [int64](#int64) | | Sets the Expires header of the response | +| storage_class | [string](#string) | | Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects. | +| server_side_encryption | [string](#string) | | The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). | +| meta | [string](#string) | | Object metadata | +| tags | [AppendObjectInput.TagsEntry](#spec.proto.extension.v1.AppendObjectInput.TagsEntry) | repeated | Object tags | + + + + + + + +

Top

+ +## AppendObjectInput.TagsEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## AppendObjectOutput +AppendObjectOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| append_position | [int64](#int64) | | Next append position | + + + + + + + +

Top

+ +## CompleteMultipartUploadInput +CompleteMultipartUploadInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| upload_id | [string](#string) | | ID for the initiated multipart upload. This member is required. | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | +| expected_bucket_owner | [string](#string) | | Expected bucket owner | +| multipart_upload | [CompletedMultipartUpload](#spec.proto.extension.v1.CompletedMultipartUpload) | | The container for the multipart upload request information. | + + + + + + + +

Top

+ +## CompleteMultipartUploadOutput +CompleteMultipartUploadOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| bucket_key_enabled | [bool](#bool) | | Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS). | +| etag | [string](#string) | | Entity tag that identifies the newly created object's data | +| expiration | [string](#string) | | If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. | +| location | [string](#string) | | The URI that identifies the newly created object. | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | +| sse_kms_keyId | [string](#string) | | If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key that was used for the object. | +| server_side_encryption | [string](#string) | | The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). | +| version_id | [string](#string) | | Version ID of the newly created object, in case the bucket has versioning turned on. | + + + + + + + +

Top

+ +## CompletedMultipartUpload +CompletedMultipartUpload + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| parts | [CompletedPart](#spec.proto.extension.v1.CompletedPart) | repeated | Array of CompletedPart data types. | + + + + + + + +

Top

+ +## CompletedPart +CompletedPart + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| etag | [string](#string) | | Entity tag returned when the part was uploaded. | +| part_number | [int32](#int32) | | Part number that identifies the part. This is a positive integer between 1 and 10,000. | + + + + + + + +

Top

+ +## CopyObjectInput +CopyObjectInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The name of the destination bucket. When using this action with an access point This member is required. | +| key | [string](#string) | | The key of the destination object. This member is required. | +| copy_source | [CopySource](#spec.proto.extension.v1.CopySource) | | CopySource | +| tagging | [CopyObjectInput.TaggingEntry](#spec.proto.extension.v1.CopyObjectInput.TaggingEntry) | repeated | The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters. | +| expires | [int64](#int64) | | The date and time at which the object is no longer cacheable. | +| metadata_directive | [string](#string) | | Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. | +| metadata | [CopyObjectInput.MetadataEntry](#spec.proto.extension.v1.CopyObjectInput.MetadataEntry) | repeated | A map of metadata to store with the object in S3. | + + + + + + + +

Top

+ +## CopyObjectInput.MetadataEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## CopyObjectInput.TaggingEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## CopyObjectOutput +CopyObjectOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| copy_object_result | [CopyObjectResult](#spec.proto.extension.v1.CopyObjectResult) | | Container for all response elements. | +| version_id | [string](#string) | | Version ID of the newly created copy. | +| expiration | [string](#string) | | If the object expiration is configured, the response includes this header. | + + + + + + + +

Top

+ +## CopyObjectResult +CopyObjectResult + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| etag | [string](#string) | | Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. | +| last_modified | [int64](#int64) | | Creation date of the object. | + + + + + + + +

Top

+ +## CopyPartResult +CopyPartResult + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| etag | [string](#string) | | Entity tag of the object. | +| last_modified | [int64](#int64) | | Last modified time | + + + + + + + +

Top

+ +## CopySource +CopySource + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| copy_source_bucket | [string](#string) | | source object bucket name | +| copy_source_key | [string](#string) | | source object name | +| copy_source_version_id | [string](#string) | | source object version | + + + + + + + +

Top

+ +## CreateMultipartUploadInput +CreateMultipartUploadInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| acl | [string](#string) | | The canned ACL to apply to the object. This action is not supported by Amazon S3 on Outposts. | +| bucket_key_enabled | [bool](#bool) | | Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key. | +| cache_control | [string](#string) | | Specifies caching behavior along the request/reply chain | +| content_disposition | [string](#string) | | Specifies presentational information for the object | +| content_encoding | [string](#string) | | Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. | +| content_language | [string](#string) | | The language the content is in. | +| content_type | [string](#string) | | A standard MIME type describing the format of the object data. | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). | +| expires | [int64](#int64) | | The date and time at which the object is no longer cacheable. | +| grant_full_control | [string](#string) | | Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 
This action is not supported by Amazon S3 on Outposts. | +| grant_read | [string](#string) | | Allows grantee to read the object data and its metadata. This action is not supported by Amazon S3 on Outposts. | +| grant_read_acp | [string](#string) | | Allows grantee to read the object ACL. This action is not supported by Amazon S3 on Outposts. | +| grant_write_acp | [string](#string) | | Allows grantee to write the ACL for the applicable object. This action is not supported by Amazon S3 on Outposts. | +| meta_data | [CreateMultipartUploadInput.MetaDataEntry](#spec.proto.extension.v1.CreateMultipartUploadInput.MetaDataEntry) | repeated | A map of metadata to store with the object | +| object_lock_legal_hold_status | [string](#string) | | Specifies whether you want to apply a legal hold to the uploaded object | +| object_lock_mode | [string](#string) | | Specifies the Object Lock mode that you want to apply to the uploaded object | +| object_lock_retain_until_date | [int64](#int64) | | Specifies the date and time when you want the Object Lock to expire | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request | +| sse_customer_algorithm | [string](#string) | | Specifies the algorithm to use to when encrypting the object (for example, AES256). 
| +| sse_customer_key | [string](#string) | | Specifies the customer-provided encryption key to use in encrypting data | +| sse_customer_key_md5 | [string](#string) | | Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 | +| sse_kms_encryption_context | [string](#string) | | Specifies the Amazon Web Services KMS Encryption Context to use for object encryption | +| sse_kms_key_id | [string](#string) | | Specifies the ID of the symmetric customer managed key to use for object encryption | +| server_side_encryption | [string](#string) | | The server-side encryption algorithm used when storing this object | +| storage_class | [string](#string) | | By default, oss store uses the STANDARD Storage Class to store newly created objects | +| tagging | [CreateMultipartUploadInput.TaggingEntry](#spec.proto.extension.v1.CreateMultipartUploadInput.TaggingEntry) | repeated | The tag-set for the object. The tag-set must be encoded as URL Query parameters. | +| website_redirect_location | [string](#string) | | If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. | + + + + + + + +

Top

+ +## CreateMultipartUploadInput.MetaDataEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## CreateMultipartUploadInput.TaggingEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## CreateMultipartUploadOutput +CreateMultipartUploadOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| abort_date | [int64](#int64) | | If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, the response includes this header | +| abort_rule_id | [string](#string) | | It identifies the applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads. | +| bucket_key_enabled | [bool](#bool) | | Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS). | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | +| sse_customer_algorithm | [string](#string) | | If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. | +| sse_customer_key_md5 | [string](#string) | | If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. | +| sse_kms_encryption_context | [string](#string) | | If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. | +| sse_kms_key_id | [string](#string) | | If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key that was used for the object. 
| +| server_side_encryption | [string](#string) | | The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). | +| upload_id | [string](#string) | | ID for the initiated multipart upload. | + + + + + + + +

Top

+ +## Delete +Delete + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| objects | [ObjectIdentifier](#spec.proto.extension.v1.ObjectIdentifier) | repeated | ObjectIdentifier | +| quiet | [bool](#bool) | | Element to enable quiet mode for the request. When you add this element, you must set its value to true. | + + + + + + + +

Top

+ +## DeleteMarkerEntry +DeleteMarkerEntry + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| is_latest | [bool](#bool) | | Specifies whether the object is (true) or is not (false) the latest version of an object. | +| key | [string](#string) | | Name of the object key. This member is required. | +| last_modified | [int64](#int64) | | Date and time the object was last modified. | +| owner | [Owner](#spec.proto.extension.v1.Owner) | | Owner | +| version_id | [string](#string) | | Version ID of an object. | + + + + + + + +

Top

+ +## DeleteObjectInput +DeleteObjectInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name to which the DEL action was initiated This member is required. | +| key | [string](#string) | | Object key for which the DEL action was initiated. This member is required. | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | +| version_id | [string](#string) | | VersionId used to reference a specific version of the object. | + + + + + + + +

Top

+ +## DeleteObjectOutput +DeleteObjectOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| delete_marker | [bool](#bool) | | Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | +| version_id | [string](#string) | | Returns the version ID of the delete marker created as a result of the DELETE operation. | + + + + + + + +

Top

+ +## DeleteObjectTaggingInput +DeleteObjectTaggingInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the objects from which to remove the tags. | +| key | [string](#string) | | The key that identifies the object in the bucket from which to remove all tags. This member is required. | +| version_id | [string](#string) | | The versionId of the object that the tag-set will be removed from. | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). | + + + + + + + +

Top

+ +## DeleteObjectTaggingOutput +DeleteObjectTaggingOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| version_id | [string](#string) | | The versionId of the object the tag-set was removed from. | +| result_metadata | [DeleteObjectTaggingOutput.ResultMetadataEntry](#spec.proto.extension.v1.DeleteObjectTaggingOutput.ResultMetadataEntry) | repeated | Metadata pertaining to the operation's result. | + + + + + + + +

Top

+ +## DeleteObjectTaggingOutput.ResultMetadataEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## DeleteObjectsInput +DeleteObjectsInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| delete | [Delete](#spec.proto.extension.v1.Delete) | | Delete objects | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | + + + + + + + +

Top

+ +## DeleteObjectsOutput +DeleteObjectsOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| deleted | [DeletedObject](#spec.proto.extension.v1.DeletedObject) | repeated | DeletedObject | + + + + + + + +

Top

+ +## DeletedObject +DeletedObject + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| delete_marker | [bool](#bool) | | Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. In a simple DELETE, this header indicates whether (true) or not (false) a delete marker was created. | +| delete_marker_version_id | [string](#string) | | The version ID of the delete marker created as a result of the DELETE operation. If you delete a specific object version, the value returned by this header is the version ID of the object version deleted. | +| key | [string](#string) | | The name of the deleted object. | +| version_id | [string](#string) | | The version ID of the deleted object. | + + + + + + + +

Top

+ +## GetObjectCannedAclInput +GetObjectCannedAclInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| version_id | [string](#string) | | VersionId used to reference a specific version of the object | + + + + + + + +

Top

+ +## GetObjectCannedAclOutput +GetObjectCannedAclOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| canned_acl | [string](#string) | | Object CannedACL | +| owner | [Owner](#spec.proto.extension.v1.Owner) | | Owner | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | + + + + + + + +

Top

+ +## GetObjectInput +GetObjectInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Key of the object to get This member is required | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner | +| if_match | [string](#string) | | Return the object only if its entity tag (ETag) is the same as the one specified | +| if_modified_since | [int64](#int64) | | Return the object only if it has been modified since the specified time | +| if_none_match | [string](#string) | | Return the object only if its entity tag (ETag) is different from the one specified | +| if_unmodified_since | [int64](#int64) | | Return the object only if it has not been modified since the specified time | +| part_number | [int64](#int64) | | Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. Useful for downloading just a part of an object. | +| start | [int64](#int64) | | Downloads the specified range bytes of an object start is used to specify the location where the file starts | +| end | [int64](#int64) | | end is used to specify the location where the file end | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | +| response_cache_control | [string](#string) | | Sets the Cache-Control header of the response. 
| +| response_content_disposition | [string](#string) | | Sets the Content-Disposition header of the response | +| response_content_encoding | [string](#string) | | Sets the Content-Encoding header of the response | +| response_content_language | [string](#string) | | Sets the Content-Language header of the response | +| response_content_type | [string](#string) | | Sets the Content-Type header of the response | +| response_expires | [string](#string) | | Sets the Expires header of the response | +| sse_customer_algorithm | [string](#string) | | Specifies the algorithm to use to when decrypting the object (for example,AES256) | +| sse_customer_key | [string](#string) | | Specifies the customer-provided encryption key for Amazon S3 used to encrypt the data. This value is used to decrypt the object when recovering it and must match the one used when storing the data. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header | +| sse_customer_key_md5 | [string](#string) | | Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. | +| version_id | [string](#string) | | VersionId used to reference a specific version of the object | +| accept_encoding | [string](#string) | | Specify Accept-Encoding, aws not supported now | +| signed_url | [string](#string) | | Specify the signed url of object, user can get object with signed url without ak、sk | + + + + + + + +

Top

+ +## GetObjectOutput +GetObjectOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| body | [bytes](#bytes) | | Object data. | +| cache_control | [string](#string) | | Specifies caching behavior along the request/reply chain. | +| content_disposition | [string](#string) | | Specifies presentational information for the object. | +| content_encoding | [string](#string) | | Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. | +| content_language | [string](#string) | | The language the content is in. | +| content_length | [int64](#int64) | | Size of the body in bytes. | +| content_range | [string](#string) | | The portion of the object returned in the response. | +| content_type | [string](#string) | | A standard MIME type describing the format of the object data. | +| delete_marker | [bool](#bool) | | Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response. | +| etag | [string](#string) | | An entity tag (ETag) is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. | +| expiration | [string](#string) | | If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL-encoded. | +| expires | [string](#string) | | The date and time at which the object is no longer cacheable. | +| last_modified | [int64](#int64) | | Creation date of the object. | +| version_id | [string](#string) | | Version of the object. | +| tag_count | [int64](#int64) | | The number of tags, if any, on the object. | +| storage_class | [string](#string) | | Provides storage class information of the object. 
Amazon S3 returns this header for all objects except for S3 Standard storage class objects. | +| parts_count | [int64](#int64) | | The count of parts this object has. This value is only returned if you specify partNumber in your request and the object was uploaded as a multipart upload. | +| metadata | [GetObjectOutput.MetadataEntry](#spec.proto.extension.v1.GetObjectOutput.MetadataEntry) | repeated | A map of metadata to store with the object in S3. Map keys will be normalized to lower-case. | + + + + + + + +

Top

+ +## GetObjectOutput.MetadataEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## GetObjectTaggingInput +GetObjectTaggingInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object for which to get the tagging information. This member is required. | +| key | [string](#string) | | Object key for which to get the tagging information. This member is required. | +| version_id | [string](#string) | | The versionId of the object for which to get the tagging information. | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | + + + + + + + +

Top

+ +## GetObjectTaggingOutput +GetObjectTaggingOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| tags | [GetObjectTaggingOutput.TagsEntry](#spec.proto.extension.v1.GetObjectTaggingOutput.TagsEntry) | repeated | Contains the tag set. This member is required. | +| version_id | [string](#string) | | The versionId of the object for which you got the tagging information. | +| result_metadata | [GetObjectTaggingOutput.ResultMetadataEntry](#spec.proto.extension.v1.GetObjectTaggingOutput.ResultMetadataEntry) | repeated | Metadata pertaining to the operation's result. | + + + + + + + +

Top

+ +## GetObjectTaggingOutput.ResultMetadataEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## GetObjectTaggingOutput.TagsEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+
+## HeadObjectInput
+HeadObjectInput
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| store_name | [string](#string) | | Required. The name of oss store. |
+| bucket | [string](#string) | | The bucket name containing the object This member is required |
+| key | [string](#string) | | Name of the object key. This member is required. |
+| checksum_mode | [string](#string) | | To retrieve the checksum, this parameter must be enabled |
+| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner |
+| if_match | [string](#string) | | Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error. |
+| if_modified_since | [int64](#int64) | | Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error. |
+| if_none_match | [string](#string) | | Return the object only if its entity tag (ETag) is different from the one specified |
+| if_unmodified_since | [int64](#int64) | | Return the object only if it has not been modified since the specified time; |
+| part_number | [int32](#int32) | | Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful for querying about the size of the part and the number of parts in this object. |
+| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. |
+| sse_customer_algorithm | [string](#string) | | Specifies the algorithm to use when encrypting the object (for example, AES256). |
+| sse_customer_key | [string](#string) | | Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data |
+| sse_customer_key_md5 | [string](#string) | | Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
| +| version_id | [string](#string) | | VersionId used to reference a specific version of the object. | +| with_details | [bool](#bool) | | Return object details meta | + + + + + + + +

Top

+ +## HeadObjectOutput +HeadObjectOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| result_metadata | [HeadObjectOutput.ResultMetadataEntry](#spec.proto.extension.v1.HeadObjectOutput.ResultMetadataEntry) | repeated | Metadata pertaining to the operation's result. | + + + + + + + +

Top

+ +## HeadObjectOutput.ResultMetadataEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## Initiator +Initiator + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| display_name | [string](#string) | | Initiator name | +| id | [string](#string) | | Initiator id | + + + + + + + +

Top

+ +## IsObjectExistInput +IsObjectExistInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| version_id | [string](#string) | | Object version id | + + + + + + + +

Top

+ +## IsObjectExistOutput +IsObjectExistOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| file_exist | [bool](#bool) | | Object exist or not | + + + + + + + +

Top

+ +## ListMultipartUploadsInput +ListMultipartUploadsInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| delimiter | [string](#string) | | Character you use to group keys. All keys that contain the same string between the prefix, if specified, and the first occurrence of the delimiter after the prefix are grouped under a single result element, CommonPrefixes. If you don't specify the prefix parameter, then the substring starts at the beginning of the key. The keys that are grouped under CommonPrefixes result element are not returned elsewhere in the response. | +| encoding_type | [string](#string) | | Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner | +| key_marker | [string](#string) | | Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin. If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list. If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included, provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker. | +| max_uploads | [int64](#int64) | | Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response. | +| prefix | [string](#string) | | Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different grouping of keys. 
(You can think of using prefix to make groups in the same way you'd use a folder in a file system.) | +| upload_id_marker | [string](#string) | | Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the key-marker might be included in the list only if they have an upload ID lexicographically greater than the specified upload-id-marker. | + + + + + + + +

Top

+ +## ListMultipartUploadsOutput +ListMultipartUploadsOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| common_prefixes | [string](#string) | repeated | If you specify a delimiter in the request, then the result returns each distinct key prefix containing the delimiter in a CommonPrefixes element. | +| delimiter | [string](#string) | | Contains the delimiter you specified in the request. If you don't specify a delimiter in your request, this element is absent from the response. | +| encoding_type | [string](#string) | | Encoding type used by Amazon S3 to encode object keys in the response. | +| is_truncated | [bool](#bool) | | Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads. | +| key_marker | [string](#string) | | The key at or after which the listing began. | +| max_uploads | [int32](#int32) | | Maximum number of multipart uploads that could have been included in the response. | +| next_key_marker | [string](#string) | | When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request. | +| next_upload_id_marker | [string](#string) | | When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request. | +| prefix | [string](#string) | | When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix. | +| upload_id_marker | [string](#string) | | Upload ID after which listing began. 
| +| uploads | [MultipartUpload](#spec.proto.extension.v1.MultipartUpload) | repeated | Container for elements related to a particular multipart upload. A response can contain zero or more Upload elements. | + + + + + + + +

Top

+ +## ListObjectVersionsInput +ListObjectVersionsInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| delimiter | [string](#string) | | A delimiter is a character that you specify to group keys. All keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped under a single result element in CommonPrefixes. These groups are counted as one result against the max-keys limitation. These keys are not returned elsewhere in the response. | +| encoding_type | [string](#string) | | Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner | +| key_marker | [string](#string) | | Specifies the key to start with when listing objects in a bucket. | +| max_keys | [int64](#int64) | | Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains true. To return the additional keys, see key-marker and version-id-marker. | +| prefix | [string](#string) | | Use this parameter to select only those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different groupings of keys. (You can think of using prefix to make groups in the same way you'd use a folder in a file system.) You can use prefix with delimiter to roll up numerous objects into a single result under CommonPrefixes. 
| +| version_id_marker | [string](#string) | | Specifies the object version you want to start listing from. | + + + + + + + +

Top

+ +## ListObjectVersionsOutput +ListObjectVersionsOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| common_prefixes | [string](#string) | repeated | All of the keys rolled up into a common prefix count as a single return when calculating the number of returns. | +| delete_markers | [DeleteMarkerEntry](#spec.proto.extension.v1.DeleteMarkerEntry) | repeated | Container for an object that is a delete marker. | +| delimiter | [string](#string) | | The delimiter grouping the included keys. | +| encoding_type | [string](#string) | | Encoding type used by Amazon S3 to encode object key names in the XML response. | +| is_truncated | [bool](#bool) | | A flag that indicates whether Amazon S3 returned all of the results that satisfied the search criteria | +| key_marker | [string](#string) | | Marks the last key returned in a truncated response. | +| max_keys | [int64](#int64) | | Specifies the maximum number of objects to return | +| name | [string](#string) | | The bucket name. | +| next_key_marker | [string](#string) | | When the number of responses exceeds the value of MaxKeys, NextKeyMarker specifies the first key not returned that satisfies the search criteria | +| next_version_id_marker | [string](#string) | | When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker specifies the first object version not returned that satisfies the search criteria. | +| prefix | [string](#string) | | Selects objects that start with the value supplied by this parameter. | +| version_id_marker | [string](#string) | | Marks the last version of the key returned in a truncated response. | +| versions | [ObjectVersion](#spec.proto.extension.v1.ObjectVersion) | repeated | Container for version information. | + + + + + + + +

Top

+ +## ListObjectsInput +ListObjectsInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| delimiter | [string](#string) | | A delimiter is a character you use to group keys. | +| encoding_type | [string](#string) | | Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). | +| marker | [string](#string) | | Marker is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. Marker can be any key in the bucket. | +| maxKeys | [int32](#int32) | | Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. | +| prefix | [string](#string) | | Limits the response to keys that begin with the specified prefix. | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | + + + + + + + +

Top

+ +## ListObjectsOutput +ListObjectsOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| common_prefixes | [string](#string) | repeated | CommonPrefixes | +| contents | [Object](#spec.proto.extension.v1.Object) | repeated | Objects contents | +| delimiter | [string](#string) | | Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value. | +| encoding_type | [string](#string) | | Encoding type used by Amazon S3 to encode object keys in the response. | +| is_truncated | [bool](#bool) | | A flag that indicates whether Amazon S3 returned all of the results that satisfied the search criteria. | +| marker | [string](#string) | | Indicates where in the bucket listing begins. Marker is included in the response if it was sent with the request. | +| max_keys | [int32](#int32) | | The maximum number of keys returned in the response body. | +| name | [string](#string) | | The bucket name. | +| next_marker | [string](#string) | | When response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as marker in the subsequent request to get next set of objects. | +| prefix | [string](#string) | | Keys that begin with the indicated prefix. | + + + + + + + +

Top

+ +## ListPartsInput +ListPartsInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner | +| max_parts | [int64](#int64) | | Sets the maximum number of parts to return | +| part_number_marker | [int64](#int64) | | Specifies the part after which listing should begin. Only parts with higher part numbers will be listed. | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | +| upload_id | [string](#string) | | Upload ID identifying the multipart upload whose parts are being listed. | + + + + + + + +

Top

+ +## ListPartsOutput +ListPartsOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| upload_id | [string](#string) | | Upload ID identifying the multipart upload whose parts are being listed. | +| next_part_number_marker | [string](#string) | | When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request. | +| max_parts | [int64](#int64) | | Maximum number of parts that were allowed in the response. | +| is_truncated | [bool](#bool) | | Indicates whether the returned list of parts is truncated. A true value indicates that the list was truncated. A list can be truncated if the number of parts exceeds the limit returned in the MaxParts element. | +| parts | [Part](#spec.proto.extension.v1.Part) | repeated | Container for elements related to a particular part. A response can contain zero or more Part elements. | + + + + + + + +

Top

+ +## MultipartUpload +MultipartUpload + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| initiated | [int64](#int64) | | Date and time at which the multipart upload was initiated. | +| initiator | [Initiator](#spec.proto.extension.v1.Initiator) | | Identifies who initiated the multipart upload. | +| key | [string](#string) | | Name of the object key. This member is required. | +| owner | [Owner](#spec.proto.extension.v1.Owner) | | Specifies the owner of the object that is part of the multipart upload. | +| storage_class | [string](#string) | | The class of storage used to store the object. | +| upload_id | [string](#string) | | Upload ID that identifies the multipart upload. | + + + + + + + +

Top

+ +## Object +Object + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| etag | [string](#string) | | The entity tag is a hash of the object | +| key | [string](#string) | | The name that you assign to an object. You use the object key to retrieve the object. | +| last_modified | [int64](#int64) | | Creation date of the object. | +| owner | [Owner](#spec.proto.extension.v1.Owner) | | The owner of the object | +| size | [int64](#int64) | | Size in bytes of the object | +| storage_class | [string](#string) | | The class of storage used to store the object. | + + + + + + + +

Top

+ +## ObjectIdentifier +ObjectIdentifier + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | Key name of the object. This member is required. | +| version_id | [string](#string) | | VersionId for the specific version of the object to delete. | + + + + + + + +

Top

+ +## ObjectVersion +ObjectVersion + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| etag | [string](#string) | | The entity tag is an MD5 hash of that version of the object. | +| is_latest | [bool](#bool) | | Specifies whether the object is (true) or is not (false) the latest version of an object. | +| key | [string](#string) | | Name of the object key. This member is required. | +| last_modified | [int64](#int64) | | Date and time the object was last modified. | +| owner | [Owner](#spec.proto.extension.v1.Owner) | | Specifies the owner of the object. | +| size | [int64](#int64) | | Size in bytes of the object. | +| storage_class | [string](#string) | | The class of storage used to store the object. | +| version_id | [string](#string) | | Version ID of an object. | + + + + + + + +

Top

+ +## Owner +Owner + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| display_name | [string](#string) | | Owner display name | +| id | [string](#string) | | Owner id | + + + + + + + +

Top

+ +## Part +Part + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| etag | [string](#string) | | Part Etag | +| last_modified | [int64](#int64) | | Last modified time | +| part_number | [int64](#int64) | | Part number | +| size | [int64](#int64) | | Part size | + + + + + + + +

Top

+ +## PutObjectCannedAclInput +PutObjectCannedAclInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| acl | [string](#string) | | The canned ACL to apply to the object | +| version_id | [string](#string) | | VersionId used to reference a specific version of the object. | + + + + + + + +

Top

+ +## PutObjectCannedAclOutput +PutObjectCannedAclOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| request_charged | [string](#string) | | Request charged | + + + + + + + +

Top

+
+## PutObjectInput
+PutObjectInput
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| store_name | [string](#string) | | Required. The name of oss store. |
+| body | [bytes](#bytes) | | Object data. |
+| bucket | [string](#string) | | The bucket name to which the PUT action was initiated This member is required. |
+| key | [string](#string) | | Object key for which the PUT action was initiated. This member is required. |
+| acl | [string](#string) | | The canned ACL to apply to the object, different oss providers have different acl types |
+| bucket_key_enabled | [bool](#bool) | | Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS). |
+| cache_control | [string](#string) | | Can be used to specify caching behavior along the request/reply chain. |
+| content_disposition | [string](#string) | | Specifies presentational information for the object. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). |
+| content_encoding | [string](#string) | | Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). |
+| expires | [int64](#int64) | | The date and time at which the object is no longer cacheable. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). |
+| server_side_encryption | [string](#string) | | The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). 
|
+| signed_url | [string](#string) | | Specify the signed url of object, user can put object with signed url without ak/sk |
+| meta | [PutObjectInput.MetaEntry](#spec.proto.extension.v1.PutObjectInput.MetaEntry) | repeated | A map of metadata to store with the object in S3. |
+| tagging | [PutObjectInput.TaggingEntry](#spec.proto.extension.v1.PutObjectInput.TaggingEntry) | repeated | The tag-set for the object. The tag-set must be encoded as URL Query parameters. |
+
+
+
+
+
+
+
+

Top

+ +## PutObjectInput.MetaEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## PutObjectInput.TaggingEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## PutObjectOutput +PutObjectOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| bucket_key_enabled | [bool](#bool) | | Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS). | +| etag | [string](#string) | | Entity tag for the uploaded object. | +| expiration | [string](#string) | | If the expiration is configured for the object | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | +| version_id | [string](#string) | | Version of the object. | + + + + + + + +

Top

+ +## PutObjectTaggingInput +PutObjectTaggingInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required. | +| key | [string](#string) | | Name of the object key. This member is required. | +| tags | [PutObjectTaggingInput.TagsEntry](#spec.proto.extension.v1.PutObjectTaggingInput.TagsEntry) | repeated | Container for the TagSet and Tag elements | +| version_id | [string](#string) | | The versionId of the object that the tag-set will be added to. | + + + + + + + +

Top

+ +## PutObjectTaggingInput.TagsEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## PutObjectTaggingOutput +PutObjectTaggingOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| version_id | [string](#string) | | The versionId of the object the tag-set was added to. | +| result_metadata | [PutObjectTaggingOutput.ResultMetadataEntry](#spec.proto.extension.v1.PutObjectTaggingOutput.ResultMetadataEntry) | repeated | Metadata pertaining to the operation's result. | + + + + + + + +

Top

+ +## PutObjectTaggingOutput.ResultMetadataEntry + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + +

Top

+ +## RestoreObjectInput +RestoreObjectInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| version_id | [string](#string) | | VersionId used to reference a specific version of the object. | + + + + + + + +

Top

+ +## RestoreObjectOutput +RestoreObjectOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | +| restore_output_path | [string](#string) | | Indicates the path in the provided S3 output location where Select results will be restored to. | + + + + + + + +

Top

+ +## SignURLInput +SignURLInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| method | [string](#string) | | the method for sign url, eg. GET、POST | +| expired_in_sec | [int64](#int64) | | expire time of the sign url | + + + + + + + +

Top

+ +## SignURLOutput +SignURLOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| signed_url | [string](#string) | | Object signed url | + + + + + + + +

Top

+ +## UpdateBandwidthRateLimitInput +UpdateBandwidthRateLimitInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| average_rate_limit_in_bits_per_sec | [int64](#int64) | | The average upload/download bandwidth rate limit in bits per second. | +| gateway_resource_name | [string](#string) | | Resource name of gateway | + + + + + + + +

Top

+ +## UploadPartCopyInput +UploadPartCopyInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| copy_source | [CopySource](#spec.proto.extension.v1.CopySource) | | CopySource | +| part_number | [int32](#int32) | | Part number of part being copied. This is a positive integer between 1 and 10,000. This member is required. | +| upload_id | [string](#string) | | Upload ID identifying the multipart upload whose part is being copied. This member is required. | +| start_position | [int64](#int64) | | The range of bytes to copy from the source object.bytes=start_position-part_size | +| part_size | [int64](#int64) | | Part size | + + + + + + + +

Top

+ +## UploadPartCopyOutput +UploadPartCopyOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| bucket_key_enabled | [bool](#bool) | | Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS). | +| copy_part_result | [CopyPartResult](#spec.proto.extension.v1.CopyPartResult) | | Container for all response elements. | +| copy_source_version_id | [string](#string) | | The version of the source object that was copied, if you have enabled versioning on the source bucket. | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | +| sse_customer_algorithm | [string](#string) | | If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. | +| sse_customer_key_md5 | [string](#string) | | If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. | +| sse_kms_key_id | [string](#string) | | If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key that was used for the object. | +| server_side_encryption | [string](#string) | | The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). | + + + + + + + +

Top

+ +## UploadPartInput +UploadPartInput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| store_name | [string](#string) | | Required. The name of oss store. | +| bucket | [string](#string) | | The bucket name containing the object This member is required | +| key | [string](#string) | | Name of the object key. This member is required. | +| body | [bytes](#bytes) | | Object data. | +| content_length | [int64](#int64) | | Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. | +| content_md5 | [string](#string) | | The base64-encoded 128-bit MD5 digest of the part data. | +| expected_bucket_owner | [string](#string) | | The account ID of the expected bucket owner | +| part_number | [int32](#int32) | | Part number of part being uploaded. This is a positive integer between 1 and 10,000. This member is required. | +| request_payer | [string](#string) | | Confirms that the requester knows that they will be charged for the request. | +| sse_customer_algorithm | [string](#string) | | Specifies the algorithm to use to when encrypting the object (for example, AES256). | +| sse_customer_key | [string](#string) | | Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data | +| sse_customer_key_md5 | [string](#string) | | Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. | +| upload_id | [string](#string) | | Upload ID identifying the multipart upload whose part is being uploaded. This member is required. | + + + + + + + +

Top

+ +## UploadPartOutput +UploadPartOutput + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| bucket_key_enabled | [bool](#bool) | | Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS). | +| etag | [string](#string) | | Entity tag for the uploaded object. | +| request_charged | [string](#string) | | If present, indicates that the requester was successfully charged for the request. | +| sse_customer_algorithm | [string](#string) | | Specifies the algorithm to use to when encrypting the object (for example, AES256). | +| sse_customer_key_md5 | [string](#string) | | Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. | +| sse_kms_key_id | [string](#string) | | Specifies the ID of the symmetric customer managed key to use for object encryption | +| server_side_encryption | [string](#string) | | The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). | + + + + + + + + + + + diff --git a/docs/en/api_reference/runtime_v1.md b/docs/en/api_reference/runtime_v1.md index b39b3e3068..787e1605e3 100644 --- a/docs/en/api_reference/runtime_v1.md +++ b/docs/en/api_reference/runtime_v1.md @@ -13,7 +13,7 @@ This document is automaticallly generated from the [`.proto`](https://github.com ## [gRPC Service] Runtime - +Runtime encapsulates various Runtime APIs(such as Configuration API, Pub/Sub API, etc) | Method Name | Request Type | Response Type | Description | | ----------- | ------------ | ------------- | ------------| @@ -24,7 +24,7 @@ | DeleteConfiguration | [DeleteConfigurationRequest](#spec.proto.runtime.v1.DeleteConfigurationRequest) | [.google.protobuf.Empty](#google.protobuf.Empty) | DeleteConfiguration deletes configuration from configuration store. 
| | SubscribeConfiguration | [SubscribeConfigurationRequest](#spec.proto.runtime.v1.SubscribeConfigurationRequest) stream | [SubscribeConfigurationResponse](#spec.proto.runtime.v1.SubscribeConfigurationResponse) stream | SubscribeConfiguration gets configuration from configuration store and subscribe the updates. | | TryLock | [TryLockRequest](#spec.proto.runtime.v1.TryLockRequest) | [TryLockResponse](#spec.proto.runtime.v1.TryLockResponse) | Distributed Lock API A non-blocking method trying to get a lock with ttl. | -| Unlock | [UnlockRequest](#spec.proto.runtime.v1.UnlockRequest) | [UnlockResponse](#spec.proto.runtime.v1.UnlockResponse) | | +| Unlock | [UnlockRequest](#spec.proto.runtime.v1.UnlockRequest) | [UnlockResponse](#spec.proto.runtime.v1.UnlockResponse) | A method trying to unlock. | | GetNextId | [GetNextIdRequest](#spec.proto.runtime.v1.GetNextIdRequest) | [GetNextIdResponse](#spec.proto.runtime.v1.GetNextIdResponse) | Sequencer API Get next unique id with some auto-increment guarantee | | GetState | [GetStateRequest](#spec.proto.runtime.v1.GetStateRequest) | [GetStateResponse](#spec.proto.runtime.v1.GetStateResponse) | Gets the state for a specific key. | | GetBulkState | [GetBulkStateRequest](#spec.proto.runtime.v1.GetBulkStateRequest) | [GetBulkStateResponse](#spec.proto.runtime.v1.GetBulkStateResponse) | Gets a bulk of state items for a list of keys | @@ -87,15 +87,15 @@ Return values include the item key, data and etag.

Top

## CommonInvokeRequest - +Common invoke request message which includes invoke method and data | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| method | [string](#string) | | | -| data | [google.protobuf.Any](#google.protobuf.Any) | | | -| content_type | [string](#string) | | | -| http_extension | [HTTPExtension](#spec.proto.runtime.v1.HTTPExtension) | | | +| method | [string](#string) | | The method of request | +| data | [google.protobuf.Any](#google.protobuf.Any) | | The request data | +| content_type | [string](#string) | | The content type of request data | +| http_extension | [HTTPExtension](#spec.proto.runtime.v1.HTTPExtension) | | The extra information of http | @@ -161,12 +161,12 @@ ConfigurationItem represents a configuration item with key, content and other in

Top

## DelFileRequest - +Delete file request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| request | [FileRequest](#spec.proto.runtime.v1.FileRequest) | | | +| request | [FileRequest](#spec.proto.runtime.v1.FileRequest) | | File request | @@ -320,7 +320,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on

Top

## FileInfo - +File info message | Field | Type | Label | Description | @@ -356,12 +356,12 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on

Top

## FileMeta - +A map that store FileMetaValue | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| metadata | [FileMeta.MetadataEntry](#spec.proto.runtime.v1.FileMeta.MetadataEntry) | repeated | | +| metadata | [FileMeta.MetadataEntry](#spec.proto.runtime.v1.FileMeta.MetadataEntry) | repeated | A data structure to store metadata | @@ -389,12 +389,12 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on

Top

## FileMetaValue - +FileMeta value | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| value | [string](#string) | repeated | | +| value | [string](#string) | repeated | File meta value | @@ -405,12 +405,12 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on

Top

## FileRequest - +File request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| store_name | [string](#string) | | | +| store_name | [string](#string) | | The name of store | | name | [string](#string) | | The name of the directory | | metadata | [FileRequest.MetadataEntry](#spec.proto.runtime.v1.FileRequest.MetadataEntry) | repeated | The metadata for user extension. | @@ -614,12 +614,12 @@ GetConfigurationResponse is the response conveying the list of configuration val

Top

## GetFileMetaRequest - +Get fileMeta request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| request | [FileRequest](#spec.proto.runtime.v1.FileRequest) | | | +| request | [FileRequest](#spec.proto.runtime.v1.FileRequest) | | File meta request | @@ -630,14 +630,14 @@ GetConfigurationResponse is the response conveying the list of configuration val

Top

## GetFileMetaResponse - +Get fileMeta response message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | size | [int64](#int64) | | The size of file | | last_modified | [string](#string) | | The modified time of file | -| response | [FileMeta](#spec.proto.runtime.v1.FileMeta) | | | +| response | [FileMeta](#spec.proto.runtime.v1.FileMeta) | | File meta response | @@ -648,12 +648,12 @@ GetConfigurationResponse is the response conveying the list of configuration val

Top

## GetFileRequest - +Get file request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| store_name | [string](#string) | | | +| store_name | [string](#string) | | The name of store | | name | [string](#string) | | The name of the file or object want to get. | | metadata | [GetFileRequest.MetadataEntry](#spec.proto.runtime.v1.GetFileRequest.MetadataEntry) | repeated | The metadata for user extension. | @@ -683,12 +683,12 @@ GetConfigurationResponse is the response conveying the list of configuration val

Top

## GetFileResponse - +Get file response message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| data | [bytes](#bytes) | | | +| data | [bytes](#bytes) | | The data of file | @@ -699,7 +699,7 @@ GetConfigurationResponse is the response conveying the list of configuration val

Top

## GetNextIdRequest - +Get next id request message | Field | Type | Label | Description | @@ -735,7 +735,7 @@ GetConfigurationResponse is the response conveying the list of configuration val

Top

## GetNextIdResponse - +Get next id response message | Field | Type | Label | Description | @@ -890,13 +890,13 @@ GetStateResponse is the response conveying the state value and etag.

Top

## HTTPExtension - +Http extension message is about invoke http information | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| verb | [HTTPExtension.Verb](#spec.proto.runtime.v1.HTTPExtension.Verb) | | | -| querystring | [string](#string) | | | +| verb | [HTTPExtension.Verb](#spec.proto.runtime.v1.HTTPExtension.Verb) | | The method of http request | +| querystring | [string](#string) | | The query information of http | @@ -977,13 +977,13 @@ InvokeBindingResponse is the message returned from an output binding invocation

Top

## InvokeResponse - +Invoke service response message is result of invoke service request | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| data | [google.protobuf.Any](#google.protobuf.Any) | | | -| content_type | [string](#string) | | | +| data | [google.protobuf.Any](#google.protobuf.Any) | | The response data | +| content_type | [string](#string) | | The content type of response data | @@ -994,13 +994,13 @@ InvokeBindingResponse is the message returned from an output binding invocation

Top

## InvokeServiceRequest - +Invoke service request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| id | [string](#string) | | | -| message | [CommonInvokeRequest](#spec.proto.runtime.v1.CommonInvokeRequest) | | | +| id | [string](#string) | | The identify of InvokeServiceRequest | +| message | [CommonInvokeRequest](#spec.proto.runtime.v1.CommonInvokeRequest) | | InvokeServiceRequest message | @@ -1011,14 +1011,14 @@ InvokeBindingResponse is the message returned from an output binding invocation

Top

## ListFileRequest - +List file request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| request | [FileRequest](#spec.proto.runtime.v1.FileRequest) | | | -| page_size | [int32](#int32) | | | -| marker | [string](#string) | | | +| request | [FileRequest](#spec.proto.runtime.v1.FileRequest) | | File request | +| page_size | [int32](#int32) | | Page size | +| marker | [string](#string) | | Marker | @@ -1029,14 +1029,14 @@ InvokeBindingResponse is the message returned from an output binding invocation

Top

## ListFileResp - +List file response message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| files | [FileInfo](#spec.proto.runtime.v1.FileInfo) | repeated | | -| marker | [string](#string) | | | -| is_truncated | [bool](#bool) | | | +| files | [FileInfo](#spec.proto.runtime.v1.FileInfo) | repeated | File info | +| marker | [string](#string) | | Marker | +| is_truncated | [bool](#bool) | | Is truncated | @@ -1086,12 +1086,12 @@ metadata property: - key : the key of the message. |

Top

## PutFileRequest - +Put file request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| store_name | [string](#string) | | | +| store_name | [string](#string) | | The name of store | | name | [string](#string) | | The name of the file or object want to put. | | data | [bytes](#bytes) | | The data will be store. | | metadata | [PutFileRequest.MetadataEntry](#spec.proto.runtime.v1.PutFileRequest.MetadataEntry) | repeated | The metadata for user extension. | @@ -1175,13 +1175,13 @@ SaveStateRequest is the message to save multiple states into state store.

Top

## SayHelloRequest - +Hello request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| service_name | [string](#string) | | | -| name | [string](#string) | | | +| service_name | [string](#string) | | The name of service | +| name | [string](#string) | | Request name | | data | [google.protobuf.Any](#google.protobuf.Any) | | Optional. This field is used to control the packet size during load tests. | @@ -1193,13 +1193,13 @@ SaveStateRequest is the message to save multiple states into state store.

Top

## SayHelloResponse - +Hello response message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| hello | [string](#string) | | | -| data | [google.protobuf.Any](#google.protobuf.Any) | | | +| hello | [string](#string) | | Hello | +| data | [google.protobuf.Any](#google.protobuf.Any) | | Hello message of data | @@ -1215,7 +1215,7 @@ SecretResponse is a map of decrypted string/string values | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| secrets | [SecretResponse.SecretsEntry](#spec.proto.runtime.v1.SecretResponse.SecretsEntry) | repeated | | +| secrets | [SecretResponse.SecretsEntry](#spec.proto.runtime.v1.SecretResponse.SecretsEntry) | repeated | The data struct of secrets | @@ -1248,7 +1248,7 @@ SequencerOptions configures requirements for auto-increment guarantee | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| increment | [SequencerOptions.AutoIncrement](#spec.proto.runtime.v1.SequencerOptions.AutoIncrement) | | | +| increment | [SequencerOptions.AutoIncrement](#spec.proto.runtime.v1.SequencerOptions.AutoIncrement) | | Default STRONG auto-increment | @@ -1301,8 +1301,8 @@ StateOptions configures concurrency and consistency for state operations | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| concurrency | [StateOptions.StateConcurrency](#spec.proto.runtime.v1.StateOptions.StateConcurrency) | | | -| consistency | [StateOptions.StateConsistency](#spec.proto.runtime.v1.StateOptions.StateConsistency) | | | +| concurrency | [StateOptions.StateConcurrency](#spec.proto.runtime.v1.StateOptions.StateConcurrency) | | The state operation of concurrency | +| consistency | [StateOptions.StateConsistency](#spec.proto.runtime.v1.StateOptions.StateConsistency) | | The state operation of consistency | @@ -1386,7 +1386,7 @@ TransactionalStateOperation is the message to execute a specified operation with

Top

## TryLockRequest - +Lock request message is distributed lock API which is not blocking method trying to get a lock with ttl | Field | Type | Label | Description | @@ -1405,12 +1405,12 @@ TransactionalStateOperation is the message to execute a specified operation with

Top

## TryLockResponse - +Lock response message returns is the lock obtained. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| success | [bool](#bool) | | | +| success | [bool](#bool) | | Is lock success | @@ -1421,14 +1421,14 @@ TransactionalStateOperation is the message to execute a specified operation with

Top

## UnlockRequest - +UnLock request message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| store_name | [string](#string) | | | +| store_name | [string](#string) | | The name of store | | resource_id | [string](#string) | | resource_id is the lock key. | -| lock_owner | [string](#string) | | | +| lock_owner | [string](#string) | | The owner of the lock | @@ -1439,12 +1439,12 @@ TransactionalStateOperation is the message to execute a specified operation with

Top

+ +## UnlockResponse - +UnLock response message | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| status | [UnlockResponse.Status](#spec.proto.runtime.v1.UnlockResponse.Status) | | | +| status | [UnlockResponse.Status](#spec.proto.runtime.v1.UnlockResponse.Status) | | The status of unlock | @@ -1456,20 +1456,20 @@ TransactionalStateOperation is the message to execute a specified operation with ## HTTPExtension.Verb - +The enum of http request method | Name | Number | Description | | ---- | ------ | ----------- | -| NONE | 0 | | -| GET | 1 | | -| HEAD | 2 | | -| POST | 3 | | -| PUT | 4 | | -| DELETE | 5 | | -| CONNECT | 6 | | -| OPTIONS | 7 | | -| TRACE | 8 | | -| PATCH | 9 | | +| NONE | 0 | NONE | +| GET | 1 | GET method | +| HEAD | 2 | HEAD method | +| POST | 3 | POST method | +| PUT | 4 | PUT method | +| DELETE | 5 | DELETE method | +| CONNECT | 6 | CONNECT method | +| OPTIONS | 7 | OPTIONS method | +| TRACE | 8 | TRACE method | +| PATCH | 9 | PATCH method | @@ -1495,7 +1495,7 @@ But when ETag is missing in the write requests, the state store shall handle the | Name | Number | Description | | ---- | ------ | ----------- | -| CONCURRENCY_UNSPECIFIED | 0 | | +| CONCURRENCY_UNSPECIFIED | 0 | Concurrency state is unspecified | | CONCURRENCY_FIRST_WRITE | 1 | First write wins | | CONCURRENCY_LAST_WRITE | 2 | Last write wins | @@ -1508,7 +1508,7 @@ Enum describing the supported consistency for state. | Name | Number | Description | | ---- | ------ | ----------- | -| CONSISTENCY_UNSPECIFIED | 0 | | +| CONSISTENCY_UNSPECIFIED | 0 | Consistency state is unspecified | | CONSISTENCY_EVENTUAL | 1 | The API server assumes data stores are eventually consistent by default.A state store should: - For read requests, the state store can return data from any of the replicas - For write request, the state store should asynchronously replicate updates to configured quorum after acknowledging the update request. 
| | CONSISTENCY_STRONG | 2 | When a strong consistency hint is attached, a state store should: - For read requests, the state store should return the most up-to-date data consistently across replicas. - For write/delete requests, the state store should synchronisely replicate updated data to configured quorum before completing the write request. | @@ -1517,14 +1517,14 @@ Enum describing the supported consistency for state. ## UnlockResponse.Status - +The enum of unlock status | Name | Number | Description | | ---- | ------ | ----------- | -| SUCCESS | 0 | | -| LOCK_UNEXIST | 1 | | -| LOCK_BELONG_TO_OTHERS | 2 | | -| INTERNAL_ERROR | 3 | | +| SUCCESS | 0 | Unlock is success | +| LOCK_UNEXIST | 1 | The lock is not exist | +| LOCK_BELONG_TO_OTHERS | 2 | The lock is belong to others | +| INTERNAL_ERROR | 3 | Internal error | diff --git a/docs/zh/_sidebar.md b/docs/zh/_sidebar.md index ccfdedea27..6bfecf83e9 100644 --- a/docs/zh/_sidebar.md +++ b/docs/zh/_sidebar.md @@ -15,6 +15,7 @@ - [Dubbo JSON RPC](zh/start/rpc/dubbo_json_rpc.md) - 使用File API - [基于Minio](zh/start/file/minio.md) + - [使用 OSS API](zh/start/oss/oss.md) - [API插件:注册您自己的API](zh/start/api_plugin/helloworld.md) - 作为 Istio 的数据面 - [集成 Istio 1.10.6 演示](zh/start/istio/) @@ -46,8 +47,6 @@ - 可扩展性 - [API插件](zh/design/api_plugin/design.md) - [gRPC API 接口文档](zh/api_reference/README.md) - - [spec/proto/runtime/v1/runtime.proto](https://github.com/mosn/layotto/blob/main/docs/en/api_reference/runtime_v1.md) - - [spec/proto/runtime/v1/appcallback.proto](https://github.com/mosn/layotto/blob/main/docs/en/api_reference/appcallback_v1.md) - SDK文档 - [java sdk](https://github.com/layotto/java-sdk) - [.net sdk](https://github.com/layotto/dotnet-sdk) @@ -96,6 +95,7 @@ - [FaaS 设计文档](zh/design/faas/faas-poc-design.md) - [API插件](zh/design/api_plugin/design.md) - [支持Dapr API](zh/design/api_plugin/dapr_api.md) + - [OSS API设计文档](zh/design/oss/oss-api-design.md) - 贡献指南 - [Layotto 贡献指南](zh/development/CONTRIBUTING.md) - [新手攻略:从零开始成为 
Layotto 贡献者](zh/development/start-from-zero.md) diff --git a/docs/zh/api_reference/README.md b/docs/zh/api_reference/README.md index ec781e0647..c5b88649d3 100644 --- a/docs/zh/api_reference/README.md +++ b/docs/zh/api_reference/README.md @@ -1,11 +1,14 @@ # gRPC API 接口文档 -Layotto 有两个 gRPC proto 文件, 对应的接口文档在: +Layotto 有多个 gRPC proto 文件, 对应的接口文档在: -- [spec/proto/runtime/v1/runtime.proto](https://github.com/mosn/layotto/blob/main/docs/en/api_reference/runtime_v1.md) +[https://mosn.io/layotto/api/v1/runtime.html](https://mosn.io/layotto/api/v1/runtime.html) -该 proto 定义的 gRPC API, 就是 Layotto 对 App 提供的 API。 - -- [spec/proto/runtime/v1/appcallback.proto](https://github.com/mosn/layotto/blob/main/docs/en/api_reference/appcallback_v1.md) +这些 proto 里定义了 Layotto 的运行时 API, 包括: -该接口需要由 App 来实现,用来处理 pubsub 订阅消息 + - Layotto 对 App 提供的 API + - 需要由 App 来实现的 callback API。 Layotto 会回调 App、获取 pubsub 订阅消息 + +除此之外,Layotto 还提供了一些扩展 API,包括: + +s3: [spec/proto/extension/v1/s3](https://mosn.io/layotto/api/v1/s3.html) diff --git a/docs/zh/api_reference/comment_spec_of_proto.md b/docs/zh/api_reference/comment_spec_of_proto.md index 6a388b030a..70ed1789b1 100644 --- a/docs/zh/api_reference/comment_spec_of_proto.md +++ b/docs/zh/api_reference/comment_spec_of_proto.md @@ -5,7 +5,8 @@ 一个坏示例: ``` -message BadCase{ +// XXXXXXXX +message BadCase{ // XXXXXXXX // // XX @@ -18,7 +19,8 @@ message BadCase{ 一个好示例: ``` -message GoodCase{ +// XXXXXXXX +message GoodCase{ // XXXXXXXX // XX // XXXXXX @@ -31,6 +33,8 @@ message GoodCase{ 假如你想添加一些注释在proto文件里,但不想让它们出现在生成的文档里,你可以在注释里使用`@exclude`前缀。 示例:只包括id字段的注释 +注意:在ci检查proto文件该类注释符号并不会被检查到,具体参考[文档](https://docs.buf.build/lint/rules#comments) + ``` /** * @exclude @@ -39,8 +43,7 @@ message GoodCase{ message ExcludedMessage { string id = 1; // the id of this message. string name = 2; // @exclude the name of this message - /* @exclude the value of this message. 
*/ int32 value = 3; } -``` +``` \ No newline at end of file diff --git a/docs/zh/api_reference/how_to_generate_api_doc.md b/docs/zh/api_reference/how_to_generate_api_doc.md index 7a095b5955..36f25d284b 100644 --- a/docs/zh/api_reference/how_to_generate_api_doc.md +++ b/docs/zh/api_reference/how_to_generate_api_doc.md @@ -1,5 +1,16 @@ # 如何基于proto文件生成代码、接口文档 +```shell +make proto +``` + +Then you get: +- `.pb.go` code +- API reference docs +- updated sidebar in the doc site + +That's all :) + ## 如何把 proto 文件编译成`.pb.go`代码 ### **Make 命令生成(推荐)** diff --git a/docs/zh/blog/code/start_process/start_process.md b/docs/zh/blog/code/start_process/start_process.md index b9e41b6e3a..4f23a75e77 100644 --- a/docs/zh/blog/code/start_process/start_process.md +++ b/docs/zh/blog/code/start_process/start_process.md @@ -74,12 +74,12 @@ func NewRuntimeGrpcServer(data json.RawMessage, opts ...grpc.ServerOption) (mgrp // 4. 添加所有组件的初始化函数 // 我们只看下File组件的,将NewXXX()添加到组件Factory里 runtime.WithFileFactory( - file.NewFileFactory("aliOSS", alicloud.NewAliCloudOSS), - file.NewFileFactory("minioOSS", minio.NewMinioOss), - file.NewFileFactory("awsOSS", aws.NewAwsOss), - file.NewFileFactory("tencentCloudOSS", tencentcloud.NewTencentCloudOSS), + file.NewFileFactory("aliyun.oss", alicloud.NewAliCloudOSS), + file.NewFileFactory("minio", minio.NewMinioOss), + file.NewFileFactory("aws.s3", aws.NewAwsOss), + file.NewFileFactory("tencent.oss", tencentcloud.NewTencentCloudOSS), file.NewFileFactory("local", local.NewLocalStore), - file.NewFileFactory("qiniuOSS", qiniu.NewQiniuOSS), + file.NewFileFactory("qiniu.oss", qiniu.NewQiniuOSS), ), ... 
return server, err diff --git a/docs/zh/component_specs/file/common.md b/docs/zh/component_specs/file/common.md index a202a0bf30..ada5fc9420 100644 --- a/docs/zh/component_specs/file/common.md +++ b/docs/zh/component_specs/file/common.md @@ -7,7 +7,7 @@ json配置文件有如下结构: ```json "file": { "file_demo": { - "type": "aliOSS" + "type": "aliyun.oss" "metadata":[ { "endpoint": "endpoint_address", @@ -34,7 +34,7 @@ json配置文件有如下结构: Files map[string]file.FileConfig `json:"file"` ``` -上面的Files是一个map,key为component的名字,比如上述json的aliOSS,component的配置没有具体的格式限制,不同component可以根据需求自己定义,比如: +上面的Files是一个map,key为component的名字,比如上述json的aliyun.oss,component的配置没有具体的格式限制,不同component可以根据需求自己定义,比如: ```json "file": { diff --git a/docs/zh/design/file/file-design.md b/docs/zh/design/file/file-design.md index e907cb826c..4eebc4712a 100644 --- a/docs/zh/design/file/file-design.md +++ b/docs/zh/design/file/file-design.md @@ -75,7 +75,7 @@ Get的入参主要有三个: | **参数名** | **意义** | **是否必传** | | --- | --- | --- | --- | --- | --- | --- | -| store_name | 后端对应的components(eg: aliOSS, awsOSS) | yes | +| store_name | 后端对应的components(eg: aliyun.oss, aws.s3) | yes | | name | 文件名字 | yes| | metadata | 元数据,该字段用户可以用来指定component需要的一些字段,(eg:权限,用户名等) | yes| @@ -85,7 +85,7 @@ Put接口入参主要有三个,多了一个data字段用来传输文件内容 | **参数名** | **意义** | **是否必传** | | --- | --- | --- | --- | --- | --- | --- | -| store_name | 后端对应的components(eg: aliOSS, awsOSS) | yes | +| store_name | 后端对应的components(eg: aliyun.oss, aws.s3) | yes | | name | 文件名字 | yes| | data | 文件内容 | no(允许用户上传空数据,每个component可以做具体实现)| | metadata | 元数据,该字段用户可以用来指定component需要的一些字段,(eg:权限,用户名等) | yes| @@ -97,20 +97,20 @@ Put接口入参主要有三个,多了一个data字段用来传输文件内容 | **参数名** | **意义** | **是否必传** | | --- | --- | --- | --- | --- | --- | --- | -| store_name | 后端对应的components(eg: aliOSS, awsOSS) | yes | +| store_name | 后端对应的components(eg: aliyun.oss, aws.s3) | yes | | name | 文件名字 | yes| | metadata | 元数据,该字段用户可以用来指定component需要的一些字段,(eg:权限,用户名等) | yes| ### 配置参数 -配置参数,不同的component可以配置不同格式,比如aliOSS的配置如下: 
+配置参数,不同的component可以配置不同格式,比如aliyun.oss的配置如下: ```protobuf { "file": { "file_demo": { - "type": "aliOSS", + "type": "aliyun.oss", "metadata":[ { "endpoint": "endpoint_address", diff --git a/docs/zh/design/oss/oss-api-design.md b/docs/zh/design/oss/oss-api-design.md new file mode 100644 index 0000000000..73cf863e33 --- /dev/null +++ b/docs/zh/design/oss/oss-api-design.md @@ -0,0 +1,233 @@ +# Layotto对象存储(OSS)并集API接口定义及设计 + +## 背景 + +为了让layotto支持对象存储能力,需要对oss的接口进行抽象。抽象出的接口需要满足理论上的”可移植性“以及让接口具有明确的语义。 + +## 接口设计 + +整个接口的设计将遵循以下原则: + +1. **语义性,即抽象出的接口具有明确的语义。** +2. **功能完整性,即抽象出的接口需要尽可能满足不同oss的能力。** +3. **最大可移植性,即抽象出的接口需要尽最大可能的满足可移植性的要求。** + +上述原则设计的时候考量的优先级从高到低。为了满足上述的要求,可能会存在以下问题: + +1. 字段的冗余,入参和出参可能会存在对应特定厂商的字段。 +2. 部分接口可能只在部分oss厂商上可以支持,即“最大可能移植性”。 + +## 配置模块设计 + +oss原始配置模块的字段抽象如下所示: + +```go +// OssMetadata wraps the configuration of oss implementation +type OssMetadata struct { + Endpoint string `json:"endpoint"` + AccessKeyID string `json:"accessKeyID"` + AccessKeySecret string `json:"accessKeySecret"` + Region string `json:"region"` +} +``` + +Endpoint、AccessKeyID、AccessKeySecret、Region是现有的oss都有的概念,本文不做过多解释。 + + +## 接口设计 + +接口的定义主要分为两类: + +1. 通用接口,即类似于PutObject、GetObject等所有的oss服务都支持的接口。 +2. 
非通用接口,即只有部分oss服务支持的接口。比如ListParts接口,aws就无法支持。 + +该接口的设计主要参考aliyun和aws以及minio的接口定义。 +> https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_GetObject.html +> https://help.aliyun.com/document_detail/31980.html +> https://docs.min.io/docs/golang-client-api-reference.html + +### PutObject + +对象上传接口,用作上传文件,是oss最基本能力。 + +> [https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_PutObject.html](https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_PutObject.html) +> [https://help.aliyun.com/document_detail/31978.html](https://help.aliyun.com/document_detail/31978.html) + +### GetObject + +对象下载接口,用作文件下载,是oss最基本能力。 + +> [https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_GetObject.html](https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_GetObject.html) +> [https://help.aliyun.com/document_detail/31980.html](https://help.aliyun.com/document_detail/31980.html) + + +### DeleteObject + +对象删除接口,用作文件删除,是oss最基本能力。对应接口的定义,请在上述的链接或者pb定义中查询。 + +### PutObjectTagging + +给对象添加标签,是oss最基本能力。对应接口的定义,请在上述的链接或者pb定义中查询。 + +### DeleteObjectTagging + +删除对象的标签,是oss最基本能力。对应接口的定义,请在上述的链接或者pb定义中查询。 + +### GetObjectTagging + +获取对象的标签,是oss最基本能力。对应接口的定义,请在上述的链接或者pb定义中查询。 + +### CopyObject + +复制已经存在的object,是oss最基本能力。对应接口的定义,请在上述的链接或者pb定义中查询。 + +### DeleteObjects + +删除多个object,是oss最基本能力。对应接口的定义,请在上述的链接或者pb定义中查询。 + +### ListObjects + +查询bucket下面的所有的objects,支持分页查询,是oss最基本能力。对应接口的定义,请在上述的链接或者pb定义中查询。 + +### GetObjectCannedAcl + +读取对象的canned acl,用户可以设置object的acl来控制object的访问权限,首先第一个问题,设计该接口时需要考虑以下问题: + +1. 是否允许用户通过api来设置object的acl。 +2. 
oss的canned acl是否是具有可移植性的 + +对于第一个问题,阿里云是允许用户上传时指定object的acl的,同时也允许随时动态的修改object的acl,腾讯云也允许此操作。 + +--- + +**阿里云的acl定义如下:** + +| 权限值 | 权限描述 | +| --- | --- | +| public-read-write | 公共读写:任何人(包括匿名访问者)都可以对该Object进行读写操作。 | +| public-read | 公共读:只有该Object的拥有者可以对该Object进行写操作,任何人(包括匿名访问者)都可以对该Object进行读操作。 | +| private | 私有:只有Object的拥有者可以对该Object进行读写操作,其他人无法访问该Object。 | +| default | 默认:该Object遵循Bucket的读写权限,即Bucket是什么权限,Object就是什么权限。 | + + +--- + +**腾讯云的定义如下:** + +| 权限值 | 权限描述 | +| --- | --- | +| default | 空描述,此时根据各级目录的显式设置及存储桶的设置来确定是否允许请求(默认) | +| private | 创建者(主账号)具备 FULL_CONTROL 权限,其他人没有权限 | +| public-read | 创建者具备 FULL_CONTROL 权限,匿名用户组具备 READ 权限 | +| authenticated-read | 创建者具备 FULL_CONTROL 权限,认证用户组具备 READ 权限 | +| bucket-owner-read | 创建者具备 FULL_CONTROL 权限,存储桶拥有者具备 READ 权限 | +| bucket-owner-full-control | 创建者和存储桶拥有者都具备 FULL_CONTROL 权限 | + +**说明:** +对象不支持授予 public-read-write 权限。 + +--- + +**aws定义如下:** + +| **Canned ACL** | **Applies to** | **Permissions added to ACL** | +| --- | --- | --- | +| private | Bucket and object | Owner gets FULL_CONTROL. No one else has access rights (default). | +| public-read | Bucket and object | Owner gets FULL_CONTROL. The AllUsers group (see [Who is a grantee?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#specifying-grantee)) gets READ access. | +| public-read-write | Bucket and object | Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. Granting this on a bucket is generally not recommended. | +| aws-exec-read | Bucket and object | Owner gets FULL_CONTROL. Amazon EC2 gets READ access to GET an Amazon Machine Image (AMI) bundle from Amazon S3. | +| authenticated-read | Bucket and object | Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. | +| bucket-owner-read | Object | Object owner gets FULL_CONTROL. Bucket owner gets READ access. If you specify this canned ACL when creating a bucket, Amazon S3 ignores it. 
| +| bucket-owner-full-control | Object | Both the object owner and the bucket owner get FULL_CONTROL over the object. If you specify this canned ACL when creating a bucket, Amazon S3 ignores it. | +| log-delivery-write | Bucket | The LogDelivery group gets WRITE and READ_ACP permissions on the bucket. For more information about logs, see ([Logging requests using server access logging](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html)). | + +--- + +从上述列表可以看出,不同的oss厂商对于acl的定义是有区别的,但canned acl的概念是都存在的,因此该接口属于不保证可移植性的接口, +这就需要在具体的component的实现中对acl的值进行判断。例如用户从腾讯云迁移到阿里云的过程中, +如果指定了ACl为public-read-write,那么在迁移到阿里云的时候,需要返回类似于“not supported acl”, +只要可以做到提醒用户该接口不能满足可移植即可。 + +**Note: Layotto虽然提供了acl的操作,但用户对于acl的使用需要谨慎,因为服务端不同的差异可能会导致不可预期的结果。** + + + +> [https://help.aliyun.com/document_detail/100676.html](https://help.aliyun.com/document_detail/100676.html) 阿里云object acl类型 +> [https://cloud.tencent.com/document/product/436/30752#.E9.A2.84.E8.AE.BE.E7.9A.84-acl](https://cloud.tencent.com/document/product/436/30752#.E9.A2.84.E8.AE.BE.E7.9A.84-acl) 腾讯云acl类型 +> [https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL) +> [https://github.com/minio/minio/issues/8195](https://github.com/minio/minio/issues/8195) 对于minio是否应该支持acl的讨论 + + +### PutObjectCannedAcl + +这个和上述的GetObjectCannedAcl相对应,用来设置object的canned acl。 + +**Note: Layotto虽然提供了acl的操作,但用户对于acl的使用需要谨慎,因为服务端不同的差异可能会导致不可预期的结果。** + +### RestoreObject + +调用RestoreObject接口解冻归档类型(Archive)或冷归档(Cold Archive)的文件(Object)。对应接口的定义, +请在上述的链接中或者pb接口定义注释的引用中查询。 + +### CreateMultipartUpload + +创建分片上传接口,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 + +### UploadPart + +分片上传接口,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 + +### UploadPartCopy + +分片copy接口,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 + +### CompleteMultipartUpload + +完成分片上传接口,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 + +### AbortMultipartUpload + 
+中断分片上传接口,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 + +### ListMultipartUploads + +查询已经上传的分片,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 + +### ListObjectVersions + +查询对象所有的版本信息,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 + +### HeadObject + +返回object的metadata数据,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 + +### IsObjectExist + +该接口在s3中没有明确的定义,用户可以通过HeadObject返回的http标准错误码是不是404来判断object是不是存在,这里单独抽象出来是为了让接口更加具有语义信息。 + +> [http://biercoff.com/how-to-check-with-aws-cli-if-file-exists-in-s3/](http://biercoff.com/how-to-check-with-aws-cli-if-file-exists-in-s3/) +> [https://stackoverflow.com/questions/41871948/aws-s3-how-to-check-if-a-file-exists-in-a-bucket-using-bash](https://stackoverflow.com/questions/41871948/aws-s3-how-to-check-if-a-file-exists-in-a-bucket-using-bash) + +### SignURL + +该接口会生成一个url用作object的上传和下载,主要用于未经授权的用户。是oss最基本能力。对应接口的定义, +请在上述的链接中或者pb接口定义注释的引用中查询。 + +### UpdateDownloadBandwidthRateLimit + +该接口为阿里云提供的接口,可以限制client的下载速度。具体信息可参照pb定义里面的注释信息。 + +### UpdateUploadBandwidthRateLimit + +该接口为阿里云提供的接口,可以限制client的上传速度。具体信息可参照pb定义里面的注释信息。 + +### AppendObject + +该接口为追加接口,主要用于对文件进行append操作,aws不支持该操作,但阿里云和腾讯云以及minio都提供了对应的方式来实现。 + +> https://help.aliyun.com/document_detail/31981.html +> https://github.com/minio/minio-java/issues/980 + +### ListParts + +查询已经上传的分片,是oss最基本能力。对应接口的定义,请在上述的链接中或者pb接口定义注释的引用中查询。 diff --git a/docs/zh/operation/local.md b/docs/zh/operation/local.md index 3cb264fa4b..38388da4c5 100644 --- a/docs/zh/operation/local.md +++ b/docs/zh/operation/local.md @@ -1,24 +1,31 @@ # 如何本地开发、本地调试 -## app 开发者如何本地开发、调试 app +## 1. app 开发者如何本地开发、调试 app 一般有以下几种方案: -- 本地用 docker 启动一个 Layotto sidecar, 或者 docker-compose启动 Layotto+其他存储系统(比如 Redis) -- 公司提供远端 Layotto sidecar +### 1.1. 本地启动 sidecar +本地用 docker 启动一个 Layotto sidecar, 或者 docker-compose启动 Layotto+其他存储系统(比如 Redis) + +### 1.2. 公司提供远端 Layotto sidecar 比如在远端测试环境,自己拉起一个 Pod,在里面跑 Layotto sidecar。 -1. 如果您能以ip直接访问远端测试环境pod: - 1. 可以把 Layotto ip 修改为上述 Pod ip,本地ip直连pod -2. 如果不能以ip直接访问远端测试环境pod: - 1. 
可以将该pod的service类型,设置为`NodePort`或者`LoadBalancer`,本地直连pod service - 2. 可以将该pod注册到gateway,本地直连gateway +- 如果您能以ip直接访问远端测试环境pod: + - 可以把 Layotto ip 修改为上述 Pod ip,本地ip直连pod +- 如果不能以ip直接访问远端测试环境pod: + - 可以将该pod的service类型,设置为`NodePort`或者`LoadBalancer`,本地直连pod service + - 可以将该pod注册到gateway,本地直连gateway 开发者在本地调试时,使用以上方式即可实现"本地 app 进程连接到远端 Layotto sidecar"。 更进一步,可以由公司里负责研发环境的团队把上述操作自动化,提供"一键申请远端 sidecar" 功能。 -- 如果公司有类似于 github codespace的云端研发环境,那可以在研发环境自带 sidecar +### 1.3. 云端研发环境 +如果公司有类似于 github codespace的云端研发环境,那可以在研发环境自带 sidecar + + +## 2. Layotto 开发者如何本地开发、调试 Layotto +本地编译运行 Layotto 即可。 +例如,用 Goland IDE 运行 Layotto 时,配置如下图所示: -## Layotto 开发者如何本地、开发调试 Layotto -本地编译运行 Layotto 即可 \ No newline at end of file +![](https://gw.alipayobjects.com/mdn/rms_5891a1/afts/img/A*CHFYQK6kMEgAAAAAAAAAAAAAARQnAQ) \ No newline at end of file diff --git a/docs/zh/start/file/minio.md b/docs/zh/start/file/minio.md index bc7e4ad237..69292586fa 100644 --- a/docs/zh/start/file/minio.md +++ b/docs/zh/start/file/minio.md @@ -38,7 +38,7 @@ layotto提供了minio的配置文件[oss配置](https://github.com/mosn/layotto/ ```json "file": { - "minioOSS": { + "minio": { "metadata":[ { "endpoint": "play.min.io", diff --git a/docs/zh/start/oss/oss.md b/docs/zh/start/oss/oss.md new file mode 100644 index 0000000000..c4480fe108 --- /dev/null +++ b/docs/zh/start/oss/oss.md @@ -0,0 +1,75 @@ +# 基于S3协议实现对象存储的无感迁移 + +## 快速开始 + +Layotto提供了访问OSS的示例 [demo](https://github.com/mosn/layotto/blob/main/demo/oss/client.go) ,该示例基于S3协议实现了对象的一系列操作,当前 +已支持部分接口。可以做到在不同的OSS实例之间进行无感迁移。 + +### step 1. 
启动layotto + +layotto提供了aws的配置文件`configs/config_oss.json`,配置文件内容如下所示: + +```json +"grpc_config": { + "oss": { + "oss_demo": { + "type": "aws.oss", + "metadata": { + "basic_config":{ + "region": "your-oss-resource-region", + "endpoint": "your-oss-resource-endpoint", + "accessKeyID": "your-oss-resource-accessKeyID", + "accessKeySecret": "your-oss-resource-accessKeySecret" + } + } + } + } +} +``` + +配置中对应的字段,需要替换成自己的OSS账号的配置。type 支持多种类型,例如 `aliyun.oss`对应阿里云的OSS服务, `aws.oss` 对应亚马逊云的 S3 服务。 +用户可以根据自己的实际场景进行配置。 + +配置好后,切换目录: + +```shell +#备注 请将${project_path}替换成你的项目路径 +cd ${project_path}/cmd/layotto +``` + +构建: + +```shell @if.not.exist layotto +go build -o layotto +``` + +启动 Layotto: + +```shell @background +./layotto start -c ../../configs/config_oss.json +``` + +### step 2. 启动测试demo + +Layotto提供了访问文件的示例 [demo](https://github.com/mosn/layotto/blob/main/demo/oss/client.go) + +```shell +cd ${project_path}/demo/file/s3/ +go build client.go + +# 上传名为test3.txt的文件到名为antsys-wenxuwan的bucket下,内容为"hello" +./client put antsys-wenxuwan test3.txt "hello" + +# 获取antsys-wenxuwan bucket下名为test3.txt的文件 +./client get antsys-wenxuwan test3.txt + +# 删除antsys-wenxuwan bucket下名为test3.txt的文件 +./client del antsys-wenxuwan test3.txt + +# 返回antsys-wenxuwan bucket下的所有文件信息 +./client list antsys-wenxuwan + +``` + +#### 细节以后再说,继续体验其他API +通过左侧的导航栏,继续体验别的API吧! 
diff --git a/etc/script/generate-code.sh b/etc/script/generate-code.sh new file mode 100644 index 0000000000..c91fe6c27e --- /dev/null +++ b/etc/script/generate-code.sh @@ -0,0 +1,17 @@ +project_path=$(pwd) + +echo "===========> Generating code for spec/proto/extension/v1/" +# generate code for extension/v1 +res=$(ls -d spec/proto/extension/v1/*/) +for r in $res; do + echo $r + docker run --rm \ + -v $project_path/$r:/api/proto \ + layotto/protoc +done + +# generate code for runtime/v1 +echo "===========> Generating code for spec/proto/runtime/v1/" +docker run --rm \ + -v $project_path/spec/proto/runtime/v1:/api/proto \ + layotto/protoc diff --git a/etc/script/generate-doc.sh b/etc/script/generate-doc.sh new file mode 100644 index 0000000000..d0afce62e0 --- /dev/null +++ b/etc/script/generate-doc.sh @@ -0,0 +1,43 @@ +project_path=$(pwd) + +echo "===========> Generating docs for spec/proto/extension/v1/" +# generate docs for extension/v1 +res=$(cd spec/proto/extension/v1/ && ls -d *) +for r in $res; do + docker run --rm \ + -v $project_path/docs/api/v1:/out \ + -v $project_path/spec/proto/extension/v1/$r:/protos \ + -v $project_path/spec/proto/runtime/v1:/protos/tpl \ + pseudomuto/protoc-gen-doc --doc_opt=/protos/tpl/html.tmpl,$r.html +done + +# generate docs for runtime/v1 +echo "===========> Generating docs for spec/proto/runtime/v1/" +docker run --rm \ + -v $project_path/docs/api/v1:/out \ + -v $project_path/spec/proto/runtime/v1:/protos \ + pseudomuto/protoc-gen-doc --doc_opt=/protos/html.tmpl,runtime.html + +# update the sidebar +cd $project_path +sidebar_zh=docs/zh/api_reference/README.md +sidebar=docs/en/api_reference/README.md +echo "===========> Updating the sidebar" +# delete existing lines +# -i "" is for compatibility with MacOS. 
See https://blog.csdn.net/dawn_moon/article/details/8547408 +sed -i "" '/.*: \[.*\]\(.*\)/d' $sidebar_zh +sed -i "" '/.*: \[.*\]\(.*\)/d' $sidebar +# reinsert the reference lines +for r in $res; do + echo "$r: [spec/proto/extension/v1/$r](https://mosn.io/layotto/api/v1/$r.html) \n" >> $sidebar_zh + echo "$r: [spec/proto/extension/v1/$r](https://mosn.io/layotto/api/v1/$r.html) \n" >> $sidebar +done +# delete last line +sed -i "" '$d' $sidebar_zh +sed -i "" '$d' $sidebar + + +cd $project_path +# generate index for api references +#idx=$(cd docs && ls api/v1/*) +#echo $idx > docs/api/extensions.txt diff --git a/go.sum b/go.sum index 4b62aa1400..1762bcde72 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/alicebob/miniredis/v2 v2.16.0 h1:ALkyFg7bSTEd1Mkrb4ppq4fnwjklA59dVtIe github.com/alicebob/miniredis/v2 v2.16.0/go.mod h1:gquAfGbzn92jvtrSC69+6zZnwSODVXVpYDRaGhWaL6I= github.com/aliyun/alibaba-cloud-sdk-go v1.61.18/go.mod h1:v8ESoHo4SyHmuB4b1tJqDHxfTGEciD+yhvOU/5s1Rfk= github.com/aliyun/aliyun-oss-go-sdk v2.0.7+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/aliyun/aliyun-oss-go-sdk v2.1.8+incompatible h1:hLUNPbx10wawWW7DeNExvTrlb90db3UnnNTFKHZEFhE= -github.com/aliyun/aliyun-oss-go-sdk v2.1.8+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-oss-go-sdk v2.2.0+incompatible h1:ht2+VfbXtNLGhCsnTMc6/N26nSTBK6qdhktjYyjJQkk= +github.com/aliyun/aliyun-oss-go-sdk v2.2.0+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/aliyun/aliyun-tablestore-go-sdk v1.6.0/go.mod h1:jixoiNNRR/4ziq0yub1fTlxmDcQwlpkaujpaWIATQWM= github.com/aliyunmq/mq-http-go-sdk v1.0.3/go.mod h1:JYfRMQoPexERvnNNBcal0ZQ2TVQ5ialDiW9ScjaadEM= github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= @@ -219,30 +219,42 @@ github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/ github.com/aws/aws-sdk-go v1.36.30 
h1:hAwyfe7eZa7sM+S5mIJZFiNFwJMia9Whz6CYblioLoU= github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.9.1 h1:ZbovGV/qo40nrOJ4q8G33AGICzaPI45FHQWJ9650pF4= -github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config v1.8.2 h1:Dqy4ySXFmulRmZhfynm/5CD4Y6aXiTVhDtXLIuUe/r0= -github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= -github.com/aws/aws-sdk-go-v2/credentials v1.4.2 h1:8kVE4Og6wlhVrMGiORQ3p9gRj2exjzhFRB+QzWBUa5Q= -github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 h1:Nm+BxqBtT0r+AnD6byGMCGT4Km0QwHBy8mAYptNPXY4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 h1:NnXJXUz7oihrSlPKEM0yZ19b+7GQ47MX/LluLlEyE/Y= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 h1:APEjhKZLFlNVLATnA/TJyA+w1r/xd5r5ACWBDZ9aIvc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 
h1:dt1JQFj/135ozwGIWeCM3aQ8N/kB3Xu3Uu4r9zuOIyc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0diUZ0W4R6uZtjYKguMLW2s= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 h1:RfgQyv3bFT2Js6XokcrNtTjQ6wAVBRpoCgTFsypihHA= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 h1:7ce9ugapSgBapwLhg7AJTqKW5U92VRX3vX65k2tsB+g= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w= -github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/aws-sdk-go-v2 v1.16.4 h1:swQTEQUyJF/UkEA94/Ga55miiKFoXmm/Zd67XHgmjSg= +github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= +github.com/aws/aws-sdk-go-v2/config v1.15.9 h1:TK5yNEnFDQ9iaO04gJS/3Y+eW8BioQiCUafW75/Wc3Q= +github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs= +github.com/aws/aws-sdk-go-v2/credentials v1.12.4 h1:xggwS+qxCukXRVXJBJWQJGyUsvuxGC8+J1kKzv2cxuw= +github.com/aws/aws-sdk-go-v2/credentials v1.12.4/go.mod h1:7g+GGSp7xtR823o1jedxKmqRZGqLdoHQfI4eFasKKxs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5 h1:YPxclBeE07HsLQE8vtjC8T2emcTjM9nzqsnDi2fv5UM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5/go.mod h1:WAPnuhG5IQ/i6DETFl5NmX3kKqCzw7aau9NHAGcm4QE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14 h1:qpJmFbypCfwPok5PGTSnQy1NKbv4Hn8xGsee9l4xOPE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14/go.mod h1:IOYB+xOZik8YgdTlnDSwbvKmCkikA3nVue8/Qnfzs0c= 
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11 h1:gsqHplNh1DaQunEKZISK56wlpbCg0yKxNVvGWCFuF1k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5 h1:PLFj+M2PgIDHG//hw3T0O0KLI4itVtAjtxrZx4AHPLg= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12 h1:j0VqrjtgsY1Bx27tD0ysay36/K4kFMWRp9K3ieO9nLU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12/go.mod h1:00c7+ALdPh4YeEUPXJzyU0Yy01nPGOq2+9rUaz05z9g= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2 h1:1fs9WkbFcMawQjxEI0B5L0SqvBhJZebxWM6Z3x/qHWY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2/go.mod h1:0jDVeWUFPbI3sOfsXXAsIdiawXcn7VBLx/IlFVTRP64= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 h1:T4pFel53bkHjL2mMo+4DKE6r6AuoZnM0fg7k1/ratr4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6 h1:9mvDAsMiN+07wcfGM+hJ1J3dOKZ2YOpDiPZ6ufRJcgw= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6/go.mod h1:Eus+Z2iBIEfhOvhSdMTcscNOMy6n3X9/BJV0Zgax98w= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5 h1:gRW1ZisKc93EWEORNJRvy/ZydF3o6xLSveJHdi1Oa0U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5/go.mod h1:ZbkttHXaVn3bBo/wpJbQGiiIWR90eTBUVBrEHUEQlho= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5 h1:DyPYkrH4R2zn+Pdu6hM3VTuPsQYAE6x2WB24X85Sgw0= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5/go.mod h1:XtL92YWo0Yq80iN3AgYRERJqohg4TozrqRlxYhHGJ7g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.26.10 h1:GWdLZK0r1AK5sKb8rhB9bEXqXCK8WNuyv4TBAD6ZviQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.26.10/go.mod h1:+O7qJxF8nLorAhuIVhYTHse6okjHJJm4EwhhzvpnkT0= 
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.7 h1:suAGD+RyiHWPPihZzY+jw4mCZlOFWgmdjb2AeTenz7c= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.7/go.mod h1:TFVe6Rr2joVLsYQ1ABACXgOC6lXip/qpX2x5jWg/A9w= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.6 h1:aYToU0/iazkMY67/BYLt3r6/LT/mUtarLAF5mGof1Kg= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.6/go.mod h1:rP1rEOKAGZoXp4iGDxSXFvODAtXpm34Egf0lL0eshaQ= +github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= +github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= @@ -642,8 +654,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -837,8 +849,9 @@ github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxy 
github.com/jcmturner/gokrb5/v8 v8.4.1/go.mod h1:T1hnNppQsBtxW0tCHMHTkAt8n/sABdzZgZdoFrZaZNM= github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= -github.com/jinzhu/copier v0.3.2 h1:QdBOCbaouLDYaIPFfi1bKv5F5tPpeTwXe4sD0jqtz5w= github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro= +github.com/jinzhu/copier v0.3.6-0.20220506024824-3e39b055319a h1:1tB9lnwJFOtcMERtVVAad4aK5e3Q7sVbP0id87FxIS8= +github.com/jinzhu/copier v0.3.6-0.20220506024824-3e39b055319a/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= diff --git a/make/common.mk b/make/common.mk index 95f9179e4a..c33725c728 100644 --- a/make/common.mk +++ b/make/common.mk @@ -51,6 +51,12 @@ endif ifeq ($(origin SUPERVISOR_DIR),undefined) SUPERVISOR_DIR := $(ROOT_DIR)/etc/supervisor endif +ifeq ($(origin DEPLOY_DIR),undefined) +DEPLOY_DIR := $(ROOT_DIR)/deploy +endif +ifeq ($(origin K8S_DIR),undefined) +K8S_DIR := $(DEPLOY_DIR)/k8s +endif # set the version number. you should not need to do this # for the majority of scenarios. diff --git a/make/deploy.mk b/make/deploy.mk new file mode 100644 index 0000000000..bd2069b8dc --- /dev/null +++ b/make/deploy.mk @@ -0,0 +1,33 @@ +# Copyright 2021 Layotto Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file contains commands to deploy/undeploy layotto into Kubernetes +# Default namespace is `default` + +NAMESPACE := default + +.PHONY: deploy.k8s +deploy.k8s: deploy.k8s.standalone + +.PHONY: deploy.k8s.standalone +deploy.k8s.standalone: + @echo "===========> Deploy Layotto to Kubernetes in namespace ${NAMESPACE} in standalone mode" + @kubectl apply -f $(K8S_DIR)/standalone/default_quickstart.yaml -n ${NAMESPACE} + +.PHONY: undeploy.k8s +undeploy.k8s: undeploy.k8s.standalone + +.PHONY: undeploy.k8s.standalone +undeploy.k8s.standalone: + @echo "===========> Clean Layotto to Kubernetes in namespace ${NAMESPACE} in standalone mode" + @kubectl delete -f $(K8S_DIR)/standalone/default_quickstart.yaml -n ${NAMESPACE} diff --git a/make/proto.mk b/make/proto.mk index fd10bceedf..f88c552b39 100644 --- a/make/proto.mk +++ b/make/proto.mk @@ -13,23 +13,29 @@ .PHONY: proto.gen.doc proto.gen.doc: - $(DOCKER) run --rm \ - -v $(ROOT_DIR)/docs/en/api_reference:/out \ - -v $(ROOT_DIR)/spec/proto/runtime/v1:/protos \ - pseudomuto/protoc-gen-doc --doc_opt=/protos/template.tmpl,runtime_v1.md runtime.proto - $(DOCKER) run --rm \ - -v $(ROOT_DIR)/docs/en/api_reference:/out \ - -v $(ROOT_DIR)/spec/proto/runtime/v1:/protos \ - pseudomuto/protoc-gen-doc --doc_opt=/protos/template.tmpl,appcallback_v1.md appcallback.proto + sh ${SCRIPT_DIR}/generate-doc.sh .PHONY: proto.gen.init proto.gen.init: - go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28 - go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2 + go install 
google.golang.org/protobuf/cmd/protoc-gen-go@v1.28 + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2 .PHONY: proto.gen.code proto.gen.code: - $(DOCKER) build -t layotto/protoc $(ROOT_DIR)/docker/proto && \ - $(DOCKER) run --rm \ - -v $(ROOT_DIR)/spec/proto/runtime/v1:/api/proto \ - layotto/protoc + $(DOCKER) build -t layotto/protoc $(ROOT_DIR)/docker/proto && sh ${SCRIPT_DIR}/generate-code.sh + $(MAKE) format + +.PHONY: proto.comments +proto.comments: +ifeq (,$(shell which buf)) + @echo "===========> Installing buf linter" + @curl -fsSL \ + "https://github.com/bufbuild/buf/releases/download/v1.6.0/buf-$$(uname -s)-$$(uname -m)" \ + -o "$(OUTPUT_DIR)/buf" + @sudo install -m 0755 $(OUTPUT_DIR)/buf /usr/local/bin/buf +endif + @echo "===========> Running buf linter" + buf lint $(ROOT_DIR) + +.PHONY: proto.gen.all +proto.gen.all: proto.gen.code proto.gen.doc \ No newline at end of file diff --git a/pkg/grpc/extension/s3/param.go b/pkg/grpc/extension/s3/param.go new file mode 100644 index 0000000000..7de7f8cad4 --- /dev/null +++ b/pkg/grpc/extension/s3/param.go @@ -0,0 +1,21 @@ +/* +* Copyright 2021 Layotto Authors +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ + +package s3 + +const ( + NotSupportStoreName = "not supported store type: %+v" +) diff --git a/pkg/grpc/extension/s3/s3.go b/pkg/grpc/extension/s3/s3.go new file mode 100644 index 0000000000..452b1934c1 --- /dev/null +++ b/pkg/grpc/extension/s3/s3.go @@ -0,0 +1,766 @@ +/* + * Copyright 2021 Layotto Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3 + +import ( + "context" + "encoding/json" + "io" + "sync" + + "mosn.io/layotto/spec/proto/extension/v1/s3" + + l8s3 "mosn.io/layotto/components/oss" + + rawGRPC "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" + "mosn.io/pkg/log" + + "mosn.io/layotto/pkg/grpc" +) + +var ( + s3Instance *S3Server +) + +var ( + bytesPool = sync.Pool{ + New: func() interface{} { + // set size to 100kb + return new([]byte) + }, + } +) + +type S3Server struct { + appId string + ossInstance map[string]l8s3.Oss +} + +func NewS3Server(ac *grpc.ApplicationContext) grpc.GrpcAPI { + s3Instance = &S3Server{} + s3Instance.appId = ac.AppId + s3Instance.ossInstance = ac.Oss + return s3Instance +} + +func (s *S3Server) Init(conn *rawGRPC.ClientConn) error { + return nil +} + +func (s *S3Server) Register(rawGrpcServer *rawGRPC.Server) error { + s3.RegisterObjectStorageServiceServer(rawGrpcServer, s) + return nil +} + +func transferData(source interface{}, target interface{}) error { + data, err := json.Marshal(source) + if 
err != nil { + return nil + } + err = json.Unmarshal(data, target) + return err +} + +func (s *S3Server) GetObject(req *s3.GetObjectInput, stream s3.ObjectStorageService_GetObjectServer) error { + // 1. validate + if s.ossInstance[req.StoreName] == nil { + return status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + // 2. convert request + st := &l8s3.GetObjectInput{} + err := transferData(req, st) + if err != nil { + return status.Errorf(codes.InvalidArgument, "transfer request data fail for GetObject,err: %+v", err) + } + // 3. find the component + result, err := s.ossInstance[req.StoreName].GetObject(stream.Context(), st) + if err != nil { + return status.Errorf(codes.Internal, "get file fail,err: %+v", err) + } + + buffsPtr := bytesPool.Get().(*[]byte) + buf := *buffsPtr + if len(buf) == 0 { + buf = make([]byte, 102400) + } + defer func() { + result.DataStream.Close() + *buffsPtr = buf + bytesPool.Put(buffsPtr) + }() + + for { + length, err := result.DataStream.Read(buf) + if err != nil && err != io.EOF { + log.DefaultLogger.Warnf("get file fail, err: %+v", err) + return status.Errorf(codes.Internal, "get file fail,err: %+v", err) + } + if err == nil || (err == io.EOF && length != 0) { + resp := &s3.GetObjectOutput{} + err := transferData(result, resp) + if err != nil { + return status.Errorf(codes.InvalidArgument, "transfer request data fail for GetObject,err: %+v", err) + } + resp.Body = buf[:length] + if err = stream.Send(resp); err != nil { + return status.Errorf(codes.Internal, "send file data fail,err: %+v", err) + } + } + if err == io.EOF { + return nil + } + } +} + +type putObjectStreamReader struct { + data []byte + server s3.ObjectStorageService_PutObjectServer +} + +func newPutObjectStreamReader(data []byte, server s3.ObjectStorageService_PutObjectServer) *putObjectStreamReader { + return &putObjectStreamReader{data: data, server: server} +} + +func (r *putObjectStreamReader) Read(p []byte) (int, error) { + var count int + 
total := len(p) + for { + if len(r.data) > 0 { + n := copy(p[count:], r.data) + r.data = r.data[n:] + count += n + if count == total { + return count, nil + } + } + req, err := r.server.Recv() + if err != nil { + if err != io.EOF { + log.DefaultLogger.Errorf("recv data from grpc stream fail, err:%+v", err) + } + return count, err + } + r.data = req.Body + } +} + +func (s *S3Server) PutObject(stream s3.ObjectStorageService_PutObjectServer) error { + req, err := stream.Recv() + if err != nil { + //if client send eof error directly, return nil + if err == io.EOF { + return nil + } + return status.Errorf(codes.Internal, "receive file data fail: err: %+v", err) + } + + if s.ossInstance[req.StoreName] == nil { + return status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + fileReader := newPutObjectStreamReader(req.Body, stream) + + st := &l8s3.PutObjectInput{} + err = transferData(req, st) + if err != nil { + return status.Errorf(codes.InvalidArgument, "transfer request data fail for PutObject,err: %+v", err) + } + st.DataStream = fileReader + var resp *l8s3.PutObjectOutput + if resp, err = s.ossInstance[req.StoreName].PutObject(stream.Context(), st); err != nil { + return status.Errorf(codes.Internal, err.Error()) + } + output := &s3.PutObjectOutput{} + err = transferData(resp, output) + if err != nil { + return status.Errorf(codes.Internal, "transfer response data fail for PutObject,err: %+v", err) + } + return stream.SendAndClose(output) +} + +func (s *S3Server) DeleteObject(ctx context.Context, req *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.DeleteObjectInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for DeleteObject,err: %+v", err) + } + var resp *l8s3.DeleteObjectOutput + if resp, err = 
s.ossInstance[req.StoreName].DeleteObject(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.DeleteObjectOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for DeleteObject,err: %+v", err) + } + return output, nil +} +func (s *S3Server) PutObjectTagging(ctx context.Context, req *s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + + st := &l8s3.PutObjectTaggingInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for PutObjectTagging,err: %+v", err) + } + var resp *l8s3.PutObjectTaggingOutput + if resp, err = s.ossInstance[req.StoreName].PutObjectTagging(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.PutObjectTaggingOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for PutObjectTagging,err: %+v", err) + } + return output, nil +} +func (s *S3Server) DeleteObjectTagging(ctx context.Context, req *s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.DeleteObjectTaggingInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for DeleteObjectTagging,err: %+v", err) + } + var resp *l8s3.DeleteObjectTaggingOutput + if resp, err = s.ossInstance[req.StoreName].DeleteObjectTagging(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.DeleteObjectTaggingOutput{} + err = transferData(resp, output) + if 
err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for DeleteObjectTagging,err: %+v", err) + } + return output, nil +} +func (s *S3Server) GetObjectTagging(ctx context.Context, req *s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.GetObjectTaggingInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for GetObjectTagging,err: %+v", err) + } + var resp *l8s3.GetObjectTaggingOutput + if resp, err = s.ossInstance[req.StoreName].GetObjectTagging(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.GetObjectTaggingOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for GetObjectTagging,err: %+v", err) + } + return output, nil +} +func (s *S3Server) CopyObject(ctx context.Context, req *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.CopyObjectInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for CopyObject,err: %+v", err) + } + var resp *l8s3.CopyObjectOutput + if resp, err = s.ossInstance[req.StoreName].CopyObject(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.CopyObjectOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for CopyObject,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) DeleteObjects(ctx context.Context, req *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) { + if 
s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.DeleteObjectsInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for DeleteObjects,err: %+v", err) + } + var resp *l8s3.DeleteObjectsOutput + if resp, err = s.ossInstance[req.StoreName].DeleteObjects(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.DeleteObjectsOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for DeleteObjects,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) ListObjects(ctx context.Context, req *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.ListObjectsInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for ListObjects,err: %+v", err) + } + var resp *l8s3.ListObjectsOutput + if resp, err = s.ossInstance[req.StoreName].ListObjects(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.ListObjectsOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for ListObjects,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) GetObjectCannedAcl(ctx context.Context, req *s3.GetObjectCannedAclInput) (*s3.GetObjectCannedAclOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.GetObjectCannedAclInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer 
request data fail for GetObjectAcl,err: %+v", err) + } + var resp *l8s3.GetObjectCannedAclOutput + if resp, err = s.ossInstance[req.StoreName].GetObjectCannedAcl(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.GetObjectCannedAclOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for GetObjectAcl,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) PutObjectCannedAcl(ctx context.Context, req *s3.PutObjectCannedAclInput) (*s3.PutObjectCannedAclOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.PutObjectCannedAclInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for PutObjectAcl,err: %+v", err) + } + var resp *l8s3.PutObjectCannedAclOutput + if resp, err = s.ossInstance[req.StoreName].PutObjectCannedAcl(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.PutObjectCannedAclOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for PutObjectAcl,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) RestoreObject(ctx context.Context, req *s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.RestoreObjectInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for RestoreObject,err: %+v", err) + } + var resp *l8s3.RestoreObjectOutput + if resp, err = s.ossInstance[req.StoreName].RestoreObject(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, 
err.Error()) + } + output := &s3.RestoreObjectOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for RestoreObject,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) CreateMultipartUpload(ctx context.Context, req *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.CreateMultipartUploadInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for CreateMultipartUpload,err: %+v", err) + } + var resp *l8s3.CreateMultipartUploadOutput + if resp, err = s.ossInstance[req.StoreName].CreateMultipartUpload(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.CreateMultipartUploadOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for CreateMultipartUpload,err: %+v", err) + } + return output, nil + +} + +type uploadPartStreamReader struct { + data []byte + server s3.ObjectStorageService_UploadPartServer +} + +func newUploadPartStreamReader(data []byte, server s3.ObjectStorageService_UploadPartServer) *uploadPartStreamReader { + return &uploadPartStreamReader{data: data, server: server} +} + +func (r *uploadPartStreamReader) Read(p []byte) (int, error) { + var count int + total := len(p) + for { + if len(r.data) > 0 { + n := copy(p[count:], r.data) + r.data = r.data[n:] + count += n + if count == total { + return count, nil + } + } + req, err := r.server.Recv() + if err != nil { + if err != io.EOF { + log.DefaultLogger.Errorf("recv data from grpc stream fail, err:%+v", err) + } + return count, err + } + r.data = req.Body + } +} + +func (s *S3Server) UploadPart(stream 
s3.ObjectStorageService_UploadPartServer) error { + req, err := stream.Recv() + if err != nil { + //if client send eof error directly, return nil + if err == io.EOF { + return nil + } + return status.Errorf(codes.Internal, "receive file data fail: err: %+v", err) + } + + if s.ossInstance[req.StoreName] == nil { + return status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + fileReader := newUploadPartStreamReader(req.Body, stream) + + st := &l8s3.UploadPartInput{} + err = transferData(req, st) + if err != nil { + return status.Errorf(codes.InvalidArgument, "transfer request data fail for UploadPart,err: %+v", err) + } + st.DataStream = fileReader + var resp *l8s3.UploadPartOutput + if resp, err = s.ossInstance[req.StoreName].UploadPart(stream.Context(), st); err != nil { + return status.Errorf(codes.Internal, err.Error()) + } + output := &s3.UploadPartOutput{} + err = transferData(resp, output) + if err != nil { + return status.Errorf(codes.Internal, "transfer response data fail for UploadPart,err: %+v", err) + } + return stream.SendAndClose(output) + +} +func (s *S3Server) UploadPartCopy(ctx context.Context, req *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.UploadPartCopyInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for UploadPartCopy,err: %+v", err) + } + var resp *l8s3.UploadPartCopyOutput + if resp, err = s.ossInstance[req.StoreName].UploadPartCopy(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.UploadPartCopyOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for UploadPartCopy,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) 
CompleteMultipartUpload(ctx context.Context, req *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.CompleteMultipartUploadInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for CompleteMultipartUpload,err: %+v", err) + } + var resp *l8s3.CompleteMultipartUploadOutput + if resp, err = s.ossInstance[req.StoreName].CompleteMultipartUpload(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.CompleteMultipartUploadOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for CompleteMultipartUpload,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) AbortMultipartUpload(ctx context.Context, req *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.AbortMultipartUploadInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for AbortMultipartUpload,err: %+v", err) + } + var resp *l8s3.AbortMultipartUploadOutput + if resp, err = s.ossInstance[req.StoreName].AbortMultipartUpload(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.AbortMultipartUploadOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for AbortMultipartUpload,err: %+v", err) + } + return output, nil + +} +func (s *S3Server) ListMultipartUploads(ctx context.Context, req *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) { + 
if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.ListMultipartUploadsInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for ListMultipartUploads,err: %+v", err) + } + var resp *l8s3.ListMultipartUploadsOutput + if resp, err = s.ossInstance[req.StoreName].ListMultipartUploads(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.ListMultipartUploadsOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for ListMultipartUploads,err: %+v", err) + } + return output, nil +} +func (s *S3Server) ListObjectVersions(ctx context.Context, req *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.ListObjectVersionsInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for ListObjectVersions,err: %+v", err) + } + var resp *l8s3.ListObjectVersionsOutput + if resp, err = s.ossInstance[req.StoreName].ListObjectVersions(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.ListObjectVersionsOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for ListObjectVersions,err: %+v", err) + } + return output, nil + +} + +func (s *S3Server) HeadObject(ctx context.Context, req *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.HeadObjectInput{} + err := transferData(req, st) + 
if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for HeadObject,err: %+v", err) + } + var resp *l8s3.HeadObjectOutput + if resp, err = s.ossInstance[req.StoreName].HeadObject(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.HeadObjectOutput{} + err = transferData(resp, output) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for HeadObject,err: %+v", err) + } + return output, nil + +} + +func (s *S3Server) IsObjectExist(ctx context.Context, req *s3.IsObjectExistInput) (*s3.IsObjectExistOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.IsObjectExistInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for IsObjectExist,err: %+v", err) + } + var resp *l8s3.IsObjectExistOutput + if resp, err = s.ossInstance[req.StoreName].IsObjectExist(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.IsObjectExistOutput{} + output.FileExist = resp.FileExist + return output, nil +} + +func (s *S3Server) SignURL(ctx context.Context, req *s3.SignURLInput) (*s3.SignURLOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.SignURLInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for SignURL,err: %+v", err) + } + var resp *l8s3.SignURLOutput + if resp, err = s.ossInstance[req.StoreName].SignURL(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + output := &s3.SignURLOutput{} + output.SignedUrl = resp.SignedUrl + return output, nil +} + +func (s *S3Server) 
UpdateDownloadBandwidthRateLimit(ctx context.Context, req *s3.UpdateBandwidthRateLimitInput) (*emptypb.Empty, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.UpdateBandwidthRateLimitInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for UpdateDownloadBandwidthRateLimit,err: %+v", err) + } + if err := s.ossInstance[req.StoreName].UpdateDownloadBandwidthRateLimit(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + return &emptypb.Empty{}, nil +} + +func (s *S3Server) UpdateUploadBandwidthRateLimit(ctx context.Context, req *s3.UpdateBandwidthRateLimitInput) (*emptypb.Empty, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.UpdateBandwidthRateLimitInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for UpdateUploadBandwidthRateLimit,err: %+v", err) + } + if err := s.ossInstance[req.StoreName].UpdateUploadBandwidthRateLimit(ctx, st); err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + return &emptypb.Empty{}, nil +} + +type appendObjectStreamReader struct { + data []byte + server s3.ObjectStorageService_AppendObjectServer +} + +func newAppendObjectStreamReader(data []byte, server s3.ObjectStorageService_AppendObjectServer) *appendObjectStreamReader { + return &appendObjectStreamReader{data: data, server: server} +} + +func (r *appendObjectStreamReader) Read(p []byte) (int, error) { + var count int + total := len(p) + for { + if len(r.data) > 0 { + n := copy(p[count:], r.data) + r.data = r.data[n:] + count += n + if count == total { + return count, nil + } + } + req, err := r.server.Recv() + if err != nil { + if err != io.EOF { + 
log.DefaultLogger.Errorf("recv data from grpc stream fail, err:%+v", err) + } + return count, err + } + r.data = req.Body + } +} + +func (s *S3Server) AppendObject(stream s3.ObjectStorageService_AppendObjectServer) error { + req, err := stream.Recv() + if err != nil { + //if client send eof error directly, return nil + if err == io.EOF { + return nil + } + return status.Errorf(codes.Internal, "receive file data fail: err: %+v", err) + } + + if s.ossInstance[req.StoreName] == nil { + return status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + fileReader := newAppendObjectStreamReader(req.Body, stream) + + st := &l8s3.AppendObjectInput{} + err = transferData(req, st) + if err != nil { + return status.Errorf(codes.InvalidArgument, "transfer request data fail for AppendObject,err: %+v", err) + } + st.DataStream = fileReader + var resp *l8s3.AppendObjectOutput + if resp, err = s.ossInstance[req.StoreName].AppendObject(stream.Context(), st); err != nil { + return status.Errorf(codes.Internal, err.Error()) + } + output := &s3.AppendObjectOutput{} + output.AppendPosition = resp.AppendPosition + return stream.SendAndClose(output) + +} + +func (s *S3Server) ListParts(ctx context.Context, req *s3.ListPartsInput) (*s3.ListPartsOutput, error) { + if s.ossInstance[req.StoreName] == nil { + return nil, status.Errorf(codes.InvalidArgument, NotSupportStoreName, req.StoreName) + } + st := &l8s3.ListPartsInput{} + err := transferData(req, st) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "transfer request data fail for ListParts,err: %+v", err) + } + resp, err := s.ossInstance[req.StoreName].ListParts(ctx, st) + if err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + out := &s3.ListPartsOutput{} + err = transferData(resp, out) + if err != nil { + return nil, status.Errorf(codes.Internal, "transfer response data fail for ListParts,err: %+v", err) + } + return out, nil +} diff --git 
a/pkg/grpc/extension/s3/s3_test.go b/pkg/grpc/extension/s3/s3_test.go new file mode 100644 index 0000000000..ae8f880113 --- /dev/null +++ b/pkg/grpc/extension/s3/s3_test.go @@ -0,0 +1,854 @@ +/* + * Copyright 2021 Layotto Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package s3 + +import ( + "context" + "io" + "testing" + + "mosn.io/layotto/spec/proto/extension/v1/s3" + + l8s3 "mosn.io/layotto/components/oss" + + mockoss "mosn.io/layotto/pkg/mock/components/oss" + + "mosn.io/pkg/buffer" + + mocks3 "mosn.io/layotto/pkg/mock/runtime/oss" + + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/golang/mock/gomock" + + "mosn.io/layotto/pkg/grpc" +) + +const ( + MOCKSERVER = "mockossServer" + ByteSize = 5 +) + +type MockDataStream struct { + buffer.IoBuffer +} + +func (m *MockDataStream) Close() error { + m.CloseWithError(nil) + return nil +} + +// TestGetObject +func TestGetObject(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + // Test GetObject function + ctx := context.TODO() + mockServer := mocks3.NewMockObjectStorageService_GetObjectServer(ctrl) + getObjectReq := &s3.GetObjectInput{StoreName: "NoStore", Bucket: 
"layotto", Key: "object"} + err := s3Server.GetObject(getObjectReq, mockServer) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + iobuf := buffer.NewIoBufferBytes([]byte("hello")) + dataStream := &MockDataStream{iobuf} + output := &l8s3.GetObjectOutput{Etag: "tag"} + output.DataStream = dataStream + mockServer.EXPECT().Context().Return(ctx) + mockossServer.EXPECT().GetObject(ctx, &l8s3.GetObjectInput{Bucket: "layotto", Key: "object"}).Return(output, nil) + getObjectReq.StoreName = MOCKSERVER + mockServer.EXPECT().Send(&s3.GetObjectOutput{Body: []byte("hello"), Etag: "tag"}).Times(1) + err = s3Server.GetObject(getObjectReq, mockServer) + assert.Nil(t, err) +} + +// TestPutObject +func TestPutObject(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + // Test GetObject function + ctx := context.TODO() + mockStream := mocks3.NewMockObjectStorageService_PutObjectServer(ctrl) + putObjectReq := &s3.PutObjectInput{StoreName: "NoStore", Bucket: "layotto", Key: "object", Body: []byte("put")} + mockStream.EXPECT().Recv().Return(putObjectReq, nil) + err := s3Server.PutObject(mockStream) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + + putObjectReq.StoreName = MOCKSERVER + output := &l8s3.PutObjectOutput{ETag: "tag"} + mockStream.EXPECT().Context().Return(ctx) + mockStream.EXPECT().Recv().Return(putObjectReq, nil) + mockStream.EXPECT().SendAndClose(&s3.PutObjectOutput{Etag: "tag"}).Times(1) + mockossServer.EXPECT().PutObject(ctx, &l8s3.PutObjectInput{DataStream: newPutObjectStreamReader(putObjectReq.Body, mockStream), Bucket: "layotto", Key: "object"}).Return(output, nil) + err = s3Server.PutObject(mockStream) + assert.Nil(t, 
err) + + mockStream.EXPECT().Recv().Return(nil, io.EOF) + stream := newPutObjectStreamReader(putObjectReq.Body, mockStream) + data := make([]byte, ByteSize) + n, err := stream.Read(data) + assert.Equal(t, 3, n) + assert.Equal(t, io.EOF, err) +} + +// TestUploadPart +func TestUploadPart(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + // Test GetObject function + ctx := context.TODO() + mockStream := mocks3.NewMockObjectStorageService_UploadPartServer(ctrl) + UploadPartReq := &s3.UploadPartInput{StoreName: "NoStore", Bucket: "layotto", Key: "object", Body: []byte("put")} + mockStream.EXPECT().Recv().Return(UploadPartReq, nil) + err := s3Server.UploadPart(mockStream) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + + UploadPartReq.StoreName = MOCKSERVER + output := &l8s3.UploadPartOutput{ETag: "tag"} + mockStream.EXPECT().Context().Return(ctx) + mockStream.EXPECT().Recv().Return(UploadPartReq, nil) + mockStream.EXPECT().SendAndClose(&s3.UploadPartOutput{Etag: "tag"}).Times(1) + mockossServer.EXPECT().UploadPart(ctx, &l8s3.UploadPartInput{DataStream: newUploadPartStreamReader(UploadPartReq.Body, mockStream), Bucket: "layotto", Key: "object"}).Return(output, nil) + err = s3Server.UploadPart(mockStream) + assert.Nil(t, err) + + mockStream.EXPECT().Recv().Return(nil, io.EOF) + stream := newUploadPartStreamReader(UploadPartReq.Body, mockStream) + data := make([]byte, ByteSize) + n, err := stream.Read(data) + assert.Equal(t, 3, n) + assert.Equal(t, io.EOF, err) +} + +// TestAppendObject +func TestAppendObject(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + 
mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + // Test GetObject function + ctx := context.TODO() + mockStream := mocks3.NewMockObjectStorageService_AppendObjectServer(ctrl) + req := &s3.AppendObjectInput{StoreName: "NoStore", Bucket: "layotto", Key: "object", Body: []byte("put")} + mockStream.EXPECT().Recv().Return(req, nil) + err := s3Server.AppendObject(mockStream) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + + req.StoreName = MOCKSERVER + output := &l8s3.AppendObjectOutput{AppendPosition: 123} + mockStream.EXPECT().Context().Return(ctx) + mockStream.EXPECT().Recv().Return(req, nil) + mockStream.EXPECT().SendAndClose(&s3.AppendObjectOutput{AppendPosition: 123}).Times(1) + mockossServer.EXPECT().AppendObject(ctx, &l8s3.AppendObjectInput{DataStream: newAppendObjectStreamReader(req.Body, mockStream), Bucket: "layotto", Key: "object"}).Return(output, nil) + err = s3Server.AppendObject(mockStream) + assert.Nil(t, err) + + mockStream.EXPECT().Recv().Return(nil, io.EOF) + stream := newAppendObjectStreamReader(req.Body, mockStream) + data := make([]byte, ByteSize) + n, err := stream.Read(data) + assert.Equal(t, 3, n) + assert.Equal(t, io.EOF, err) +} + +// TestDeleteObject +func TestDeleteObject(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + deleteObjectReq := &s3.DeleteObjectInput{StoreName: "NoStore", Bucket: "layotto", Key: "object"} + _, err := s3Server.DeleteObject(ctx, deleteObjectReq) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := 
&l8s3.DeleteObjectOutput{DeleteMarker: false, VersionId: "123"} + mockossServer.EXPECT().DeleteObject(ctx, &l8s3.DeleteObjectInput{Bucket: "layotto", Key: "object"}).Return(output, nil) + deleteObjectReq.StoreName = MOCKSERVER + resp, err := s3Server.DeleteObject(ctx, deleteObjectReq) + assert.Nil(t, err) + assert.Equal(t, false, resp.DeleteMarker) + assert.Equal(t, "123", resp.VersionId) +} + +//TestPutObjectTagging +func TestPutObjectTagging(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.PutObjectTaggingInput{StoreName: "NoStore", Bucket: "layotto", Key: "object", Tags: map[string]string{"key": "value"}, VersionId: "123"} + _, err := s3Server.PutObjectTagging(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.PutObjectTaggingOutput{} + mockossServer.EXPECT().PutObjectTagging(ctx, &l8s3.PutObjectTaggingInput{Bucket: "layotto", Key: "object", VersionId: "123", Tags: map[string]string{"key": "value"}}).Return(output, nil) + req.StoreName = MOCKSERVER + _, err = s3Server.PutObjectTagging(ctx, req) + assert.Nil(t, err) +} + +//TestDeleteObjectTagging +func TestDeleteObjectTagging(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.DeleteObjectTaggingInput{StoreName: "NoStore", Bucket: "layotto", Key: "object", VersionId: "123"} + _, err := s3Server.DeleteObjectTagging(ctx, req) + assert.Equal(t, 
status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.DeleteObjectTaggingOutput{VersionId: "123"} + mockossServer.EXPECT().DeleteObjectTagging(ctx, &l8s3.DeleteObjectTaggingInput{Bucket: "layotto", Key: "object", VersionId: "123"}).Return(output, nil) + req.StoreName = MOCKSERVER + _, err = s3Server.DeleteObjectTagging(ctx, req) + assert.Nil(t, err) +} + +//TestGetObjectTagging +func TestGetObjectTagging(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.GetObjectTaggingInput{StoreName: "NoStore", Bucket: "layotto", Key: "object", VersionId: "123"} + _, err := s3Server.GetObjectTagging(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.GetObjectTaggingOutput{Tags: map[string]string{"key": "value"}, VersionId: "123"} + mockossServer.EXPECT().GetObjectTagging(ctx, &l8s3.GetObjectTaggingInput{Bucket: "layotto", Key: "object", VersionId: "123"}).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.GetObjectTagging(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "value", resp.Tags["key"]) + assert.Equal(t, "123", resp.VersionId) +} + +//TestCopyObject +func TestCopyObject(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.CopyObjectInput{StoreName: "NoStore", Bucket: "layotto", Key: "object"} + _, err := s3Server.CopyObject(ctx, req) + assert.Equal(t, 
status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.CopyObjectOutput{CopyObjectResult: &l8s3.CopyObjectResult{ETag: "etag"}} + mockossServer.EXPECT().CopyObject(ctx, &l8s3.CopyObjectInput{Bucket: "layotto", Key: "object"}).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.CopyObject(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "etag", resp.CopyObjectResult.Etag) +} + +//TestDeleteObjects +func TestDeleteObjects(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.DeleteObjectsInput{StoreName: "NoStore", Bucket: "layotto", Delete: &s3.Delete{Quiet: true, Objects: []*s3.ObjectIdentifier{{Key: "object", VersionId: "version"}}}} + _, err := s3Server.DeleteObjects(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.DeleteObjectsOutput{Deleted: []*l8s3.DeletedObject{{DeleteMarker: true, VersionId: "version"}}} + mockossServer.EXPECT().DeleteObjects(ctx, &l8s3.DeleteObjectsInput{Bucket: "layotto", Delete: &l8s3.Delete{Quiet: true, Objects: []*l8s3.ObjectIdentifier{{Key: "object", VersionId: "version"}}}}).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.DeleteObjects(ctx, req) + assert.Nil(t, err) + assert.Equal(t, true, resp.Deleted[0].DeleteMarker) + assert.Equal(t, "version", resp.Deleted[0].VersionId) +} + +//TestListObjects +func TestListObjects(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := 
&S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.ListObjectsInput{ + StoreName: "NoStore", + Bucket: "layotto", + Delimiter: "delimiter", + EncodingType: "EncodingType", + ExpectedBucketOwner: "ExpectedBucketOwner", + Marker: "Marker", + MaxKeys: 1, + Prefix: "Prefix", + RequestPayer: "RequestPayer", + } + _, err := s3Server.ListObjects(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.ListObjectsOutput{Delimiter: "delimiter", IsTruncated: true} + mockossServer.EXPECT().ListObjects(ctx, + &l8s3.ListObjectsInput{ + Bucket: "layotto", + Delimiter: "delimiter", + EncodingType: "EncodingType", + ExpectedBucketOwner: "ExpectedBucketOwner", + Marker: "Marker", + MaxKeys: 1, + Prefix: "Prefix", + RequestPayer: "RequestPayer", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.ListObjects(ctx, req) + assert.Nil(t, err) + assert.Equal(t, true, resp.IsTruncated) + assert.Equal(t, "delimiter", resp.Delimiter) +} + +//TestGetObjectCannedAcl +func TestGetObjectCannedAcl(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.GetObjectCannedAclInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "key", + VersionId: "versionId", + } + _, err := s3Server.GetObjectCannedAcl(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.GetObjectCannedAclOutput{CannedAcl: "public-read-write", RequestCharged: "yes"} + mockossServer.EXPECT().GetObjectCannedAcl(ctx, + &l8s3.GetObjectCannedAclInput{ + Bucket: "layotto", + Key: "key", + VersionId: "versionId", + }, + ).Return(output, nil) + 
req.StoreName = MOCKSERVER + resp, err := s3Server.GetObjectCannedAcl(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "public-read-write", resp.CannedAcl) + assert.Equal(t, "yes", resp.RequestCharged) +} + +//TestPutObjectCannedAcl +func TestPutObjectCannedAcl(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.PutObjectCannedAclInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "key", + VersionId: "versionId", + } + _, err := s3Server.PutObjectCannedAcl(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.PutObjectCannedAclOutput{RequestCharged: "yes"} + mockossServer.EXPECT().PutObjectCannedAcl(ctx, + &l8s3.PutObjectCannedAclInput{ + Bucket: "layotto", + Key: "key", + VersionId: "versionId", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.PutObjectCannedAcl(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "yes", resp.RequestCharged) +} + +//TestRestoreObject +func TestRestoreObject(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.RestoreObjectInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "key", + VersionId: "versionId", + } + _, err := s3Server.RestoreObject(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.RestoreObjectOutput{RestoreOutputPath: "yes", RequestCharged: "yes"} + 
mockossServer.EXPECT().RestoreObject(ctx, + &l8s3.RestoreObjectInput{ + Bucket: "layotto", + Key: "key", + VersionId: "versionId", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.RestoreObject(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "yes", resp.RequestCharged) + assert.Equal(t, "yes", resp.RestoreOutputPath) +} + +//TestCreateMultipartUpload +func TestCreateMultipartUpload(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.CreateMultipartUploadInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "key", + } + _, err := s3Server.CreateMultipartUpload(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.CreateMultipartUploadOutput{Bucket: "layotto", Key: "object", UploadId: "123"} + mockossServer.EXPECT().CreateMultipartUpload(ctx, + &l8s3.CreateMultipartUploadInput{ + Bucket: "layotto", + Key: "key", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.CreateMultipartUpload(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "123", resp.UploadId) + assert.Equal(t, "layotto", resp.Bucket) + assert.Equal(t, "object", resp.Key) +} + +//TestUploadPartCopy +func TestUploadPartCopy(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.UploadPartCopyInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "key", + } + _, err := 
s3Server.UploadPartCopy(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.UploadPartCopyOutput{BucketKeyEnabled: true, CopyPartResult: &l8s3.CopyPartResult{ETag: "123", LastModified: 456}} + mockossServer.EXPECT().UploadPartCopy(ctx, + &l8s3.UploadPartCopyInput{ + Bucket: "layotto", + Key: "key", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.UploadPartCopy(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "123", resp.CopyPartResult.Etag) + assert.Equal(t, int64(456), resp.CopyPartResult.LastModified) + assert.Equal(t, true, resp.BucketKeyEnabled) +} + +//TestCompleteMultipartUpload +func TestCompleteMultipartUpload(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.CompleteMultipartUploadInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "key", + UploadId: "123", + } + _, err := s3Server.CompleteMultipartUpload(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.CompleteMultipartUploadOutput{ + BucketKeyEnabled: true, + Expiration: "expiration", + ETag: "etag", + } + mockossServer.EXPECT().CompleteMultipartUpload(ctx, + &l8s3.CompleteMultipartUploadInput{ + Bucket: "layotto", + Key: "key", + UploadId: "123", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.CompleteMultipartUpload(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "expiration", resp.Expiration) + assert.Equal(t, "etag", resp.Etag) + assert.Equal(t, true, resp.BucketKeyEnabled) +} + +//TestAbortMultipartUpload +func TestAbortMultipartUpload(t *testing.T) { + // prepare oss server + ac := 
&grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.AbortMultipartUploadInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "key", + UploadId: "123", + } + _, err := s3Server.AbortMultipartUpload(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.AbortMultipartUploadOutput{ + RequestCharged: "true", + } + mockossServer.EXPECT().AbortMultipartUpload(ctx, + &l8s3.AbortMultipartUploadInput{ + Bucket: "layotto", + Key: "key", + UploadId: "123", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.AbortMultipartUpload(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "true", resp.RequestCharged) +} + +//TestListMultipartUploads +func TestListMultipartUploads(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.ListMultipartUploadsInput{ + StoreName: "NoStore", + Bucket: "layotto", + } + _, err := s3Server.ListMultipartUploads(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.ListMultipartUploadsOutput{ + Bucket: "layotto", + } + mockossServer.EXPECT().ListMultipartUploads(ctx, + &l8s3.ListMultipartUploadsInput{ + Bucket: "layotto", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.ListMultipartUploads(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "layotto", resp.Bucket) +} + +//TestListObjectVersions +func 
TestListObjectVersions(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.ListObjectVersionsInput{ + StoreName: "NoStore", + Bucket: "layotto", + KeyMarker: "marker", + } + _, err := s3Server.ListObjectVersions(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.ListObjectVersionsOutput{ + Delimiter: "layotto", + } + mockossServer.EXPECT().ListObjectVersions(ctx, + &l8s3.ListObjectVersionsInput{ + Bucket: "layotto", + KeyMarker: "marker", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.ListObjectVersions(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "layotto", resp.Delimiter) +} + +//TestHeadObject +func TestHeadObject(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.HeadObjectInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "object", + } + _, err := s3Server.HeadObject(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.HeadObjectOutput{ + ResultMetadata: map[string]string{"key": "value"}, + } + mockossServer.EXPECT().HeadObject(ctx, + &l8s3.HeadObjectInput{ + Bucket: "layotto", + Key: "object", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.HeadObject(ctx, req) + assert.Nil(t, err) + assert.Equal(t, map[string]string{"key": "value"}, resp.ResultMetadata) +} + 
+//TestIsObjectExist +func TestIsObjectExist(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.IsObjectExistInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "object", + } + _, err := s3Server.IsObjectExist(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.IsObjectExistOutput{ + FileExist: true, + } + mockossServer.EXPECT().IsObjectExist(ctx, + &l8s3.IsObjectExistInput{ + Bucket: "layotto", + Key: "object", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.IsObjectExist(ctx, req) + assert.Nil(t, err) + assert.Equal(t, true, resp.FileExist) +} + +//TestSignURL +func TestSignURL(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.SignURLInput{ + StoreName: "NoStore", + Bucket: "layotto", + Key: "object", + } + _, err := s3Server.SignURL(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.SignURLOutput{ + SignedUrl: "http://object", + } + mockossServer.EXPECT().SignURL(ctx, + &l8s3.SignURLInput{ + Bucket: "layotto", + Key: "object", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.SignURL(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "http://object", resp.SignedUrl) +} + +//TestUpdateDownLoadBandwidthRateLimit +func TestUpdateDownLoadBandwidthRateLimit(t *testing.T) { 
+ // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.UpdateBandwidthRateLimitInput{ + StoreName: "NoStore", + AverageRateLimitInBitsPerSec: 1, + } + _, err := s3Server.UpdateDownloadBandwidthRateLimit(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + mockossServer.EXPECT().UpdateDownloadBandwidthRateLimit(ctx, + &l8s3.UpdateBandwidthRateLimitInput{ + AverageRateLimitInBitsPerSec: 1, + }, + ).Return(nil) + req.StoreName = MOCKSERVER + _, err = s3Server.UpdateDownloadBandwidthRateLimit(ctx, req) + assert.Nil(t, err) +} + +//TestUpdateUpLoadBandwidthRateLimit +func TestUpdateUpLoadBandwidthRateLimit(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.UpdateBandwidthRateLimitInput{ + StoreName: "NoStore", + AverageRateLimitInBitsPerSec: 1, + } + _, err := s3Server.UpdateUploadBandwidthRateLimit(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + mockossServer.EXPECT().UpdateUploadBandwidthRateLimit(ctx, + &l8s3.UpdateBandwidthRateLimitInput{ + AverageRateLimitInBitsPerSec: 1, + }, + ).Return(nil) + req.StoreName = MOCKSERVER + _, err = s3Server.UpdateUploadBandwidthRateLimit(ctx, req) + assert.Nil(t, err) +} + +//TestListParts +func TestListParts(t *testing.T) { + // prepare oss server + ac := &grpc.ApplicationContext{AppId: "test", Oss: map[string]l8s3.Oss{}} + ctrl := gomock.NewController(t) + 
mockossServer := mockoss.NewMockOss(ctrl) + ac.Oss[MOCKSERVER] = mockossServer + NewS3Server(ac) + s3Server := &S3Server{appId: ac.AppId, ossInstance: ac.Oss} + + ctx := context.TODO() + req := &s3.ListPartsInput{ + StoreName: "NoStore", + Bucket: "layotto", + } + _, err := s3Server.ListParts(ctx, req) + assert.Equal(t, status.Errorf(codes.InvalidArgument, NotSupportStoreName, "NoStore"), err) + output := &l8s3.ListPartsOutput{ + Bucket: "layotto", + Key: "object", + } + mockossServer.EXPECT().ListParts(ctx, + &l8s3.ListPartsInput{ + Bucket: "layotto", + }, + ).Return(output, nil) + req.StoreName = MOCKSERVER + resp, err := s3Server.ListParts(ctx, req) + assert.Nil(t, err) + assert.Equal(t, "layotto", resp.Bucket) +} diff --git a/pkg/grpc/grpc_api.go b/pkg/grpc/grpc_api.go index 4fda354bec..d2c2c98059 100644 --- a/pkg/grpc/grpc_api.go +++ b/pkg/grpc/grpc_api.go @@ -23,6 +23,8 @@ import ( "github.com/dapr/components-contrib/state" "google.golang.org/grpc" + "mosn.io/layotto/components/oss" + "mosn.io/layotto/components/configstores" "mosn.io/layotto/components/custom" "mosn.io/layotto/components/file" @@ -55,6 +57,7 @@ type ApplicationContext struct { PubSubs map[string]pubsub.PubSub StateStores map[string]state.Store Files map[string]file.File + Oss map[string]oss.Oss LockStores map[string]lock.LockStore Sequencers map[string]sequencer.Store SendToOutputBindingFn func(name string, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) diff --git a/pkg/mock/components/oss/oss.go b/pkg/mock/components/oss/oss.go new file mode 100644 index 0000000000..8684b7ecd0 --- /dev/null +++ b/pkg/mock/components/oss/oss.go @@ -0,0 +1,439 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: oss.go + +// Package mock_oss is a generated GoMock package. +package mock_oss + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + oss "mosn.io/layotto/components/oss" +) + +// MockOss is a mock of Oss interface. 
+type MockOss struct { + ctrl *gomock.Controller + recorder *MockOssMockRecorder +} + +// MockOssMockRecorder is the mock recorder for MockOss. +type MockOssMockRecorder struct { + mock *MockOss +} + +// NewMockOss creates a new mock instance. +func NewMockOss(ctrl *gomock.Controller) *MockOss { + mock := &MockOss{ctrl: ctrl} + mock.recorder = &MockOssMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockOss) EXPECT() *MockOssMockRecorder { + return m.recorder +} + +// AbortMultipartUpload mocks base method. +func (m *MockOss) AbortMultipartUpload(arg0 context.Context, arg1 *oss.AbortMultipartUploadInput) (*oss.AbortMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AbortMultipartUpload", arg0, arg1) + ret0, _ := ret[0].(*oss.AbortMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AbortMultipartUpload indicates an expected call of AbortMultipartUpload. +func (mr *MockOssMockRecorder) AbortMultipartUpload(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUpload", reflect.TypeOf((*MockOss)(nil).AbortMultipartUpload), arg0, arg1) +} + +// AppendObject mocks base method. +func (m *MockOss) AppendObject(arg0 context.Context, arg1 *oss.AppendObjectInput) (*oss.AppendObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppendObject", arg0, arg1) + ret0, _ := ret[0].(*oss.AppendObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AppendObject indicates an expected call of AppendObject. +func (mr *MockOssMockRecorder) AppendObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendObject", reflect.TypeOf((*MockOss)(nil).AppendObject), arg0, arg1) +} + +// CompleteMultipartUpload mocks base method. 
+func (m *MockOss) CompleteMultipartUpload(arg0 context.Context, arg1 *oss.CompleteMultipartUploadInput) (*oss.CompleteMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteMultipartUpload", arg0, arg1) + ret0, _ := ret[0].(*oss.CompleteMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteMultipartUpload indicates an expected call of CompleteMultipartUpload. +func (mr *MockOssMockRecorder) CompleteMultipartUpload(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUpload", reflect.TypeOf((*MockOss)(nil).CompleteMultipartUpload), arg0, arg1) +} + +// CopyObject mocks base method. +func (m *MockOss) CopyObject(arg0 context.Context, arg1 *oss.CopyObjectInput) (*oss.CopyObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CopyObject", arg0, arg1) + ret0, _ := ret[0].(*oss.CopyObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CopyObject indicates an expected call of CopyObject. +func (mr *MockOssMockRecorder) CopyObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObject", reflect.TypeOf((*MockOss)(nil).CopyObject), arg0, arg1) +} + +// CreateMultipartUpload mocks base method. +func (m *MockOss) CreateMultipartUpload(arg0 context.Context, arg1 *oss.CreateMultipartUploadInput) (*oss.CreateMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateMultipartUpload", arg0, arg1) + ret0, _ := ret[0].(*oss.CreateMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateMultipartUpload indicates an expected call of CreateMultipartUpload. 
+func (mr *MockOssMockRecorder) CreateMultipartUpload(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUpload", reflect.TypeOf((*MockOss)(nil).CreateMultipartUpload), arg0, arg1) +} + +// DeleteObject mocks base method. +func (m *MockOss) DeleteObject(arg0 context.Context, arg1 *oss.DeleteObjectInput) (*oss.DeleteObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObject", arg0, arg1) + ret0, _ := ret[0].(*oss.DeleteObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObject indicates an expected call of DeleteObject. +func (mr *MockOssMockRecorder) DeleteObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockOss)(nil).DeleteObject), arg0, arg1) +} + +// DeleteObjectTagging mocks base method. +func (m *MockOss) DeleteObjectTagging(arg0 context.Context, arg1 *oss.DeleteObjectTaggingInput) (*oss.DeleteObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectTagging", arg0, arg1) + ret0, _ := ret[0].(*oss.DeleteObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectTagging indicates an expected call of DeleteObjectTagging. +func (mr *MockOssMockRecorder) DeleteObjectTagging(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTagging", reflect.TypeOf((*MockOss)(nil).DeleteObjectTagging), arg0, arg1) +} + +// DeleteObjects mocks base method. 
+func (m *MockOss) DeleteObjects(arg0 context.Context, arg1 *oss.DeleteObjectsInput) (*oss.DeleteObjectsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjects", arg0, arg1) + ret0, _ := ret[0].(*oss.DeleteObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjects indicates an expected call of DeleteObjects. +func (mr *MockOssMockRecorder) DeleteObjects(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjects", reflect.TypeOf((*MockOss)(nil).DeleteObjects), arg0, arg1) +} + +// GetObject mocks base method. +func (m *MockOss) GetObject(arg0 context.Context, arg1 *oss.GetObjectInput) (*oss.GetObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObject", arg0, arg1) + ret0, _ := ret[0].(*oss.GetObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObject indicates an expected call of GetObject. +func (mr *MockOssMockRecorder) GetObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockOss)(nil).GetObject), arg0, arg1) +} + +// GetObjectCannedAcl mocks base method. +func (m *MockOss) GetObjectCannedAcl(arg0 context.Context, arg1 *oss.GetObjectCannedAclInput) (*oss.GetObjectCannedAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectCannedAcl", arg0, arg1) + ret0, _ := ret[0].(*oss.GetObjectCannedAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectCannedAcl indicates an expected call of GetObjectCannedAcl. +func (mr *MockOssMockRecorder) GetObjectCannedAcl(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectCannedAcl", reflect.TypeOf((*MockOss)(nil).GetObjectCannedAcl), arg0, arg1) +} + +// GetObjectTagging mocks base method. 
+func (m *MockOss) GetObjectTagging(arg0 context.Context, arg1 *oss.GetObjectTaggingInput) (*oss.GetObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTagging", arg0, arg1) + ret0, _ := ret[0].(*oss.GetObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTagging indicates an expected call of GetObjectTagging. +func (mr *MockOssMockRecorder) GetObjectTagging(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTagging", reflect.TypeOf((*MockOss)(nil).GetObjectTagging), arg0, arg1) +} + +// HeadObject mocks base method. +func (m *MockOss) HeadObject(arg0 context.Context, arg1 *oss.HeadObjectInput) (*oss.HeadObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadObject", arg0, arg1) + ret0, _ := ret[0].(*oss.HeadObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadObject indicates an expected call of HeadObject. +func (mr *MockOssMockRecorder) HeadObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockOss)(nil).HeadObject), arg0, arg1) +} + +// Init mocks base method. +func (m *MockOss) Init(arg0 context.Context, arg1 *oss.Config) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Init", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Init indicates an expected call of Init. +func (mr *MockOssMockRecorder) Init(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockOss)(nil).Init), arg0, arg1) +} + +// IsObjectExist mocks base method. 
+func (m *MockOss) IsObjectExist(arg0 context.Context, arg1 *oss.IsObjectExistInput) (*oss.IsObjectExistOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsObjectExist", arg0, arg1) + ret0, _ := ret[0].(*oss.IsObjectExistOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsObjectExist indicates an expected call of IsObjectExist. +func (mr *MockOssMockRecorder) IsObjectExist(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsObjectExist", reflect.TypeOf((*MockOss)(nil).IsObjectExist), arg0, arg1) +} + +// ListMultipartUploads mocks base method. +func (m *MockOss) ListMultipartUploads(arg0 context.Context, arg1 *oss.ListMultipartUploadsInput) (*oss.ListMultipartUploadsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMultipartUploads", arg0, arg1) + ret0, _ := ret[0].(*oss.ListMultipartUploadsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMultipartUploads indicates an expected call of ListMultipartUploads. +func (mr *MockOssMockRecorder) ListMultipartUploads(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploads", reflect.TypeOf((*MockOss)(nil).ListMultipartUploads), arg0, arg1) +} + +// ListObjectVersions mocks base method. +func (m *MockOss) ListObjectVersions(arg0 context.Context, arg1 *oss.ListObjectVersionsInput) (*oss.ListObjectVersionsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectVersions", arg0, arg1) + ret0, _ := ret[0].(*oss.ListObjectVersionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectVersions indicates an expected call of ListObjectVersions. 
+func (mr *MockOssMockRecorder) ListObjectVersions(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersions", reflect.TypeOf((*MockOss)(nil).ListObjectVersions), arg0, arg1) +} + +// ListObjects mocks base method. +func (m *MockOss) ListObjects(arg0 context.Context, arg1 *oss.ListObjectsInput) (*oss.ListObjectsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjects", arg0, arg1) + ret0, _ := ret[0].(*oss.ListObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjects indicates an expected call of ListObjects. +func (mr *MockOssMockRecorder) ListObjects(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockOss)(nil).ListObjects), arg0, arg1) +} + +// ListParts mocks base method. +func (m *MockOss) ListParts(arg0 context.Context, arg1 *oss.ListPartsInput) (*oss.ListPartsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListParts", arg0, arg1) + ret0, _ := ret[0].(*oss.ListPartsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListParts indicates an expected call of ListParts. +func (mr *MockOssMockRecorder) ListParts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListParts", reflect.TypeOf((*MockOss)(nil).ListParts), arg0, arg1) +} + +// PutObject mocks base method. +func (m *MockOss) PutObject(arg0 context.Context, arg1 *oss.PutObjectInput) (*oss.PutObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObject", arg0, arg1) + ret0, _ := ret[0].(*oss.PutObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObject indicates an expected call of PutObject. 
+func (mr *MockOssMockRecorder) PutObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObject", reflect.TypeOf((*MockOss)(nil).PutObject), arg0, arg1) +} + +// PutObjectCannedAcl mocks base method. +func (m *MockOss) PutObjectCannedAcl(arg0 context.Context, arg1 *oss.PutObjectCannedAclInput) (*oss.PutObjectCannedAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectCannedAcl", arg0, arg1) + ret0, _ := ret[0].(*oss.PutObjectCannedAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectCannedAcl indicates an expected call of PutObjectCannedAcl. +func (mr *MockOssMockRecorder) PutObjectCannedAcl(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectCannedAcl", reflect.TypeOf((*MockOss)(nil).PutObjectCannedAcl), arg0, arg1) +} + +// PutObjectTagging mocks base method. +func (m *MockOss) PutObjectTagging(arg0 context.Context, arg1 *oss.PutObjectTaggingInput) (*oss.PutObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectTagging", arg0, arg1) + ret0, _ := ret[0].(*oss.PutObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectTagging indicates an expected call of PutObjectTagging. +func (mr *MockOssMockRecorder) PutObjectTagging(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTagging", reflect.TypeOf((*MockOss)(nil).PutObjectTagging), arg0, arg1) +} + +// RestoreObject mocks base method. 
+func (m *MockOss) RestoreObject(arg0 context.Context, arg1 *oss.RestoreObjectInput) (*oss.RestoreObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreObject", arg0, arg1) + ret0, _ := ret[0].(*oss.RestoreObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreObject indicates an expected call of RestoreObject. +func (mr *MockOssMockRecorder) RestoreObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObject", reflect.TypeOf((*MockOss)(nil).RestoreObject), arg0, arg1) +} + +// SignURL mocks base method. +func (m *MockOss) SignURL(arg0 context.Context, arg1 *oss.SignURLInput) (*oss.SignURLOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SignURL", arg0, arg1) + ret0, _ := ret[0].(*oss.SignURLOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SignURL indicates an expected call of SignURL. +func (mr *MockOssMockRecorder) SignURL(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignURL", reflect.TypeOf((*MockOss)(nil).SignURL), arg0, arg1) +} + +// UpdateDownloadBandwidthRateLimit mocks base method. +func (m *MockOss) UpdateDownloadBandwidthRateLimit(arg0 context.Context, arg1 *oss.UpdateBandwidthRateLimitInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateDownloadBandwidthRateLimit", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateDownloadBandwidthRateLimit indicates an expected call of UpdateDownloadBandwidthRateLimit. +func (mr *MockOssMockRecorder) UpdateDownloadBandwidthRateLimit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDownloadBandwidthRateLimit", reflect.TypeOf((*MockOss)(nil).UpdateDownloadBandwidthRateLimit), arg0, arg1) +} + +// UpdateUploadBandwidthRateLimit mocks base method. 
+func (m *MockOss) UpdateUploadBandwidthRateLimit(arg0 context.Context, arg1 *oss.UpdateBandwidthRateLimitInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateUploadBandwidthRateLimit", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateUploadBandwidthRateLimit indicates an expected call of UpdateUploadBandwidthRateLimit. +func (mr *MockOssMockRecorder) UpdateUploadBandwidthRateLimit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUploadBandwidthRateLimit", reflect.TypeOf((*MockOss)(nil).UpdateUploadBandwidthRateLimit), arg0, arg1) +} + +// UploadPart mocks base method. +func (m *MockOss) UploadPart(arg0 context.Context, arg1 *oss.UploadPartInput) (*oss.UploadPartOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPart", arg0, arg1) + ret0, _ := ret[0].(*oss.UploadPartOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPart indicates an expected call of UploadPart. +func (mr *MockOssMockRecorder) UploadPart(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPart", reflect.TypeOf((*MockOss)(nil).UploadPart), arg0, arg1) +} + +// UploadPartCopy mocks base method. +func (m *MockOss) UploadPartCopy(arg0 context.Context, arg1 *oss.UploadPartCopyInput) (*oss.UploadPartCopyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPartCopy", arg0, arg1) + ret0, _ := ret[0].(*oss.UploadPartCopyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartCopy indicates an expected call of UploadPartCopy. 
+func (mr *MockOssMockRecorder) UploadPartCopy(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopy", reflect.TypeOf((*MockOss)(nil).UploadPartCopy), arg0, arg1) +} diff --git a/pkg/mock/runtime/oss/oss.go b/pkg/mock/runtime/oss/oss.go new file mode 100644 index 0000000000..59a5b475e0 --- /dev/null +++ b/pkg/mock/runtime/oss/oss.go @@ -0,0 +1,2059 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: oss_grpc.pb.go + +// Package mock_s3 is a generated GoMock package. +package mock_s3 + +import ( + context "context" + reflect "reflect" + + "mosn.io/layotto/spec/proto/extension/v1/s3" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// MockObjectStorageServiceClient is a mock of ObjectStorageServiceClient interface. +type MockObjectStorageServiceClient struct { + ctrl *gomock.Controller + recorder *MockObjectStorageServiceClientMockRecorder +} + +// MockObjectStorageServiceClientMockRecorder is the mock recorder for MockObjectStorageServiceClient. +type MockObjectStorageServiceClientMockRecorder struct { + mock *MockObjectStorageServiceClient +} + +// NewMockObjectStorageServiceClient creates a new mock instance. +func NewMockObjectStorageServiceClient(ctrl *gomock.Controller) *MockObjectStorageServiceClient { + mock := &MockObjectStorageServiceClient{ctrl: ctrl} + mock.recorder = &MockObjectStorageServiceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageServiceClient) EXPECT() *MockObjectStorageServiceClientMockRecorder { + return m.recorder +} + +// AbortMultipartUpload mocks base method. 
+func (m *MockObjectStorageServiceClient) AbortMultipartUpload(ctx context.Context, in *s3.AbortMultipartUploadInput, opts ...grpc.CallOption) (*s3.AbortMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AbortMultipartUpload", varargs...) + ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AbortMultipartUpload indicates an expected call of AbortMultipartUpload. +func (mr *MockObjectStorageServiceClientMockRecorder) AbortMultipartUpload(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUpload", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).AbortMultipartUpload), varargs...) +} + +// AppendObject mocks base method. +func (m *MockObjectStorageServiceClient) AppendObject(ctx context.Context, opts ...grpc.CallOption) (s3.ObjectStorageService_AppendObjectClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AppendObject", varargs...) + ret0, _ := ret[0].(s3.ObjectStorageService_AppendObjectClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AppendObject indicates an expected call of AppendObject. +func (mr *MockObjectStorageServiceClientMockRecorder) AppendObject(ctx interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendObject", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).AppendObject), varargs...) +} + +// CompleteMultipartUpload mocks base method. 
+func (m *MockObjectStorageServiceClient) CompleteMultipartUpload(ctx context.Context, in *s3.CompleteMultipartUploadInput, opts ...grpc.CallOption) (*s3.CompleteMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CompleteMultipartUpload", varargs...) + ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteMultipartUpload indicates an expected call of CompleteMultipartUpload. +func (mr *MockObjectStorageServiceClientMockRecorder) CompleteMultipartUpload(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUpload", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).CompleteMultipartUpload), varargs...) +} + +// CopyObject mocks base method. +func (m *MockObjectStorageServiceClient) CopyObject(ctx context.Context, in *s3.CopyObjectInput, opts ...grpc.CallOption) (*s3.CopyObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CopyObject", varargs...) + ret0, _ := ret[0].(*s3.CopyObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CopyObject indicates an expected call of CopyObject. +func (mr *MockObjectStorageServiceClientMockRecorder) CopyObject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObject", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).CopyObject), varargs...) +} + +// CreateMultipartUpload mocks base method. 
+func (m *MockObjectStorageServiceClient) CreateMultipartUpload(ctx context.Context, in *s3.CreateMultipartUploadInput, opts ...grpc.CallOption) (*s3.CreateMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateMultipartUpload", varargs...) + ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateMultipartUpload indicates an expected call of CreateMultipartUpload. +func (mr *MockObjectStorageServiceClientMockRecorder) CreateMultipartUpload(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUpload", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).CreateMultipartUpload), varargs...) +} + +// DeleteObject mocks base method. +func (m *MockObjectStorageServiceClient) DeleteObject(ctx context.Context, in *s3.DeleteObjectInput, opts ...grpc.CallOption) (*s3.DeleteObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObject", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObject indicates an expected call of DeleteObject. +func (mr *MockObjectStorageServiceClientMockRecorder) DeleteObject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).DeleteObject), varargs...) +} + +// DeleteObjectTagging mocks base method. 
+func (m *MockObjectStorageServiceClient) DeleteObjectTagging(ctx context.Context, in *s3.DeleteObjectTaggingInput, opts ...grpc.CallOption) (*s3.DeleteObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObjectTagging", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectTagging indicates an expected call of DeleteObjectTagging. +func (mr *MockObjectStorageServiceClientMockRecorder) DeleteObjectTagging(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTagging", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).DeleteObjectTagging), varargs...) +} + +// DeleteObjects mocks base method. +func (m *MockObjectStorageServiceClient) DeleteObjects(ctx context.Context, in *s3.DeleteObjectsInput, opts ...grpc.CallOption) (*s3.DeleteObjectsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObjects", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjects indicates an expected call of DeleteObjects. +func (mr *MockObjectStorageServiceClientMockRecorder) DeleteObjects(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjects", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).DeleteObjects), varargs...) +} + +// GetObject mocks base method. 
+func (m *MockObjectStorageServiceClient) GetObject(ctx context.Context, in *s3.GetObjectInput, opts ...grpc.CallOption) (s3.ObjectStorageService_GetObjectClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObject", varargs...) + ret0, _ := ret[0].(s3.ObjectStorageService_GetObjectClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObject indicates an expected call of GetObject. +func (mr *MockObjectStorageServiceClientMockRecorder) GetObject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).GetObject), varargs...) +} + +// GetObjectCannedAcl mocks base method. +func (m *MockObjectStorageServiceClient) GetObjectCannedAcl(ctx context.Context, in *s3.GetObjectCannedAclInput, opts ...grpc.CallOption) (*s3.GetObjectCannedAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectCannedAcl", varargs...) + ret0, _ := ret[0].(*s3.GetObjectCannedAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectCannedAcl indicates an expected call of GetObjectCannedAcl. +func (mr *MockObjectStorageServiceClientMockRecorder) GetObjectCannedAcl(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectCannedAcl", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).GetObjectCannedAcl), varargs...) +} + +// GetObjectTagging mocks base method. 
+func (m *MockObjectStorageServiceClient) GetObjectTagging(ctx context.Context, in *s3.GetObjectTaggingInput, opts ...grpc.CallOption) (*s3.GetObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectTagging", varargs...) + ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTagging indicates an expected call of GetObjectTagging. +func (mr *MockObjectStorageServiceClientMockRecorder) GetObjectTagging(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTagging", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).GetObjectTagging), varargs...) +} + +// HeadObject mocks base method. +func (m *MockObjectStorageServiceClient) HeadObject(ctx context.Context, in *s3.HeadObjectInput, opts ...grpc.CallOption) (*s3.HeadObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "HeadObject", varargs...) + ret0, _ := ret[0].(*s3.HeadObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadObject indicates an expected call of HeadObject. +func (mr *MockObjectStorageServiceClientMockRecorder) HeadObject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).HeadObject), varargs...) +} + +// IsObjectExist mocks base method. 
+func (m *MockObjectStorageServiceClient) IsObjectExist(ctx context.Context, in *s3.IsObjectExistInput, opts ...grpc.CallOption) (*s3.IsObjectExistOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "IsObjectExist", varargs...) + ret0, _ := ret[0].(*s3.IsObjectExistOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsObjectExist indicates an expected call of IsObjectExist. +func (mr *MockObjectStorageServiceClientMockRecorder) IsObjectExist(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsObjectExist", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).IsObjectExist), varargs...) +} + +// ListMultipartUploads mocks base method. +func (m *MockObjectStorageServiceClient) ListMultipartUploads(ctx context.Context, in *s3.ListMultipartUploadsInput, opts ...grpc.CallOption) (*s3.ListMultipartUploadsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListMultipartUploads", varargs...) + ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMultipartUploads indicates an expected call of ListMultipartUploads. +func (mr *MockObjectStorageServiceClientMockRecorder) ListMultipartUploads(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploads", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).ListMultipartUploads), varargs...) +} + +// ListObjectVersions mocks base method. 
+func (m *MockObjectStorageServiceClient) ListObjectVersions(ctx context.Context, in *s3.ListObjectVersionsInput, opts ...grpc.CallOption) (*s3.ListObjectVersionsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectVersions", varargs...) + ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectVersions indicates an expected call of ListObjectVersions. +func (mr *MockObjectStorageServiceClientMockRecorder) ListObjectVersions(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersions", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).ListObjectVersions), varargs...) +} + +// ListObjects mocks base method. +func (m *MockObjectStorageServiceClient) ListObjects(ctx context.Context, in *s3.ListObjectsInput, opts ...grpc.CallOption) (*s3.ListObjectsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjects", varargs...) + ret0, _ := ret[0].(*s3.ListObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjects indicates an expected call of ListObjects. +func (mr *MockObjectStorageServiceClientMockRecorder) ListObjects(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).ListObjects), varargs...) +} + +// ListParts mocks base method. 
+func (m *MockObjectStorageServiceClient) ListParts(ctx context.Context, in *s3.ListPartsInput, opts ...grpc.CallOption) (*s3.ListPartsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListParts", varargs...) + ret0, _ := ret[0].(*s3.ListPartsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListParts indicates an expected call of ListParts. +func (mr *MockObjectStorageServiceClientMockRecorder) ListParts(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListParts", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).ListParts), varargs...) +} + +// PutObject mocks base method. +func (m *MockObjectStorageServiceClient) PutObject(ctx context.Context, opts ...grpc.CallOption) (s3.ObjectStorageService_PutObjectClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObject", varargs...) + ret0, _ := ret[0].(s3.ObjectStorageService_PutObjectClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObject indicates an expected call of PutObject. +func (mr *MockObjectStorageServiceClientMockRecorder) PutObject(ctx interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObject", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).PutObject), varargs...) +} + +// PutObjectCannedAcl mocks base method. 
+func (m *MockObjectStorageServiceClient) PutObjectCannedAcl(ctx context.Context, in *s3.PutObjectCannedAclInput, opts ...grpc.CallOption) (*s3.PutObjectCannedAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectCannedAcl", varargs...) + ret0, _ := ret[0].(*s3.PutObjectCannedAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectCannedAcl indicates an expected call of PutObjectCannedAcl. +func (mr *MockObjectStorageServiceClientMockRecorder) PutObjectCannedAcl(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectCannedAcl", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).PutObjectCannedAcl), varargs...) +} + +// PutObjectTagging mocks base method. +func (m *MockObjectStorageServiceClient) PutObjectTagging(ctx context.Context, in *s3.PutObjectTaggingInput, opts ...grpc.CallOption) (*s3.PutObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectTagging", varargs...) + ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectTagging indicates an expected call of PutObjectTagging. +func (mr *MockObjectStorageServiceClientMockRecorder) PutObjectTagging(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTagging", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).PutObjectTagging), varargs...) +} + +// RestoreObject mocks base method. 
+func (m *MockObjectStorageServiceClient) RestoreObject(ctx context.Context, in *s3.RestoreObjectInput, opts ...grpc.CallOption) (*s3.RestoreObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RestoreObject", varargs...) + ret0, _ := ret[0].(*s3.RestoreObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreObject indicates an expected call of RestoreObject. +func (mr *MockObjectStorageServiceClientMockRecorder) RestoreObject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObject", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).RestoreObject), varargs...) +} + +// SignURL mocks base method. +func (m *MockObjectStorageServiceClient) SignURL(ctx context.Context, in *s3.SignURLInput, opts ...grpc.CallOption) (*s3.SignURLOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SignURL", varargs...) + ret0, _ := ret[0].(*s3.SignURLOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SignURL indicates an expected call of SignURL. +func (mr *MockObjectStorageServiceClientMockRecorder) SignURL(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignURL", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).SignURL), varargs...) +} + +// UpdateDownloadBandwidthRateLimit mocks base method. 
+func (m *MockObjectStorageServiceClient) UpdateDownloadBandwidthRateLimit(ctx context.Context, in *s3.UpdateBandwidthRateLimitInput, opts ...grpc.CallOption) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateDownloadBandwidthRateLimit", varargs...) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateDownloadBandwidthRateLimit indicates an expected call of UpdateDownloadBandwidthRateLimit. +func (mr *MockObjectStorageServiceClientMockRecorder) UpdateDownloadBandwidthRateLimit(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDownloadBandwidthRateLimit", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).UpdateDownloadBandwidthRateLimit), varargs...) +} + +// UpdateUploadBandwidthRateLimit mocks base method. +func (m *MockObjectStorageServiceClient) UpdateUploadBandwidthRateLimit(ctx context.Context, in *s3.UpdateBandwidthRateLimitInput, opts ...grpc.CallOption) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateUploadBandwidthRateLimit", varargs...) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateUploadBandwidthRateLimit indicates an expected call of UpdateUploadBandwidthRateLimit. +func (mr *MockObjectStorageServiceClientMockRecorder) UpdateUploadBandwidthRateLimit(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUploadBandwidthRateLimit", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).UpdateUploadBandwidthRateLimit), varargs...) +} + +// UploadPart mocks base method. +func (m *MockObjectStorageServiceClient) UploadPart(ctx context.Context, opts ...grpc.CallOption) (s3.ObjectStorageService_UploadPartClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UploadPart", varargs...) + ret0, _ := ret[0].(s3.ObjectStorageService_UploadPartClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPart indicates an expected call of UploadPart. +func (mr *MockObjectStorageServiceClientMockRecorder) UploadPart(ctx interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPart", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).UploadPart), varargs...) +} + +// UploadPartCopy mocks base method. +func (m *MockObjectStorageServiceClient) UploadPartCopy(ctx context.Context, in *s3.UploadPartCopyInput, opts ...grpc.CallOption) (*s3.UploadPartCopyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UploadPartCopy", varargs...) + ret0, _ := ret[0].(*s3.UploadPartCopyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartCopy indicates an expected call of UploadPartCopy. +func (mr *MockObjectStorageServiceClientMockRecorder) UploadPartCopy(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopy", reflect.TypeOf((*MockObjectStorageServiceClient)(nil).UploadPartCopy), varargs...) 
+} + +// MockObjectStorageService_PutObjectClient is a mock of ObjectStorageService_PutObjectClient interface. +type MockObjectStorageService_PutObjectClient struct { + ctrl *gomock.Controller + recorder *MockObjectStorageService_PutObjectClientMockRecorder +} + +// MockObjectStorageService_PutObjectClientMockRecorder is the mock recorder for MockObjectStorageService_PutObjectClient. +type MockObjectStorageService_PutObjectClientMockRecorder struct { + mock *MockObjectStorageService_PutObjectClient +} + +// NewMockObjectStorageService_PutObjectClient creates a new mock instance. +func NewMockObjectStorageService_PutObjectClient(ctrl *gomock.Controller) *MockObjectStorageService_PutObjectClient { + mock := &MockObjectStorageService_PutObjectClient{ctrl: ctrl} + mock.recorder = &MockObjectStorageService_PutObjectClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageService_PutObjectClient) EXPECT() *MockObjectStorageService_PutObjectClientMockRecorder { + return m.recorder +} + +// CloseAndRecv mocks base method. +func (m *MockObjectStorageService_PutObjectClient) CloseAndRecv() (*s3.PutObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseAndRecv") + ret0, _ := ret[0].(*s3.PutObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CloseAndRecv indicates an expected call of CloseAndRecv. +func (mr *MockObjectStorageService_PutObjectClientMockRecorder) CloseAndRecv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockObjectStorageService_PutObjectClient)(nil).CloseAndRecv)) +} + +// CloseSend mocks base method. +func (m *MockObjectStorageService_PutObjectClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. 
+func (mr *MockObjectStorageService_PutObjectClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockObjectStorageService_PutObjectClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockObjectStorageService_PutObjectClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockObjectStorageService_PutObjectClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockObjectStorageService_PutObjectClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockObjectStorageService_PutObjectClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockObjectStorageService_PutObjectClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockObjectStorageService_PutObjectClient)(nil).Header)) +} + +// RecvMsg mocks base method. +func (m_2 *MockObjectStorageService_PutObjectClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockObjectStorageService_PutObjectClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockObjectStorageService_PutObjectClient)(nil).RecvMsg), m) +} + +// Send mocks base method. 
+func (m *MockObjectStorageService_PutObjectClient) Send(arg0 *s3.PutObjectInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockObjectStorageService_PutObjectClientMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockObjectStorageService_PutObjectClient)(nil).Send), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockObjectStorageService_PutObjectClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockObjectStorageService_PutObjectClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockObjectStorageService_PutObjectClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. +func (m *MockObjectStorageService_PutObjectClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockObjectStorageService_PutObjectClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockObjectStorageService_PutObjectClient)(nil).Trailer)) +} + +// MockObjectStorageService_GetObjectClient is a mock of ObjectStorageService_GetObjectClient interface. +type MockObjectStorageService_GetObjectClient struct { + ctrl *gomock.Controller + recorder *MockObjectStorageService_GetObjectClientMockRecorder +} + +// MockObjectStorageService_GetObjectClientMockRecorder is the mock recorder for MockObjectStorageService_GetObjectClient. 
+type MockObjectStorageService_GetObjectClientMockRecorder struct { + mock *MockObjectStorageService_GetObjectClient +} + +// NewMockObjectStorageService_GetObjectClient creates a new mock instance. +func NewMockObjectStorageService_GetObjectClient(ctrl *gomock.Controller) *MockObjectStorageService_GetObjectClient { + mock := &MockObjectStorageService_GetObjectClient{ctrl: ctrl} + mock.recorder = &MockObjectStorageService_GetObjectClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageService_GetObjectClient) EXPECT() *MockObjectStorageService_GetObjectClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. +func (m *MockObjectStorageService_GetObjectClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockObjectStorageService_GetObjectClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockObjectStorageService_GetObjectClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockObjectStorageService_GetObjectClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockObjectStorageService_GetObjectClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockObjectStorageService_GetObjectClient)(nil).Context)) +} + +// Header mocks base method. 
+func (m *MockObjectStorageService_GetObjectClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockObjectStorageService_GetObjectClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockObjectStorageService_GetObjectClient)(nil).Header)) +} + +// Recv mocks base method. +func (m *MockObjectStorageService_GetObjectClient) Recv() (*s3.GetObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*s3.GetObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockObjectStorageService_GetObjectClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockObjectStorageService_GetObjectClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockObjectStorageService_GetObjectClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockObjectStorageService_GetObjectClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockObjectStorageService_GetObjectClient)(nil).RecvMsg), m) +} + +// SendMsg mocks base method. +func (m_2 *MockObjectStorageService_GetObjectClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. 
+func (mr *MockObjectStorageService_GetObjectClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockObjectStorageService_GetObjectClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. +func (m *MockObjectStorageService_GetObjectClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockObjectStorageService_GetObjectClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockObjectStorageService_GetObjectClient)(nil).Trailer)) +} + +// MockObjectStorageService_UploadPartClient is a mock of ObjectStorageService_UploadPartClient interface. +type MockObjectStorageService_UploadPartClient struct { + ctrl *gomock.Controller + recorder *MockObjectStorageService_UploadPartClientMockRecorder +} + +// MockObjectStorageService_UploadPartClientMockRecorder is the mock recorder for MockObjectStorageService_UploadPartClient. +type MockObjectStorageService_UploadPartClientMockRecorder struct { + mock *MockObjectStorageService_UploadPartClient +} + +// NewMockObjectStorageService_UploadPartClient creates a new mock instance. +func NewMockObjectStorageService_UploadPartClient(ctrl *gomock.Controller) *MockObjectStorageService_UploadPartClient { + mock := &MockObjectStorageService_UploadPartClient{ctrl: ctrl} + mock.recorder = &MockObjectStorageService_UploadPartClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageService_UploadPartClient) EXPECT() *MockObjectStorageService_UploadPartClientMockRecorder { + return m.recorder +} + +// CloseAndRecv mocks base method. 
+func (m *MockObjectStorageService_UploadPartClient) CloseAndRecv() (*s3.UploadPartOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseAndRecv") + ret0, _ := ret[0].(*s3.UploadPartOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CloseAndRecv indicates an expected call of CloseAndRecv. +func (mr *MockObjectStorageService_UploadPartClientMockRecorder) CloseAndRecv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockObjectStorageService_UploadPartClient)(nil).CloseAndRecv)) +} + +// CloseSend mocks base method. +func (m *MockObjectStorageService_UploadPartClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockObjectStorageService_UploadPartClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockObjectStorageService_UploadPartClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockObjectStorageService_UploadPartClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockObjectStorageService_UploadPartClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockObjectStorageService_UploadPartClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockObjectStorageService_UploadPartClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. 
+func (mr *MockObjectStorageService_UploadPartClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockObjectStorageService_UploadPartClient)(nil).Header)) +} + +// RecvMsg mocks base method. +func (m_2 *MockObjectStorageService_UploadPartClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockObjectStorageService_UploadPartClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockObjectStorageService_UploadPartClient)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockObjectStorageService_UploadPartClient) Send(arg0 *s3.UploadPartInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockObjectStorageService_UploadPartClientMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockObjectStorageService_UploadPartClient)(nil).Send), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockObjectStorageService_UploadPartClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockObjectStorageService_UploadPartClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockObjectStorageService_UploadPartClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. 
+func (m *MockObjectStorageService_UploadPartClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockObjectStorageService_UploadPartClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockObjectStorageService_UploadPartClient)(nil).Trailer)) +} + +// MockObjectStorageService_AppendObjectClient is a mock of ObjectStorageService_AppendObjectClient interface. +type MockObjectStorageService_AppendObjectClient struct { + ctrl *gomock.Controller + recorder *MockObjectStorageService_AppendObjectClientMockRecorder +} + +// MockObjectStorageService_AppendObjectClientMockRecorder is the mock recorder for MockObjectStorageService_AppendObjectClient. +type MockObjectStorageService_AppendObjectClientMockRecorder struct { + mock *MockObjectStorageService_AppendObjectClient +} + +// NewMockObjectStorageService_AppendObjectClient creates a new mock instance. +func NewMockObjectStorageService_AppendObjectClient(ctrl *gomock.Controller) *MockObjectStorageService_AppendObjectClient { + mock := &MockObjectStorageService_AppendObjectClient{ctrl: ctrl} + mock.recorder = &MockObjectStorageService_AppendObjectClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageService_AppendObjectClient) EXPECT() *MockObjectStorageService_AppendObjectClientMockRecorder { + return m.recorder +} + +// CloseAndRecv mocks base method. +func (m *MockObjectStorageService_AppendObjectClient) CloseAndRecv() (*s3.AppendObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseAndRecv") + ret0, _ := ret[0].(*s3.AppendObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CloseAndRecv indicates an expected call of CloseAndRecv. 
+func (mr *MockObjectStorageService_AppendObjectClientMockRecorder) CloseAndRecv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockObjectStorageService_AppendObjectClient)(nil).CloseAndRecv)) +} + +// CloseSend mocks base method. +func (m *MockObjectStorageService_AppendObjectClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockObjectStorageService_AppendObjectClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockObjectStorageService_AppendObjectClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockObjectStorageService_AppendObjectClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockObjectStorageService_AppendObjectClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockObjectStorageService_AppendObjectClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockObjectStorageService_AppendObjectClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockObjectStorageService_AppendObjectClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockObjectStorageService_AppendObjectClient)(nil).Header)) +} + +// RecvMsg mocks base method. 
+func (m_2 *MockObjectStorageService_AppendObjectClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockObjectStorageService_AppendObjectClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockObjectStorageService_AppendObjectClient)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockObjectStorageService_AppendObjectClient) Send(arg0 *s3.AppendObjectInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockObjectStorageService_AppendObjectClientMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockObjectStorageService_AppendObjectClient)(nil).Send), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockObjectStorageService_AppendObjectClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockObjectStorageService_AppendObjectClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockObjectStorageService_AppendObjectClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. +func (m *MockObjectStorageService_AppendObjectClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. 
+func (mr *MockObjectStorageService_AppendObjectClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockObjectStorageService_AppendObjectClient)(nil).Trailer)) +} + +// MockObjectStorageServiceServer is a mock of ObjectStorageServiceServer interface. +type MockObjectStorageServiceServer struct { + ctrl *gomock.Controller + recorder *MockObjectStorageServiceServerMockRecorder +} + +// MockObjectStorageServiceServerMockRecorder is the mock recorder for MockObjectStorageServiceServer. +type MockObjectStorageServiceServerMockRecorder struct { + mock *MockObjectStorageServiceServer +} + +// NewMockObjectStorageServiceServer creates a new mock instance. +func NewMockObjectStorageServiceServer(ctrl *gomock.Controller) *MockObjectStorageServiceServer { + mock := &MockObjectStorageServiceServer{ctrl: ctrl} + mock.recorder = &MockObjectStorageServiceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageServiceServer) EXPECT() *MockObjectStorageServiceServerMockRecorder { + return m.recorder +} + +// AbortMultipartUpload mocks base method. +func (m *MockObjectStorageServiceServer) AbortMultipartUpload(arg0 context.Context, arg1 *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AbortMultipartUpload", arg0, arg1) + ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AbortMultipartUpload indicates an expected call of AbortMultipartUpload. 
+func (mr *MockObjectStorageServiceServerMockRecorder) AbortMultipartUpload(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUpload", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).AbortMultipartUpload), arg0, arg1) +} + +// AppendObject mocks base method. +func (m *MockObjectStorageServiceServer) AppendObject(arg0 s3.ObjectStorageService_AppendObjectServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppendObject", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// AppendObject indicates an expected call of AppendObject. +func (mr *MockObjectStorageServiceServerMockRecorder) AppendObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendObject", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).AppendObject), arg0) +} + +// CompleteMultipartUpload mocks base method. +func (m *MockObjectStorageServiceServer) CompleteMultipartUpload(arg0 context.Context, arg1 *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteMultipartUpload", arg0, arg1) + ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteMultipartUpload indicates an expected call of CompleteMultipartUpload. +func (mr *MockObjectStorageServiceServerMockRecorder) CompleteMultipartUpload(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUpload", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).CompleteMultipartUpload), arg0, arg1) +} + +// CopyObject mocks base method. 
+func (m *MockObjectStorageServiceServer) CopyObject(arg0 context.Context, arg1 *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CopyObject", arg0, arg1) + ret0, _ := ret[0].(*s3.CopyObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CopyObject indicates an expected call of CopyObject. +func (mr *MockObjectStorageServiceServerMockRecorder) CopyObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObject", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).CopyObject), arg0, arg1) +} + +// CreateMultipartUpload mocks base method. +func (m *MockObjectStorageServiceServer) CreateMultipartUpload(arg0 context.Context, arg1 *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateMultipartUpload", arg0, arg1) + ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateMultipartUpload indicates an expected call of CreateMultipartUpload. +func (mr *MockObjectStorageServiceServerMockRecorder) CreateMultipartUpload(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUpload", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).CreateMultipartUpload), arg0, arg1) +} + +// DeleteObject mocks base method. +func (m *MockObjectStorageServiceServer) DeleteObject(arg0 context.Context, arg1 *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObject", arg0, arg1) + ret0, _ := ret[0].(*s3.DeleteObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObject indicates an expected call of DeleteObject. 
+func (mr *MockObjectStorageServiceServerMockRecorder) DeleteObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).DeleteObject), arg0, arg1) +} + +// DeleteObjectTagging mocks base method. +func (m *MockObjectStorageServiceServer) DeleteObjectTagging(arg0 context.Context, arg1 *s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectTagging", arg0, arg1) + ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectTagging indicates an expected call of DeleteObjectTagging. +func (mr *MockObjectStorageServiceServerMockRecorder) DeleteObjectTagging(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTagging", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).DeleteObjectTagging), arg0, arg1) +} + +// DeleteObjects mocks base method. +func (m *MockObjectStorageServiceServer) DeleteObjects(arg0 context.Context, arg1 *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjects", arg0, arg1) + ret0, _ := ret[0].(*s3.DeleteObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjects indicates an expected call of DeleteObjects. +func (mr *MockObjectStorageServiceServerMockRecorder) DeleteObjects(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjects", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).DeleteObjects), arg0, arg1) +} + +// GetObject mocks base method. 
+func (m *MockObjectStorageServiceServer) GetObject(arg0 *s3.GetObjectInput, arg1 s3.ObjectStorageService_GetObjectServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObject", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetObject indicates an expected call of GetObject. +func (mr *MockObjectStorageServiceServerMockRecorder) GetObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).GetObject), arg0, arg1) +} + +// GetObjectCannedAcl mocks base method. +func (m *MockObjectStorageServiceServer) GetObjectCannedAcl(arg0 context.Context, arg1 *s3.GetObjectCannedAclInput) (*s3.GetObjectCannedAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectCannedAcl", arg0, arg1) + ret0, _ := ret[0].(*s3.GetObjectCannedAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectCannedAcl indicates an expected call of GetObjectCannedAcl. +func (mr *MockObjectStorageServiceServerMockRecorder) GetObjectCannedAcl(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectCannedAcl", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).GetObjectCannedAcl), arg0, arg1) +} + +// GetObjectTagging mocks base method. +func (m *MockObjectStorageServiceServer) GetObjectTagging(arg0 context.Context, arg1 *s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTagging", arg0, arg1) + ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTagging indicates an expected call of GetObjectTagging. 
+func (mr *MockObjectStorageServiceServerMockRecorder) GetObjectTagging(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTagging", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).GetObjectTagging), arg0, arg1) +} + +// HeadObject mocks base method. +func (m *MockObjectStorageServiceServer) HeadObject(arg0 context.Context, arg1 *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadObject", arg0, arg1) + ret0, _ := ret[0].(*s3.HeadObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadObject indicates an expected call of HeadObject. +func (mr *MockObjectStorageServiceServerMockRecorder) HeadObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).HeadObject), arg0, arg1) +} + +// IsObjectExist mocks base method. +func (m *MockObjectStorageServiceServer) IsObjectExist(arg0 context.Context, arg1 *s3.IsObjectExistInput) (*s3.IsObjectExistOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsObjectExist", arg0, arg1) + ret0, _ := ret[0].(*s3.IsObjectExistOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsObjectExist indicates an expected call of IsObjectExist. +func (mr *MockObjectStorageServiceServerMockRecorder) IsObjectExist(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsObjectExist", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).IsObjectExist), arg0, arg1) +} + +// ListMultipartUploads mocks base method. 
+func (m *MockObjectStorageServiceServer) ListMultipartUploads(arg0 context.Context, arg1 *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMultipartUploads", arg0, arg1) + ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMultipartUploads indicates an expected call of ListMultipartUploads. +func (mr *MockObjectStorageServiceServerMockRecorder) ListMultipartUploads(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploads", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).ListMultipartUploads), arg0, arg1) +} + +// ListObjectVersions mocks base method. +func (m *MockObjectStorageServiceServer) ListObjectVersions(arg0 context.Context, arg1 *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectVersions", arg0, arg1) + ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectVersions indicates an expected call of ListObjectVersions. +func (mr *MockObjectStorageServiceServerMockRecorder) ListObjectVersions(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersions", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).ListObjectVersions), arg0, arg1) +} + +// ListObjects mocks base method. +func (m *MockObjectStorageServiceServer) ListObjects(arg0 context.Context, arg1 *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjects", arg0, arg1) + ret0, _ := ret[0].(*s3.ListObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjects indicates an expected call of ListObjects. 
+func (mr *MockObjectStorageServiceServerMockRecorder) ListObjects(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).ListObjects), arg0, arg1) +} + +// ListParts mocks base method. +func (m *MockObjectStorageServiceServer) ListParts(arg0 context.Context, arg1 *s3.ListPartsInput) (*s3.ListPartsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListParts", arg0, arg1) + ret0, _ := ret[0].(*s3.ListPartsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListParts indicates an expected call of ListParts. +func (mr *MockObjectStorageServiceServerMockRecorder) ListParts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListParts", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).ListParts), arg0, arg1) +} + +// PutObject mocks base method. +func (m *MockObjectStorageServiceServer) PutObject(arg0 s3.ObjectStorageService_PutObjectServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObject", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutObject indicates an expected call of PutObject. +func (mr *MockObjectStorageServiceServerMockRecorder) PutObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObject", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).PutObject), arg0) +} + +// PutObjectCannedAcl mocks base method. +func (m *MockObjectStorageServiceServer) PutObjectCannedAcl(arg0 context.Context, arg1 *s3.PutObjectCannedAclInput) (*s3.PutObjectCannedAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectCannedAcl", arg0, arg1) + ret0, _ := ret[0].(*s3.PutObjectCannedAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectCannedAcl indicates an expected call of PutObjectCannedAcl. 
+func (mr *MockObjectStorageServiceServerMockRecorder) PutObjectCannedAcl(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectCannedAcl", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).PutObjectCannedAcl), arg0, arg1) +} + +// PutObjectTagging mocks base method. +func (m *MockObjectStorageServiceServer) PutObjectTagging(arg0 context.Context, arg1 *s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectTagging", arg0, arg1) + ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectTagging indicates an expected call of PutObjectTagging. +func (mr *MockObjectStorageServiceServerMockRecorder) PutObjectTagging(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTagging", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).PutObjectTagging), arg0, arg1) +} + +// RestoreObject mocks base method. +func (m *MockObjectStorageServiceServer) RestoreObject(arg0 context.Context, arg1 *s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreObject", arg0, arg1) + ret0, _ := ret[0].(*s3.RestoreObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreObject indicates an expected call of RestoreObject. +func (mr *MockObjectStorageServiceServerMockRecorder) RestoreObject(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObject", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).RestoreObject), arg0, arg1) +} + +// SignURL mocks base method. 
+func (m *MockObjectStorageServiceServer) SignURL(arg0 context.Context, arg1 *s3.SignURLInput) (*s3.SignURLOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SignURL", arg0, arg1) + ret0, _ := ret[0].(*s3.SignURLOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SignURL indicates an expected call of SignURL. +func (mr *MockObjectStorageServiceServerMockRecorder) SignURL(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignURL", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).SignURL), arg0, arg1) +} + +// UpdateDownloadBandwidthRateLimit mocks base method. +func (m *MockObjectStorageServiceServer) UpdateDownloadBandwidthRateLimit(arg0 context.Context, arg1 *s3.UpdateBandwidthRateLimitInput) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateDownloadBandwidthRateLimit", arg0, arg1) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateDownloadBandwidthRateLimit indicates an expected call of UpdateDownloadBandwidthRateLimit. +func (mr *MockObjectStorageServiceServerMockRecorder) UpdateDownloadBandwidthRateLimit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDownloadBandwidthRateLimit", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).UpdateDownloadBandwidthRateLimit), arg0, arg1) +} + +// UpdateUploadBandwidthRateLimit mocks base method. +func (m *MockObjectStorageServiceServer) UpdateUploadBandwidthRateLimit(arg0 context.Context, arg1 *s3.UpdateBandwidthRateLimitInput) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateUploadBandwidthRateLimit", arg0, arg1) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateUploadBandwidthRateLimit indicates an expected call of UpdateUploadBandwidthRateLimit. 
+func (mr *MockObjectStorageServiceServerMockRecorder) UpdateUploadBandwidthRateLimit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUploadBandwidthRateLimit", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).UpdateUploadBandwidthRateLimit), arg0, arg1) +} + +// UploadPart mocks base method. +func (m *MockObjectStorageServiceServer) UploadPart(arg0 s3.ObjectStorageService_UploadPartServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPart", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UploadPart indicates an expected call of UploadPart. +func (mr *MockObjectStorageServiceServerMockRecorder) UploadPart(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPart", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).UploadPart), arg0) +} + +// UploadPartCopy mocks base method. +func (m *MockObjectStorageServiceServer) UploadPartCopy(arg0 context.Context, arg1 *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPartCopy", arg0, arg1) + ret0, _ := ret[0].(*s3.UploadPartCopyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartCopy indicates an expected call of UploadPartCopy. +func (mr *MockObjectStorageServiceServerMockRecorder) UploadPartCopy(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopy", reflect.TypeOf((*MockObjectStorageServiceServer)(nil).UploadPartCopy), arg0, arg1) +} + +// MockUnsafeObjectStorageServiceServer is a mock of UnsafeObjectStorageServiceServer interface. 
+type MockUnsafeObjectStorageServiceServer struct { + ctrl *gomock.Controller + recorder *MockUnsafeObjectStorageServiceServerMockRecorder +} + +// MockUnsafeObjectStorageServiceServerMockRecorder is the mock recorder for MockUnsafeObjectStorageServiceServer. +type MockUnsafeObjectStorageServiceServerMockRecorder struct { + mock *MockUnsafeObjectStorageServiceServer +} + +// NewMockUnsafeObjectStorageServiceServer creates a new mock instance. +func NewMockUnsafeObjectStorageServiceServer(ctrl *gomock.Controller) *MockUnsafeObjectStorageServiceServer { + mock := &MockUnsafeObjectStorageServiceServer{ctrl: ctrl} + mock.recorder = &MockUnsafeObjectStorageServiceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUnsafeObjectStorageServiceServer) EXPECT() *MockUnsafeObjectStorageServiceServerMockRecorder { + return m.recorder +} + +// mustEmbedUnimplementedObjectStorageServiceServer mocks base method. +func (m *MockUnsafeObjectStorageServiceServer) mustEmbedUnimplementedObjectStorageServiceServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedObjectStorageServiceServer") +} + +// mustEmbedUnimplementedObjectStorageServiceServer indicates an expected call of mustEmbedUnimplementedObjectStorageServiceServer. +func (mr *MockUnsafeObjectStorageServiceServerMockRecorder) mustEmbedUnimplementedObjectStorageServiceServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedObjectStorageServiceServer", reflect.TypeOf((*MockUnsafeObjectStorageServiceServer)(nil).mustEmbedUnimplementedObjectStorageServiceServer)) +} + +// MockObjectStorageService_PutObjectServer is a mock of ObjectStorageService_PutObjectServer interface. 
+type MockObjectStorageService_PutObjectServer struct { + ctrl *gomock.Controller + recorder *MockObjectStorageService_PutObjectServerMockRecorder +} + +// MockObjectStorageService_PutObjectServerMockRecorder is the mock recorder for MockObjectStorageService_PutObjectServer. +type MockObjectStorageService_PutObjectServerMockRecorder struct { + mock *MockObjectStorageService_PutObjectServer +} + +// NewMockObjectStorageService_PutObjectServer creates a new mock instance. +func NewMockObjectStorageService_PutObjectServer(ctrl *gomock.Controller) *MockObjectStorageService_PutObjectServer { + mock := &MockObjectStorageService_PutObjectServer{ctrl: ctrl} + mock.recorder = &MockObjectStorageService_PutObjectServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageService_PutObjectServer) EXPECT() *MockObjectStorageService_PutObjectServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockObjectStorageService_PutObjectServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockObjectStorageService_PutObjectServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockObjectStorageService_PutObjectServer)(nil).Context)) +} + +// Recv mocks base method. +func (m *MockObjectStorageService_PutObjectServer) Recv() (*s3.PutObjectInput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*s3.PutObjectInput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. 
+func (mr *MockObjectStorageService_PutObjectServerMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockObjectStorageService_PutObjectServer)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockObjectStorageService_PutObjectServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockObjectStorageService_PutObjectServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockObjectStorageService_PutObjectServer)(nil).RecvMsg), m) +} + +// SendAndClose mocks base method. +func (m *MockObjectStorageService_PutObjectServer) SendAndClose(arg0 *s3.PutObjectOutput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAndClose", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendAndClose indicates an expected call of SendAndClose. +func (mr *MockObjectStorageService_PutObjectServerMockRecorder) SendAndClose(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndClose", reflect.TypeOf((*MockObjectStorageService_PutObjectServer)(nil).SendAndClose), arg0) +} + +// SendHeader mocks base method. +func (m *MockObjectStorageService_PutObjectServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. 
+func (mr *MockObjectStorageService_PutObjectServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockObjectStorageService_PutObjectServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockObjectStorageService_PutObjectServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockObjectStorageService_PutObjectServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockObjectStorageService_PutObjectServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockObjectStorageService_PutObjectServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockObjectStorageService_PutObjectServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockObjectStorageService_PutObjectServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockObjectStorageService_PutObjectServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. 
+func (mr *MockObjectStorageService_PutObjectServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockObjectStorageService_PutObjectServer)(nil).SetTrailer), arg0) +} + +// MockObjectStorageService_GetObjectServer is a mock of ObjectStorageService_GetObjectServer interface. +type MockObjectStorageService_GetObjectServer struct { + ctrl *gomock.Controller + recorder *MockObjectStorageService_GetObjectServerMockRecorder +} + +// MockObjectStorageService_GetObjectServerMockRecorder is the mock recorder for MockObjectStorageService_GetObjectServer. +type MockObjectStorageService_GetObjectServerMockRecorder struct { + mock *MockObjectStorageService_GetObjectServer +} + +// NewMockObjectStorageService_GetObjectServer creates a new mock instance. +func NewMockObjectStorageService_GetObjectServer(ctrl *gomock.Controller) *MockObjectStorageService_GetObjectServer { + mock := &MockObjectStorageService_GetObjectServer{ctrl: ctrl} + mock.recorder = &MockObjectStorageService_GetObjectServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageService_GetObjectServer) EXPECT() *MockObjectStorageService_GetObjectServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockObjectStorageService_GetObjectServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockObjectStorageService_GetObjectServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockObjectStorageService_GetObjectServer)(nil).Context)) +} + +// RecvMsg mocks base method. 
+func (m_2 *MockObjectStorageService_GetObjectServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockObjectStorageService_GetObjectServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockObjectStorageService_GetObjectServer)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockObjectStorageService_GetObjectServer) Send(arg0 *s3.GetObjectOutput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockObjectStorageService_GetObjectServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockObjectStorageService_GetObjectServer)(nil).Send), arg0) +} + +// SendHeader mocks base method. +func (m *MockObjectStorageService_GetObjectServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockObjectStorageService_GetObjectServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockObjectStorageService_GetObjectServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockObjectStorageService_GetObjectServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. 
+func (mr *MockObjectStorageService_GetObjectServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockObjectStorageService_GetObjectServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockObjectStorageService_GetObjectServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockObjectStorageService_GetObjectServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockObjectStorageService_GetObjectServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockObjectStorageService_GetObjectServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. +func (mr *MockObjectStorageService_GetObjectServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockObjectStorageService_GetObjectServer)(nil).SetTrailer), arg0) +} + +// MockObjectStorageService_UploadPartServer is a mock of ObjectStorageService_UploadPartServer interface. +type MockObjectStorageService_UploadPartServer struct { + ctrl *gomock.Controller + recorder *MockObjectStorageService_UploadPartServerMockRecorder +} + +// MockObjectStorageService_UploadPartServerMockRecorder is the mock recorder for MockObjectStorageService_UploadPartServer. +type MockObjectStorageService_UploadPartServerMockRecorder struct { + mock *MockObjectStorageService_UploadPartServer +} + +// NewMockObjectStorageService_UploadPartServer creates a new mock instance. 
+func NewMockObjectStorageService_UploadPartServer(ctrl *gomock.Controller) *MockObjectStorageService_UploadPartServer { + mock := &MockObjectStorageService_UploadPartServer{ctrl: ctrl} + mock.recorder = &MockObjectStorageService_UploadPartServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageService_UploadPartServer) EXPECT() *MockObjectStorageService_UploadPartServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockObjectStorageService_UploadPartServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockObjectStorageService_UploadPartServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockObjectStorageService_UploadPartServer)(nil).Context)) +} + +// Recv mocks base method. +func (m *MockObjectStorageService_UploadPartServer) Recv() (*s3.UploadPartInput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*s3.UploadPartInput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockObjectStorageService_UploadPartServerMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockObjectStorageService_UploadPartServer)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockObjectStorageService_UploadPartServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. 
+func (mr *MockObjectStorageService_UploadPartServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockObjectStorageService_UploadPartServer)(nil).RecvMsg), m) +} + +// SendAndClose mocks base method. +func (m *MockObjectStorageService_UploadPartServer) SendAndClose(arg0 *s3.UploadPartOutput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAndClose", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendAndClose indicates an expected call of SendAndClose. +func (mr *MockObjectStorageService_UploadPartServerMockRecorder) SendAndClose(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndClose", reflect.TypeOf((*MockObjectStorageService_UploadPartServer)(nil).SendAndClose), arg0) +} + +// SendHeader mocks base method. +func (m *MockObjectStorageService_UploadPartServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockObjectStorageService_UploadPartServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockObjectStorageService_UploadPartServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockObjectStorageService_UploadPartServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. 
+func (mr *MockObjectStorageService_UploadPartServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockObjectStorageService_UploadPartServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockObjectStorageService_UploadPartServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockObjectStorageService_UploadPartServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockObjectStorageService_UploadPartServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockObjectStorageService_UploadPartServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. +func (mr *MockObjectStorageService_UploadPartServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockObjectStorageService_UploadPartServer)(nil).SetTrailer), arg0) +} + +// MockObjectStorageService_AppendObjectServer is a mock of ObjectStorageService_AppendObjectServer interface. +type MockObjectStorageService_AppendObjectServer struct { + ctrl *gomock.Controller + recorder *MockObjectStorageService_AppendObjectServerMockRecorder +} + +// MockObjectStorageService_AppendObjectServerMockRecorder is the mock recorder for MockObjectStorageService_AppendObjectServer. +type MockObjectStorageService_AppendObjectServerMockRecorder struct { + mock *MockObjectStorageService_AppendObjectServer +} + +// NewMockObjectStorageService_AppendObjectServer creates a new mock instance. 
+func NewMockObjectStorageService_AppendObjectServer(ctrl *gomock.Controller) *MockObjectStorageService_AppendObjectServer { + mock := &MockObjectStorageService_AppendObjectServer{ctrl: ctrl} + mock.recorder = &MockObjectStorageService_AppendObjectServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageService_AppendObjectServer) EXPECT() *MockObjectStorageService_AppendObjectServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockObjectStorageService_AppendObjectServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockObjectStorageService_AppendObjectServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockObjectStorageService_AppendObjectServer)(nil).Context)) +} + +// Recv mocks base method. +func (m *MockObjectStorageService_AppendObjectServer) Recv() (*s3.AppendObjectInput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*s3.AppendObjectInput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockObjectStorageService_AppendObjectServerMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockObjectStorageService_AppendObjectServer)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockObjectStorageService_AppendObjectServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. 
+func (mr *MockObjectStorageService_AppendObjectServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockObjectStorageService_AppendObjectServer)(nil).RecvMsg), m) +} + +// SendAndClose mocks base method. +func (m *MockObjectStorageService_AppendObjectServer) SendAndClose(arg0 *s3.AppendObjectOutput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAndClose", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendAndClose indicates an expected call of SendAndClose. +func (mr *MockObjectStorageService_AppendObjectServerMockRecorder) SendAndClose(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndClose", reflect.TypeOf((*MockObjectStorageService_AppendObjectServer)(nil).SendAndClose), arg0) +} + +// SendHeader mocks base method. +func (m *MockObjectStorageService_AppendObjectServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockObjectStorageService_AppendObjectServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockObjectStorageService_AppendObjectServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockObjectStorageService_AppendObjectServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. 
+func (mr *MockObjectStorageService_AppendObjectServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockObjectStorageService_AppendObjectServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockObjectStorageService_AppendObjectServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockObjectStorageService_AppendObjectServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockObjectStorageService_AppendObjectServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockObjectStorageService_AppendObjectServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. 
+func (mr *MockObjectStorageService_AppendObjectServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockObjectStorageService_AppendObjectServer)(nil).SetTrailer), arg0) +} diff --git a/pkg/runtime/config.go b/pkg/runtime/config.go index ade6ea7c9f..b0a253ae5b 100644 --- a/pkg/runtime/config.go +++ b/pkg/runtime/config.go @@ -19,6 +19,8 @@ package runtime import ( "encoding/json" + "mosn.io/layotto/components/oss" + "mosn.io/layotto/pkg/runtime/secretstores" "mosn.io/layotto/components/custom" @@ -50,6 +52,7 @@ type MosnRuntimeConfig struct { PubSubManagement map[string]pubsub.Config `json:"pub_subs"` StateManagement map[string]state.Config `json:"state"` Files map[string]file.FileConfig `json:"file"` + Oss map[string]oss.Config `json:"oss"` LockManagement map[string]lock.Config `json:"lock"` SequencerManagement map[string]sequencer.Config `json:"sequencer"` Bindings map[string]bindings.Metadata `json:"bindings"` diff --git a/pkg/runtime/config_test.go b/pkg/runtime/config_test.go index 46e33cf698..331ca3bd8e 100644 --- a/pkg/runtime/config_test.go +++ b/pkg/runtime/config_test.go @@ -11,15 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+ package runtime import ( "encoding/json" "testing" - "github.com/stretchr/testify/assert" + "mosn.io/layotto/components/pkg/utils" - "mosn.io/layotto/components/file/s3/alicloud" + "github.com/stretchr/testify/assert" ) func TestConfig(t *testing.T) { @@ -29,7 +30,7 @@ func TestConfig(t *testing.T) { } }, "file": { - "aliOSS": { + "aliyun.oss": { "metadata":[ { "endpoint": "endpoint_address", @@ -42,8 +43,8 @@ func TestConfig(t *testing.T) { }` mscf, err := ParseRuntimeConfig([]byte(data)) assert.Nil(t, err) - v := mscf.Files["aliOSS"] - m := make([]*alicloud.OssMetadata, 0) + v := mscf.Files["aliyun.oss"] + m := make([]*utils.OssMetadata, 0) err = json.Unmarshal(v.Metadata, &m) assert.Nil(t, err) for _, x := range m { diff --git a/pkg/runtime/options.go b/pkg/runtime/options.go index 39d7a44f9b..5afca36e25 100644 --- a/pkg/runtime/options.go +++ b/pkg/runtime/options.go @@ -20,6 +20,8 @@ import ( "google.golang.org/grpc" "mosn.io/pkg/log" + "mosn.io/layotto/components/oss" + "mosn.io/layotto/components/configstores" "mosn.io/layotto/components/custom" "mosn.io/layotto/components/file" @@ -40,6 +42,7 @@ type services struct { configStores []*configstores.StoreFactory rpcs []*rpc.Factory files []*file.FileFactory + oss []*oss.Factory pubSubs []*pubsub.Factory states []*state.Factory locks []*runtime_lock.Factory @@ -129,6 +132,12 @@ func WithRpcFactory(rpcs ...*rpc.Factory) Option { } } +func WithOssFactory(oss ...*oss.Factory) Option { + return func(o *runtimeOptions) { + o.services.oss = append(o.services.oss, oss...) + } +} + func WithFileFactory(files ...*file.FileFactory) Option { return func(o *runtimeOptions) { o.services.files = append(o.services.files, files...) 
diff --git a/pkg/runtime/runtime.go b/pkg/runtime/runtime.go index afbe73e81b..f823d80f26 100644 --- a/pkg/runtime/runtime.go +++ b/pkg/runtime/runtime.go @@ -23,6 +23,8 @@ import ( "strings" "time" + "mosn.io/layotto/components/oss" + "mosn.io/layotto/pkg/runtime/ref" "github.com/dapr/components-contrib/secretstores" @@ -69,6 +71,7 @@ type MosnRuntime struct { lockRegistry runtime_lock.Registry sequencerRegistry runtime_sequencer.Registry fileRegistry file.Registry + ossRegistry oss.Registry bindingsRegistry mbindings.Registry secretStoresRegistry msecretstores.Registry customComponentRegistry custom.Registry @@ -82,6 +85,7 @@ type MosnRuntime struct { // state implementations store here are already initialized states map[string]state.Store files map[string]file.File + oss map[string]oss.Oss locks map[string]lock.LockStore sequencers map[string]sequencer.Store outputBindings map[string]bindings.OutputBinding @@ -113,6 +117,7 @@ func NewMosnRuntime(runtimeConfig *MosnRuntimeConfig) *MosnRuntime { stateRegistry: runtime_state.NewRegistry(info), bindingsRegistry: mbindings.NewRegistry(info), fileRegistry: file.NewRegistry(info), + ossRegistry: oss.NewRegistry(info), lockRegistry: runtime_lock.NewRegistry(info), sequencerRegistry: runtime_sequencer.NewRegistry(info), secretStoresRegistry: msecretstores.NewRegistry(info), @@ -123,6 +128,7 @@ func NewMosnRuntime(runtimeConfig *MosnRuntimeConfig) *MosnRuntime { pubSubs: make(map[string]pubsub.PubSub), states: make(map[string]state.Store), files: make(map[string]file.File), + oss: make(map[string]oss.Oss), locks: make(map[string]lock.LockStore), sequencers: make(map[string]sequencer.Store), outputBindings: make(map[string]bindings.OutputBinding), @@ -193,6 +199,7 @@ func (m *MosnRuntime) Run(opts ...Option) (mgrpc.RegisteredServer, error) { PubSubs: m.pubSubs, StateStores: m.states, Files: m.files, + Oss: m.oss, LockStores: m.locks, Sequencers: m.sequencers, SendToOutputBindingFn: m.sendToOutputBinding, @@ -268,6 +275,9 
@@ func DefaultInitRuntimeStage(o *runtimeOptions, m *MosnRuntime) error { if err := m.initFiles(o.services.files...); err != nil { return err } + if err := m.initOss(o.services.oss...); err != nil { + return err + } if err := m.initLocks(o.services.locks...); err != nil { return err } @@ -401,11 +411,32 @@ func (m *MosnRuntime) initStates(factorys ...*runtime_state.Factory) error { return nil } +func (m *MosnRuntime) initOss(oss ...*oss.Factory) error { + log.DefaultLogger.Infof("[runtime] init oss service") + + // register all oss store services implementation + m.ossRegistry.Register(oss...) + for name, config := range m.runtimeConfig.Oss { + c, err := m.ossRegistry.Create(config.Type) + if err != nil { + m.errInt(err, "create oss component %s failed", name) + return err + } + if err := c.Init(context.TODO(), &config); err != nil { + m.errInt(err, "init oss component %s failed", name) + return err + } + m.oss[name] = c + } + return nil +} + func (m *MosnRuntime) initFiles(files ...*file.FileFactory) error { log.DefaultLogger.Infof("[runtime] init file service") // register all files store services implementation m.fileRegistry.Register(files...) + for name, config := range m.runtimeConfig.Files { c, err := m.fileRegistry.Create(config.Type) if err != nil { diff --git a/pkg/runtime/runtime_test.go b/pkg/runtime/runtime_test.go index a9e5f93de5..8abd97cfd3 100644 --- a/pkg/runtime/runtime_test.go +++ b/pkg/runtime/runtime_test.go @@ -24,6 +24,12 @@ import ( "net" "testing" + aws2 "mosn.io/layotto/components/oss/aws" + + s3ext "mosn.io/layotto/pkg/grpc/extension/s3" + + "mosn.io/layotto/components/oss" + "github.com/dapr/components-contrib/bindings" "google.golang.org/grpc/test/bufconn" @@ -829,3 +835,32 @@ func runtimeWithCallbackConnection(t *testing.T) (*MosnRuntime, *mock_appcallbac rt.AppCallbackConn = callbackClient return rt, mockAppCallbackServer } + +func TestMosnRuntimeWithOssConfig(t *testing.T) { + t.Run("normal", func(t *testing.T) { + // 1. 
construct config + cfg := &MosnRuntimeConfig{ + Oss: map[string]oss.Config{ + "awsdemo": {Type: "aws.oss"}, + }, + } + // 2. construct runtime + rt := NewMosnRuntime(cfg) + // 3. Run + server, err := rt.Run( + // register your grpc API here + WithGrpcAPI( + default_api.NewGrpcAPI, + s3ext.NewS3Server, + ), + WithOssFactory( + oss.NewFactory("aws.oss", aws2.NewAwsOss), + ), + ) + // 4. assert + assert.Equal(t, "invalid argument", err.Error()) + assert.Nil(t, server) + // 5. stop + rt.Stop() + }) +} diff --git a/spec/proto/extension/v1/s3/oss.pb.go b/spec/proto/extension/v1/s3/oss.pb.go new file mode 100644 index 0000000000..f7b95045ef --- /dev/null +++ b/spec/proto/extension/v1/s3/oss.pb.go @@ -0,0 +1,8766 @@ +//The file defined base on s3 protocol, to get an in-depth walkthrough of this file, see: +//https://docs.aws.amazon.com/s3/index.html +//https://github.com/aws/aws-sdk-go-v2 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: oss.proto + +package s3 + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// GetObjectInput +type GetObjectInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. 
+ StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Key of the object to get + // This member is required + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // The account ID of the expected bucket owner + ExpectedBucketOwner string `protobuf:"bytes,4,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // Return the object only if its entity tag (ETag) is the same as the one specified + IfMatch string `protobuf:"bytes,5,opt,name=if_match,json=ifMatch,proto3" json:"if_match,omitempty"` + // Return the object only if it has been modified since the specified time + IfModifiedSince int64 `protobuf:"varint,6,opt,name=if_modified_since,json=ifModifiedSince,proto3" json:"if_modified_since,omitempty"` + // Return the object only if its entity tag (ETag) is different from the one specified + IfNoneMatch string `protobuf:"bytes,7,opt,name=if_none_match,json=ifNoneMatch,proto3" json:"if_none_match,omitempty"` + // Return the object only if it has not been modified since the specified time + IfUnmodifiedSince int64 `protobuf:"varint,8,opt,name=if_unmodified_since,json=ifUnmodifiedSince,proto3" json:"if_unmodified_since,omitempty"` + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. 
+ PartNumber int64 `protobuf:"varint,9,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"` + // Downloads the specified range bytes of an object + // start is used to specify the location where the file starts + Start int64 `protobuf:"varint,10,opt,name=start,proto3" json:"start,omitempty"` + // end is used to specify the location where the file end + End int64 `protobuf:"varint,11,opt,name=end,proto3" json:"end,omitempty"` + // Confirms that the requester knows that they will be charged for the request. + RequestPayer string `protobuf:"bytes,12,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` + // Sets the Cache-Control header of the response. + ResponseCacheControl string `protobuf:"bytes,13,opt,name=response_cache_control,json=responseCacheControl,proto3" json:"response_cache_control,omitempty"` + // Sets the Content-Disposition header of the response + ResponseContentDisposition string `protobuf:"bytes,14,opt,name=response_content_disposition,json=responseContentDisposition,proto3" json:"response_content_disposition,omitempty"` + // Sets the Content-Encoding header of the response + ResponseContentEncoding string `protobuf:"bytes,15,opt,name=response_content_encoding,json=responseContentEncoding,proto3" json:"response_content_encoding,omitempty"` + // Sets the Content-Language header of the response + ResponseContentLanguage string `protobuf:"bytes,16,opt,name=response_content_language,json=responseContentLanguage,proto3" json:"response_content_language,omitempty"` + // Sets the Content-Type header of the response + ResponseContentType string `protobuf:"bytes,17,opt,name=response_content_type,json=responseContentType,proto3" json:"response_content_type,omitempty"` + // Sets the Expires header of the response + ResponseExpires string `protobuf:"bytes,18,opt,name=response_expires,json=responseExpires,proto3" json:"response_expires,omitempty"` + // Specifies the algorithm to use to when decrypting the object 
(for example,AES256) + SseCustomerAlgorithm string `protobuf:"bytes,19,opt,name=sse_customer_algorithm,json=sseCustomerAlgorithm,proto3" json:"sse_customer_algorithm,omitempty"` + // Specifies the customer-provided encryption key for Amazon S3 used to encrypt the + // data. This value is used to decrypt the object when recovering it and must match + // the one used when storing the data. The key must be appropriate for use with the + // algorithm specified in the x-amz-server-side-encryption-customer-algorithm header + SseCustomerKey string `protobuf:"bytes,20,opt,name=sse_customer_key,json=sseCustomerKey,proto3" json:"sse_customer_key,omitempty"` + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SseCustomerKeyMd5 string `protobuf:"bytes,21,opt,name=sse_customer_key_md5,json=sseCustomerKeyMd5,proto3" json:"sse_customer_key_md5,omitempty"` + // VersionId used to reference a specific version of the object + VersionId string `protobuf:"bytes,22,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // Specify Accept-Encoding, aws not supported now + AcceptEncoding string `protobuf:"bytes,23,opt,name=accept_encoding,json=acceptEncoding,proto3" json:"accept_encoding,omitempty"` + // Specify the signed url of object, user can get object with signed url without ak、sk + SignedUrl string `protobuf:"bytes,24,opt,name=signed_url,json=signedUrl,proto3" json:"signed_url,omitempty"` +} + +func (x *GetObjectInput) Reset() { + *x = GetObjectInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectInput) ProtoMessage() {} + +func (x *GetObjectInput) ProtoReflect() protoreflect.Message { + 
mi := &file_oss_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectInput.ProtoReflect.Descriptor instead. +func (*GetObjectInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{0} +} + +func (x *GetObjectInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *GetObjectInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *GetObjectInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *GetObjectInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *GetObjectInput) GetIfMatch() string { + if x != nil { + return x.IfMatch + } + return "" +} + +func (x *GetObjectInput) GetIfModifiedSince() int64 { + if x != nil { + return x.IfModifiedSince + } + return 0 +} + +func (x *GetObjectInput) GetIfNoneMatch() string { + if x != nil { + return x.IfNoneMatch + } + return "" +} + +func (x *GetObjectInput) GetIfUnmodifiedSince() int64 { + if x != nil { + return x.IfUnmodifiedSince + } + return 0 +} + +func (x *GetObjectInput) GetPartNumber() int64 { + if x != nil { + return x.PartNumber + } + return 0 +} + +func (x *GetObjectInput) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *GetObjectInput) GetEnd() int64 { + if x != nil { + return x.End + } + return 0 +} + +func (x *GetObjectInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +func (x *GetObjectInput) GetResponseCacheControl() string { + if x != nil { + return x.ResponseCacheControl + } + return "" +} + +func (x *GetObjectInput) GetResponseContentDisposition() string { + if x != nil { + return x.ResponseContentDisposition + } + return "" +} + 
+func (x *GetObjectInput) GetResponseContentEncoding() string { + if x != nil { + return x.ResponseContentEncoding + } + return "" +} + +func (x *GetObjectInput) GetResponseContentLanguage() string { + if x != nil { + return x.ResponseContentLanguage + } + return "" +} + +func (x *GetObjectInput) GetResponseContentType() string { + if x != nil { + return x.ResponseContentType + } + return "" +} + +func (x *GetObjectInput) GetResponseExpires() string { + if x != nil { + return x.ResponseExpires + } + return "" +} + +func (x *GetObjectInput) GetSseCustomerAlgorithm() string { + if x != nil { + return x.SseCustomerAlgorithm + } + return "" +} + +func (x *GetObjectInput) GetSseCustomerKey() string { + if x != nil { + return x.SseCustomerKey + } + return "" +} + +func (x *GetObjectInput) GetSseCustomerKeyMd5() string { + if x != nil { + return x.SseCustomerKeyMd5 + } + return "" +} + +func (x *GetObjectInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *GetObjectInput) GetAcceptEncoding() string { + if x != nil { + return x.AcceptEncoding + } + return "" +} + +func (x *GetObjectInput) GetSignedUrl() string { + if x != nil { + return x.SignedUrl + } + return "" +} + +// GetObjectOutput +type GetObjectOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object data. + Body []byte `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Specifies caching behavior along the request/reply chain. + CacheControl string `protobuf:"bytes,2,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // Specifies presentational information for the object. 
+ ContentDisposition string `protobuf:"bytes,3,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding string `protobuf:"bytes,4,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` + // The language the content is in. + ContentLanguage string `protobuf:"bytes,5,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` + // Size of the body in bytes. + ContentLength int64 `protobuf:"varint,6,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` + // The portion of the object returned in the response. + ContentRange string `protobuf:"bytes,7,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"` + // A standard MIME type describing the format of the object data. + ContentType string `protobuf:"bytes,8,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker bool `protobuf:"varint,9,opt,name=delete_marker,json=deleteMarker,proto3" json:"delete_marker,omitempty"` + // An entity tag (ETag) is an opaque identifier assigned by a web server to a + // specific version of a resource found at a URL. + Etag string `protobuf:"bytes,10,opt,name=etag,proto3" json:"etag,omitempty"` + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is + // URL-encoded. 
+ Expiration string `protobuf:"bytes,11,opt,name=expiration,proto3" json:"expiration,omitempty"` + // The date and time at which the object is no longer cacheable. + Expires string `protobuf:"bytes,12,opt,name=expires,proto3" json:"expires,omitempty"` + // Creation date of the object. + LastModified int64 `protobuf:"varint,13,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + // Version of the object. + VersionId string `protobuf:"bytes,14,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // The number of tags, if any, on the object. + TagCount int64 `protobuf:"varint,15,opt,name=tag_count,json=tagCount,proto3" json:"tag_count,omitempty"` + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. + StorageClass string `protobuf:"bytes,16,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + PartsCount int64 `protobuf:"varint,17,opt,name=parts_count,json=partsCount,proto3" json:"parts_count,omitempty"` + // A map of metadata to store with the object in S3. + // Map keys will be normalized to lower-case. 
+ Metadata map[string]string `protobuf:"bytes,18,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetObjectOutput) Reset() { + *x = GetObjectOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectOutput) ProtoMessage() {} + +func (x *GetObjectOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectOutput.ProtoReflect.Descriptor instead. +func (*GetObjectOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{1} +} + +func (x *GetObjectOutput) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetObjectOutput) GetCacheControl() string { + if x != nil { + return x.CacheControl + } + return "" +} + +func (x *GetObjectOutput) GetContentDisposition() string { + if x != nil { + return x.ContentDisposition + } + return "" +} + +func (x *GetObjectOutput) GetContentEncoding() string { + if x != nil { + return x.ContentEncoding + } + return "" +} + +func (x *GetObjectOutput) GetContentLanguage() string { + if x != nil { + return x.ContentLanguage + } + return "" +} + +func (x *GetObjectOutput) GetContentLength() int64 { + if x != nil { + return x.ContentLength + } + return 0 +} + +func (x *GetObjectOutput) GetContentRange() string { + if x != nil { + return x.ContentRange + } + return "" +} + +func (x *GetObjectOutput) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *GetObjectOutput) 
GetDeleteMarker() bool { + if x != nil { + return x.DeleteMarker + } + return false +} + +func (x *GetObjectOutput) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *GetObjectOutput) GetExpiration() string { + if x != nil { + return x.Expiration + } + return "" +} + +func (x *GetObjectOutput) GetExpires() string { + if x != nil { + return x.Expires + } + return "" +} + +func (x *GetObjectOutput) GetLastModified() int64 { + if x != nil { + return x.LastModified + } + return 0 +} + +func (x *GetObjectOutput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *GetObjectOutput) GetTagCount() int64 { + if x != nil { + return x.TagCount + } + return 0 +} + +func (x *GetObjectOutput) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *GetObjectOutput) GetPartsCount() int64 { + if x != nil { + return x.PartsCount + } + return 0 +} + +func (x *GetObjectOutput) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +// PutObjectInput +type PutObjectInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // Object data. + Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` + // The bucket name to which the PUT action was initiated + // This member is required. + Bucket string `protobuf:"bytes,3,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Object key for which the PUT action was initiated. + // This member is required. 
+ Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + // The canned ACL to apply to the object,different oss provider have different acl type + Acl string `protobuf:"bytes,5,opt,name=acl,proto3" json:"acl,omitempty"` + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool `protobuf:"varint,6,opt,name=bucket_key_enabled,json=bucketKeyEnabled,proto3" json:"bucket_key_enabled,omitempty"` + // Can be used to specify caching behavior along the request/reply chain. + CacheControl string `protobuf:"bytes,7,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // Specifies presentational information for the object. For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + ContentDisposition string `protobuf:"bytes,8,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + ContentEncoding string `protobuf:"bytes,9,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` + // The date and time at which the object is no longer cacheable. For more + // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). 
+ Expires int64 `protobuf:"varint,10,opt,name=expires,proto3" json:"expires,omitempty"` + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption string `protobuf:"bytes,11,opt,name=server_side_encryption,json=serverSideEncryption,proto3" json:"server_side_encryption,omitempty"` + // Specify the signed url of object, user can put object with signed url without ak、sk + SignedUrl string `protobuf:"bytes,12,opt,name=signed_url,json=signedUrl,proto3" json:"signed_url,omitempty"` + // A map of metadata to store with the object in S3. + Meta map[string]string `protobuf:"bytes,13,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + Tagging map[string]string `protobuf:"bytes,14,rep,name=tagging,proto3" json:"tagging,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *PutObjectInput) Reset() { + *x = PutObjectInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutObjectInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutObjectInput) ProtoMessage() {} + +func (x *PutObjectInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutObjectInput.ProtoReflect.Descriptor instead. 
+func (*PutObjectInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{2} +} + +func (x *PutObjectInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *PutObjectInput) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *PutObjectInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *PutObjectInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *PutObjectInput) GetAcl() string { + if x != nil { + return x.Acl + } + return "" +} + +func (x *PutObjectInput) GetBucketKeyEnabled() bool { + if x != nil { + return x.BucketKeyEnabled + } + return false +} + +func (x *PutObjectInput) GetCacheControl() string { + if x != nil { + return x.CacheControl + } + return "" +} + +func (x *PutObjectInput) GetContentDisposition() string { + if x != nil { + return x.ContentDisposition + } + return "" +} + +func (x *PutObjectInput) GetContentEncoding() string { + if x != nil { + return x.ContentEncoding + } + return "" +} + +func (x *PutObjectInput) GetExpires() int64 { + if x != nil { + return x.Expires + } + return 0 +} + +func (x *PutObjectInput) GetServerSideEncryption() string { + if x != nil { + return x.ServerSideEncryption + } + return "" +} + +func (x *PutObjectInput) GetSignedUrl() string { + if x != nil { + return x.SignedUrl + } + return "" +} + +func (x *PutObjectInput) GetMeta() map[string]string { + if x != nil { + return x.Meta + } + return nil +} + +func (x *PutObjectInput) GetTagging() map[string]string { + if x != nil { + return x.Tagging + } + return nil +} + +// PutObjectOutput +type PutObjectOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). 
+ BucketKeyEnabled bool `protobuf:"varint,1,opt,name=bucket_key_enabled,json=bucketKeyEnabled,proto3" json:"bucket_key_enabled,omitempty"` + // Entity tag for the uploaded object. + Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"` + // If the expiration is configured for the object + Expiration string `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` + // If present, indicates that the requester was successfully charged for the request. + RequestCharged string `protobuf:"bytes,4,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"` + // Version of the object. + VersionId string `protobuf:"bytes,5,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *PutObjectOutput) Reset() { + *x = PutObjectOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutObjectOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutObjectOutput) ProtoMessage() {} + +func (x *PutObjectOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutObjectOutput.ProtoReflect.Descriptor instead. 
+func (*PutObjectOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{3} +} + +func (x *PutObjectOutput) GetBucketKeyEnabled() bool { + if x != nil { + return x.BucketKeyEnabled + } + return false +} + +func (x *PutObjectOutput) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *PutObjectOutput) GetExpiration() string { + if x != nil { + return x.Expiration + } + return "" +} + +func (x *PutObjectOutput) GetRequestCharged() string { + if x != nil { + return x.RequestCharged + } + return "" +} + +func (x *PutObjectOutput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// DeleteObjectInput +type DeleteObjectInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name to which the DEL action was initiated + // This member is required. + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Object key for which the DEL action was initiated. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Confirms that the requester knows that they will be charged for the request. + RequestPayer string `protobuf:"bytes,4,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` + // VersionId used to reference a specific version of the object. 
+ VersionId string `protobuf:"bytes,5,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *DeleteObjectInput) Reset() { + *x = DeleteObjectInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectInput) ProtoMessage() {} + +func (x *DeleteObjectInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectInput.ProtoReflect.Descriptor instead. +func (*DeleteObjectInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{4} +} + +func (x *DeleteObjectInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *DeleteObjectInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *DeleteObjectInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *DeleteObjectInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +func (x *DeleteObjectInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// DeleteObjectOutput +type DeleteObjectOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. 
+ DeleteMarker bool `protobuf:"varint,1,opt,name=delete_marker,json=deleteMarker,proto3" json:"delete_marker,omitempty"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged string `protobuf:"bytes,2,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"` + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId string `protobuf:"bytes,3,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *DeleteObjectOutput) Reset() { + *x = DeleteObjectOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectOutput) ProtoMessage() {} + +func (x *DeleteObjectOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectOutput.ProtoReflect.Descriptor instead. +func (*DeleteObjectOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteObjectOutput) GetDeleteMarker() bool { + if x != nil { + return x.DeleteMarker + } + return false +} + +func (x *DeleteObjectOutput) GetRequestCharged() string { + if x != nil { + return x.RequestCharged + } + return "" +} + +func (x *DeleteObjectOutput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// PutObjectTaggingInput +type PutObjectTaggingInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. 
+ StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required. + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Container for the TagSet and Tag elements + Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The versionId of the object that the tag-set will be added to. + VersionId string `protobuf:"bytes,5,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *PutObjectTaggingInput) Reset() { + *x = PutObjectTaggingInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutObjectTaggingInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutObjectTaggingInput) ProtoMessage() {} + +func (x *PutObjectTaggingInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutObjectTaggingInput.ProtoReflect.Descriptor instead. 
+func (*PutObjectTaggingInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{6} +} + +func (x *PutObjectTaggingInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *PutObjectTaggingInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *PutObjectTaggingInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *PutObjectTaggingInput) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *PutObjectTaggingInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// PutObjectTaggingOutput +type PutObjectTaggingOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The versionId of the object the tag-set was added to. + VersionId string `protobuf:"bytes,1,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // Metadata pertaining to the operation's result. 
+ ResultMetadata map[string]string `protobuf:"bytes,2,rep,name=result_metadata,json=resultMetadata,proto3" json:"result_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *PutObjectTaggingOutput) Reset() { + *x = PutObjectTaggingOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutObjectTaggingOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutObjectTaggingOutput) ProtoMessage() {} + +func (x *PutObjectTaggingOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutObjectTaggingOutput.ProtoReflect.Descriptor instead. +func (*PutObjectTaggingOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{7} +} + +func (x *PutObjectTaggingOutput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *PutObjectTaggingOutput) GetResultMetadata() map[string]string { + if x != nil { + return x.ResultMetadata + } + return nil +} + +// DeleteObjectTaggingInput +type DeleteObjectTaggingInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the objects from which to remove the tags. + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // The key that identifies the object in the bucket from which to remove all tags. + // This member is required. 
+ Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // The versionId of the object that the tag-set will be removed from. + VersionId string `protobuf:"bytes,4,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner string `protobuf:"bytes,5,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` +} + +func (x *DeleteObjectTaggingInput) Reset() { + *x = DeleteObjectTaggingInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectTaggingInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectTaggingInput) ProtoMessage() {} + +func (x *DeleteObjectTaggingInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectTaggingInput.ProtoReflect.Descriptor instead. 
+func (*DeleteObjectTaggingInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{8} +} + +func (x *DeleteObjectTaggingInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *DeleteObjectTaggingInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *DeleteObjectTaggingInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *DeleteObjectTaggingInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *DeleteObjectTaggingInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +// DeleteObjectTaggingOutput +type DeleteObjectTaggingOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The versionId of the object the tag-set was removed from. + VersionId string `protobuf:"bytes,1,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // Metadata pertaining to the operation's result. 
+ ResultMetadata map[string]string `protobuf:"bytes,2,rep,name=result_metadata,json=resultMetadata,proto3" json:"result_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DeleteObjectTaggingOutput) Reset() { + *x = DeleteObjectTaggingOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectTaggingOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectTaggingOutput) ProtoMessage() {} + +func (x *DeleteObjectTaggingOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectTaggingOutput.ProtoReflect.Descriptor instead. +func (*DeleteObjectTaggingOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{9} +} + +func (x *DeleteObjectTaggingOutput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *DeleteObjectTaggingOutput) GetResultMetadata() map[string]string { + if x != nil { + return x.ResultMetadata + } + return nil +} + +// GetObjectTaggingInput +type GetObjectTaggingInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object for which to get the tagging information. + // This member is required. + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Object key for which to get the tagging information. 
+ // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // The versionId of the object for which to get the tagging information. + VersionId string `protobuf:"bytes,4,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner string `protobuf:"bytes,5,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // Confirms that the requester knows that they will be charged for the request. + RequestPayer string `protobuf:"bytes,6,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` +} + +func (x *GetObjectTaggingInput) Reset() { + *x = GetObjectTaggingInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectTaggingInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectTaggingInput) ProtoMessage() {} + +func (x *GetObjectTaggingInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectTaggingInput.ProtoReflect.Descriptor instead. 
+func (*GetObjectTaggingInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{10} +} + +func (x *GetObjectTaggingInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *GetObjectTaggingInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *GetObjectTaggingInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *GetObjectTaggingInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *GetObjectTaggingInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *GetObjectTaggingInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +// GetObjectTaggingOutput +type GetObjectTaggingOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Contains the tag set. + // This member is required. + Tags map[string]string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The versionId of the object for which you got the tagging information. + VersionId string `protobuf:"bytes,2,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // Metadata pertaining to the operation's result. 
+ ResultMetadata map[string]string `protobuf:"bytes,3,rep,name=result_metadata,json=resultMetadata,proto3" json:"result_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetObjectTaggingOutput) Reset() { + *x = GetObjectTaggingOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectTaggingOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectTaggingOutput) ProtoMessage() {} + +func (x *GetObjectTaggingOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectTaggingOutput.ProtoReflect.Descriptor instead. 
+func (*GetObjectTaggingOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{11} +} + +func (x *GetObjectTaggingOutput) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *GetObjectTaggingOutput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *GetObjectTaggingOutput) GetResultMetadata() map[string]string { + if x != nil { + return x.ResultMetadata + } + return nil +} + +// CopySource +type CopySource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // source object bucket name + CopySourceBucket string `protobuf:"bytes,1,opt,name=copy_source_bucket,json=copySourceBucket,proto3" json:"copy_source_bucket,omitempty"` + // source object name + CopySourceKey string `protobuf:"bytes,2,opt,name=copy_source_key,json=copySourceKey,proto3" json:"copy_source_key,omitempty"` + // source object version + CopySourceVersionId string `protobuf:"bytes,3,opt,name=copy_source_version_id,json=copySourceVersionId,proto3" json:"copy_source_version_id,omitempty"` +} + +func (x *CopySource) Reset() { + *x = CopySource{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopySource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopySource) ProtoMessage() {} + +func (x *CopySource) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopySource.ProtoReflect.Descriptor instead. 
+func (*CopySource) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{12} +} + +func (x *CopySource) GetCopySourceBucket() string { + if x != nil { + return x.CopySourceBucket + } + return "" +} + +func (x *CopySource) GetCopySourceKey() string { + if x != nil { + return x.CopySourceKey + } + return "" +} + +func (x *CopySource) GetCopySourceVersionId() string { + if x != nil { + return x.CopySourceVersionId + } + return "" +} + +// CopyObjectInput +type CopyObjectInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The name of the destination bucket. When using this action with an access point + // This member is required. + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // The key of the destination object. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // CopySource + CopySource *CopySource `protobuf:"bytes,4,opt,name=copy_source,json=copySource,proto3" json:"copy_source,omitempty"` + // The tag-set for the object destination object this value must be used in + // conjunction with the TaggingDirective. The tag-set must be encoded as URL Query + // parameters. + Tagging map[string]string `protobuf:"bytes,5,rep,name=tagging,proto3" json:"tagging,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The date and time at which the object is no longer cacheable. + Expires int64 `protobuf:"varint,6,opt,name=expires,proto3" json:"expires,omitempty"` + // Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. 
+ MetadataDirective string `protobuf:"bytes,7,opt,name=metadata_directive,json=metadataDirective,proto3" json:"metadata_directive,omitempty"` + // A map of metadata to store with the object in S3. + Metadata map[string]string `protobuf:"bytes,8,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CopyObjectInput) Reset() { + *x = CopyObjectInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyObjectInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyObjectInput) ProtoMessage() {} + +func (x *CopyObjectInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyObjectInput.ProtoReflect.Descriptor instead. 
+func (*CopyObjectInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{13} +} + +func (x *CopyObjectInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *CopyObjectInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *CopyObjectInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *CopyObjectInput) GetCopySource() *CopySource { + if x != nil { + return x.CopySource + } + return nil +} + +func (x *CopyObjectInput) GetTagging() map[string]string { + if x != nil { + return x.Tagging + } + return nil +} + +func (x *CopyObjectInput) GetExpires() int64 { + if x != nil { + return x.Expires + } + return 0 +} + +func (x *CopyObjectInput) GetMetadataDirective() string { + if x != nil { + return x.MetadataDirective + } + return "" +} + +func (x *CopyObjectInput) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +// CopyObjectResult +type CopyObjectResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. + Etag string `protobuf:"bytes,1,opt,name=etag,proto3" json:"etag,omitempty"` + // Creation date of the object. 
+ LastModified int64 `protobuf:"varint,2,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` +} + +func (x *CopyObjectResult) Reset() { + *x = CopyObjectResult{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyObjectResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyObjectResult) ProtoMessage() {} + +func (x *CopyObjectResult) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyObjectResult.ProtoReflect.Descriptor instead. +func (*CopyObjectResult) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{14} +} + +func (x *CopyObjectResult) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *CopyObjectResult) GetLastModified() int64 { + if x != nil { + return x.LastModified + } + return 0 +} + +// CopyObjectOutput +type CopyObjectOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Container for all response elements. + CopyObjectResult *CopyObjectResult `protobuf:"bytes,1,opt,name=copy_object_result,json=copyObjectResult,proto3" json:"copy_object_result,omitempty"` + // Version ID of the newly created copy. + VersionId string `protobuf:"bytes,2,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // If the object expiration is configured, the response includes this header. 
+ Expiration string `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` +} + +func (x *CopyObjectOutput) Reset() { + *x = CopyObjectOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyObjectOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyObjectOutput) ProtoMessage() {} + +func (x *CopyObjectOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyObjectOutput.ProtoReflect.Descriptor instead. +func (*CopyObjectOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{15} +} + +func (x *CopyObjectOutput) GetCopyObjectResult() *CopyObjectResult { + if x != nil { + return x.CopyObjectResult + } + return nil +} + +func (x *CopyObjectOutput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *CopyObjectOutput) GetExpiration() string { + if x != nil { + return x.Expiration + } + return "" +} + +// Delete +type Delete struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ObjectIdentifier + Objects []*ObjectIdentifier `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"` + // Element to enable quiet mode for the request. When you add this element, you + // must set its value to true. 
+ Quiet bool `protobuf:"varint,2,opt,name=quiet,proto3" json:"quiet,omitempty"` +} + +func (x *Delete) Reset() { + *x = Delete{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Delete) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Delete) ProtoMessage() {} + +func (x *Delete) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Delete.ProtoReflect.Descriptor instead. +func (*Delete) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{16} +} + +func (x *Delete) GetObjects() []*ObjectIdentifier { + if x != nil { + return x.Objects + } + return nil +} + +func (x *Delete) GetQuiet() bool { + if x != nil { + return x.Quiet + } + return false +} + +// ObjectIdentifier +type ObjectIdentifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key name of the object. + // This member is required. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // VersionId for the specific version of the object to delete. 
+ VersionId string `protobuf:"bytes,2,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *ObjectIdentifier) Reset() { + *x = ObjectIdentifier{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifier) ProtoMessage() {} + +func (x *ObjectIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifier.ProtoReflect.Descriptor instead. +func (*ObjectIdentifier) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{17} +} + +func (x *ObjectIdentifier) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *ObjectIdentifier) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// DeleteObjectsInput +type DeleteObjectsInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Delete objects + Delete *Delete `protobuf:"bytes,3,opt,name=delete,proto3" json:"delete,omitempty"` + // Confirms that the requester knows that they will be charged for the request. 
+ RequestPayer string `protobuf:"bytes,4,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` +} + +func (x *DeleteObjectsInput) Reset() { + *x = DeleteObjectsInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectsInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectsInput) ProtoMessage() {} + +func (x *DeleteObjectsInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectsInput.ProtoReflect.Descriptor instead. +func (*DeleteObjectsInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{18} +} + +func (x *DeleteObjectsInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *DeleteObjectsInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *DeleteObjectsInput) GetDelete() *Delete { + if x != nil { + return x.Delete + } + return nil +} + +func (x *DeleteObjectsInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +// DeletedObject +type DeletedObject struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. 
+ DeleteMarker bool `protobuf:"varint,1,opt,name=delete_marker,json=deleteMarker,proto3" json:"delete_marker,omitempty"` + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header is + // the version ID of the object version deleted. + DeleteMarkerVersionId string `protobuf:"bytes,2,opt,name=delete_marker_version_id,json=deleteMarkerVersionId,proto3" json:"delete_marker_version_id,omitempty"` + // The name of the deleted object. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // The version ID of the deleted object. + VersionId string `protobuf:"bytes,4,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *DeletedObject) Reset() { + *x = DeletedObject{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeletedObject) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeletedObject) ProtoMessage() {} + +func (x *DeletedObject) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeletedObject.ProtoReflect.Descriptor instead. 
+func (*DeletedObject) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{19} +} + +func (x *DeletedObject) GetDeleteMarker() bool { + if x != nil { + return x.DeleteMarker + } + return false +} + +func (x *DeletedObject) GetDeleteMarkerVersionId() string { + if x != nil { + return x.DeleteMarkerVersionId + } + return "" +} + +func (x *DeletedObject) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *DeletedObject) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// DeleteObjectsOutput +type DeleteObjectsOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DeletedObject + Deleted []*DeletedObject `protobuf:"bytes,1,rep,name=deleted,proto3" json:"deleted,omitempty"` +} + +func (x *DeleteObjectsOutput) Reset() { + *x = DeleteObjectsOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectsOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectsOutput) ProtoMessage() {} + +func (x *DeleteObjectsOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectsOutput.ProtoReflect.Descriptor instead. 
+func (*DeleteObjectsOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{20} +} + +func (x *DeleteObjectsOutput) GetDeleted() []*DeletedObject { + if x != nil { + return x.Deleted + } + return nil +} + +// ListObjectsInput +type ListObjectsInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // A delimiter is a character you use to group keys. + Delimiter string `protobuf:"bytes,3,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters with an + // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType string `protobuf:"bytes,4,opt,name=encoding_type,json=encodingType,proto3" json:"encoding_type,omitempty"` + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner string `protobuf:"bytes,5,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. Marker can be any key in the bucket. 
+ Marker string `protobuf:"bytes,6,opt,name=marker,proto3" json:"marker,omitempty"` + // Sets the maximum number of keys returned in the response. By default the action + // returns up to 1,000 key names. The response might contain fewer keys but will + // never contain more. + MaxKeys int32 `protobuf:"varint,7,opt,name=maxKeys,proto3" json:"maxKeys,omitempty"` + // Limits the response to keys that begin with the specified prefix. + Prefix string `protobuf:"bytes,8,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Confirms that the requester knows that they will be charged for the request. + RequestPayer string `protobuf:"bytes,9,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` +} + +func (x *ListObjectsInput) Reset() { + *x = ListObjectsInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsInput) ProtoMessage() {} + +func (x *ListObjectsInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsInput.ProtoReflect.Descriptor instead. 
+func (*ListObjectsInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{21} +} + +func (x *ListObjectsInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *ListObjectsInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ListObjectsInput) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListObjectsInput) GetEncodingType() string { + if x != nil { + return x.EncodingType + } + return "" +} + +func (x *ListObjectsInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *ListObjectsInput) GetMarker() string { + if x != nil { + return x.Marker + } + return "" +} + +func (x *ListObjectsInput) GetMaxKeys() int32 { + if x != nil { + return x.MaxKeys + } + return 0 +} + +func (x *ListObjectsInput) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListObjectsInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +// ListObjectsOutput +type ListObjectsOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // CommonPrefixes + CommonPrefixes []string `protobuf:"bytes,1,rep,name=common_prefixes,json=commonPrefixes,proto3" json:"common_prefixes,omitempty"` + // Objects contents + Contents []*Object `protobuf:"bytes,2,rep,name=contents,proto3" json:"contents,omitempty"` + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element in the + // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in + // the response. Each rolled-up result counts as only one return against the + // MaxKeys value. 
+ Delimiter string `protobuf:"bytes,3,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType string `protobuf:"bytes,4,opt,name=encoding_type,json=encodingType,proto3" json:"encoding_type,omitempty"` + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + IsTruncated bool `protobuf:"varint,5,opt,name=is_truncated,json=isTruncated,proto3" json:"is_truncated,omitempty"` + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. + Marker string `protobuf:"bytes,6,opt,name=marker,proto3" json:"marker,omitempty"` + // The maximum number of keys returned in the response body. + MaxKeys int32 `protobuf:"varint,7,opt,name=max_keys,json=maxKeys,proto3" json:"max_keys,omitempty"` + // The bucket name. + Name string `protobuf:"bytes,8,opt,name=name,proto3" json:"name,omitempty"` + // When response is truncated (the IsTruncated element value in the response is + // true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. + NextMarker string `protobuf:"bytes,9,opt,name=next_marker,json=nextMarker,proto3" json:"next_marker,omitempty"` + // Keys that begin with the indicated prefix. 
+ Prefix string `protobuf:"bytes,10,opt,name=prefix,proto3" json:"prefix,omitempty"` +} + +func (x *ListObjectsOutput) Reset() { + *x = ListObjectsOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsOutput) ProtoMessage() {} + +func (x *ListObjectsOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsOutput.ProtoReflect.Descriptor instead. +func (*ListObjectsOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{22} +} + +func (x *ListObjectsOutput) GetCommonPrefixes() []string { + if x != nil { + return x.CommonPrefixes + } + return nil +} + +func (x *ListObjectsOutput) GetContents() []*Object { + if x != nil { + return x.Contents + } + return nil +} + +func (x *ListObjectsOutput) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListObjectsOutput) GetEncodingType() string { + if x != nil { + return x.EncodingType + } + return "" +} + +func (x *ListObjectsOutput) GetIsTruncated() bool { + if x != nil { + return x.IsTruncated + } + return false +} + +func (x *ListObjectsOutput) GetMarker() string { + if x != nil { + return x.Marker + } + return "" +} + +func (x *ListObjectsOutput) GetMaxKeys() int32 { + if x != nil { + return x.MaxKeys + } + return 0 +} + +func (x *ListObjectsOutput) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListObjectsOutput) GetNextMarker() string { + if x != nil { + return x.NextMarker + } + return "" +} + +func (x *ListObjectsOutput) GetPrefix() 
string { + if x != nil { + return x.Prefix + } + return "" +} + +// Owner +type Owner struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Owner display name + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Owner id + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *Owner) Reset() { + *x = Owner{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Owner) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Owner) ProtoMessage() {} + +func (x *Owner) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Owner.ProtoReflect.Descriptor instead. +func (*Owner) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{23} +} + +func (x *Owner) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Owner) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +// Object +type Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entity tag is a hash of the object + Etag string `protobuf:"bytes,1,opt,name=etag,proto3" json:"etag,omitempty"` + // The name that you assign to an object. You use the object key to retrieve the + // object. + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Creation date of the object. 
+ LastModified int64 `protobuf:"varint,3,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + // The owner of the object + Owner *Owner `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + // Size in bytes of the object + Size int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` + // The class of storage used to store the object. + StorageClass string `protobuf:"bytes,6,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` +} + +func (x *Object) Reset() { + *x = Object{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Object) ProtoMessage() {} + +func (x *Object) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Object.ProtoReflect.Descriptor instead. 
+func (*Object) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{24} +} + +func (x *Object) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Object) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Object) GetLastModified() int64 { + if x != nil { + return x.LastModified + } + return 0 +} + +func (x *Object) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *Object) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *Object) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +// GetObjectCannedAclInput +type GetObjectCannedAclInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. 
+ Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // VersionId used to reference a specific version of the object + VersionId string `protobuf:"bytes,4,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *GetObjectCannedAclInput) Reset() { + *x = GetObjectCannedAclInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectCannedAclInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectCannedAclInput) ProtoMessage() {} + +func (x *GetObjectCannedAclInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectCannedAclInput.ProtoReflect.Descriptor instead. 
+func (*GetObjectCannedAclInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{25} +} + +func (x *GetObjectCannedAclInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *GetObjectCannedAclInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *GetObjectCannedAclInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *GetObjectCannedAclInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// GetObjectCannedAclOutput +type GetObjectCannedAclOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object CannedACL + CannedAcl string `protobuf:"bytes,1,opt,name=canned_acl,json=cannedAcl,proto3" json:"canned_acl,omitempty"` + // Owner + Owner *Owner `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged string `protobuf:"bytes,3,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"` +} + +func (x *GetObjectCannedAclOutput) Reset() { + *x = GetObjectCannedAclOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectCannedAclOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectCannedAclOutput) ProtoMessage() {} + +func (x *GetObjectCannedAclOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectCannedAclOutput.ProtoReflect.Descriptor instead. 
+func (*GetObjectCannedAclOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{26} +} + +func (x *GetObjectCannedAclOutput) GetCannedAcl() string { + if x != nil { + return x.CannedAcl + } + return "" +} + +func (x *GetObjectCannedAclOutput) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *GetObjectCannedAclOutput) GetRequestCharged() string { + if x != nil { + return x.RequestCharged + } + return "" +} + +// PutObjectCannedAclInput +type PutObjectCannedAclInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // The canned ACL to apply to the object + Acl string `protobuf:"bytes,4,opt,name=acl,proto3" json:"acl,omitempty"` + // VersionId used to reference a specific version of the object. 
+ VersionId string `protobuf:"bytes,5,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *PutObjectCannedAclInput) Reset() { + *x = PutObjectCannedAclInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutObjectCannedAclInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutObjectCannedAclInput) ProtoMessage() {} + +func (x *PutObjectCannedAclInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutObjectCannedAclInput.ProtoReflect.Descriptor instead. +func (*PutObjectCannedAclInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{27} +} + +func (x *PutObjectCannedAclInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *PutObjectCannedAclInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *PutObjectCannedAclInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *PutObjectCannedAclInput) GetAcl() string { + if x != nil { + return x.Acl + } + return "" +} + +func (x *PutObjectCannedAclInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// PutObjectCannedAclOutput +type PutObjectCannedAclOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Request charged + RequestCharged string `protobuf:"bytes,1,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"` +} + +func (x *PutObjectCannedAclOutput) Reset() { + *x = PutObjectCannedAclOutput{} + if 
protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutObjectCannedAclOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutObjectCannedAclOutput) ProtoMessage() {} + +func (x *PutObjectCannedAclOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutObjectCannedAclOutput.ProtoReflect.Descriptor instead. +func (*PutObjectCannedAclOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{28} +} + +func (x *PutObjectCannedAclOutput) GetRequestCharged() string { + if x != nil { + return x.RequestCharged + } + return "" +} + +// RestoreObjectInput +type RestoreObjectInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // VersionId used to reference a specific version of the object. 
+ VersionId string `protobuf:"bytes,5,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *RestoreObjectInput) Reset() { + *x = RestoreObjectInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreObjectInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreObjectInput) ProtoMessage() {} + +func (x *RestoreObjectInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreObjectInput.ProtoReflect.Descriptor instead. +func (*RestoreObjectInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{29} +} + +func (x *RestoreObjectInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *RestoreObjectInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *RestoreObjectInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *RestoreObjectInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// RestoreObjectOutput +type RestoreObjectOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged string `protobuf:"bytes,1,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"` + // Indicates the path in the provided S3 output location where Select results will + // be restored to. 
+ RestoreOutputPath string `protobuf:"bytes,2,opt,name=restore_output_path,json=restoreOutputPath,proto3" json:"restore_output_path,omitempty"` +} + +func (x *RestoreObjectOutput) Reset() { + *x = RestoreObjectOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreObjectOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreObjectOutput) ProtoMessage() {} + +func (x *RestoreObjectOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreObjectOutput.ProtoReflect.Descriptor instead. +func (*RestoreObjectOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{30} +} + +func (x *RestoreObjectOutput) GetRequestCharged() string { + if x != nil { + return x.RequestCharged + } + return "" +} + +func (x *RestoreObjectOutput) GetRestoreOutputPath() string { + if x != nil { + return x.RestoreOutputPath + } + return "" +} + +// CreateMultipartUploadInput +type CreateMultipartUploadInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // The canned ACL to apply to the object. This action is not supported by Amazon S3 + // on Outposts. 
+ Acl string `protobuf:"bytes,4,opt,name=acl,proto3" json:"acl,omitempty"` + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true + // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. + BucketKeyEnabled bool `protobuf:"varint,5,opt,name=bucket_key_enabled,json=bucketKeyEnabled,proto3" json:"bucket_key_enabled,omitempty"` + // Specifies caching behavior along the request/reply chain + CacheControl string `protobuf:"bytes,6,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // Specifies presentational information for the object + ContentDisposition string `protobuf:"bytes,7,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding string `protobuf:"bytes,8,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` + // The language the content is in. + ContentLanguage string `protobuf:"bytes,9,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` + // A standard MIME type describing the format of the object data. + ContentType string `protobuf:"bytes,10,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner string `protobuf:"bytes,11,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // The date and time at which the object is no longer cacheable. + Expires int64 `protobuf:"varint,12,opt,name=expires,proto3" json:"expires,omitempty"` + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This + // action is not supported by Amazon S3 on Outposts. + GrantFullControl string `protobuf:"bytes,13,opt,name=grant_full_control,json=grantFullControl,proto3" json:"grant_full_control,omitempty"` + // Allows grantee to read the object data and its metadata. This action is not + // supported by Amazon S3 on Outposts. + GrantRead string `protobuf:"bytes,14,opt,name=grant_read,json=grantRead,proto3" json:"grant_read,omitempty"` + // Allows grantee to read the object ACL. This action is not supported by Amazon S3 + // on Outposts. + GrantReadAcp string `protobuf:"bytes,15,opt,name=grant_read_acp,json=grantReadAcp,proto3" json:"grant_read_acp,omitempty"` + // Allows grantee to write the ACL for the applicable object. This action is not + // supported by Amazon S3 on Outposts. 
+ GrantWriteAcp string `protobuf:"bytes,16,opt,name=grant_write_acp,json=grantWriteAcp,proto3" json:"grant_write_acp,omitempty"` + // A map of metadata to store with the object + MetaData map[string]string `protobuf:"bytes,17,rep,name=meta_data,json=metaData,proto3" json:"meta_data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Specifies whether you want to apply a legal hold to the uploaded object + ObjectLockLegalHoldStatus string `protobuf:"bytes,18,opt,name=object_lock_legal_hold_status,json=objectLockLegalHoldStatus,proto3" json:"object_lock_legal_hold_status,omitempty"` + // Specifies the Object Lock mode that you want to apply to the uploaded object + ObjectLockMode string `protobuf:"bytes,19,opt,name=object_lock_mode,json=objectLockMode,proto3" json:"object_lock_mode,omitempty"` + // Specifies the date and time when you want the Object Lock to expire + ObjectLockRetainUntilDate int64 `protobuf:"varint,20,opt,name=object_lock_retain_until_date,json=objectLockRetainUntilDate,proto3" json:"object_lock_retain_until_date,omitempty"` + // Confirms that the requester knows that they will be charged for the request + RequestPayer string `protobuf:"bytes,21,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). 
+ SseCustomerAlgorithm string `protobuf:"bytes,22,opt,name=sse_customer_algorithm,json=sseCustomerAlgorithm,proto3" json:"sse_customer_algorithm,omitempty"` + // Specifies the customer-provided encryption key to use in encrypting data + SseCustomerKey string `protobuf:"bytes,23,opt,name=sse_customer_key,json=sseCustomerKey,proto3" json:"sse_customer_key,omitempty"` + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 + SseCustomerKeyMd5 string `protobuf:"bytes,24,opt,name=sse_customer_key_md5,json=sseCustomerKeyMd5,proto3" json:"sse_customer_key_md5,omitempty"` + // Specifies the Amazon Web Services KMS Encryption Context to use for object encryption + SseKmsEncryptionContext string `protobuf:"bytes,25,opt,name=sse_kms_encryption_context,json=sseKmsEncryptionContext,proto3" json:"sse_kms_encryption_context,omitempty"` + // Specifies the ID of the symmetric customer managed key to use for object encryption + SseKmsKeyId string `protobuf:"bytes,26,opt,name=sse_kms_key_id,json=sseKmsKeyId,proto3" json:"sse_kms_key_id,omitempty"` + // The server-side encryption algorithm used when storing this object + ServerSideEncryption string `protobuf:"bytes,27,opt,name=server_side_encryption,json=serverSideEncryption,proto3" json:"server_side_encryption,omitempty"` + // By default, oss store uses the STANDARD Storage Class to store newly created objects + StorageClass string `protobuf:"bytes,28,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + Tagging map[string]string `protobuf:"bytes,29,rep,name=tagging,proto3" json:"tagging,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. 
+ WebsiteRedirectLocation string `protobuf:"bytes,30,opt,name=website_redirect_location,json=websiteRedirectLocation,proto3" json:"website_redirect_location,omitempty"` +} + +func (x *CreateMultipartUploadInput) Reset() { + *x = CreateMultipartUploadInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateMultipartUploadInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateMultipartUploadInput) ProtoMessage() {} + +func (x *CreateMultipartUploadInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateMultipartUploadInput.ProtoReflect.Descriptor instead. +func (*CreateMultipartUploadInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{31} +} + +func (x *CreateMultipartUploadInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *CreateMultipartUploadInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *CreateMultipartUploadInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *CreateMultipartUploadInput) GetAcl() string { + if x != nil { + return x.Acl + } + return "" +} + +func (x *CreateMultipartUploadInput) GetBucketKeyEnabled() bool { + if x != nil { + return x.BucketKeyEnabled + } + return false +} + +func (x *CreateMultipartUploadInput) GetCacheControl() string { + if x != nil { + return x.CacheControl + } + return "" +} + +func (x *CreateMultipartUploadInput) GetContentDisposition() string { + if x != nil { + return x.ContentDisposition + } + return "" +} + +func (x *CreateMultipartUploadInput) 
GetContentEncoding() string { + if x != nil { + return x.ContentEncoding + } + return "" +} + +func (x *CreateMultipartUploadInput) GetContentLanguage() string { + if x != nil { + return x.ContentLanguage + } + return "" +} + +func (x *CreateMultipartUploadInput) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *CreateMultipartUploadInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *CreateMultipartUploadInput) GetExpires() int64 { + if x != nil { + return x.Expires + } + return 0 +} + +func (x *CreateMultipartUploadInput) GetGrantFullControl() string { + if x != nil { + return x.GrantFullControl + } + return "" +} + +func (x *CreateMultipartUploadInput) GetGrantRead() string { + if x != nil { + return x.GrantRead + } + return "" +} + +func (x *CreateMultipartUploadInput) GetGrantReadAcp() string { + if x != nil { + return x.GrantReadAcp + } + return "" +} + +func (x *CreateMultipartUploadInput) GetGrantWriteAcp() string { + if x != nil { + return x.GrantWriteAcp + } + return "" +} + +func (x *CreateMultipartUploadInput) GetMetaData() map[string]string { + if x != nil { + return x.MetaData + } + return nil +} + +func (x *CreateMultipartUploadInput) GetObjectLockLegalHoldStatus() string { + if x != nil { + return x.ObjectLockLegalHoldStatus + } + return "" +} + +func (x *CreateMultipartUploadInput) GetObjectLockMode() string { + if x != nil { + return x.ObjectLockMode + } + return "" +} + +func (x *CreateMultipartUploadInput) GetObjectLockRetainUntilDate() int64 { + if x != nil { + return x.ObjectLockRetainUntilDate + } + return 0 +} + +func (x *CreateMultipartUploadInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +func (x *CreateMultipartUploadInput) GetSseCustomerAlgorithm() string { + if x != nil { + return x.SseCustomerAlgorithm + } + return "" +} + +func (x *CreateMultipartUploadInput) GetSseCustomerKey() 
string { + if x != nil { + return x.SseCustomerKey + } + return "" +} + +func (x *CreateMultipartUploadInput) GetSseCustomerKeyMd5() string { + if x != nil { + return x.SseCustomerKeyMd5 + } + return "" +} + +func (x *CreateMultipartUploadInput) GetSseKmsEncryptionContext() string { + if x != nil { + return x.SseKmsEncryptionContext + } + return "" +} + +func (x *CreateMultipartUploadInput) GetSseKmsKeyId() string { + if x != nil { + return x.SseKmsKeyId + } + return "" +} + +func (x *CreateMultipartUploadInput) GetServerSideEncryption() string { + if x != nil { + return x.ServerSideEncryption + } + return "" +} + +func (x *CreateMultipartUploadInput) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *CreateMultipartUploadInput) GetTagging() map[string]string { + if x != nil { + return x.Tagging + } + return nil +} + +func (x *CreateMultipartUploadInput) GetWebsiteRedirectLocation() string { + if x != nil { + return x.WebsiteRedirectLocation + } + return "" +} + +// CreateMultipartUploadOutput +type CreateMultipartUploadOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object name + // in the request, the response includes this header + AbortDate int64 `protobuf:"varint,3,opt,name=abort_date,json=abortDate,proto3" json:"abort_date,omitempty"` + // It identifies the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. 
+ AbortRuleId string `protobuf:"bytes,4,opt,name=abort_rule_id,json=abortRuleId,proto3" json:"abort_rule_id,omitempty"` + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool `protobuf:"varint,5,opt,name=bucket_key_enabled,json=bucketKeyEnabled,proto3" json:"bucket_key_enabled,omitempty"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged string `protobuf:"bytes,6,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"` + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SseCustomerAlgorithm string `protobuf:"bytes,7,opt,name=sse_customer_algorithm,json=sseCustomerAlgorithm,proto3" json:"sse_customer_algorithm,omitempty"` + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SseCustomerKeyMd5 string `protobuf:"bytes,8,opt,name=sse_customer_key_md5,json=sseCustomerKeyMd5,proto3" json:"sse_customer_key_md5,omitempty"` + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. + SseKmsEncryptionContext string `protobuf:"bytes,9,opt,name=sse_kms_encryption_context,json=sseKmsEncryptionContext,proto3" json:"sse_kms_encryption_context,omitempty"` + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. 
+ SseKmsKeyId string `protobuf:"bytes,10,opt,name=sse_kms_key_id,json=sseKmsKeyId,proto3" json:"sse_kms_key_id,omitempty"` + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption string `protobuf:"bytes,11,opt,name=server_side_encryption,json=serverSideEncryption,proto3" json:"server_side_encryption,omitempty"` + // ID for the initiated multipart upload. + UploadId string `protobuf:"bytes,12,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *CreateMultipartUploadOutput) Reset() { + *x = CreateMultipartUploadOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateMultipartUploadOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateMultipartUploadOutput) ProtoMessage() {} + +func (x *CreateMultipartUploadOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateMultipartUploadOutput.ProtoReflect.Descriptor instead. 
// NOTE(review): machine-generated protobuf bindings (protoc-gen-go style:
// protoimpl state machinery, file_oss_proto_* descriptor tables) carried
// inside a patch hunk; the leading diff markers are presentation only and
// have been stripped here. Do not hand-edit — regenerate from the .proto.

func (*CreateMultipartUploadOutput) Descriptor() ([]byte, []int) {
	// Index path {32}: this type is message #32 in the oss proto file.
	return file_oss_proto_rawDescGZIP(), []int{32}
}

// The accessors below follow the generated-getter contract: each is safe to
// call on a nil receiver and returns the field's zero value in that case.

func (x *CreateMultipartUploadOutput) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetAbortDate() int64 {
	if x != nil {
		return x.AbortDate
	}
	return 0
}

func (x *CreateMultipartUploadOutput) GetAbortRuleId() string {
	if x != nil {
		return x.AbortRuleId
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetBucketKeyEnabled() bool {
	if x != nil {
		return x.BucketKeyEnabled
	}
	return false
}

func (x *CreateMultipartUploadOutput) GetRequestCharged() string {
	if x != nil {
		return x.RequestCharged
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetSseCustomerAlgorithm() string {
	if x != nil {
		return x.SseCustomerAlgorithm
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetSseCustomerKeyMd5() string {
	if x != nil {
		return x.SseCustomerKeyMd5
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetSseKmsEncryptionContext() string {
	if x != nil {
		return x.SseKmsEncryptionContext
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetSseKmsKeyId() string {
	if x != nil {
		return x.SseKmsKeyId
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetServerSideEncryption() string {
	if x != nil {
		return x.ServerSideEncryption
	}
	return ""
}

func (x *CreateMultipartUploadOutput) GetUploadId() string {
	if x != nil {
		return x.UploadId
	}
	return ""
}

// UploadPartInput
// Request message for uploading one part of a multipart upload; field tags
// mirror the corresponding message in the oss .proto definition.
type UploadPartInput struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The name of oss store.
	StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"`
	// The bucket name containing the object
	// This member is required
	Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
	// Name of the object key.
	// This member is required.
	Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
	// Object data.
	Body []byte `protobuf:"bytes,4,opt,name=body,proto3" json:"body,omitempty"`
	// Size of the body in bytes. This parameter is useful when the size of the body
	// cannot be determined automatically.
	ContentLength int64 `protobuf:"varint,5,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"`
	// The base64-encoded 128-bit MD5 digest of the part data.
	ContentMd5 string `protobuf:"bytes,6,opt,name=content_md5,json=contentMd5,proto3" json:"content_md5,omitempty"`
	// The account ID of the expected bucket owner
	ExpectedBucketOwner string `protobuf:"bytes,7,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"`
	// Part number of part being uploaded. This is a positive integer between 1 and 10,000.
	// This member is required.
	PartNumber int32 `protobuf:"varint,8,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"`
	// Confirms that the requester knows that they will be charged for the request.
	RequestPayer string `protobuf:"bytes,9,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"`
	// Specifies the algorithm to use to when encrypting the object (for example,
	// AES256).
	SseCustomerAlgorithm string `protobuf:"bytes,10,opt,name=sse_customer_algorithm,json=sseCustomerAlgorithm,proto3" json:"sse_customer_algorithm,omitempty"`
	// Specifies the customer-provided encryption key for Amazon S3 to use in
	// encrypting data
	SseCustomerKey string `protobuf:"bytes,11,opt,name=sse_customer_key,json=sseCustomerKey,proto3" json:"sse_customer_key,omitempty"`
	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	SseCustomerKeyMd5 string `protobuf:"bytes,12,opt,name=sse_customer_key_md5,json=sseCustomerKeyMd5,proto3" json:"sse_customer_key_md5,omitempty"`
	// Upload ID identifying the multipart upload whose part is being uploaded.
	// This member is required.
	UploadId string `protobuf:"bytes,13,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
}

// Reset restores the message to its zero state; under the unsafe fast path it
// also re-attaches the cached message-type info (msgTypes[33]).
func (x *UploadPartInput) Reset() {
	*x = UploadPartInput{}
	if protoimpl.UnsafeEnabled {
		mi := &file_oss_proto_msgTypes[33]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *UploadPartInput) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UploadPartInput) ProtoMessage() {}

func (x *UploadPartInput) ProtoReflect() protoreflect.Message {
	mi := &file_oss_proto_msgTypes[33]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UploadPartInput.ProtoReflect.Descriptor instead.
// NOTE(review): continuation of the machine-generated protobuf bindings
// (protoc-gen-go style); do not hand-edit — regenerate from the .proto.
// All Get* accessors below are nil-receiver safe and return the field's
// zero value on a nil receiver.

func (*UploadPartInput) Descriptor() ([]byte, []int) {
	// Index path {33}: message #33 in the oss proto file.
	return file_oss_proto_rawDescGZIP(), []int{33}
}

func (x *UploadPartInput) GetStoreName() string {
	if x != nil {
		return x.StoreName
	}
	return ""
}

func (x *UploadPartInput) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

func (x *UploadPartInput) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

func (x *UploadPartInput) GetBody() []byte {
	if x != nil {
		return x.Body
	}
	return nil
}

func (x *UploadPartInput) GetContentLength() int64 {
	if x != nil {
		return x.ContentLength
	}
	return 0
}

func (x *UploadPartInput) GetContentMd5() string {
	if x != nil {
		return x.ContentMd5
	}
	return ""
}

func (x *UploadPartInput) GetExpectedBucketOwner() string {
	if x != nil {
		return x.ExpectedBucketOwner
	}
	return ""
}

func (x *UploadPartInput) GetPartNumber() int32 {
	if x != nil {
		return x.PartNumber
	}
	return 0
}

func (x *UploadPartInput) GetRequestPayer() string {
	if x != nil {
		return x.RequestPayer
	}
	return ""
}

func (x *UploadPartInput) GetSseCustomerAlgorithm() string {
	if x != nil {
		return x.SseCustomerAlgorithm
	}
	return ""
}

func (x *UploadPartInput) GetSseCustomerKey() string {
	if x != nil {
		return x.SseCustomerKey
	}
	return ""
}

func (x *UploadPartInput) GetSseCustomerKeyMd5() string {
	if x != nil {
		return x.SseCustomerKeyMd5
	}
	return ""
}

func (x *UploadPartInput) GetUploadId() string {
	if x != nil {
		return x.UploadId
	}
	return ""
}

// UploadPartOutput
// Response message for a single uploaded part.
type UploadPartOutput struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
	// encryption with Amazon Web Services KMS (SSE-KMS).
	BucketKeyEnabled bool `protobuf:"varint,1,opt,name=bucket_key_enabled,json=bucketKeyEnabled,proto3" json:"bucket_key_enabled,omitempty"`
	// Entity tag for the uploaded object.
	Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"`
	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged string `protobuf:"bytes,3,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"`
	// Specifies the algorithm to use to when encrypting the object (for example,
	// AES256).
	SseCustomerAlgorithm string `protobuf:"bytes,4,opt,name=sse_customer_algorithm,json=sseCustomerAlgorithm,proto3" json:"sse_customer_algorithm,omitempty"`
	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	SseCustomerKeyMd5 string `protobuf:"bytes,5,opt,name=sse_customer_key_md5,json=sseCustomerKeyMd5,proto3" json:"sse_customer_key_md5,omitempty"`
	// Specifies the ID of the symmetric customer managed key to use for object encryption
	SseKmsKeyId string `protobuf:"bytes,6,opt,name=sse_kms_key_id,json=sseKmsKeyId,proto3" json:"sse_kms_key_id,omitempty"`
	// The server-side encryption algorithm used when storing this object in Amazon S3
	// (for example, AES256, aws:kms).
	ServerSideEncryption string `protobuf:"bytes,7,opt,name=server_side_encryption,json=serverSideEncryption,proto3" json:"server_side_encryption,omitempty"`
}

func (x *UploadPartOutput) Reset() {
	*x = UploadPartOutput{}
	if protoimpl.UnsafeEnabled {
		mi := &file_oss_proto_msgTypes[34]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *UploadPartOutput) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UploadPartOutput) ProtoMessage() {}

func (x *UploadPartOutput) ProtoReflect() protoreflect.Message {
	mi := &file_oss_proto_msgTypes[34]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UploadPartOutput.ProtoReflect.Descriptor instead.
func (*UploadPartOutput) Descriptor() ([]byte, []int) {
	return file_oss_proto_rawDescGZIP(), []int{34}
}

func (x *UploadPartOutput) GetBucketKeyEnabled() bool {
	if x != nil {
		return x.BucketKeyEnabled
	}
	return false
}

func (x *UploadPartOutput) GetEtag() string {
	if x != nil {
		return x.Etag
	}
	return ""
}

func (x *UploadPartOutput) GetRequestCharged() string {
	if x != nil {
		return x.RequestCharged
	}
	return ""
}

func (x *UploadPartOutput) GetSseCustomerAlgorithm() string {
	if x != nil {
		return x.SseCustomerAlgorithm
	}
	return ""
}

func (x *UploadPartOutput) GetSseCustomerKeyMd5() string {
	if x != nil {
		return x.SseCustomerKeyMd5
	}
	return ""
}

func (x *UploadPartOutput) GetSseKmsKeyId() string {
	if x != nil {
		return x.SseKmsKeyId
	}
	return ""
}

func (x *UploadPartOutput) GetServerSideEncryption() string {
	if x != nil {
		return x.ServerSideEncryption
	}
	return ""
}

// UploadPartCopyInput
// Request message for copying a byte range of an existing object in as one
// part of a multipart upload.
type UploadPartCopyInput struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The name of oss store.
	StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"`
	// The bucket name containing the object
	// This member is required
	Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
	// Name of the object key.
	// This member is required.
	Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
	// CopySource
	CopySource *CopySource `protobuf:"bytes,4,opt,name=copy_source,json=copySource,proto3" json:"copy_source,omitempty"`
	// Part number of part being copied. This is a positive integer between 1 and 10,000.
	// This member is required.
	PartNumber int32 `protobuf:"varint,5,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"`
	// Upload ID identifying the multipart upload whose part is being copied.
	// This member is required.
	UploadId string `protobuf:"bytes,6,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
	// The range of bytes to copy from the source object.bytes=start_position-part_size
	StartPosition int64 `protobuf:"varint,7,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"`
	// Part size
	PartSize int64 `protobuf:"varint,8,opt,name=part_size,json=partSize,proto3" json:"part_size,omitempty"`
}

func (x *UploadPartCopyInput) Reset() {
	*x = UploadPartCopyInput{}
	if protoimpl.UnsafeEnabled {
		mi := &file_oss_proto_msgTypes[35]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *UploadPartCopyInput) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UploadPartCopyInput) ProtoMessage() {}

func (x *UploadPartCopyInput) ProtoReflect() protoreflect.Message {
	mi := &file_oss_proto_msgTypes[35]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UploadPartCopyInput.ProtoReflect.Descriptor instead.
func (*UploadPartCopyInput) Descriptor() ([]byte, []int) {
	return file_oss_proto_rawDescGZIP(), []int{35}
}

func (x *UploadPartCopyInput) GetStoreName() string {
	if x != nil {
		return x.StoreName
	}
	return ""
}

func (x *UploadPartCopyInput) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

func (x *UploadPartCopyInput) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

func (x *UploadPartCopyInput) GetCopySource() *CopySource {
	if x != nil {
		return x.CopySource
	}
	return nil
}

func (x *UploadPartCopyInput) GetPartNumber() int32 {
	if x != nil {
		return x.PartNumber
	}
	return 0
}

func (x *UploadPartCopyInput) GetUploadId() string {
	if x != nil {
		return x.UploadId
	}
	return ""
}

func (x *UploadPartCopyInput) GetStartPosition() int64 {
	if x != nil {
		return x.StartPosition
	}
	return 0
}

func (x *UploadPartCopyInput) GetPartSize() int64 {
	if x != nil {
		return x.PartSize
	}
	return 0
}

// CopyPartResult
// Result element describing one copied part (etag + last-modified time).
type CopyPartResult struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Entity tag of the object.
	Etag string `protobuf:"bytes,1,opt,name=etag,proto3" json:"etag,omitempty"`
	// Last modified time
	LastModified int64 `protobuf:"varint,2,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"`
}

func (x *CopyPartResult) Reset() {
	*x = CopyPartResult{}
	if protoimpl.UnsafeEnabled {
		mi := &file_oss_proto_msgTypes[36]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CopyPartResult) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CopyPartResult) ProtoMessage() {}

func (x *CopyPartResult) ProtoReflect() protoreflect.Message {
	mi := &file_oss_proto_msgTypes[36]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CopyPartResult.ProtoReflect.Descriptor instead.
func (*CopyPartResult) Descriptor() ([]byte, []int) {
	return file_oss_proto_rawDescGZIP(), []int{36}
}

func (x *CopyPartResult) GetEtag() string {
	if x != nil {
		return x.Etag
	}
	return ""
}

func (x *CopyPartResult) GetLastModified() int64 {
	if x != nil {
		return x.LastModified
	}
	return 0
}

// UploadPartCopyOutput
// Response message for UploadPartCopy.
type UploadPartCopyOutput struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
	// encryption with Amazon Web Services KMS (SSE-KMS).
	BucketKeyEnabled bool `protobuf:"varint,1,opt,name=bucket_key_enabled,json=bucketKeyEnabled,proto3" json:"bucket_key_enabled,omitempty"`
	// Container for all response elements.
	CopyPartResult *CopyPartResult `protobuf:"bytes,2,opt,name=copy_part_result,json=copyPartResult,proto3" json:"copy_part_result,omitempty"`
	// The version of the source object that was copied, if you have enabled versioning
	// on the source bucket.
	CopySourceVersionId string `protobuf:"bytes,3,opt,name=copy_source_version_id,json=copySourceVersionId,proto3" json:"copy_source_version_id,omitempty"`
	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged string `protobuf:"bytes,4,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"`
	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header confirming the encryption algorithm used.
	SseCustomerAlgorithm string `protobuf:"bytes,5,opt,name=sse_customer_algorithm,json=sseCustomerAlgorithm,proto3" json:"sse_customer_algorithm,omitempty"`
	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header to provide round-trip message integrity
	// verification of the customer-provided encryption key.
	SseCustomerKeyMd5 string `protobuf:"bytes,6,opt,name=sse_customer_key_md5,json=sseCustomerKeyMd5,proto3" json:"sse_customer_key_md5,omitempty"`
	// If present, specifies the ID of the Amazon Web Services Key Management Service
	// (Amazon Web Services KMS) symmetric customer managed key that was used for the
	// object.
	SseKmsKeyId string `protobuf:"bytes,7,opt,name=sse_kms_key_id,json=sseKmsKeyId,proto3" json:"sse_kms_key_id,omitempty"`
	// The server-side encryption algorithm used when storing this object in Amazon S3
	// (for example, AES256, aws:kms).
	ServerSideEncryption string `protobuf:"bytes,8,opt,name=server_side_encryption,json=serverSideEncryption,proto3" json:"server_side_encryption,omitempty"`
}

func (x *UploadPartCopyOutput) Reset() {
	*x = UploadPartCopyOutput{}
	if protoimpl.UnsafeEnabled {
		mi := &file_oss_proto_msgTypes[37]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *UploadPartCopyOutput) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UploadPartCopyOutput) ProtoMessage() {}

func (x *UploadPartCopyOutput) ProtoReflect() protoreflect.Message {
	mi := &file_oss_proto_msgTypes[37]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UploadPartCopyOutput.ProtoReflect.Descriptor instead.
func (*UploadPartCopyOutput) Descriptor() ([]byte, []int) {
	return file_oss_proto_rawDescGZIP(), []int{37}
}

func (x *UploadPartCopyOutput) GetBucketKeyEnabled() bool {
	if x != nil {
		return x.BucketKeyEnabled
	}
	return false
}

func (x *UploadPartCopyOutput) GetCopyPartResult() *CopyPartResult {
	if x != nil {
		return x.CopyPartResult
	}
	return nil
}

func (x *UploadPartCopyOutput) GetCopySourceVersionId() string {
	if x != nil {
		return x.CopySourceVersionId
	}
	return ""
}

func (x *UploadPartCopyOutput) GetRequestCharged() string {
	if x != nil {
		return x.RequestCharged
	}
	return ""
}

func (x *UploadPartCopyOutput) GetSseCustomerAlgorithm() string {
	if x != nil {
		return x.SseCustomerAlgorithm
	}
	return ""
}

func (x *UploadPartCopyOutput) GetSseCustomerKeyMd5() string {
	if x != nil {
		return x.SseCustomerKeyMd5
	}
	return ""
}

func (x *UploadPartCopyOutput) GetSseKmsKeyId() string {
	if x != nil {
		return x.SseKmsKeyId
	}
	return ""
}

func (x *UploadPartCopyOutput) GetServerSideEncryption() string {
	if x != nil {
		return x.ServerSideEncryption
	}
	return ""
}

// CompletedPart
// One (etag, part number) pair identifying a finished part.
type CompletedPart struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Entity tag returned when the part was uploaded.
	Etag string `protobuf:"bytes,1,opt,name=etag,proto3" json:"etag,omitempty"`
	// Part number that identifies the part. This is a positive integer between 1 and
	// 10,000.
	PartNumber int32 `protobuf:"varint,2,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"`
}

func (x *CompletedPart) Reset() {
	*x = CompletedPart{}
	if protoimpl.UnsafeEnabled {
		mi := &file_oss_proto_msgTypes[38]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CompletedPart) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CompletedPart) ProtoMessage() {}

func (x *CompletedPart) ProtoReflect() protoreflect.Message {
	mi := &file_oss_proto_msgTypes[38]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CompletedPart.ProtoReflect.Descriptor instead.
func (*CompletedPart) Descriptor() ([]byte, []int) {
	return file_oss_proto_rawDescGZIP(), []int{38}
}

func (x *CompletedPart) GetEtag() string {
	if x != nil {
		return x.Etag
	}
	return ""
}

func (x *CompletedPart) GetPartNumber() int32 {
	if x != nil {
		return x.PartNumber
	}
	return 0
}

// CompletedMultipartUpload
// The full list of completed parts for a CompleteMultipartUpload request.
type CompletedMultipartUpload struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Array of CompletedPart data types.
	Parts []*CompletedPart `protobuf:"bytes,1,rep,name=parts,proto3" json:"parts,omitempty"`
}

func (x *CompletedMultipartUpload) Reset() {
	*x = CompletedMultipartUpload{}
	if protoimpl.UnsafeEnabled {
		mi := &file_oss_proto_msgTypes[39]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CompletedMultipartUpload) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CompletedMultipartUpload) ProtoMessage() {}

func (x *CompletedMultipartUpload) ProtoReflect() protoreflect.Message {
	mi := &file_oss_proto_msgTypes[39]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CompletedMultipartUpload.ProtoReflect.Descriptor instead.
func (*CompletedMultipartUpload) Descriptor() ([]byte, []int) {
	return file_oss_proto_rawDescGZIP(), []int{39}
}

func (x *CompletedMultipartUpload) GetParts() []*CompletedPart {
	if x != nil {
		return x.Parts
	}
	return nil
}

// CompleteMultipartUploadInput
// Request message that finalizes a multipart upload from its completed parts.
type CompleteMultipartUploadInput struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The name of oss store.
	StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"`
	// The bucket name containing the object
	// This member is required
	Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
	// Name of the object key.
	// This member is required.
	Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
	// ID for the initiated multipart upload.
	// This member is required.
	UploadId string `protobuf:"bytes,4,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
	// Confirms that the requester knows that they will be charged for the request.
	RequestPayer string `protobuf:"bytes,5,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"`
	// Expected bucket owner
	ExpectedBucketOwner string `protobuf:"bytes,6,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"`
	// The container for the multipart upload request information.
	MultipartUpload *CompletedMultipartUpload `protobuf:"bytes,7,opt,name=multipart_upload,json=multipartUpload,proto3" json:"multipart_upload,omitempty"`
}

func (x *CompleteMultipartUploadInput) Reset() {
	*x = CompleteMultipartUploadInput{}
	if protoimpl.UnsafeEnabled {
		mi := &file_oss_proto_msgTypes[40]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CompleteMultipartUploadInput) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CompleteMultipartUploadInput) ProtoMessage() {}

func (x *CompleteMultipartUploadInput) ProtoReflect() protoreflect.Message {
	mi := &file_oss_proto_msgTypes[40]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CompleteMultipartUploadInput.ProtoReflect.Descriptor instead.
+func (*CompleteMultipartUploadInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{40} +} + +func (x *CompleteMultipartUploadInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *CompleteMultipartUploadInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *CompleteMultipartUploadInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *CompleteMultipartUploadInput) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +func (x *CompleteMultipartUploadInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +func (x *CompleteMultipartUploadInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *CompleteMultipartUploadInput) GetMultipartUpload() *CompletedMultipartUpload { + if x != nil { + return x.MultipartUpload + } + return nil +} + +// CompleteMultipartUploadOutput +type CompleteMultipartUploadOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). 
+ BucketKeyEnabled bool `protobuf:"varint,3,opt,name=bucket_key_enabled,json=bucketKeyEnabled,proto3" json:"bucket_key_enabled,omitempty"` + // Entity tag that identifies the newly created object's data + Etag string `protobuf:"bytes,4,opt,name=etag,proto3" json:"etag,omitempty"` + // If the object expiration is configured, this will contain the expiration date + // (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + Expiration string `protobuf:"bytes,5,opt,name=expiration,proto3" json:"expiration,omitempty"` + // The URI that identifies the newly created object. + Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location,omitempty"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged string `protobuf:"bytes,7,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"` + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SseKmsKeyId string `protobuf:"bytes,8,opt,name=sse_kms_keyId,json=sseKmsKeyId,proto3" json:"sse_kms_keyId,omitempty"` + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption string `protobuf:"bytes,9,opt,name=server_side_encryption,json=serverSideEncryption,proto3" json:"server_side_encryption,omitempty"` + // Version ID of the newly created object, in case the bucket has versioning turned + // on. 
+ VersionId string `protobuf:"bytes,10,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *CompleteMultipartUploadOutput) Reset() { + *x = CompleteMultipartUploadOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompleteMultipartUploadOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompleteMultipartUploadOutput) ProtoMessage() {} + +func (x *CompleteMultipartUploadOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompleteMultipartUploadOutput.ProtoReflect.Descriptor instead. +func (*CompleteMultipartUploadOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{41} +} + +func (x *CompleteMultipartUploadOutput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *CompleteMultipartUploadOutput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *CompleteMultipartUploadOutput) GetBucketKeyEnabled() bool { + if x != nil { + return x.BucketKeyEnabled + } + return false +} + +func (x *CompleteMultipartUploadOutput) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *CompleteMultipartUploadOutput) GetExpiration() string { + if x != nil { + return x.Expiration + } + return "" +} + +func (x *CompleteMultipartUploadOutput) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *CompleteMultipartUploadOutput) GetRequestCharged() string { + if x != nil { + return x.RequestCharged + } + return "" +} + +func (x *CompleteMultipartUploadOutput) GetSseKmsKeyId() string { + if x != 
nil { + return x.SseKmsKeyId + } + return "" +} + +func (x *CompleteMultipartUploadOutput) GetServerSideEncryption() string { + if x != nil { + return x.ServerSideEncryption + } + return "" +} + +func (x *CompleteMultipartUploadOutput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// AbortMultipartUploadInput +type AbortMultipartUploadInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // The account ID of the expected bucket owner + ExpectedBucketOwner string `protobuf:"bytes,4,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // Confirms that the requester knows that they will be charged for the request. + RequestPayer string `protobuf:"bytes,5,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` + // Upload ID that identifies the multipart upload. + // This member is required. 
+ UploadId string `protobuf:"bytes,6,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *AbortMultipartUploadInput) Reset() { + *x = AbortMultipartUploadInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AbortMultipartUploadInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AbortMultipartUploadInput) ProtoMessage() {} + +func (x *AbortMultipartUploadInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AbortMultipartUploadInput.ProtoReflect.Descriptor instead. +func (*AbortMultipartUploadInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{42} +} + +func (x *AbortMultipartUploadInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *AbortMultipartUploadInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *AbortMultipartUploadInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *AbortMultipartUploadInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *AbortMultipartUploadInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +func (x *AbortMultipartUploadInput) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// AbortMultipartUploadOutput +type AbortMultipartUploadOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If present, indicates that the requester was successfully charged 
for the request. + RequestCharged string `protobuf:"bytes,1,opt,name=request_charged,json=requestCharged,proto3" json:"request_charged,omitempty"` +} + +func (x *AbortMultipartUploadOutput) Reset() { + *x = AbortMultipartUploadOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AbortMultipartUploadOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AbortMultipartUploadOutput) ProtoMessage() {} + +func (x *AbortMultipartUploadOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AbortMultipartUploadOutput.ProtoReflect.Descriptor instead. +func (*AbortMultipartUploadOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{43} +} + +func (x *AbortMultipartUploadOutput) GetRequestCharged() string { + if x != nil { + return x.RequestCharged + } + return "" +} + +// ListMultipartUploadsInput +type ListMultipartUploadsInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Character you use to group keys. All keys that contain the same string between + // the prefix, if specified, and the first occurrence of the delimiter after the + // prefix are grouped under a single result element, CommonPrefixes. 
If you don't + // specify the prefix parameter, then the substring starts at the beginning of the + // key. The keys that are grouped under CommonPrefixes result element are not + // returned elsewhere in the response. + Delimiter string `protobuf:"bytes,3,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + EncodingType string `protobuf:"bytes,4,opt,name=encoding_type,json=encodingType,proto3" json:"encoding_type,omitempty"` + // The account ID of the expected bucket owner + ExpectedBucketOwner string `protobuf:"bytes,5,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. If upload-id-marker is not specified, only the + // keys lexicographically greater than the specified key-marker will be included in + // the list. If upload-id-marker is specified, any multipart uploads for a key + // equal to the key-marker might also be included, provided those multipart uploads + // have upload IDs lexicographically greater than the specified upload-id-marker. + KeyMarker string `protobuf:"bytes,6,opt,name=key_marker,json=keyMarker,proto3" json:"key_marker,omitempty"` + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the + // response body. 1,000 is the maximum number of uploads that can be returned in a + // response. + MaxUploads int64 `protobuf:"varint,7,opt,name=max_uploads,json=maxUploads,proto3" json:"max_uploads,omitempty"` + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping of + // keys. (You can think of using prefix to make groups in the same way you'd use a + // folder in a file system.) 
+ Prefix string `protobuf:"bytes,8,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter is + // ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + UploadIdMarker string `protobuf:"bytes,9,opt,name=upload_id_marker,json=uploadIdMarker,proto3" json:"upload_id_marker,omitempty"` +} + +func (x *ListMultipartUploadsInput) Reset() { + *x = ListMultipartUploadsInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMultipartUploadsInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMultipartUploadsInput) ProtoMessage() {} + +func (x *ListMultipartUploadsInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMultipartUploadsInput.ProtoReflect.Descriptor instead. 
+func (*ListMultipartUploadsInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{44} +} + +func (x *ListMultipartUploadsInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *ListMultipartUploadsInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ListMultipartUploadsInput) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListMultipartUploadsInput) GetEncodingType() string { + if x != nil { + return x.EncodingType + } + return "" +} + +func (x *ListMultipartUploadsInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *ListMultipartUploadsInput) GetKeyMarker() string { + if x != nil { + return x.KeyMarker + } + return "" +} + +func (x *ListMultipartUploadsInput) GetMaxUploads() int64 { + if x != nil { + return x.MaxUploads + } + return 0 +} + +func (x *ListMultipartUploadsInput) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListMultipartUploadsInput) GetUploadIdMarker() string { + if x != nil { + return x.UploadIdMarker + } + return "" +} + +// Initiator +type Initiator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initiator name + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Initiator id + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *Initiator) Reset() { + *x = Initiator{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Initiator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Initiator) ProtoMessage() {} + +func (x *Initiator) ProtoReflect() protoreflect.Message { + mi := 
&file_oss_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Initiator.ProtoReflect.Descriptor instead. +func (*Initiator) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{45} +} + +func (x *Initiator) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Initiator) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +// MultipartUpload +type MultipartUpload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Date and time at which the multipart upload was initiated. + Initiated int64 `protobuf:"varint,1,opt,name=initiated,proto3" json:"initiated,omitempty"` + // Identifies who initiated the multipart upload. + Initiator *Initiator `protobuf:"bytes,2,opt,name=initiator,proto3" json:"initiator,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Specifies the owner of the object that is part of the multipart upload. + Owner *Owner `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + // The class of storage used to store the object. + StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // Upload ID that identifies the multipart upload. 
+ UploadId string `protobuf:"bytes,6,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *MultipartUpload) Reset() { + *x = MultipartUpload{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultipartUpload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultipartUpload) ProtoMessage() {} + +func (x *MultipartUpload) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultipartUpload.ProtoReflect.Descriptor instead. +func (*MultipartUpload) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{46} +} + +func (x *MultipartUpload) GetInitiated() int64 { + if x != nil { + return x.Initiated + } + return 0 +} + +func (x *MultipartUpload) GetInitiator() *Initiator { + if x != nil { + return x.Initiator + } + return nil +} + +func (x *MultipartUpload) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MultipartUpload) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *MultipartUpload) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *MultipartUpload) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// ListMultipartUploadsOutput +type ListMultipartUploadsOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // If you specify a delimiter in the request, then the result 
returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. + CommonPrefixes []string `protobuf:"bytes,2,rep,name=common_prefixes,json=commonPrefixes,proto3" json:"common_prefixes,omitempty"` + // Contains the delimiter you specified in the request. If you don't specify a + // delimiter in your request, this element is absent from the response. + Delimiter string `protobuf:"bytes,3,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType string `protobuf:"bytes,4,opt,name=encoding_type,json=encodingType,proto3" json:"encoding_type,omitempty"` + // Indicates whether the returned list of multipart uploads is truncated. A value + // of true indicates that the list was truncated. The list can be truncated if the + // number of multipart uploads exceeds the limit allowed or specified by max + // uploads. + IsTruncated bool `protobuf:"varint,5,opt,name=is_truncated,json=isTruncated,proto3" json:"is_truncated,omitempty"` + // The key at or after which the listing began. + KeyMarker string `protobuf:"bytes,6,opt,name=key_marker,json=keyMarker,proto3" json:"key_marker,omitempty"` + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads int32 `protobuf:"varint,7,opt,name=max_uploads,json=maxUploads,proto3" json:"max_uploads,omitempty"` + // When a list is truncated, this element specifies the value that should be used + // for the key-marker request parameter in a subsequent request. + NextKeyMarker string `protobuf:"bytes,8,opt,name=next_key_marker,json=nextKeyMarker,proto3" json:"next_key_marker,omitempty"` + // When a list is truncated, this element specifies the value that should be used + // for the upload-id-marker request parameter in a subsequent request. 
+ NextUploadIdMarker string `protobuf:"bytes,9,opt,name=next_upload_id_marker,json=nextUploadIdMarker,proto3" json:"next_upload_id_marker,omitempty"` + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix string `protobuf:"bytes,10,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Upload ID after which listing began. + UploadIdMarker string `protobuf:"bytes,11,opt,name=upload_id_marker,json=uploadIdMarker,proto3" json:"upload_id_marker,omitempty"` + // Container for elements related to a particular multipart upload. A response can + // contain zero or more Upload elements. + Uploads []*MultipartUpload `protobuf:"bytes,12,rep,name=uploads,proto3" json:"uploads,omitempty"` +} + +func (x *ListMultipartUploadsOutput) Reset() { + *x = ListMultipartUploadsOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMultipartUploadsOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMultipartUploadsOutput) ProtoMessage() {} + +func (x *ListMultipartUploadsOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMultipartUploadsOutput.ProtoReflect.Descriptor instead. 
+func (*ListMultipartUploadsOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{47} +} + +func (x *ListMultipartUploadsOutput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ListMultipartUploadsOutput) GetCommonPrefixes() []string { + if x != nil { + return x.CommonPrefixes + } + return nil +} + +func (x *ListMultipartUploadsOutput) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListMultipartUploadsOutput) GetEncodingType() string { + if x != nil { + return x.EncodingType + } + return "" +} + +func (x *ListMultipartUploadsOutput) GetIsTruncated() bool { + if x != nil { + return x.IsTruncated + } + return false +} + +func (x *ListMultipartUploadsOutput) GetKeyMarker() string { + if x != nil { + return x.KeyMarker + } + return "" +} + +func (x *ListMultipartUploadsOutput) GetMaxUploads() int32 { + if x != nil { + return x.MaxUploads + } + return 0 +} + +func (x *ListMultipartUploadsOutput) GetNextKeyMarker() string { + if x != nil { + return x.NextKeyMarker + } + return "" +} + +func (x *ListMultipartUploadsOutput) GetNextUploadIdMarker() string { + if x != nil { + return x.NextUploadIdMarker + } + return "" +} + +func (x *ListMultipartUploadsOutput) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListMultipartUploadsOutput) GetUploadIdMarker() string { + if x != nil { + return x.UploadIdMarker + } + return "" +} + +func (x *ListMultipartUploadsOutput) GetUploads() []*MultipartUpload { + if x != nil { + return x.Uploads + } + return nil +} + +// ListObjectVersionsInput +type ListObjectVersionsInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. 
+ StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // A delimiter is a character that you specify to group keys. All keys that contain + // the same string between the prefix and the first occurrence of the delimiter are + // grouped under a single result element in CommonPrefixes. These groups are + // counted as one result against the max-keys limitation. These keys are not + // returned elsewhere in the response. + Delimiter string `protobuf:"bytes,3,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + EncodingType string `protobuf:"bytes,4,opt,name=encoding_type,json=encodingType,proto3" json:"encoding_type,omitempty"` + // The account ID of the expected bucket owner + ExpectedBucketOwner string `protobuf:"bytes,5,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // Specifies the key to start with when listing objects in a bucket. + KeyMarker string `protobuf:"bytes,6,opt,name=key_marker,json=keyMarker,proto3" json:"key_marker,omitempty"` + // Sets the maximum number of keys returned in the response. By default the action + // returns up to 1,000 key names. The response might contain fewer keys but will + // never contain more. If additional keys satisfy the search criteria, but were not + // returned because max-keys was exceeded, the response contains true. To return + // the additional keys, see key-marker and version-id-marker. + MaxKeys int64 `protobuf:"varint,7,opt,name=max_keys,json=maxKeys,proto3" json:"max_keys,omitempty"` + // Use this parameter to select only those keys that begin with the specified + // prefix. 
You can use prefixes to separate a bucket into different groupings of + // keys. (You can think of using prefix to make groups in the same way you'd use a + // folder in a file system.) You can use prefix with delimiter to roll up numerous + // objects into a single result under CommonPrefixes. + Prefix string `protobuf:"bytes,8,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Specifies the object version you want to start listing from. + VersionIdMarker string `protobuf:"bytes,9,opt,name=version_id_marker,json=versionIdMarker,proto3" json:"version_id_marker,omitempty"` +} + +func (x *ListObjectVersionsInput) Reset() { + *x = ListObjectVersionsInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectVersionsInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectVersionsInput) ProtoMessage() {} + +func (x *ListObjectVersionsInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectVersionsInput.ProtoReflect.Descriptor instead. 
+func (*ListObjectVersionsInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{48} +} + +func (x *ListObjectVersionsInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *ListObjectVersionsInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ListObjectVersionsInput) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListObjectVersionsInput) GetEncodingType() string { + if x != nil { + return x.EncodingType + } + return "" +} + +func (x *ListObjectVersionsInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *ListObjectVersionsInput) GetKeyMarker() string { + if x != nil { + return x.KeyMarker + } + return "" +} + +func (x *ListObjectVersionsInput) GetMaxKeys() int64 { + if x != nil { + return x.MaxKeys + } + return 0 +} + +func (x *ListObjectVersionsInput) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListObjectVersionsInput) GetVersionIdMarker() string { + if x != nil { + return x.VersionIdMarker + } + return "" +} + +// DeleteMarkerEntry +type DeleteMarkerEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies whether the object is (true) or is not (false) the latest version of + // an object. + IsLatest bool `protobuf:"varint,1,opt,name=is_latest,json=isLatest,proto3" json:"is_latest,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Date and time the object was last modified. + LastModified int64 `protobuf:"varint,3,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + // Owner + Owner *Owner `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + // Version ID of an object. 
+ VersionId string `protobuf:"bytes,5,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *DeleteMarkerEntry) Reset() { + *x = DeleteMarkerEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteMarkerEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteMarkerEntry) ProtoMessage() {} + +func (x *DeleteMarkerEntry) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteMarkerEntry.ProtoReflect.Descriptor instead. +func (*DeleteMarkerEntry) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{49} +} + +func (x *DeleteMarkerEntry) GetIsLatest() bool { + if x != nil { + return x.IsLatest + } + return false +} + +func (x *DeleteMarkerEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *DeleteMarkerEntry) GetLastModified() int64 { + if x != nil { + return x.LastModified + } + return 0 +} + +func (x *DeleteMarkerEntry) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *DeleteMarkerEntry) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// ObjectVersion +type ObjectVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entity tag is an MD5 hash of that version of the object. + Etag string `protobuf:"bytes,1,opt,name=etag,proto3" json:"etag,omitempty"` + // Specifies whether the object is (true) or is not (false) the latest version of + // an object. 
+ IsLatest bool `protobuf:"varint,2,opt,name=is_latest,json=isLatest,proto3" json:"is_latest,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Date and time the object was last modified. + LastModified int64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + // Specifies the owner of the object. + Owner *Owner `protobuf:"bytes,5,opt,name=owner,proto3" json:"owner,omitempty"` + // Size in bytes of the object. + Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` + // The class of storage used to store the object. + StorageClass string `protobuf:"bytes,7,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // Version ID of an object. + VersionId string `protobuf:"bytes,8,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *ObjectVersion) Reset() { + *x = ObjectVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectVersion) ProtoMessage() {} + +func (x *ObjectVersion) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectVersion.ProtoReflect.Descriptor instead. 
+func (*ObjectVersion) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{50} +} + +func (x *ObjectVersion) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *ObjectVersion) GetIsLatest() bool { + if x != nil { + return x.IsLatest + } + return false +} + +func (x *ObjectVersion) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *ObjectVersion) GetLastModified() int64 { + if x != nil { + return x.LastModified + } + return 0 +} + +func (x *ObjectVersion) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *ObjectVersion) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *ObjectVersion) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *ObjectVersion) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// ListObjectVersionsOutput +type ListObjectVersionsOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []string `protobuf:"bytes,1,rep,name=common_prefixes,json=commonPrefixes,proto3" json:"common_prefixes,omitempty"` + // Container for an object that is a delete marker. + DeleteMarkers []*DeleteMarkerEntry `protobuf:"bytes,2,rep,name=delete_markers,json=deleteMarkers,proto3" json:"delete_markers,omitempty"` + // The delimiter grouping the included keys. + Delimiter string `protobuf:"bytes,3,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // Encoding type used by Amazon S3 to encode object key names in the XML response. 
+ EncodingType string `protobuf:"bytes,4,opt,name=encoding_type,json=encodingType,proto3" json:"encoding_type,omitempty"` + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria + IsTruncated bool `protobuf:"varint,5,opt,name=is_truncated,json=isTruncated,proto3" json:"is_truncated,omitempty"` + // Marks the last key returned in a truncated response. + KeyMarker string `protobuf:"bytes,6,opt,name=key_marker,json=keyMarker,proto3" json:"key_marker,omitempty"` + // Specifies the maximum number of objects to return + MaxKeys int64 `protobuf:"varint,7,opt,name=max_keys,json=maxKeys,proto3" json:"max_keys,omitempty"` + // The bucket name. + Name string `protobuf:"bytes,8,opt,name=name,proto3" json:"name,omitempty"` + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria + NextKeyMarker string `protobuf:"bytes,9,opt,name=next_key_marker,json=nextKeyMarker,proto3" json:"next_key_marker,omitempty"` + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. + NextVersionIdMarker string `protobuf:"bytes,10,opt,name=next_version_id_marker,json=nextVersionIdMarker,proto3" json:"next_version_id_marker,omitempty"` + // Selects objects that start with the value supplied by this parameter. + Prefix string `protobuf:"bytes,11,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Marks the last version of the key returned in a truncated response. + VersionIdMarker string `protobuf:"bytes,12,opt,name=version_id_marker,json=versionIdMarker,proto3" json:"version_id_marker,omitempty"` + // Container for version information. 
+ Versions []*ObjectVersion `protobuf:"bytes,13,rep,name=versions,proto3" json:"versions,omitempty"` +} + +func (x *ListObjectVersionsOutput) Reset() { + *x = ListObjectVersionsOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectVersionsOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectVersionsOutput) ProtoMessage() {} + +func (x *ListObjectVersionsOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectVersionsOutput.ProtoReflect.Descriptor instead. +func (*ListObjectVersionsOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{51} +} + +func (x *ListObjectVersionsOutput) GetCommonPrefixes() []string { + if x != nil { + return x.CommonPrefixes + } + return nil +} + +func (x *ListObjectVersionsOutput) GetDeleteMarkers() []*DeleteMarkerEntry { + if x != nil { + return x.DeleteMarkers + } + return nil +} + +func (x *ListObjectVersionsOutput) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListObjectVersionsOutput) GetEncodingType() string { + if x != nil { + return x.EncodingType + } + return "" +} + +func (x *ListObjectVersionsOutput) GetIsTruncated() bool { + if x != nil { + return x.IsTruncated + } + return false +} + +func (x *ListObjectVersionsOutput) GetKeyMarker() string { + if x != nil { + return x.KeyMarker + } + return "" +} + +func (x *ListObjectVersionsOutput) GetMaxKeys() int64 { + if x != nil { + return x.MaxKeys + } + return 0 +} + +func (x *ListObjectVersionsOutput) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func 
(x *ListObjectVersionsOutput) GetNextKeyMarker() string { + if x != nil { + return x.NextKeyMarker + } + return "" +} + +func (x *ListObjectVersionsOutput) GetNextVersionIdMarker() string { + if x != nil { + return x.NextVersionIdMarker + } + return "" +} + +func (x *ListObjectVersionsOutput) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListObjectVersionsOutput) GetVersionIdMarker() string { + if x != nil { + return x.VersionIdMarker + } + return "" +} + +func (x *ListObjectVersionsOutput) GetVersions() []*ObjectVersion { + if x != nil { + return x.Versions + } + return nil +} + +// HeadObjectInput +type HeadObjectInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // To retrieve the checksum, this parameter must be enabled + ChecksumMode string `protobuf:"bytes,4,opt,name=checksum_mode,json=checksumMode,proto3" json:"checksum_mode,omitempty"` + // The account ID of the expected bucket owner + ExpectedBucketOwner string `protobuf:"bytes,5,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // Return the object only if its entity tag (ETag) is the same as the one + // specified; otherwise, return a 412 (precondition failed) error. + IfMatch string `protobuf:"bytes,6,opt,name=if_match,json=ifMatch,proto3" json:"if_match,omitempty"` + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. 
+ IfModifiedSince int64 `protobuf:"varint,7,opt,name=if_modified_since,json=ifModifiedSince,proto3" json:"if_modified_since,omitempty"` + // Return the object only if its entity tag (ETag) is different from the one + // specified + IfNoneMatch string `protobuf:"bytes,8,opt,name=if_none_match,json=ifNoneMatch,proto3" json:"if_none_match,omitempty"` + // Return the object only if it has not been modified since the specified time; + IfUnmodifiedSince int64 `protobuf:"varint,9,opt,name=if_unmodified_since,json=ifUnmodifiedSince,proto3" json:"if_unmodified_since,omitempty"` + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. + // Useful querying about the size of the part and the number of parts in this + // object. + PartNumber int32 `protobuf:"varint,10,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"` + // Confirms that the requester knows that they will be charged for the request. + RequestPayer string `protobuf:"bytes,11,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + SseCustomerAlgorithm string `protobuf:"bytes,12,opt,name=sse_customer_algorithm,json=sseCustomerAlgorithm,proto3" json:"sse_customer_algorithm,omitempty"` + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data + SseCustomerKey string `protobuf:"bytes,13,opt,name=sse_customer_key,json=sseCustomerKey,proto3" json:"sse_customer_key,omitempty"` + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + SseCustomerKeyMd5 string `protobuf:"bytes,14,opt,name=sse_customer_key_md5,json=sseCustomerKeyMd5,proto3" json:"sse_customer_key_md5,omitempty"` + // VersionId used to reference a specific version of the object. 
+ VersionId string `protobuf:"bytes,15,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` + // Return object details meta + WithDetails bool `protobuf:"varint,16,opt,name=with_details,json=withDetails,proto3" json:"with_details,omitempty"` +} + +func (x *HeadObjectInput) Reset() { + *x = HeadObjectInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeadObjectInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeadObjectInput) ProtoMessage() {} + +func (x *HeadObjectInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeadObjectInput.ProtoReflect.Descriptor instead. 
+func (*HeadObjectInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{52} +} + +func (x *HeadObjectInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *HeadObjectInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *HeadObjectInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *HeadObjectInput) GetChecksumMode() string { + if x != nil { + return x.ChecksumMode + } + return "" +} + +func (x *HeadObjectInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *HeadObjectInput) GetIfMatch() string { + if x != nil { + return x.IfMatch + } + return "" +} + +func (x *HeadObjectInput) GetIfModifiedSince() int64 { + if x != nil { + return x.IfModifiedSince + } + return 0 +} + +func (x *HeadObjectInput) GetIfNoneMatch() string { + if x != nil { + return x.IfNoneMatch + } + return "" +} + +func (x *HeadObjectInput) GetIfUnmodifiedSince() int64 { + if x != nil { + return x.IfUnmodifiedSince + } + return 0 +} + +func (x *HeadObjectInput) GetPartNumber() int32 { + if x != nil { + return x.PartNumber + } + return 0 +} + +func (x *HeadObjectInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +func (x *HeadObjectInput) GetSseCustomerAlgorithm() string { + if x != nil { + return x.SseCustomerAlgorithm + } + return "" +} + +func (x *HeadObjectInput) GetSseCustomerKey() string { + if x != nil { + return x.SseCustomerKey + } + return "" +} + +func (x *HeadObjectInput) GetSseCustomerKeyMd5() string { + if x != nil { + return x.SseCustomerKeyMd5 + } + return "" +} + +func (x *HeadObjectInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +func (x *HeadObjectInput) GetWithDetails() bool { + if x != nil { + return x.WithDetails + } + return false +} + +// HeadObjectOutput +type HeadObjectOutput struct { + 
state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to the operation's result. + ResultMetadata map[string]string `protobuf:"bytes,1,rep,name=result_metadata,json=resultMetadata,proto3" json:"result_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *HeadObjectOutput) Reset() { + *x = HeadObjectOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeadObjectOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeadObjectOutput) ProtoMessage() {} + +func (x *HeadObjectOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeadObjectOutput.ProtoReflect.Descriptor instead. +func (*HeadObjectOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{53} +} + +func (x *HeadObjectOutput) GetResultMetadata() map[string]string { + if x != nil { + return x.ResultMetadata + } + return nil +} + +// IsObjectExistInput +type IsObjectExistInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. 
+ Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Object version id + VersionId string `protobuf:"bytes,4,opt,name=version_id,json=versionId,proto3" json:"version_id,omitempty"` +} + +func (x *IsObjectExistInput) Reset() { + *x = IsObjectExistInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IsObjectExistInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IsObjectExistInput) ProtoMessage() {} + +func (x *IsObjectExistInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IsObjectExistInput.ProtoReflect.Descriptor instead. +func (*IsObjectExistInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{54} +} + +func (x *IsObjectExistInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *IsObjectExistInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *IsObjectExistInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *IsObjectExistInput) GetVersionId() string { + if x != nil { + return x.VersionId + } + return "" +} + +// IsObjectExistOutput +type IsObjectExistOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object exist or not + FileExist bool `protobuf:"varint,1,opt,name=file_exist,json=fileExist,proto3" json:"file_exist,omitempty"` +} + +func (x *IsObjectExistOutput) Reset() { + *x = IsObjectExistOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[55] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IsObjectExistOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IsObjectExistOutput) ProtoMessage() {} + +func (x *IsObjectExistOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IsObjectExistOutput.ProtoReflect.Descriptor instead. +func (*IsObjectExistOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{55} +} + +func (x *IsObjectExistOutput) GetFileExist() bool { + if x != nil { + return x.FileExist + } + return false +} + +// SignURLInput +type SignURLInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // the method for sign url, eg. 
GET、POST + Method string `protobuf:"bytes,4,opt,name=method,proto3" json:"method,omitempty"` + // expire time of the sign url + ExpiredInSec int64 `protobuf:"varint,5,opt,name=expired_in_sec,json=expiredInSec,proto3" json:"expired_in_sec,omitempty"` +} + +func (x *SignURLInput) Reset() { + *x = SignURLInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignURLInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignURLInput) ProtoMessage() {} + +func (x *SignURLInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignURLInput.ProtoReflect.Descriptor instead. +func (*SignURLInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{56} +} + +func (x *SignURLInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *SignURLInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *SignURLInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SignURLInput) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *SignURLInput) GetExpiredInSec() int64 { + if x != nil { + return x.ExpiredInSec + } + return 0 +} + +// SignURLOutput +type SignURLOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object signed url + SignedUrl string `protobuf:"bytes,1,opt,name=signed_url,json=signedUrl,proto3" json:"signed_url,omitempty"` +} + +func (x *SignURLOutput) Reset() { + *x = SignURLOutput{} + if protoimpl.UnsafeEnabled { + mi := 
&file_oss_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignURLOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignURLOutput) ProtoMessage() {} + +func (x *SignURLOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignURLOutput.ProtoReflect.Descriptor instead. +func (*SignURLOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{57} +} + +func (x *SignURLOutput) GetSignedUrl() string { + if x != nil { + return x.SignedUrl + } + return "" +} + +// UpdateBandwidthRateLimitInput +type UpdateBandwidthRateLimitInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The average upload/download bandwidth rate limit in bits per second. 
+ AverageRateLimitInBitsPerSec int64 `protobuf:"varint,2,opt,name=average_rate_limit_in_bits_per_sec,json=averageRateLimitInBitsPerSec,proto3" json:"average_rate_limit_in_bits_per_sec,omitempty"` + // Resource name of gateway + GatewayResourceName string `protobuf:"bytes,3,opt,name=gateway_resource_name,json=gatewayResourceName,proto3" json:"gateway_resource_name,omitempty"` +} + +func (x *UpdateBandwidthRateLimitInput) Reset() { + *x = UpdateBandwidthRateLimitInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateBandwidthRateLimitInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateBandwidthRateLimitInput) ProtoMessage() {} + +func (x *UpdateBandwidthRateLimitInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateBandwidthRateLimitInput.ProtoReflect.Descriptor instead. +func (*UpdateBandwidthRateLimitInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{58} +} + +func (x *UpdateBandwidthRateLimitInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *UpdateBandwidthRateLimitInput) GetAverageRateLimitInBitsPerSec() int64 { + if x != nil { + return x.AverageRateLimitInBitsPerSec + } + return 0 +} + +func (x *UpdateBandwidthRateLimitInput) GetGatewayResourceName() string { + if x != nil { + return x.GatewayResourceName + } + return "" +} + +// AppendObjectInput +type AppendObjectInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. 
+ StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Object content + Body []byte `protobuf:"bytes,4,opt,name=body,proto3" json:"body,omitempty"` + // Append start position + Position int64 `protobuf:"varint,5,opt,name=position,proto3" json:"position,omitempty"` + // Object ACL + Acl string `protobuf:"bytes,6,opt,name=acl,proto3" json:"acl,omitempty"` + // Sets the Cache-Control header of the response. + CacheControl string `protobuf:"bytes,7,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // Sets the Content-Disposition header of the response + ContentDisposition string `protobuf:"bytes,8,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` + // Sets the Content-Encoding header of the response + ContentEncoding string `protobuf:"bytes,9,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMd5 string `protobuf:"bytes,10,opt,name=content_md5,json=contentMd5,proto3" json:"content_md5,omitempty"` + // Sets the Expires header of the response + Expires int64 `protobuf:"varint,11,opt,name=expires,proto3" json:"expires,omitempty"` + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. + StorageClass string `protobuf:"bytes,12,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). 
+ ServerSideEncryption string `protobuf:"bytes,13,opt,name=server_side_encryption,json=serverSideEncryption,proto3" json:"server_side_encryption,omitempty"` + // Object metadata + Meta string `protobuf:"bytes,14,opt,name=meta,proto3" json:"meta,omitempty"` + // Object tags + Tags map[string]string `protobuf:"bytes,15,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *AppendObjectInput) Reset() { + *x = AppendObjectInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendObjectInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendObjectInput) ProtoMessage() {} + +func (x *AppendObjectInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendObjectInput.ProtoReflect.Descriptor instead. 
+func (*AppendObjectInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{59} +} + +func (x *AppendObjectInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *AppendObjectInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *AppendObjectInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *AppendObjectInput) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *AppendObjectInput) GetPosition() int64 { + if x != nil { + return x.Position + } + return 0 +} + +func (x *AppendObjectInput) GetAcl() string { + if x != nil { + return x.Acl + } + return "" +} + +func (x *AppendObjectInput) GetCacheControl() string { + if x != nil { + return x.CacheControl + } + return "" +} + +func (x *AppendObjectInput) GetContentDisposition() string { + if x != nil { + return x.ContentDisposition + } + return "" +} + +func (x *AppendObjectInput) GetContentEncoding() string { + if x != nil { + return x.ContentEncoding + } + return "" +} + +func (x *AppendObjectInput) GetContentMd5() string { + if x != nil { + return x.ContentMd5 + } + return "" +} + +func (x *AppendObjectInput) GetExpires() int64 { + if x != nil { + return x.Expires + } + return 0 +} + +func (x *AppendObjectInput) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *AppendObjectInput) GetServerSideEncryption() string { + if x != nil { + return x.ServerSideEncryption + } + return "" +} + +func (x *AppendObjectInput) GetMeta() string { + if x != nil { + return x.Meta + } + return "" +} + +func (x *AppendObjectInput) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +// AppendObjectOutput +type AppendObjectOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Next append position + AppendPosition int64 
`protobuf:"varint,1,opt,name=append_position,json=appendPosition,proto3" json:"append_position,omitempty"` +} + +func (x *AppendObjectOutput) Reset() { + *x = AppendObjectOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendObjectOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendObjectOutput) ProtoMessage() {} + +func (x *AppendObjectOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendObjectOutput.ProtoReflect.Descriptor instead. +func (*AppendObjectOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{60} +} + +func (x *AppendObjectOutput) GetAppendPosition() int64 { + if x != nil { + return x.AppendPosition + } + return 0 +} + +// ListPartsInput +type ListPartsInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of oss store. + StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. 
+ Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // The account ID of the expected bucket owner + ExpectedBucketOwner string `protobuf:"bytes,4,opt,name=expected_bucket_owner,json=expectedBucketOwner,proto3" json:"expected_bucket_owner,omitempty"` + // Sets the maximum number of parts to return + MaxParts int64 `protobuf:"varint,5,opt,name=max_parts,json=maxParts,proto3" json:"max_parts,omitempty"` + // Specifies the part after which listing should begin. Only parts with higher part + // numbers will be listed. + PartNumberMarker int64 `protobuf:"varint,6,opt,name=part_number_marker,json=partNumberMarker,proto3" json:"part_number_marker,omitempty"` + // Confirms that the requester knows that they will be charged for the request. + RequestPayer string `protobuf:"bytes,7,opt,name=request_payer,json=requestPayer,proto3" json:"request_payer,omitempty"` + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId string `protobuf:"bytes,8,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *ListPartsInput) Reset() { + *x = ListPartsInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListPartsInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPartsInput) ProtoMessage() {} + +func (x *ListPartsInput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPartsInput.ProtoReflect.Descriptor instead. 
+func (*ListPartsInput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{61} +} + +func (x *ListPartsInput) GetStoreName() string { + if x != nil { + return x.StoreName + } + return "" +} + +func (x *ListPartsInput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ListPartsInput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *ListPartsInput) GetExpectedBucketOwner() string { + if x != nil { + return x.ExpectedBucketOwner + } + return "" +} + +func (x *ListPartsInput) GetMaxParts() int64 { + if x != nil { + return x.MaxParts + } + return 0 +} + +func (x *ListPartsInput) GetPartNumberMarker() int64 { + if x != nil { + return x.PartNumberMarker + } + return 0 +} + +func (x *ListPartsInput) GetRequestPayer() string { + if x != nil { + return x.RequestPayer + } + return "" +} + +func (x *ListPartsInput) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// Part +type Part struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Part Etag + Etag string `protobuf:"bytes,1,opt,name=etag,proto3" json:"etag,omitempty"` + // Last modified time + LastModified int64 `protobuf:"varint,2,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + // Part number + PartNumber int64 `protobuf:"varint,3,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"` + // Part size + Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *Part) Reset() { + *x = Part{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Part) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Part) ProtoMessage() {} + +func (x *Part) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[62] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Part.ProtoReflect.Descriptor instead. +func (*Part) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{62} +} + +func (x *Part) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Part) GetLastModified() int64 { + if x != nil { + return x.LastModified + } + return 0 +} + +func (x *Part) GetPartNumber() int64 { + if x != nil { + return x.PartNumber + } + return 0 +} + +func (x *Part) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +// ListPartsOutput +type ListPartsOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bucket name containing the object + // This member is required + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Name of the object key. + // This member is required. + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId string `protobuf:"bytes,3,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` + // When a list is truncated, this element specifies the last part in the list, as + // well as the value to use for the part-number-marker request parameter in a + // subsequent request. + NextPartNumberMarker string `protobuf:"bytes,4,opt,name=next_part_number_marker,json=nextPartNumberMarker,proto3" json:"next_part_number_marker,omitempty"` + // Maximum number of parts that were allowed in the response. + MaxParts int64 `protobuf:"varint,5,opt,name=max_parts,json=maxParts,proto3" json:"max_parts,omitempty"` + // Indicates whether the returned list of parts is truncated. 
A true value + // indicates that the list was truncated. A list can be truncated if the number of + // parts exceeds the limit returned in the MaxParts element. + IsTruncated bool `protobuf:"varint,6,opt,name=is_truncated,json=isTruncated,proto3" json:"is_truncated,omitempty"` + // Container for elements related to a particular part. A response can contain zero + // or more Part elements. + Parts []*Part `protobuf:"bytes,7,rep,name=parts,proto3" json:"parts,omitempty"` +} + +func (x *ListPartsOutput) Reset() { + *x = ListPartsOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_oss_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListPartsOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPartsOutput) ProtoMessage() {} + +func (x *ListPartsOutput) ProtoReflect() protoreflect.Message { + mi := &file_oss_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPartsOutput.ProtoReflect.Descriptor instead. 
+func (*ListPartsOutput) Descriptor() ([]byte, []int) { + return file_oss_proto_rawDescGZIP(), []int{63} +} + +func (x *ListPartsOutput) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ListPartsOutput) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *ListPartsOutput) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +func (x *ListPartsOutput) GetNextPartNumberMarker() string { + if x != nil { + return x.NextPartNumberMarker + } + return "" +} + +func (x *ListPartsOutput) GetMaxParts() int64 { + if x != nil { + return x.MaxParts + } + return 0 +} + +func (x *ListPartsOutput) GetIsTruncated() bool { + if x != nil { + return x.IsTruncated + } + return false +} + +func (x *ListPartsOutput) GetParts() []*Part { + if x != nil { + return x.Parts + } + return nil +} + +var File_oss_proto protoreflect.FileDescriptor + +var file_oss_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x6f, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x73, 0x70, 0x65, + 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdd, 0x07, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 
0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, + 0x77, 0x6e, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x66, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x69, 0x66, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x2a, 0x0a, 0x11, 0x69, 0x66, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x69, 0x66, 0x4d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x69, + 0x66, 0x5f, 0x6e, 0x6f, 0x6e, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x69, 0x66, 0x4e, 0x6f, 0x6e, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x2e, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x69, 0x66, + 0x55, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 0x12, 0x34, 0x0a, + 0x16, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, + 
0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x12, 0x40, 0x0a, 0x1c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x19, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x12, 0x3a, 0x0a, 0x19, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x32, 0x0a, + 0x15, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, + 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 
0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x73, + 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x73, + 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x14, + 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x6d, 0x64, 0x35, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x73, 0x65, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x4d, 0x64, 0x35, 0x12, 0x1d, 0x0a, + 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, + 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x55, 0x72, 0x6c, 0x22, 0xee, 0x05, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 
0x5f, 0x64, 0x69, 0x73, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x0a, + 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, + 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 
0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x61, 0x67, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x74, 0x61, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x55, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x12, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xaf, 0x05, 0x0a, 0x0e, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 
0x72, 0x65, + 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, + 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x69, 0x64, 0x65, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x48, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, + 0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, + 0x51, 0x0a, 0x07, 0x74, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x37, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x50, 0x75, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x54, 0x61, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x67, 0x67, 0x69, + 0x6e, 0x67, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x54, + 0x61, 0x67, 0x67, 0x69, 
0x6e, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xbb, 0x01, 0x0a, 0x0f, 0x50, 0x75, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1e, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, + 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x81, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, + 0x72, 0x6b, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x89, 0x02, 0x0a, + 0x15, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x4f, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, + 
0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x1a, + 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xeb, 0x01, 0x0a, 0x16, 0x50, 0x75, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x12, 0x6f, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x73, 0x70, + 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x1a, 0x41, 0x0a, 
0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb6, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x15, 0x65, + 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, + 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x22, + 0xf1, 0x01, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x72, 0x0a, 0x0f, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 
0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x73, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, + 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x2e, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, + 0x20, 0x01, 
0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 0x22, 0xf6, + 0x02, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x50, 0x0a, 0x04, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, + 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x2e, 0x54, 0x61, 0x67, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x6f, 0x0a, 0x0f, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x54, + 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 
0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x97, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x70, 0x79, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, + 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x16, + 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x6f, + 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x22, 0x90, 0x04, 0x0a, 0x0f, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 
0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, + 0x0a, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, + 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x70, + 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x07, 0x74, 0x61, 0x67, 0x67, 0x69, + 0x6e, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x12, 0x55, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x2e, 
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3a, 0x0a, 0x0c, 0x54, + 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0d, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x22, 0xad, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x5a, 0x0a, 0x12, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, + 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x52, 0x10, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, 0x65, 
0x63, 0x74, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x66, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x69, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x71, 0x75, 0x69, 0x65, 0x74, 0x22, 0x43, 0x0a, 0x10, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xac, + 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3a, 0x0a, 0x06, + 
0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 0x22, 0x9e, 0x01, + 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, + 0x72, 0x6b, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, + 0x72, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x5a, + 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x43, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x73, 0x33, 0x2e, 0x44, 0x65, 
0x6c, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0xaf, 0x02, 0x0a, 0x10, 0x4c, + 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, + 0x06, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, + 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x4b, 0x65, 0x79, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 
0x22, 0xe2, 0x02, 0x0a, + 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x08, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6d, 0x61, 0x78, + 0x4b, 0x65, 0x79, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x78, 0x74, + 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, + 0x65, 0x78, 
0x74, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x22, 0x3a, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0xc5, 0x01, + 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x69, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 
0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x18, 0x47, 0x65, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x37, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, + 0x33, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x27, + 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x22, 0x93, 0x01, 0x0a, 0x17, 0x50, 0x75, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 
0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x61, 0x63, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x43, 0x0a, + 0x18, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x41, 0x63, 0x6c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x72, 0x67, + 0x65, 0x64, 0x22, 0x7c, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x22, 0x6e, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x72, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, + 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, + 0x22, 0xf5, 0x0b, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, + 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, + 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 
0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x66, + 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x61, + 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x65, + 0x61, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x61, 0x63, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x67, 0x72, 0x61, 0x6e, + 0x74, 0x52, 0x65, 0x61, 0x64, 0x41, 0x63, 0x70, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x72, 0x61, 0x6e, + 0x74, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x61, 0x63, 0x70, 0x18, 0x10, 0x20, 0x01, 0x28, + 
0x09, 0x52, 0x0d, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x63, 0x70, + 0x12, 0x61, 0x0a, 0x09, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, + 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x1d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x6c, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x4c, 0x65, 0x67, 0x61, 0x6c, 0x48, 0x6f, 0x6c, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x40, 0x0a, 0x1d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, + 0x65, 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x65, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x19, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x44, 0x61, 0x74, + 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, + 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 
0x12, 0x34, 0x0a, 0x16, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, + 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x64, 0x35, 0x18, 0x18, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, + 0x72, 0x4b, 0x65, 0x79, 0x4d, 0x64, 0x35, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x73, 0x65, 0x5f, 0x6b, + 0x6d, 0x73, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x73, 0x73, 0x65, + 0x4b, 0x6d, 0x73, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x12, 0x23, 0x0a, 0x0e, 0x73, 0x73, 0x65, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x73, + 0x65, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x69, 0x64, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x12, 0x5d, 0x0a, 0x07, 0x74, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, + 0x1d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x73, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, + 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x54, 0x61, + 0x67, 0x67, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x12, 0x3a, 0x0a, 0x19, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x5f, 0x72, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x1e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, + 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, + 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfd, 0x03, 0x0a, 0x1b, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x44, 0x61, 0x74, + 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x52, + 0x75, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, + 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x16, + 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x73, + 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x64, 0x35, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x73, 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4b, 0x65, 0x79, + 0x4d, 0x64, 0x35, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x73, 0x65, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x17, 0x73, 0x73, 0x65, 0x4b, 0x6d, 0x73, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x12, 0x23, 0x0a, 0x0e, 0x73, 0x73, 0x65, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x73, 0x65, 0x4b, 0x6d, 0x73, + 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x73, 0x69, 0x64, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x69, 0x64, + 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0xde, 0x03, 0x0a, 0x0f, 0x55, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x64, 0x35, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 
0x4d, 0x64, + 0x35, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x13, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x73, + 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x73, 0x65, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, + 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x73, 0x65, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x14, 0x73, + 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6d, 0x64, 0x35, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x73, 0x65, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x4d, 0x64, 0x35, 0x12, 0x1b, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0xbf, 0x02, 0x0a, 0x10, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x62, 0x75, 
0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x72, + 0x67, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x73, 0x65, + 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x73, 0x65, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, + 0x2f, 0x0a, 0x14, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x64, 0x35, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, + 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x4d, 0x64, 0x35, + 0x12, 0x23, 0x0a, 0x0e, 0x73, 0x73, 0x65, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x73, 0x65, 0x4b, 0x6d, 0x73, + 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x73, 0x69, 0x64, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x69, 0x64, + 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa9, 0x02, 0x0a, 0x13, + 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x70, 0x79, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 
0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x0b, + 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, + 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, + 0x72, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x70, + 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x49, 0x0a, 0x0e, 0x43, 0x6f, 0x70, 0x79, 0x50, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, + 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x02, + 
0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x22, 0xba, 0x03, 0x0a, 0x14, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, + 0x74, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x54, 0x0a, 0x10, 0x63, 0x6f, 0x70, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, + 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x50, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x0e, 0x63, 0x6f, 0x70, 0x79, 0x50, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x33, 0x0a, 0x16, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x12, 0x34, 0x0a, + 0x16, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x6c, + 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, + 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x12, 0x2f, 0x0a, 
0x14, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x64, 0x35, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x73, 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4b, 0x65, + 0x79, 0x4d, 0x64, 0x35, 0x12, 0x23, 0x0a, 0x0e, 0x73, 0x73, 0x65, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x73, + 0x65, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x69, 0x64, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x44, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x5b, 0x0a, 0x18, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x3f, 0x0a, 0x05, 0x70, 0x61, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x74, 0x52, 0x05, 0x70, 0x61, 0x72, + 0x74, 0x73, 0x22, 0xbe, 0x02, 0x0a, 0x1c, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 
0x61, 0x64, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 0x12, 0x32, + 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, + 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x5f, 0x0a, 0x10, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x5f, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x0f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x22, 0xe9, 0x02, 0x0a, 0x1d, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4f, + 0x75, 0x74, 
0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, + 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x73, 0x65, 0x5f, 0x6b, 0x6d, + 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x73, 0x65, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x53, 0x69, 0x64, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, + 0xda, 0x01, 0x0a, 0x19, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, + 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x1a, + 0x41, 0x62, 0x6f, 0x72, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x72, + 0x67, 0x65, 0x64, 0x22, 0xcb, 0x02, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x49, 0x6e, 
0x70, 0x75, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x65, + 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, + 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, + 0x1d, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x4d, 0x61, 0x72, 0x6b, 0x65, + 0x72, 0x22, 0x3e, 0x0a, 0x09, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 
0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x22, 0x81, 0x02, 0x0a, 0x0f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x73, 0x33, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x05, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x70, 0x65, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x05, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0xe7, 0x03, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x75, + 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 
0x6f, 0x61, 0x64, 0x73, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x0f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x74, + 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x69, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6b, + 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, + 0x78, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x6d, 0x61, 0x78, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, + 0x65, 0x78, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x72, + 0x6b, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x15, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x75, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x6e, 0x65, 0x78, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, + 
0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, + 0x0a, 0x10, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x6d, 0x61, 0x72, 0x6b, + 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x49, 0x64, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x07, 0x75, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x70, 0x65, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, + 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x22, + 0xc5, 0x02, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, + 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 
0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x6b, 0x65, 0x79, + 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6b, + 0x65, 0x79, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x11, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x22, 0xbf, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1b, 0x0a, + 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x69, 0x73, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4f, 0x77, + 0x6e, 0x65, 0x72, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x09, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x88, 0x02, 0x0a, 0x0d, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, + 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x69, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xb5, 0x04, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 
0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x73, + 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x75, + 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, + 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x4d, + 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x4b, 0x65, 0x79, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x4b, 0x65, 0x79, 0x4d, 0x61, 
0x72, 0x6b, 0x65, 0x72, 0x12, 0x33, 0x0a, 0x16, + 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x5f, + 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6e, 0x65, + 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x4d, 0x61, 0x72, 0x6b, 0x65, + 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x11, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x4d, + 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe7, 0x04, 0x0a, + 0x0f, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x4d, 0x6f, 0x64, 0x65, 
0x12, 0x32, + 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, + 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x66, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x69, 0x66, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2a, 0x0a, + 0x11, 0x69, 0x66, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x6e, + 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x69, 0x66, 0x4d, 0x6f, 0x64, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x66, 0x5f, + 0x6e, 0x6f, 0x6e, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x69, 0x66, 0x4e, 0x6f, 0x6e, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2e, 0x0a, + 0x13, 0x69, 0x66, 0x5f, 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x69, 0x66, 0x55, 0x6e, + 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x70, 0x61, 0x72, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x23, + 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, 0x72, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x79, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, + 0x41, 0x6c, 0x67, 0x6f, 
0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x73, 0x65, + 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, + 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x73, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x64, 0x35, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x73, 0x73, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4b, 0x65, + 0x79, 0x4d, 0x64, 0x35, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xc0, 0x01, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x69, 0x0a, 0x0f, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, + 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7c, 0x0a, 0x12, 0x49, 0x73, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x34, 0x0a, 0x13, 0x49, 0x73, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x22, 0x95, 0x01, + 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x55, 0x52, 0x4c, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 
0x24, 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x73, 0x65, + 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, + 0x49, 0x6e, 0x53, 0x65, 0x63, 0x22, 0x2e, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x55, 0x52, 0x4c, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x55, 0x72, 0x6c, 0x22, 0xbc, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x22, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x5f, + 0x62, 0x69, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x1c, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x42, 0x69, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, + 0x12, 0x32, 0x0a, 0x15, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xcf, 0x04, 0x0a, 0x11, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 
0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x5f, 0x6d, 0x64, 0x35, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x64, 0x35, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 
0x6f, 0x72, 0x61, 0x67, + 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x69, + 0x64, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, + 0x12, 0x4b, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, + 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x41, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x54, 0x61, + 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x37, 0x0a, + 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3d, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, + 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x9a, 0x02, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x74, 0x73, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, + 0x6f, 0x72, 
0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x13, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x72, + 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x72, + 0x74, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, + 0x70, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x65, + 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x49, 0x64, 0x22, 0x74, 0x0a, 0x04, 0x50, 0x61, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, + 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, + 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x69, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 
0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x87, 0x02, 0x0a, 0x0f, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, + 0x78, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, + 0x61, 0x78, 0x50, 0x61, 0x72, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x74, 0x72, + 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x70, 0x61, + 0x72, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x70, 0x65, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x52, 0x05, 0x70, 0x61, 0x72, + 0x74, 0x73, 0x32, 0xcd, 0x18, 0x0a, 0x14, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x68, 0x0a, 
0x09, 0x50, + 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2b, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, + 0x33, 0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x22, 0x00, 0x28, 0x01, 0x12, 0x68, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2b, + 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x6f, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x2d, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2e, + 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, + 0x12, 0x69, 0x0a, 0x0a, 
0x43, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, + 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x6f, 0x70, 0x79, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2c, 0x2e, 0x73, 0x70, + 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x2e, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2f, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, + 0x6c, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x2c, + 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2d, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 
0x74, 0x22, 0x00, 0x12, 0x69, 0x0a, + 0x0a, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x2e, 0x73, 0x70, + 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2c, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x0d, 0x49, 0x73, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x12, 0x2e, 0x2e, 0x73, 0x70, 0x65, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x49, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, + 0x78, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2f, 0x2e, 0x73, 0x70, 0x65, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x49, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, + 0x78, 0x69, 0x73, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x10, + 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, + 0x12, 0x31, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x50, 0x75, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x1a, 0x32, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, + 
0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x84, 0x01, 0x0a, 0x13, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x12, 0x34, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, + 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x35, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, + 0x12, 0x7b, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x12, 0x31, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, + 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x32, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x61, + 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x81, 0x01, + 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x65, + 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x65, + 0x64, 0x41, 0x63, 0x6c, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x34, 0x2e, 0x73, 0x70, 0x65, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, + 0x00, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x34, 0x2e, + 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x8a, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x36, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x37, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 
0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x0a, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x74, + 0x12, 0x2b, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2c, 0x2e, + 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x61, 0x72, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x28, 0x01, 0x12, + 0x75, 0x0a, 0x0e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x70, + 0x79, 0x12, 0x2f, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x55, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x70, 0x79, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x1a, 0x30, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, + 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x90, 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x38, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, + 0x43, 0x6f, 
0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, + 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x39, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x41, 0x62, + 0x6f, 0x72, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x35, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, + 0x41, 0x62, 0x6f, 0x72, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x36, 0x2e, 0x73, 0x70, 0x65, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x4d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x22, 0x00, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x35, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x75, + 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x1a, 0x36, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 
0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x61, 0x72, 0x74, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x66, 0x0a, + 0x09, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x74, 0x73, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x65, + 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x74, + 0x73, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2b, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x73, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x74, 0x73, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x1a, 0x34, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x07, 0x53, 0x69, 0x67, + 0x6e, 0x55, 0x52, 0x4c, 0x12, 0x28, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, + 0x33, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x55, 0x52, 0x4c, 0x49, 0x6e, 0x70, 0x75, 0x74, 
0x1a, 0x29, + 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x53, 0x69, 0x67, 0x6e, + 0x55, 0x52, 0x4c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x12, 0x77, 0x0a, 0x20, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6e, + 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x39, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x1e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x52, 0x61, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x39, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x73, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, + 0x64, 0x74, 0x68, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x0c, 0x41, + 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2d, 0x2e, 0x73, 0x70, + 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 
0x31, 0x2e, 0x73, 0x33, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x2e, 0x2e, 0x73, 0x70, 0x65, + 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x00, 0x28, 0x01, 0x12, 0x72, + 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x2e, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, + 0x2f, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x33, 0x2e, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x22, 0x00, 0x42, 0x2f, 0x5a, 0x2d, 0x6d, 0x6f, 0x73, 0x6e, 0x2e, 0x69, 0x6f, 0x2f, 0x6c, 0x61, + 0x79, 0x6f, 0x74, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x33, + 0x3b, 0x73, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oss_proto_rawDescOnce sync.Once + file_oss_proto_rawDescData = file_oss_proto_rawDesc +) + +func file_oss_proto_rawDescGZIP() []byte { + file_oss_proto_rawDescOnce.Do(func() { + file_oss_proto_rawDescData = protoimpl.X.CompressGZIP(file_oss_proto_rawDescData) + }) + return file_oss_proto_rawDescData +} + +var file_oss_proto_msgTypes = make([]protoimpl.MessageInfo, 78) +var file_oss_proto_goTypes = []interface{}{ + (*GetObjectInput)(nil), // 0: 
spec.proto.extension.v1.s3.GetObjectInput + (*GetObjectOutput)(nil), // 1: spec.proto.extension.v1.s3.GetObjectOutput + (*PutObjectInput)(nil), // 2: spec.proto.extension.v1.s3.PutObjectInput + (*PutObjectOutput)(nil), // 3: spec.proto.extension.v1.s3.PutObjectOutput + (*DeleteObjectInput)(nil), // 4: spec.proto.extension.v1.s3.DeleteObjectInput + (*DeleteObjectOutput)(nil), // 5: spec.proto.extension.v1.s3.DeleteObjectOutput + (*PutObjectTaggingInput)(nil), // 6: spec.proto.extension.v1.s3.PutObjectTaggingInput + (*PutObjectTaggingOutput)(nil), // 7: spec.proto.extension.v1.s3.PutObjectTaggingOutput + (*DeleteObjectTaggingInput)(nil), // 8: spec.proto.extension.v1.s3.DeleteObjectTaggingInput + (*DeleteObjectTaggingOutput)(nil), // 9: spec.proto.extension.v1.s3.DeleteObjectTaggingOutput + (*GetObjectTaggingInput)(nil), // 10: spec.proto.extension.v1.s3.GetObjectTaggingInput + (*GetObjectTaggingOutput)(nil), // 11: spec.proto.extension.v1.s3.GetObjectTaggingOutput + (*CopySource)(nil), // 12: spec.proto.extension.v1.s3.CopySource + (*CopyObjectInput)(nil), // 13: spec.proto.extension.v1.s3.CopyObjectInput + (*CopyObjectResult)(nil), // 14: spec.proto.extension.v1.s3.CopyObjectResult + (*CopyObjectOutput)(nil), // 15: spec.proto.extension.v1.s3.CopyObjectOutput + (*Delete)(nil), // 16: spec.proto.extension.v1.s3.Delete + (*ObjectIdentifier)(nil), // 17: spec.proto.extension.v1.s3.ObjectIdentifier + (*DeleteObjectsInput)(nil), // 18: spec.proto.extension.v1.s3.DeleteObjectsInput + (*DeletedObject)(nil), // 19: spec.proto.extension.v1.s3.DeletedObject + (*DeleteObjectsOutput)(nil), // 20: spec.proto.extension.v1.s3.DeleteObjectsOutput + (*ListObjectsInput)(nil), // 21: spec.proto.extension.v1.s3.ListObjectsInput + (*ListObjectsOutput)(nil), // 22: spec.proto.extension.v1.s3.ListObjectsOutput + (*Owner)(nil), // 23: spec.proto.extension.v1.s3.Owner + (*Object)(nil), // 24: spec.proto.extension.v1.s3.Object + (*GetObjectCannedAclInput)(nil), // 25: 
spec.proto.extension.v1.s3.GetObjectCannedAclInput + (*GetObjectCannedAclOutput)(nil), // 26: spec.proto.extension.v1.s3.GetObjectCannedAclOutput + (*PutObjectCannedAclInput)(nil), // 27: spec.proto.extension.v1.s3.PutObjectCannedAclInput + (*PutObjectCannedAclOutput)(nil), // 28: spec.proto.extension.v1.s3.PutObjectCannedAclOutput + (*RestoreObjectInput)(nil), // 29: spec.proto.extension.v1.s3.RestoreObjectInput + (*RestoreObjectOutput)(nil), // 30: spec.proto.extension.v1.s3.RestoreObjectOutput + (*CreateMultipartUploadInput)(nil), // 31: spec.proto.extension.v1.s3.CreateMultipartUploadInput + (*CreateMultipartUploadOutput)(nil), // 32: spec.proto.extension.v1.s3.CreateMultipartUploadOutput + (*UploadPartInput)(nil), // 33: spec.proto.extension.v1.s3.UploadPartInput + (*UploadPartOutput)(nil), // 34: spec.proto.extension.v1.s3.UploadPartOutput + (*UploadPartCopyInput)(nil), // 35: spec.proto.extension.v1.s3.UploadPartCopyInput + (*CopyPartResult)(nil), // 36: spec.proto.extension.v1.s3.CopyPartResult + (*UploadPartCopyOutput)(nil), // 37: spec.proto.extension.v1.s3.UploadPartCopyOutput + (*CompletedPart)(nil), // 38: spec.proto.extension.v1.s3.CompletedPart + (*CompletedMultipartUpload)(nil), // 39: spec.proto.extension.v1.s3.CompletedMultipartUpload + (*CompleteMultipartUploadInput)(nil), // 40: spec.proto.extension.v1.s3.CompleteMultipartUploadInput + (*CompleteMultipartUploadOutput)(nil), // 41: spec.proto.extension.v1.s3.CompleteMultipartUploadOutput + (*AbortMultipartUploadInput)(nil), // 42: spec.proto.extension.v1.s3.AbortMultipartUploadInput + (*AbortMultipartUploadOutput)(nil), // 43: spec.proto.extension.v1.s3.AbortMultipartUploadOutput + (*ListMultipartUploadsInput)(nil), // 44: spec.proto.extension.v1.s3.ListMultipartUploadsInput + (*Initiator)(nil), // 45: spec.proto.extension.v1.s3.Initiator + (*MultipartUpload)(nil), // 46: spec.proto.extension.v1.s3.MultipartUpload + (*ListMultipartUploadsOutput)(nil), // 47: 
spec.proto.extension.v1.s3.ListMultipartUploadsOutput + (*ListObjectVersionsInput)(nil), // 48: spec.proto.extension.v1.s3.ListObjectVersionsInput + (*DeleteMarkerEntry)(nil), // 49: spec.proto.extension.v1.s3.DeleteMarkerEntry + (*ObjectVersion)(nil), // 50: spec.proto.extension.v1.s3.ObjectVersion + (*ListObjectVersionsOutput)(nil), // 51: spec.proto.extension.v1.s3.ListObjectVersionsOutput + (*HeadObjectInput)(nil), // 52: spec.proto.extension.v1.s3.HeadObjectInput + (*HeadObjectOutput)(nil), // 53: spec.proto.extension.v1.s3.HeadObjectOutput + (*IsObjectExistInput)(nil), // 54: spec.proto.extension.v1.s3.IsObjectExistInput + (*IsObjectExistOutput)(nil), // 55: spec.proto.extension.v1.s3.IsObjectExistOutput + (*SignURLInput)(nil), // 56: spec.proto.extension.v1.s3.SignURLInput + (*SignURLOutput)(nil), // 57: spec.proto.extension.v1.s3.SignURLOutput + (*UpdateBandwidthRateLimitInput)(nil), // 58: spec.proto.extension.v1.s3.UpdateBandwidthRateLimitInput + (*AppendObjectInput)(nil), // 59: spec.proto.extension.v1.s3.AppendObjectInput + (*AppendObjectOutput)(nil), // 60: spec.proto.extension.v1.s3.AppendObjectOutput + (*ListPartsInput)(nil), // 61: spec.proto.extension.v1.s3.ListPartsInput + (*Part)(nil), // 62: spec.proto.extension.v1.s3.Part + (*ListPartsOutput)(nil), // 63: spec.proto.extension.v1.s3.ListPartsOutput + nil, // 64: spec.proto.extension.v1.s3.GetObjectOutput.MetadataEntry + nil, // 65: spec.proto.extension.v1.s3.PutObjectInput.MetaEntry + nil, // 66: spec.proto.extension.v1.s3.PutObjectInput.TaggingEntry + nil, // 67: spec.proto.extension.v1.s3.PutObjectTaggingInput.TagsEntry + nil, // 68: spec.proto.extension.v1.s3.PutObjectTaggingOutput.ResultMetadataEntry + nil, // 69: spec.proto.extension.v1.s3.DeleteObjectTaggingOutput.ResultMetadataEntry + nil, // 70: spec.proto.extension.v1.s3.GetObjectTaggingOutput.TagsEntry + nil, // 71: spec.proto.extension.v1.s3.GetObjectTaggingOutput.ResultMetadataEntry + nil, // 72: 
spec.proto.extension.v1.s3.CopyObjectInput.TaggingEntry + nil, // 73: spec.proto.extension.v1.s3.CopyObjectInput.MetadataEntry + nil, // 74: spec.proto.extension.v1.s3.CreateMultipartUploadInput.MetaDataEntry + nil, // 75: spec.proto.extension.v1.s3.CreateMultipartUploadInput.TaggingEntry + nil, // 76: spec.proto.extension.v1.s3.HeadObjectOutput.ResultMetadataEntry + nil, // 77: spec.proto.extension.v1.s3.AppendObjectInput.TagsEntry + (*emptypb.Empty)(nil), // 78: google.protobuf.Empty +} +var file_oss_proto_depIdxs = []int32{ + 64, // 0: spec.proto.extension.v1.s3.GetObjectOutput.metadata:type_name -> spec.proto.extension.v1.s3.GetObjectOutput.MetadataEntry + 65, // 1: spec.proto.extension.v1.s3.PutObjectInput.meta:type_name -> spec.proto.extension.v1.s3.PutObjectInput.MetaEntry + 66, // 2: spec.proto.extension.v1.s3.PutObjectInput.tagging:type_name -> spec.proto.extension.v1.s3.PutObjectInput.TaggingEntry + 67, // 3: spec.proto.extension.v1.s3.PutObjectTaggingInput.tags:type_name -> spec.proto.extension.v1.s3.PutObjectTaggingInput.TagsEntry + 68, // 4: spec.proto.extension.v1.s3.PutObjectTaggingOutput.result_metadata:type_name -> spec.proto.extension.v1.s3.PutObjectTaggingOutput.ResultMetadataEntry + 69, // 5: spec.proto.extension.v1.s3.DeleteObjectTaggingOutput.result_metadata:type_name -> spec.proto.extension.v1.s3.DeleteObjectTaggingOutput.ResultMetadataEntry + 70, // 6: spec.proto.extension.v1.s3.GetObjectTaggingOutput.tags:type_name -> spec.proto.extension.v1.s3.GetObjectTaggingOutput.TagsEntry + 71, // 7: spec.proto.extension.v1.s3.GetObjectTaggingOutput.result_metadata:type_name -> spec.proto.extension.v1.s3.GetObjectTaggingOutput.ResultMetadataEntry + 12, // 8: spec.proto.extension.v1.s3.CopyObjectInput.copy_source:type_name -> spec.proto.extension.v1.s3.CopySource + 72, // 9: spec.proto.extension.v1.s3.CopyObjectInput.tagging:type_name -> spec.proto.extension.v1.s3.CopyObjectInput.TaggingEntry + 73, // 10: 
spec.proto.extension.v1.s3.CopyObjectInput.metadata:type_name -> spec.proto.extension.v1.s3.CopyObjectInput.MetadataEntry + 14, // 11: spec.proto.extension.v1.s3.CopyObjectOutput.copy_object_result:type_name -> spec.proto.extension.v1.s3.CopyObjectResult + 17, // 12: spec.proto.extension.v1.s3.Delete.objects:type_name -> spec.proto.extension.v1.s3.ObjectIdentifier + 16, // 13: spec.proto.extension.v1.s3.DeleteObjectsInput.delete:type_name -> spec.proto.extension.v1.s3.Delete + 19, // 14: spec.proto.extension.v1.s3.DeleteObjectsOutput.deleted:type_name -> spec.proto.extension.v1.s3.DeletedObject + 24, // 15: spec.proto.extension.v1.s3.ListObjectsOutput.contents:type_name -> spec.proto.extension.v1.s3.Object + 23, // 16: spec.proto.extension.v1.s3.Object.owner:type_name -> spec.proto.extension.v1.s3.Owner + 23, // 17: spec.proto.extension.v1.s3.GetObjectCannedAclOutput.owner:type_name -> spec.proto.extension.v1.s3.Owner + 74, // 18: spec.proto.extension.v1.s3.CreateMultipartUploadInput.meta_data:type_name -> spec.proto.extension.v1.s3.CreateMultipartUploadInput.MetaDataEntry + 75, // 19: spec.proto.extension.v1.s3.CreateMultipartUploadInput.tagging:type_name -> spec.proto.extension.v1.s3.CreateMultipartUploadInput.TaggingEntry + 12, // 20: spec.proto.extension.v1.s3.UploadPartCopyInput.copy_source:type_name -> spec.proto.extension.v1.s3.CopySource + 36, // 21: spec.proto.extension.v1.s3.UploadPartCopyOutput.copy_part_result:type_name -> spec.proto.extension.v1.s3.CopyPartResult + 38, // 22: spec.proto.extension.v1.s3.CompletedMultipartUpload.parts:type_name -> spec.proto.extension.v1.s3.CompletedPart + 39, // 23: spec.proto.extension.v1.s3.CompleteMultipartUploadInput.multipart_upload:type_name -> spec.proto.extension.v1.s3.CompletedMultipartUpload + 45, // 24: spec.proto.extension.v1.s3.MultipartUpload.initiator:type_name -> spec.proto.extension.v1.s3.Initiator + 23, // 25: spec.proto.extension.v1.s3.MultipartUpload.owner:type_name -> 
spec.proto.extension.v1.s3.Owner + 46, // 26: spec.proto.extension.v1.s3.ListMultipartUploadsOutput.uploads:type_name -> spec.proto.extension.v1.s3.MultipartUpload + 23, // 27: spec.proto.extension.v1.s3.DeleteMarkerEntry.owner:type_name -> spec.proto.extension.v1.s3.Owner + 23, // 28: spec.proto.extension.v1.s3.ObjectVersion.owner:type_name -> spec.proto.extension.v1.s3.Owner + 49, // 29: spec.proto.extension.v1.s3.ListObjectVersionsOutput.delete_markers:type_name -> spec.proto.extension.v1.s3.DeleteMarkerEntry + 50, // 30: spec.proto.extension.v1.s3.ListObjectVersionsOutput.versions:type_name -> spec.proto.extension.v1.s3.ObjectVersion + 76, // 31: spec.proto.extension.v1.s3.HeadObjectOutput.result_metadata:type_name -> spec.proto.extension.v1.s3.HeadObjectOutput.ResultMetadataEntry + 77, // 32: spec.proto.extension.v1.s3.AppendObjectInput.tags:type_name -> spec.proto.extension.v1.s3.AppendObjectInput.TagsEntry + 62, // 33: spec.proto.extension.v1.s3.ListPartsOutput.parts:type_name -> spec.proto.extension.v1.s3.Part + 2, // 34: spec.proto.extension.v1.s3.ObjectStorageService.PutObject:input_type -> spec.proto.extension.v1.s3.PutObjectInput + 0, // 35: spec.proto.extension.v1.s3.ObjectStorageService.GetObject:input_type -> spec.proto.extension.v1.s3.GetObjectInput + 4, // 36: spec.proto.extension.v1.s3.ObjectStorageService.DeleteObject:input_type -> spec.proto.extension.v1.s3.DeleteObjectInput + 13, // 37: spec.proto.extension.v1.s3.ObjectStorageService.CopyObject:input_type -> spec.proto.extension.v1.s3.CopyObjectInput + 18, // 38: spec.proto.extension.v1.s3.ObjectStorageService.DeleteObjects:input_type -> spec.proto.extension.v1.s3.DeleteObjectsInput + 21, // 39: spec.proto.extension.v1.s3.ObjectStorageService.ListObjects:input_type -> spec.proto.extension.v1.s3.ListObjectsInput + 52, // 40: spec.proto.extension.v1.s3.ObjectStorageService.HeadObject:input_type -> spec.proto.extension.v1.s3.HeadObjectInput + 54, // 41: 
spec.proto.extension.v1.s3.ObjectStorageService.IsObjectExist:input_type -> spec.proto.extension.v1.s3.IsObjectExistInput + 6, // 42: spec.proto.extension.v1.s3.ObjectStorageService.PutObjectTagging:input_type -> spec.proto.extension.v1.s3.PutObjectTaggingInput + 8, // 43: spec.proto.extension.v1.s3.ObjectStorageService.DeleteObjectTagging:input_type -> spec.proto.extension.v1.s3.DeleteObjectTaggingInput + 10, // 44: spec.proto.extension.v1.s3.ObjectStorageService.GetObjectTagging:input_type -> spec.proto.extension.v1.s3.GetObjectTaggingInput + 25, // 45: spec.proto.extension.v1.s3.ObjectStorageService.GetObjectCannedAcl:input_type -> spec.proto.extension.v1.s3.GetObjectCannedAclInput + 27, // 46: spec.proto.extension.v1.s3.ObjectStorageService.PutObjectCannedAcl:input_type -> spec.proto.extension.v1.s3.PutObjectCannedAclInput + 31, // 47: spec.proto.extension.v1.s3.ObjectStorageService.CreateMultipartUpload:input_type -> spec.proto.extension.v1.s3.CreateMultipartUploadInput + 33, // 48: spec.proto.extension.v1.s3.ObjectStorageService.UploadPart:input_type -> spec.proto.extension.v1.s3.UploadPartInput + 35, // 49: spec.proto.extension.v1.s3.ObjectStorageService.UploadPartCopy:input_type -> spec.proto.extension.v1.s3.UploadPartCopyInput + 40, // 50: spec.proto.extension.v1.s3.ObjectStorageService.CompleteMultipartUpload:input_type -> spec.proto.extension.v1.s3.CompleteMultipartUploadInput + 42, // 51: spec.proto.extension.v1.s3.ObjectStorageService.AbortMultipartUpload:input_type -> spec.proto.extension.v1.s3.AbortMultipartUploadInput + 44, // 52: spec.proto.extension.v1.s3.ObjectStorageService.ListMultipartUploads:input_type -> spec.proto.extension.v1.s3.ListMultipartUploadsInput + 61, // 53: spec.proto.extension.v1.s3.ObjectStorageService.ListParts:input_type -> spec.proto.extension.v1.s3.ListPartsInput + 48, // 54: spec.proto.extension.v1.s3.ObjectStorageService.ListObjectVersions:input_type -> spec.proto.extension.v1.s3.ListObjectVersionsInput + 56, // 55: 
spec.proto.extension.v1.s3.ObjectStorageService.SignURL:input_type -> spec.proto.extension.v1.s3.SignURLInput + 58, // 56: spec.proto.extension.v1.s3.ObjectStorageService.UpdateDownloadBandwidthRateLimit:input_type -> spec.proto.extension.v1.s3.UpdateBandwidthRateLimitInput + 58, // 57: spec.proto.extension.v1.s3.ObjectStorageService.UpdateUploadBandwidthRateLimit:input_type -> spec.proto.extension.v1.s3.UpdateBandwidthRateLimitInput + 59, // 58: spec.proto.extension.v1.s3.ObjectStorageService.AppendObject:input_type -> spec.proto.extension.v1.s3.AppendObjectInput + 29, // 59: spec.proto.extension.v1.s3.ObjectStorageService.RestoreObject:input_type -> spec.proto.extension.v1.s3.RestoreObjectInput + 3, // 60: spec.proto.extension.v1.s3.ObjectStorageService.PutObject:output_type -> spec.proto.extension.v1.s3.PutObjectOutput + 1, // 61: spec.proto.extension.v1.s3.ObjectStorageService.GetObject:output_type -> spec.proto.extension.v1.s3.GetObjectOutput + 5, // 62: spec.proto.extension.v1.s3.ObjectStorageService.DeleteObject:output_type -> spec.proto.extension.v1.s3.DeleteObjectOutput + 15, // 63: spec.proto.extension.v1.s3.ObjectStorageService.CopyObject:output_type -> spec.proto.extension.v1.s3.CopyObjectOutput + 20, // 64: spec.proto.extension.v1.s3.ObjectStorageService.DeleteObjects:output_type -> spec.proto.extension.v1.s3.DeleteObjectsOutput + 22, // 65: spec.proto.extension.v1.s3.ObjectStorageService.ListObjects:output_type -> spec.proto.extension.v1.s3.ListObjectsOutput + 53, // 66: spec.proto.extension.v1.s3.ObjectStorageService.HeadObject:output_type -> spec.proto.extension.v1.s3.HeadObjectOutput + 55, // 67: spec.proto.extension.v1.s3.ObjectStorageService.IsObjectExist:output_type -> spec.proto.extension.v1.s3.IsObjectExistOutput + 7, // 68: spec.proto.extension.v1.s3.ObjectStorageService.PutObjectTagging:output_type -> spec.proto.extension.v1.s3.PutObjectTaggingOutput + 9, // 69: spec.proto.extension.v1.s3.ObjectStorageService.DeleteObjectTagging:output_type 
-> spec.proto.extension.v1.s3.DeleteObjectTaggingOutput + 11, // 70: spec.proto.extension.v1.s3.ObjectStorageService.GetObjectTagging:output_type -> spec.proto.extension.v1.s3.GetObjectTaggingOutput + 26, // 71: spec.proto.extension.v1.s3.ObjectStorageService.GetObjectCannedAcl:output_type -> spec.proto.extension.v1.s3.GetObjectCannedAclOutput + 28, // 72: spec.proto.extension.v1.s3.ObjectStorageService.PutObjectCannedAcl:output_type -> spec.proto.extension.v1.s3.PutObjectCannedAclOutput + 32, // 73: spec.proto.extension.v1.s3.ObjectStorageService.CreateMultipartUpload:output_type -> spec.proto.extension.v1.s3.CreateMultipartUploadOutput + 34, // 74: spec.proto.extension.v1.s3.ObjectStorageService.UploadPart:output_type -> spec.proto.extension.v1.s3.UploadPartOutput + 37, // 75: spec.proto.extension.v1.s3.ObjectStorageService.UploadPartCopy:output_type -> spec.proto.extension.v1.s3.UploadPartCopyOutput + 41, // 76: spec.proto.extension.v1.s3.ObjectStorageService.CompleteMultipartUpload:output_type -> spec.proto.extension.v1.s3.CompleteMultipartUploadOutput + 43, // 77: spec.proto.extension.v1.s3.ObjectStorageService.AbortMultipartUpload:output_type -> spec.proto.extension.v1.s3.AbortMultipartUploadOutput + 47, // 78: spec.proto.extension.v1.s3.ObjectStorageService.ListMultipartUploads:output_type -> spec.proto.extension.v1.s3.ListMultipartUploadsOutput + 63, // 79: spec.proto.extension.v1.s3.ObjectStorageService.ListParts:output_type -> spec.proto.extension.v1.s3.ListPartsOutput + 51, // 80: spec.proto.extension.v1.s3.ObjectStorageService.ListObjectVersions:output_type -> spec.proto.extension.v1.s3.ListObjectVersionsOutput + 57, // 81: spec.proto.extension.v1.s3.ObjectStorageService.SignURL:output_type -> spec.proto.extension.v1.s3.SignURLOutput + 78, // 82: spec.proto.extension.v1.s3.ObjectStorageService.UpdateDownloadBandwidthRateLimit:output_type -> google.protobuf.Empty + 78, // 83: 
spec.proto.extension.v1.s3.ObjectStorageService.UpdateUploadBandwidthRateLimit:output_type -> google.protobuf.Empty + 60, // 84: spec.proto.extension.v1.s3.ObjectStorageService.AppendObject:output_type -> spec.proto.extension.v1.s3.AppendObjectOutput + 30, // 85: spec.proto.extension.v1.s3.ObjectStorageService.RestoreObject:output_type -> spec.proto.extension.v1.s3.RestoreObjectOutput + 60, // [60:86] is the sub-list for method output_type + 34, // [34:60] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name +} + +func init() { file_oss_proto_init() } +func file_oss_proto_init() { + if File_oss_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_oss_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutObjectInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutObjectOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutObjectTaggingInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutObjectTaggingOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectTaggingInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectTaggingOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectTaggingInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectTaggingOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopySource); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyObjectInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyObjectResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyObjectOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Delete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectIdentifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectsInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeletedObject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectsOutput); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectsInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectsOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Owner); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectCannedAclInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectCannedAclOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutObjectCannedAclInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutObjectCannedAclOutput); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreObjectInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreObjectOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateMultipartUploadInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateMultipartUploadOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadPartInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadPartOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadPartCopyInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*CopyPartResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadPartCopyOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompletedPart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompletedMultipartUpload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompleteMultipartUploadInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompleteMultipartUploadOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AbortMultipartUploadInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AbortMultipartUploadOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_oss_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMultipartUploadsInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Initiator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultipartUpload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMultipartUploadsOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectVersionsInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteMarkerEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectVersionsOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_oss_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeadObjectInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeadObjectOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IsObjectExistInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IsObjectExistOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignURLInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignURLOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateBandwidthRateLimitInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendObjectInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendObjectOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPartsInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Part); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oss_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPartsOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oss_proto_rawDesc, + NumEnums: 0, + NumMessages: 78, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_oss_proto_goTypes, + DependencyIndexes: file_oss_proto_depIdxs, + MessageInfos: file_oss_proto_msgTypes, + }.Build() + File_oss_proto = out.File + file_oss_proto_rawDesc = nil + file_oss_proto_goTypes = nil + file_oss_proto_depIdxs = nil +} diff --git a/spec/proto/extension/v1/s3/oss.proto b/spec/proto/extension/v1/s3/oss.proto new file mode 100644 index 0000000000..9d46e2113a --- /dev/null +++ b/spec/proto/extension/v1/s3/oss.proto @@ -0,0 +1,1422 @@ +//The file defined base on s3 protocol, to get an in-depth walkthrough of this file, see: +//https://docs.aws.amazon.com/s3/index.html +//https://github.com/aws/aws-sdk-go-v2 +syntax = "proto3"; + +package spec.proto.extension.v1.s3; + 
+import "google/protobuf/empty.proto"; + +option go_package = "mosn.io/layotto/spec/proto/extension/v1/s3;s3"; + +// ObjectStorageService +service ObjectStorageService{ + //Object CRUD API + //Adds an object to a bucket. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + rpc PutObject(stream PutObjectInput) returns(PutObjectOutput){} + //Retrieves objects. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + rpc GetObject(GetObjectInput) returns (stream GetObjectOutput){} + //Delete objects. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + rpc DeleteObject(DeleteObjectInput) returns (DeleteObjectOutput){} + //Creates a copy of an object that is already stored in oss server. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CopyObject.html + rpc CopyObject(CopyObjectInput) returns(CopyObjectOutput){} + //Delete multiple objects from a bucket. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_DeleteObjects.html + rpc DeleteObjects(DeleteObjectsInput) returns(DeleteObjectsOutput){} + //Returns some or all (up to 1,000) of the objects in a bucket. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_ListObjects.html + rpc ListObjects(ListObjectsInput) returns(ListObjectsOutput){} + //The HEAD action retrieves metadata from an object without returning the object itself. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html + rpc HeadObject(HeadObjectInput) returns(HeadObjectOutput){} + //This action used to check if the file exists. + rpc IsObjectExist(IsObjectExistInput) returns(IsObjectExistOutput){} + + //Object Tagging API + //Sets the supplied tag-set to an object that already exists in a bucket. 
+ //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html + rpc PutObjectTagging(PutObjectTaggingInput) returns (PutObjectTaggingOutput){} + //Removes the entire tag set from the specified object. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html + rpc DeleteObjectTagging(DeleteObjectTaggingInput) returns(DeleteObjectTaggingOutput){} + //Returns the tag-set of an object. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_GetObjectTagging.html + rpc GetObjectTagging(GetObjectTaggingInput) returns(GetObjectTaggingOutput){} + + //Object ACL Operation API + //Because different manufacturers have different definitions for ACL types, at the same time, + //the actual permissions corresponding to ACLs with the same name may be different between different manufacturers. + //Therefore, applications using this interface will greatly increase the complexity of transplantation. + //In general, this interface is not recommended, especially if your application has portability requirements. + //For the types and permission definitions supported by ACL, please refer to the specific manufacturer's definition, e.g.: + //AWS: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL + //tencentcloud: https://intl.cloud.tencent.com/document/product/436/30583 + //aliyun: https://www.alibabacloud.com/help/en/object-storage-service/latest/access-and-control-acl + + //Returns object canned acl. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL + rpc GetObjectCannedAcl(GetObjectCannedAclInput) returns(GetObjectCannedAclOutput){} + //Set object canned acl. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL + rpc PutObjectCannedAcl(PutObjectCannedAclInput) returns(PutObjectCannedAclOutput){} + + //Object Multipart Operation API + //Initiates a multipart upload and returns an upload ID. 
+ //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CreateMultipartUpload.html + rpc CreateMultipartUpload(CreateMultipartUploadInput) returns(CreateMultipartUploadOutput){} + //Uploads a part in a multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + rpc UploadPart(stream UploadPartInput) returns(UploadPartOutput){} + //Uploads a part by copying data from an existing object as data source. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + rpc UploadPartCopy(UploadPartCopyInput) returns(UploadPartCopyOutput){} + //Completes a multipart upload by assembling previously uploaded parts. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + rpc CompleteMultipartUpload(CompleteMultipartUploadInput) returns(CompleteMultipartUploadOutput){} + //This action aborts a multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + rpc AbortMultipartUpload(AbortMultipartUploadInput) returns(AbortMultipartUploadOutput){} + //This action lists in-progress multipart uploads. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + rpc ListMultipartUploads(ListMultipartUploadsInput) returns(ListMultipartUploadsOutput){} + //Lists the parts that have been uploaded for a specific multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + rpc ListParts(ListPartsInput) returns(ListPartsOutput){} + + //Returns metadata about all versions of the objects in a bucket. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html + rpc ListObjectVersions(ListObjectVersionsInput) returns(ListObjectVersionsOutput){} + + //A presigned URL gives you access to the object identified in the URL, provided that the creator of the presigned URL has permissions to access that object. 
+ //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/PresignedUrlUploadObject.html + rpc SignURL(SignURLInput) returns(SignURLOutput){} + + //This action used to set download bandwidth limit speed. + //Refer https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2106 + rpc UpdateDownloadBandwidthRateLimit(UpdateBandwidthRateLimitInput) returns(google.protobuf.Empty){} + //This action used to set upload bandwidth limit speed. + //Refer https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2096 + rpc UpdateUploadBandwidthRateLimit(UpdateBandwidthRateLimitInput) returns(google.protobuf.Empty){} + + //This action is used to append object. + //Refer https://help.aliyun.com/document_detail/31981.html or https://github.com/minio/minio-java/issues/980 + rpc AppendObject(stream AppendObjectInput) returns(AppendObjectOutput){} + + //Restores an archived copy of an object back. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_RestoreObject.html + rpc RestoreObject(RestoreObjectInput) returns(RestoreObjectOutput){} +} + +// GetObjectInput +message GetObjectInput { + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Key of the object to get + // This member is required + string key = 3; + // The account ID of the expected bucket owner + string expected_bucket_owner = 4; + // Return the object only if its entity tag (ETag) is the same as the one specified + string if_match = 5; + // Return the object only if it has been modified since the specified time + int64 if_modified_since = 6; + // Return the object only if its entity tag (ETag) is different from the one specified + string if_none_match = 7; + // Return the object only if it has not been modified since the specified time + int64 if_unmodified_since = 8; + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. 
Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + int64 part_number = 9; + // Downloads the specified range bytes of an object + // start is used to specify the location where the file starts + int64 start = 10; + // end is used to specify the location where the file end + int64 end = 11; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 12; + // Sets the Cache-Control header of the response. + string response_cache_control = 13; + // Sets the Content-Disposition header of the response + string response_content_disposition = 14; + // Sets the Content-Encoding header of the response + string response_content_encoding = 15; + // Sets the Content-Language header of the response + string response_content_language = 16; + // Sets the Content-Type header of the response + string response_content_type = 17; + // Sets the Expires header of the response + string response_expires = 18; + // Specifies the algorithm to use to when decrypting the object (for example,AES256) + string sse_customer_algorithm = 19; + // Specifies the customer-provided encryption key for Amazon S3 used to encrypt the + // data. This value is used to decrypt the object when recovering it and must match + // the one used when storing the data. The key must be appropriate for use with the + // algorithm specified in the x-amz-server-side-encryption-customer-algorithm header + string sse_customer_key = 20; + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. 
+ string sse_customer_key_md5 = 21; + // VersionId used to reference a specific version of the object + string version_id = 22; + // Specify Accept-Encoding, aws not supported now + string accept_encoding = 23; + // Specify the signed url of object, user can get object with signed url without ak、sk + string signed_url = 24; +} + +// GetObjectOutput +message GetObjectOutput { + // Object data. + bytes body = 1; + // Specifies caching behavior along the request/reply chain. + string cache_control = 2; + // Specifies presentational information for the object. + string content_disposition = 3; + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + string content_encoding = 4; + // The language the content is in. + string content_language = 5; + // Size of the body in bytes. + int64 content_length = 6; + // The portion of the object returned in the response. + string content_range = 7; + // A standard MIME type describing the format of the object data. + string content_type = 8; + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + bool delete_marker = 9; + // An entity tag (ETag) is an opaque identifier assigned by a web server to a + // specific version of a resource found at a URL. + string etag = 10; + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is + // URL-encoded. + string expiration = 11; + // The date and time at which the object is no longer cacheable. + string expires = 12; + // Creation date of the object. + int64 last_modified = 13; + // Version of the object. 
+ string version_id = 14; + // The number of tags, if any, on the object. + int64 tag_count = 15; + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. + string storage_class = 16; + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + int64 parts_count = 17; + // A map of metadata to store with the object in S3. + // Map keys will be normalized to lower-case. + map metadata = 18; +} + +// PutObjectInput +message PutObjectInput{ + // Required. The name of oss store. + string store_name = 1; + // Object data. + bytes body = 2; + // The bucket name to which the PUT action was initiated + // This member is required. + string bucket = 3; + // Object key for which the PUT action was initiated. + // This member is required. + string key = 4; + // The canned ACL to apply to the object,different oss provider have different acl type + string acl = 5; + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + bool bucket_key_enabled = 6; + // Can be used to specify caching behavior along the request/reply chain. + string cache_control = 7; + // Specifies presentational information for the object. For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + string content_disposition = 8; + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). 
+ string content_encoding = 9; + // The date and time at which the object is no longer cacheable. For more + // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + int64 expires = 10; + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + string server_side_encryption = 11; + // Specify the signed url of object, user can put object with signed url without ak、sk + string signed_url = 12; + // A map of metadata to store with the object in S3. + map meta = 13; + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + map tagging = 14; +} + +// PutObjectOutput +message PutObjectOutput{ + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + bool bucket_key_enabled = 1; + // Entity tag for the uploaded object. + string etag = 2; + // If the expiration is configured for the object + string expiration = 3; + // If present, indicates that the requester was successfully charged for the request. + string request_charged = 4; + // Version of the object. + string version_id = 5; +} + +// DeleteObjectInput +message DeleteObjectInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name to which the DEL action was initiated + // This member is required. + string bucket = 2; + // Object key for which the DEL action was initiated. + // This member is required. + string key = 3; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 4; + // VersionId used to reference a specific version of the object. + string version_id = 5; +} + +// DeleteObjectOutput +message DeleteObjectOutput{ + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. 
+ bool delete_marker = 1; + // If present, indicates that the requester was successfully charged for the + // request. + string request_charged = 2; + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + string version_id = 3; +} + +// PutObjectTaggingInput +message PutObjectTaggingInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required. + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // Container for the TagSet and Tag elements + map tags = 4; + // The versionId of the object that the tag-set will be added to. + string version_id = 5; +} + +// PutObjectTaggingOutput +message PutObjectTaggingOutput{ + // The versionId of the object the tag-set was added to. + string version_id = 1; + // Metadata pertaining to the operation's result. + map result_metadata = 2; +} + +// DeleteObjectTaggingInput +message DeleteObjectTaggingInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the objects from which to remove the tags. + string bucket = 2; + // The key that identifies the object in the bucket from which to remove all tags. + // This member is required. + string key = 3; + // The versionId of the object that the tag-set will be removed from. + string version_id = 4; + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + string expected_bucket_owner = 5; +} + +// DeleteObjectTaggingOutput +message DeleteObjectTaggingOutput{ + // The versionId of the object the tag-set was removed from. + string version_id = 1; + // Metadata pertaining to the operation's result. + map result_metadata = 2; +} + +// GetObjectTaggingInput +message GetObjectTaggingInput{ + // Required. The name of oss store. 
+ string store_name = 1; + // The bucket name containing the object for which to get the tagging information. + // This member is required. + string bucket = 2; + // Object key for which to get the tagging information. + // This member is required. + string key = 3; + // The versionId of the object for which to get the tagging information. + string version_id = 4; + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + string expected_bucket_owner = 5; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 6; +} + +// GetObjectTaggingOutput +message GetObjectTaggingOutput{ + // Contains the tag set. + // This member is required. + map tags = 1; + // The versionId of the object for which you got the tagging information. + string version_id = 2; + // Metadata pertaining to the operation's result. + map result_metadata = 3; +} + +// CopySource +message CopySource{ + // source object bucket name + string copy_source_bucket = 1; + // source object name + string copy_source_key = 2; + // source object version + string copy_source_version_id = 3; +} + +// CopyObjectInput +message CopyObjectInput{ + // Required. The name of oss store. + string store_name = 1; + // The name of the destination bucket. When using this action with an access point + // This member is required. + string bucket = 2; + // The key of the destination object. + // This member is required. + string key = 3; + // CopySource + CopySource copy_source = 4; + // The tag-set for the object destination object this value must be used in + // conjunction with the TaggingDirective. The tag-set must be encoded as URL Query + // parameters. + map tagging = 5; + // The date and time at which the object is no longer cacheable. 
+ int64 expires = 6; + // Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. + string metadata_directive = 7; + // A map of metadata to store with the object in S3. + map metadata = 8; +} + +// CopyObjectResult +message CopyObjectResult{ + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. + string etag = 1; + // Creation date of the object. + int64 last_modified = 2; +} + +// CopyObjectOutput +message CopyObjectOutput{ + // Container for all response elements. + CopyObjectResult copy_object_result = 1; + // Version ID of the newly created copy. + string version_id = 2; + // If the object expiration is configured, the response includes this header. + string expiration = 3; +} + +// Delete +message Delete{ + // ObjectIdentifier + repeated ObjectIdentifier objects = 1; + // Element to enable quiet mode for the request. When you add this element, you + // must set its value to true. + bool quiet = 2; +} + +// ObjectIdentifier +message ObjectIdentifier{ + // Key name of the object. + // This member is required. + string key = 1; + // VersionId for the specific version of the object to delete. + string version_id = 2; +} + +// DeleteObjectsInput +message DeleteObjectsInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Delete objects + Delete delete = 3; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 4; +} + +// DeletedObject +message DeletedObject{ + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. 
+ bool delete_marker = 1; + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header is + // the version ID of the object version deleted. + string delete_marker_version_id = 2; + // The name of the deleted object. + string key = 3; + // The version ID of the deleted object. + string version_id = 4; +} + +// DeleteObjectsOutput +message DeleteObjectsOutput{ + // DeletedObject + repeated DeletedObject deleted = 1; +} + +// ListObjectsInput +message ListObjectsInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // A delimiter is a character you use to group keys. + string delimiter = 3; + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters with an + // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. + string encoding_type = 4; + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + string expected_bucket_owner = 5; + // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. Marker can be any key in the bucket. + string marker = 6; + // Sets the maximum number of keys returned in the response. By default the action + // returns up to 1,000 key names. The response might contain fewer keys but will + // never contain more. + int32 maxKeys = 7; + // Limits the response to keys that begin with the specified prefix. 
+ string prefix = 8; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 9; +} + +// ListObjectsOutput +message ListObjectsOutput{ + // CommonPrefixes + repeated string common_prefixes = 1; + // Objects contents + repeated Object contents = 2; + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element in the + // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in + // the response. Each rolled-up result counts as only one return against the + // MaxKeys value. + string delimiter = 3; + // Encoding type used by Amazon S3 to encode object keys in the response. + string encoding_type = 4; + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + bool is_truncated = 5; + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. + string marker = 6; + // The maximum number of keys returned in the response body. + int32 max_keys = 7; + // The bucket name. + string name = 8; + // When response is truncated (the IsTruncated element value in the response is + // true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. + string next_marker = 9; + // Keys that begin with the indicated prefix. + string prefix = 10; +} + +// Owner +message Owner{ + // Owner display name + string display_name = 1; + // Owner id + string id = 2; +} + +// Object +message Object{ + // The entity tag is a hash of the object + string etag = 1; + // The name that you assign to an object. You use the object key to retrieve the + // object. + string key = 2; + // Creation date of the object. 
+ int64 last_modified = 3; + // The owner of the object + Owner owner = 4; + // Size in bytes of the object + int64 size = 5; + // The class of storage used to store the object. + string storage_class = 6; +} + +// GetObjectCannedAclInput +message GetObjectCannedAclInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // VersionId used to reference a specific version of the object + string version_id = 4; +} + +// GetObjectCannedAclOutput +message GetObjectCannedAclOutput{ + // Object CannedACL + string canned_acl = 1; + // Owner + Owner owner = 2; + // If present, indicates that the requester was successfully charged for the + // request. + string request_charged = 3; +} + +// PutObjectCannedAclInput +message PutObjectCannedAclInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // The canned ACL to apply to the object + string acl = 4; + // VersionId used to reference a specific version of the object. + string version_id = 5; +} + +// PutObjectCannedAclOutput +message PutObjectCannedAclOutput{ + // Request charged + string request_charged = 1; +} + +// RestoreObjectInput +message RestoreObjectInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // VersionId used to reference a specific version of the object. + string version_id = 5; +} + +// RestoreObjectOutput +message RestoreObjectOutput{ + // If present, indicates that the requester was successfully charged for the + // request. 
+ string request_charged = 1; + // Indicates the path in the provided S3 output location where Select results will + // be restored to. + string restore_output_path = 2; +} + +// CreateMultipartUploadInput +message CreateMultipartUploadInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // The canned ACL to apply to the object. This action is not supported by Amazon S3 + // on Outposts. + string acl = 4; + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true + // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. + bool bucket_key_enabled = 5; + // Specifies caching behavior along the request/reply chain + string cache_control = 6; + // Specifies presentational information for the object + string content_disposition = 7; + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + string content_encoding = 8; + // The language the content is in. + string content_language = 9; + // A standard MIME type describing the format of the object data. + string content_type = 10; + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + string expected_bucket_owner = 11; + // The date and time at which the object is no longer cacheable. + int64 expires = 12; + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 
This + // action is not supported by Amazon S3 on Outposts. + string grant_full_control = 13; + // Allows grantee to read the object data and its metadata. This action is not + // supported by Amazon S3 on Outposts. + string grant_read = 14; + // Allows grantee to read the object ACL. This action is not supported by Amazon S3 + // on Outposts. + string grant_read_acp = 15; + // Allows grantee to write the ACL for the applicable object. This action is not + // supported by Amazon S3 on Outposts. + string grant_write_acp = 16; + // A map of metadata to store with the object + map meta_data = 17; + // Specifies whether you want to apply a legal hold to the uploaded object + string object_lock_legal_hold_status = 18; + // Specifies the Object Lock mode that you want to apply to the uploaded object + string object_lock_mode = 19; + // Specifies the date and time when you want the Object Lock to expire + int64 object_lock_retain_until_date = 20; + // Confirms that the requester knows that they will be charged for the request + string request_payer = 21; + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + string sse_customer_algorithm = 22; + // Specifies the customer-provided encryption key to use in encrypting data + string sse_customer_key = 23; + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 + string sse_customer_key_md5 = 24; + // Specifies the Amazon Web Services KMS Encryption Context to use for object encryption + string sse_kms_encryption_context = 25; + // Specifies the ID of the symmetric customer managed key to use for object encryption + string sse_kms_key_id = 26; + // The server-side encryption algorithm used when storing this object + string server_side_encryption = 27; + // By default, oss store uses the STANDARD Storage Class to store newly created objects + string storage_class = 28; + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. 
+ map tagging = 29; + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. + string website_redirect_location = 30; +} + +// CreateMultipartUploadOutput +message CreateMultipartUploadOutput{ + // The bucket name containing the object + // This member is required + string bucket = 1; + // Name of the object key. + // This member is required. + string key = 2; + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object name + // in the request, the response includes this header + int64 abort_date = 3; + // It identifies the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + string abort_rule_id = 4; + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + bool bucket_key_enabled = 5; + // If present, indicates that the requester was successfully charged for the + // request. + string request_charged = 6; + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + string sse_customer_algorithm = 7; + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + string sse_customer_key_md5 = 8; + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. 
+ string sse_kms_encryption_context = 9; + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + string sse_kms_key_id = 10; + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + string server_side_encryption = 11; + // ID for the initiated multipart upload. + string upload_id = 12; +} + +// UploadPartInput +message UploadPartInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // Object data. + bytes body = 4; + // Size of the body in bytes. This parameter is useful when the size of the body + // cannot be determined automatically. + int64 content_length = 5; + // The base64-encoded 128-bit MD5 digest of the part data. + string content_md5 = 6; + // The account ID of the expected bucket owner + string expected_bucket_owner = 7; + // Part number of part being uploaded. This is a positive integer between 1 and 10,000. + // This member is required. + int32 part_number = 8; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 9; + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + string sse_customer_algorithm = 10; + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data + string sse_customer_key = 11; + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + string sse_customer_key_md5 = 12; + // Upload ID identifying the multipart upload whose part is being uploaded. + // This member is required. 
+ string upload_id = 13; +} + +// UploadPartOutput +message UploadPartOutput{ + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + bool bucket_key_enabled = 1; + // Entity tag for the uploaded object. + string etag = 2; + // If present, indicates that the requester was successfully charged for the + // request. + string request_charged = 3; + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + string sse_customer_algorithm = 4; + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + string sse_customer_key_md5 = 5; + // Specifies the ID of the symmetric customer managed key to use for object encryption + string sse_kms_key_id = 6; + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + string server_side_encryption = 7; +} + +// UploadPartCopyInput +message UploadPartCopyInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // CopySource + CopySource copy_source = 4; + // Part number of part being copied. This is a positive integer between 1 and 10,000. + // This member is required. + int32 part_number = 5; + // Upload ID identifying the multipart upload whose part is being copied. + // This member is required. + string upload_id = 6; + // The range of bytes to copy from the source object.bytes=start_position-part_size + int64 start_position = 7; + // Part size + int64 part_size = 8; +} + +// CopyPartResult +message CopyPartResult{ + // Entity tag of the object. 
+ string etag = 1; + // Last modified time + int64 last_modified = 2; +} + +// UploadPartCopyOutput +message UploadPartCopyOutput{ + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + bool bucket_key_enabled = 1; + // Container for all response elements. + CopyPartResult copy_part_result = 2; + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + string copy_source_version_id = 3; + // If present, indicates that the requester was successfully charged for the + // request. + string request_charged = 4; + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + string sse_customer_algorithm = 5; + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + string sse_customer_key_md5 = 6; + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + string sse_kms_key_id = 7; + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + string server_side_encryption = 8; +} + +// CompletedPart +message CompletedPart{ + // Entity tag returned when the part was uploaded. + string etag = 1; + // Part number that identifies the part. This is a positive integer between 1 and + // 10,000. + int32 part_number = 2; +} + +// CompletedMultipartUpload +message CompletedMultipartUpload{ + // Array of CompletedPart data types. + repeated CompletedPart parts = 1; +} + +// CompleteMultipartUploadInput +message CompleteMultipartUploadInput{ + // Required. The name of oss store. 
+ string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // ID for the initiated multipart upload. + // This member is required. + string upload_id = 4; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 5; + // Expected bucket owner + string expected_bucket_owner = 6; + // The container for the multipart upload request information. + CompletedMultipartUpload multipart_upload = 7; +} + +// CompleteMultipartUploadOutput +message CompleteMultipartUploadOutput{ + // The bucket name containing the object + // This member is required + string bucket = 1; + // Name of the object key. + // This member is required. + string key = 2; + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + bool bucket_key_enabled = 3; + // Entity tag that identifies the newly created object's data + string etag = 4; + // If the object expiration is configured, this will contain the expiration date + // (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + string expiration = 5; + // The URI that identifies the newly created object. + string location = 6; + // If present, indicates that the requester was successfully charged for the + // request. + string request_charged = 7; + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + string sse_kms_keyId = 8; + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + string server_side_encryption = 9; + // Version ID of the newly created object, in case the bucket has versioning turned + // on. 
+ string version_id = 10; +} + +// AbortMultipartUploadInput +message AbortMultipartUploadInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // The account ID of the expected bucket owner + string expected_bucket_owner = 4; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 5; + // Upload ID that identifies the multipart upload. + // This member is required. + string upload_id = 6; +} + +// AbortMultipartUploadOutput +message AbortMultipartUploadOutput{ + // If present, indicates that the requester was successfully charged for the request. + string request_charged = 1; +} + +// ListMultipartUploadsInput +message ListMultipartUploadsInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Character you use to group keys. All keys that contain the same string between + // the prefix, if specified, and the first occurrence of the delimiter after the + // prefix are grouped under a single result element, CommonPrefixes. If you don't + // specify the prefix parameter, then the substring starts at the beginning of the + // key. The keys that are grouped under CommonPrefixes result element are not + // returned elsewhere in the response. + string delimiter = 3; + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + string encoding_type = 4; + // The account ID of the expected bucket owner + string expected_bucket_owner = 5; + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. 
If upload-id-marker is not specified, only the + // keys lexicographically greater than the specified key-marker will be included in + // the list. If upload-id-marker is specified, any multipart uploads for a key + // equal to the key-marker might also be included, provided those multipart uploads + // have upload IDs lexicographically greater than the specified upload-id-marker. + string key_marker = 6; + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the + // response body. 1,000 is the maximum number of uploads that can be returned in a + // response. + int64 max_uploads = 7; + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping of + // keys. (You can think of using prefix to make groups in the same way you'd use a + // folder in a file system.) + string prefix = 8; + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter is + // ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + string upload_id_marker = 9; +} + +// Initiator +message Initiator{ + // Initiator name + string display_name = 1; + // Initiator id + string id = 2; +} + +// MultipartUpload +message MultipartUpload{ + // Date and time at which the multipart upload was initiated. + int64 initiated = 1; + // Identifies who initiated the multipart upload. + Initiator initiator = 2; + // Name of the object key. + // This member is required. + string key = 3; + // Specifies the owner of the object that is part of the multipart upload. + Owner owner = 4; + // The class of storage used to store the object. + string storage_class = 5; + // Upload ID that identifies the multipart upload. 
+ string upload_id = 6; +} + +// ListMultipartUploadsOutput +message ListMultipartUploadsOutput{ + // The bucket name containing the object + // This member is required + string bucket = 1; + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. + repeated string common_prefixes = 2; + // Contains the delimiter you specified in the request. If you don't specify a + // delimiter in your request, this element is absent from the response. + string delimiter = 3; + // Encoding type used by Amazon S3 to encode object keys in the response. + string encoding_type = 4; + // Indicates whether the returned list of multipart uploads is truncated. A value + // of true indicates that the list was truncated. The list can be truncated if the + // number of multipart uploads exceeds the limit allowed or specified by max + // uploads. + bool is_truncated = 5; + // The key at or after which the listing began. + string key_marker = 6; + // Maximum number of multipart uploads that could have been included in the + // response. + int32 max_uploads = 7; + // When a list is truncated, this element specifies the value that should be used + // for the key-marker request parameter in a subsequent request. + string next_key_marker = 8; + // When a list is truncated, this element specifies the value that should be used + // for the upload-id-marker request parameter in a subsequent request. + string next_upload_id_marker = 9; + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + string prefix = 10; + // Upload ID after which listing began. + string upload_id_marker = 11; + // Container for elements related to a particular multipart upload. A response can + // contain zero or more Upload elements. 
+ repeated MultipartUpload uploads = 12; +} + +// ListObjectVersionsInput +message ListObjectVersionsInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // A delimiter is a character that you specify to group keys. All keys that contain + // the same string between the prefix and the first occurrence of the delimiter are + // grouped under a single result element in CommonPrefixes. These groups are + // counted as one result against the max-keys limitation. These keys are not + // returned elsewhere in the response. + string delimiter = 3; + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + string encoding_type = 4; + // The account ID of the expected bucket owner + string expected_bucket_owner = 5; + // Specifies the key to start with when listing objects in a bucket. + string key_marker = 6; + // Sets the maximum number of keys returned in the response. By default the action + // returns up to 1,000 key names. The response might contain fewer keys but will + // never contain more. If additional keys satisfy the search criteria, but were not + // returned because max-keys was exceeded, the response contains true. To return + // the additional keys, see key-marker and version-id-marker. + int64 max_keys = 7; + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings of + // keys. (You can think of using prefix to make groups in the same way you'd use a + // folder in a file system.) You can use prefix with delimiter to roll up numerous + // objects into a single result under CommonPrefixes. + string prefix = 8; + // Specifies the object version you want to start listing from. 
+ string version_id_marker = 9; +} + +// DeleteMarkerEntry +message DeleteMarkerEntry{ + // Specifies whether the object is (true) or is not (false) the latest version of + // an object. + bool is_latest = 1; + // Name of the object key. + // This member is required. + string key = 2; + // Date and time the object was last modified. + int64 last_modified = 3; + // Owner + Owner owner = 4; + // Version ID of an object. + string version_id = 5; +} + +// ObjectVersion +message ObjectVersion{ + // The entity tag is an MD5 hash of that version of the object. + string etag = 1; + // Specifies whether the object is (true) or is not (false) the latest version of + // an object. + bool is_latest = 2; + // Name of the object key. + // This member is required. + string key = 3; + // Date and time the object was last modified. + int64 last_modified = 4; + // Specifies the owner of the object. + Owner owner = 5; + // Size in bytes of the object. + int64 size = 6; + // The class of storage used to store the object. + string storage_class = 7; + // Version ID of an object. + string version_id = 8; +} + +// ListObjectVersionsOutput +message ListObjectVersionsOutput{ + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + repeated string common_prefixes = 1; + // Container for an object that is a delete marker. + repeated DeleteMarkerEntry delete_markers = 2; + // The delimiter grouping the included keys. + string delimiter = 3; + // Encoding type used by Amazon S3 to encode object key names in the XML response. + string encoding_type = 4; + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria + bool is_truncated = 5; + // Marks the last key returned in a truncated response. + string key_marker = 6; + // Specifies the maximum number of objects to return + int64 max_keys = 7; + // The bucket name. 
+ string name = 8; + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria + string next_key_marker = 9; + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. + string next_version_id_marker = 10; + // Selects objects that start with the value supplied by this parameter. + string prefix = 11; + // Marks the last version of the key returned in a truncated response. + string version_id_marker = 12; + // Container for version information. + repeated ObjectVersion versions = 13; +} + +// HeadObjectInput +message HeadObjectInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // To retrieve the checksum, this parameter must be enabled + string checksum_mode = 4; + // The account ID of the expected bucket owner + string expected_bucket_owner = 5; + // Return the object only if its entity tag (ETag) is the same as the one + // specified; otherwise, return a 412 (precondition failed) error. + string if_match = 6; + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. + int64 if_modified_since = 7; + // Return the object only if its entity tag (ETag) is different from the one + // specified + string if_none_match = 8; + // Return the object only if it has not been modified since the specified time; + int64 if_unmodified_since = 9; + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. + // Useful querying about the size of the part and the number of parts in this + // object. 
+ int32 part_number = 10; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 11; + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + string sse_customer_algorithm = 12; + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data + string sse_customer_key = 13; + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + string sse_customer_key_md5 = 14; + // VersionId used to reference a specific version of the object. + string version_id = 15; + // Return object details meta + bool with_details = 16; +} + +// HeadObjectOutput +message HeadObjectOutput{ + // Metadata pertaining to the operation's result. + map result_metadata = 1; +} + +// IsObjectExistInput +message IsObjectExistInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // Object version id + string version_id = 4; +} + +// IsObjectExistOutput +message IsObjectExistOutput{ + // Object exist or not + bool file_exist = 1; +} + +// SignURLInput +message SignURLInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // the method for sign url, eg. GET、POST + string method = 4; + // expire time of the sign url + int64 expired_in_sec = 5; +} + +// SignURLOutput +message SignURLOutput{ + // Object signed url + string signed_url = 1; +} + +// UpdateBandwidthRateLimitInput +message UpdateBandwidthRateLimitInput{ + // Required. The name of oss store. + string store_name = 1; + // The average upload/download bandwidth rate limit in bits per second. 
+ int64 average_rate_limit_in_bits_per_sec = 2; + // Resource name of gateway + string gateway_resource_name = 3; +} + +// AppendObjectInput +message AppendObjectInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // Object content + bytes body = 4; + // Append start position + int64 position = 5; + // Object ACL + string acl = 6; + // Sets the Cache-Control header of the response. + string cache_control = 7; + // Sets the Content-Disposition header of the response + string content_disposition = 8; + // Sets the Content-Encoding header of the response + string content_encoding = 9; + // The base64-encoded 128-bit MD5 digest of the part data. + string content_md5 = 10; + // Sets the Expires header of the response + int64 expires = 11; + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. + string storage_class = 12; + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + string server_side_encryption = 13; + // Object metadata + string meta = 14; + // Object tags + map tags = 15; + +} + +// AppendObjectOutput +message AppendObjectOutput{ + // Next append position + int64 append_position = 1; +} + +// ListPartsInput +message ListPartsInput{ + // Required. The name of oss store. + string store_name = 1; + // The bucket name containing the object + // This member is required + string bucket = 2; + // Name of the object key. + // This member is required. + string key = 3; + // The account ID of the expected bucket owner + string expected_bucket_owner = 4; + // Sets the maximum number of parts to return + int64 max_parts = 5; + // Specifies the part after which listing should begin. 
Only parts with higher part + // numbers will be listed. + int64 part_number_marker = 6; + // Confirms that the requester knows that they will be charged for the request. + string request_payer = 7; + // Upload ID identifying the multipart upload whose parts are being listed. + string upload_id = 8; +} + +// Part +message Part{ + // Part Etag + string etag = 1; + // Last modified time + int64 last_modified = 2; + // Part number + int64 part_number = 3; + // Part size + int64 size = 4; +} + +// ListPartsOutput +message ListPartsOutput{ + // The bucket name containing the object + // This member is required + string bucket = 1; + // Name of the object key. + // This member is required. + string key = 2; + // Upload ID identifying the multipart upload whose parts are being listed. + string upload_id = 3; + // When a list is truncated, this element specifies the last part in the list, as + // well as the value to use for the part-number-marker request parameter in a + // subsequent request. + string next_part_number_marker = 4; + // Maximum number of parts that were allowed in the response. + int64 max_parts = 5; + // Indicates whether the returned list of parts is truncated. A true value + // indicates that the list was truncated. A list can be truncated if the number of + // parts exceeds the limit returned in the MaxParts element. + bool is_truncated = 6; + // Container for elements related to a particular part. A response can contain zero + // or more Part elements. + repeated Part parts = 7; +} \ No newline at end of file diff --git a/spec/proto/extension/v1/s3/oss_grpc.pb.go b/spec/proto/extension/v1/s3/oss_grpc.pb.go new file mode 100644 index 0000000000..c071825f24 --- /dev/null +++ b/spec/proto/extension/v1/s3/oss_grpc.pb.go @@ -0,0 +1,1243 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.17.3 +// source: oss.proto + +package s3 + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ObjectStorageServiceClient is the client API for ObjectStorageService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ObjectStorageServiceClient interface { + //Object CRUD API + //Adds an object to a bucket. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + PutObject(ctx context.Context, opts ...grpc.CallOption) (ObjectStorageService_PutObjectClient, error) + //Retrieves objects. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + GetObject(ctx context.Context, in *GetObjectInput, opts ...grpc.CallOption) (ObjectStorageService_GetObjectClient, error) + //Delete objects. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + DeleteObject(ctx context.Context, in *DeleteObjectInput, opts ...grpc.CallOption) (*DeleteObjectOutput, error) + //Creates a copy of an object that is already stored in oss server. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CopyObject.html + CopyObject(ctx context.Context, in *CopyObjectInput, opts ...grpc.CallOption) (*CopyObjectOutput, error) + //Delete multiple objects from a bucket. 
+ //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_DeleteObjects.html + DeleteObjects(ctx context.Context, in *DeleteObjectsInput, opts ...grpc.CallOption) (*DeleteObjectsOutput, error) + //Returns some or all (up to 1,000) of the objects in a bucket. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_ListObjects.html + ListObjects(ctx context.Context, in *ListObjectsInput, opts ...grpc.CallOption) (*ListObjectsOutput, error) + //The HEAD action retrieves metadata from an object without returning the object itself. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html + HeadObject(ctx context.Context, in *HeadObjectInput, opts ...grpc.CallOption) (*HeadObjectOutput, error) + //This action used to check if the file exists. + IsObjectExist(ctx context.Context, in *IsObjectExistInput, opts ...grpc.CallOption) (*IsObjectExistOutput, error) + //Object Tagging API + //Sets the supplied tag-set to an object that already exists in a bucket. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html + PutObjectTagging(ctx context.Context, in *PutObjectTaggingInput, opts ...grpc.CallOption) (*PutObjectTaggingOutput, error) + //Removes the entire tag set from the specified object. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html + DeleteObjectTagging(ctx context.Context, in *DeleteObjectTaggingInput, opts ...grpc.CallOption) (*DeleteObjectTaggingOutput, error) + //Returns the tag-set of an object. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_GetObjectTagging.html + GetObjectTagging(ctx context.Context, in *GetObjectTaggingInput, opts ...grpc.CallOption) (*GetObjectTaggingOutput, error) + //Returns object canned acl. 
+ //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL + GetObjectCannedAcl(ctx context.Context, in *GetObjectCannedAclInput, opts ...grpc.CallOption) (*GetObjectCannedAclOutput, error) + //Set object canned acl. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL + PutObjectCannedAcl(ctx context.Context, in *PutObjectCannedAclInput, opts ...grpc.CallOption) (*PutObjectCannedAclOutput, error) + //Object Multipart Operation API + //Initiates a multipart upload and returns an upload ID. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CreateMultipartUpload.html + CreateMultipartUpload(ctx context.Context, in *CreateMultipartUploadInput, opts ...grpc.CallOption) (*CreateMultipartUploadOutput, error) + //Uploads a part in a multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + UploadPart(ctx context.Context, opts ...grpc.CallOption) (ObjectStorageService_UploadPartClient, error) + //Uploads a part by copying data from an existing object as data source. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + UploadPartCopy(ctx context.Context, in *UploadPartCopyInput, opts ...grpc.CallOption) (*UploadPartCopyOutput, error) + //Completes a multipart upload by assembling previously uploaded parts. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + CompleteMultipartUpload(ctx context.Context, in *CompleteMultipartUploadInput, opts ...grpc.CallOption) (*CompleteMultipartUploadOutput, error) + //This action aborts a multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + AbortMultipartUpload(ctx context.Context, in *AbortMultipartUploadInput, opts ...grpc.CallOption) (*AbortMultipartUploadOutput, error) + //This action lists in-progress multipart uploads. 
+ //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + ListMultipartUploads(ctx context.Context, in *ListMultipartUploadsInput, opts ...grpc.CallOption) (*ListMultipartUploadsOutput, error) + //Lists the parts that have been uploaded for a specific multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + ListParts(ctx context.Context, in *ListPartsInput, opts ...grpc.CallOption) (*ListPartsOutput, error) + //Returns metadata about all versions of the objects in a bucket. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html + ListObjectVersions(ctx context.Context, in *ListObjectVersionsInput, opts ...grpc.CallOption) (*ListObjectVersionsOutput, error) + //A presigned URL gives you access to the object identified in the URL, provided that the creator of the presigned URL has permissions to access that object. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/PresignedUrlUploadObject.html + SignURL(ctx context.Context, in *SignURLInput, opts ...grpc.CallOption) (*SignURLOutput, error) + //This action used to set download bandwidth limit speed. + //Refer https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2106 + UpdateDownloadBandwidthRateLimit(ctx context.Context, in *UpdateBandwidthRateLimitInput, opts ...grpc.CallOption) (*emptypb.Empty, error) + //This action used to set upload bandwidth limit speed. + //Refer https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2096 + UpdateUploadBandwidthRateLimit(ctx context.Context, in *UpdateBandwidthRateLimitInput, opts ...grpc.CallOption) (*emptypb.Empty, error) + //This action is used to append object. + //Refer https://help.aliyun.com/document_detail/31981.html or https://github.com/minio/minio-java/issues/980 + AppendObject(ctx context.Context, opts ...grpc.CallOption) (ObjectStorageService_AppendObjectClient, error) + //Restores an archived copy of an object back. 
+ //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_RestoreObject.html + RestoreObject(ctx context.Context, in *RestoreObjectInput, opts ...grpc.CallOption) (*RestoreObjectOutput, error) +} + +type objectStorageServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewObjectStorageServiceClient(cc grpc.ClientConnInterface) ObjectStorageServiceClient { + return &objectStorageServiceClient{cc} +} + +func (c *objectStorageServiceClient) PutObject(ctx context.Context, opts ...grpc.CallOption) (ObjectStorageService_PutObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &ObjectStorageService_ServiceDesc.Streams[0], "/spec.proto.extension.v1.s3.ObjectStorageService/PutObject", opts...) + if err != nil { + return nil, err + } + x := &objectStorageServicePutObjectClient{stream} + return x, nil +} + +type ObjectStorageService_PutObjectClient interface { + Send(*PutObjectInput) error + CloseAndRecv() (*PutObjectOutput, error) + grpc.ClientStream +} + +type objectStorageServicePutObjectClient struct { + grpc.ClientStream +} + +func (x *objectStorageServicePutObjectClient) Send(m *PutObjectInput) error { + return x.ClientStream.SendMsg(m) +} + +func (x *objectStorageServicePutObjectClient) CloseAndRecv() (*PutObjectOutput, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(PutObjectOutput) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *objectStorageServiceClient) GetObject(ctx context.Context, in *GetObjectInput, opts ...grpc.CallOption) (ObjectStorageService_GetObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &ObjectStorageService_ServiceDesc.Streams[1], "/spec.proto.extension.v1.s3.ObjectStorageService/GetObject", opts...) 
+ if err != nil { + return nil, err + } + x := &objectStorageServiceGetObjectClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ObjectStorageService_GetObjectClient interface { + Recv() (*GetObjectOutput, error) + grpc.ClientStream +} + +type objectStorageServiceGetObjectClient struct { + grpc.ClientStream +} + +func (x *objectStorageServiceGetObjectClient) Recv() (*GetObjectOutput, error) { + m := new(GetObjectOutput) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *objectStorageServiceClient) DeleteObject(ctx context.Context, in *DeleteObjectInput, opts ...grpc.CallOption) (*DeleteObjectOutput, error) { + out := new(DeleteObjectOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/DeleteObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) CopyObject(ctx context.Context, in *CopyObjectInput, opts ...grpc.CallOption) (*CopyObjectOutput, error) { + out := new(CopyObjectOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/CopyObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) DeleteObjects(ctx context.Context, in *DeleteObjectsInput, opts ...grpc.CallOption) (*DeleteObjectsOutput, error) { + out := new(DeleteObjectsOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/DeleteObjects", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) ListObjects(ctx context.Context, in *ListObjectsInput, opts ...grpc.CallOption) (*ListObjectsOutput, error) { + out := new(ListObjectsOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/ListObjects", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) HeadObject(ctx context.Context, in *HeadObjectInput, opts ...grpc.CallOption) (*HeadObjectOutput, error) { + out := new(HeadObjectOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/HeadObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) IsObjectExist(ctx context.Context, in *IsObjectExistInput, opts ...grpc.CallOption) (*IsObjectExistOutput, error) { + out := new(IsObjectExistOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/IsObjectExist", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) PutObjectTagging(ctx context.Context, in *PutObjectTaggingInput, opts ...grpc.CallOption) (*PutObjectTaggingOutput, error) { + out := new(PutObjectTaggingOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/PutObjectTagging", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) DeleteObjectTagging(ctx context.Context, in *DeleteObjectTaggingInput, opts ...grpc.CallOption) (*DeleteObjectTaggingOutput, error) { + out := new(DeleteObjectTaggingOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/DeleteObjectTagging", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) GetObjectTagging(ctx context.Context, in *GetObjectTaggingInput, opts ...grpc.CallOption) (*GetObjectTaggingOutput, error) { + out := new(GetObjectTaggingOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/GetObjectTagging", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) GetObjectCannedAcl(ctx context.Context, in *GetObjectCannedAclInput, opts ...grpc.CallOption) (*GetObjectCannedAclOutput, error) { + out := new(GetObjectCannedAclOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/GetObjectCannedAcl", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) PutObjectCannedAcl(ctx context.Context, in *PutObjectCannedAclInput, opts ...grpc.CallOption) (*PutObjectCannedAclOutput, error) { + out := new(PutObjectCannedAclOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/PutObjectCannedAcl", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) CreateMultipartUpload(ctx context.Context, in *CreateMultipartUploadInput, opts ...grpc.CallOption) (*CreateMultipartUploadOutput, error) { + out := new(CreateMultipartUploadOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/CreateMultipartUpload", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) UploadPart(ctx context.Context, opts ...grpc.CallOption) (ObjectStorageService_UploadPartClient, error) { + stream, err := c.cc.NewStream(ctx, &ObjectStorageService_ServiceDesc.Streams[2], "/spec.proto.extension.v1.s3.ObjectStorageService/UploadPart", opts...) 
+ if err != nil { + return nil, err + } + x := &objectStorageServiceUploadPartClient{stream} + return x, nil +} + +type ObjectStorageService_UploadPartClient interface { + Send(*UploadPartInput) error + CloseAndRecv() (*UploadPartOutput, error) + grpc.ClientStream +} + +type objectStorageServiceUploadPartClient struct { + grpc.ClientStream +} + +func (x *objectStorageServiceUploadPartClient) Send(m *UploadPartInput) error { + return x.ClientStream.SendMsg(m) +} + +func (x *objectStorageServiceUploadPartClient) CloseAndRecv() (*UploadPartOutput, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(UploadPartOutput) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *objectStorageServiceClient) UploadPartCopy(ctx context.Context, in *UploadPartCopyInput, opts ...grpc.CallOption) (*UploadPartCopyOutput, error) { + out := new(UploadPartCopyOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/UploadPartCopy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) CompleteMultipartUpload(ctx context.Context, in *CompleteMultipartUploadInput, opts ...grpc.CallOption) (*CompleteMultipartUploadOutput, error) { + out := new(CompleteMultipartUploadOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/CompleteMultipartUpload", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) AbortMultipartUpload(ctx context.Context, in *AbortMultipartUploadInput, opts ...grpc.CallOption) (*AbortMultipartUploadOutput, error) { + out := new(AbortMultipartUploadOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/AbortMultipartUpload", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) ListMultipartUploads(ctx context.Context, in *ListMultipartUploadsInput, opts ...grpc.CallOption) (*ListMultipartUploadsOutput, error) { + out := new(ListMultipartUploadsOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/ListMultipartUploads", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) ListParts(ctx context.Context, in *ListPartsInput, opts ...grpc.CallOption) (*ListPartsOutput, error) { + out := new(ListPartsOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/ListParts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) ListObjectVersions(ctx context.Context, in *ListObjectVersionsInput, opts ...grpc.CallOption) (*ListObjectVersionsOutput, error) { + out := new(ListObjectVersionsOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/ListObjectVersions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) SignURL(ctx context.Context, in *SignURLInput, opts ...grpc.CallOption) (*SignURLOutput, error) { + out := new(SignURLOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/SignURL", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) UpdateDownloadBandwidthRateLimit(ctx context.Context, in *UpdateBandwidthRateLimitInput, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/UpdateDownloadBandwidthRateLimit", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) UpdateUploadBandwidthRateLimit(ctx context.Context, in *UpdateBandwidthRateLimitInput, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/UpdateUploadBandwidthRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectStorageServiceClient) AppendObject(ctx context.Context, opts ...grpc.CallOption) (ObjectStorageService_AppendObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &ObjectStorageService_ServiceDesc.Streams[3], "/spec.proto.extension.v1.s3.ObjectStorageService/AppendObject", opts...) + if err != nil { + return nil, err + } + x := &objectStorageServiceAppendObjectClient{stream} + return x, nil +} + +type ObjectStorageService_AppendObjectClient interface { + Send(*AppendObjectInput) error + CloseAndRecv() (*AppendObjectOutput, error) + grpc.ClientStream +} + +type objectStorageServiceAppendObjectClient struct { + grpc.ClientStream +} + +func (x *objectStorageServiceAppendObjectClient) Send(m *AppendObjectInput) error { + return x.ClientStream.SendMsg(m) +} + +func (x *objectStorageServiceAppendObjectClient) CloseAndRecv() (*AppendObjectOutput, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(AppendObjectOutput) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *objectStorageServiceClient) RestoreObject(ctx context.Context, in *RestoreObjectInput, opts ...grpc.CallOption) (*RestoreObjectOutput, error) { + out := new(RestoreObjectOutput) + err := c.cc.Invoke(ctx, "/spec.proto.extension.v1.s3.ObjectStorageService/RestoreObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ObjectStorageServiceServer is the server API for ObjectStorageService service. 
+// All implementations should embed UnimplementedObjectStorageServiceServer +// for forward compatibility +type ObjectStorageServiceServer interface { + //Object CRUD API + //Adds an object to a bucket. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + PutObject(ObjectStorageService_PutObjectServer) error + //Retrieves objects. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + GetObject(*GetObjectInput, ObjectStorageService_GetObjectServer) error + //Delete objects. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + DeleteObject(context.Context, *DeleteObjectInput) (*DeleteObjectOutput, error) + //Creates a copy of an object that is already stored in oss server. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CopyObject.html + CopyObject(context.Context, *CopyObjectInput) (*CopyObjectOutput, error) + //Delete multiple objects from a bucket. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_DeleteObjects.html + DeleteObjects(context.Context, *DeleteObjectsInput) (*DeleteObjectsOutput, error) + //Returns some or all (up to 1,000) of the objects in a bucket. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_ListObjects.html + ListObjects(context.Context, *ListObjectsInput) (*ListObjectsOutput, error) + //The HEAD action retrieves metadata from an object without returning the object itself. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html + HeadObject(context.Context, *HeadObjectInput) (*HeadObjectOutput, error) + //This action used to check if the file exists. + IsObjectExist(context.Context, *IsObjectExistInput) (*IsObjectExistOutput, error) + //Object Tagging API + //Sets the supplied tag-set to an object that already exists in a bucket. 
+ //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html + PutObjectTagging(context.Context, *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) + //Removes the entire tag set from the specified object. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html + DeleteObjectTagging(context.Context, *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) + //Returns the tag-set of an object. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_GetObjectTagging.html + GetObjectTagging(context.Context, *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) + //Returns object canned acl. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL + GetObjectCannedAcl(context.Context, *GetObjectCannedAclInput) (*GetObjectCannedAclOutput, error) + //Set object canned acl. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#CannedACL + PutObjectCannedAcl(context.Context, *PutObjectCannedAclInput) (*PutObjectCannedAclOutput, error) + //Object Multipart Operation API + //Initiates a multipart upload and returns an upload ID. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_CreateMultipartUpload.html + CreateMultipartUpload(context.Context, *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) + //Uploads a part in a multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + UploadPart(ObjectStorageService_UploadPartServer) error + //Uploads a part by copying data from an existing object as data source. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + UploadPartCopy(context.Context, *UploadPartCopyInput) (*UploadPartCopyOutput, error) + //Completes a multipart upload by assembling previously uploaded parts. 
+ //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + CompleteMultipartUpload(context.Context, *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) + //This action aborts a multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + AbortMultipartUpload(context.Context, *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) + //This action lists in-progress multipart uploads. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + ListMultipartUploads(context.Context, *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) + //Lists the parts that have been uploaded for a specific multipart upload. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + ListParts(context.Context, *ListPartsInput) (*ListPartsOutput, error) + //Returns metadata about all versions of the objects in a bucket. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html + ListObjectVersions(context.Context, *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) + //A presigned URL gives you access to the object identified in the URL, provided that the creator of the presigned URL has permissions to access that object. + //Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/PresignedUrlUploadObject.html + SignURL(context.Context, *SignURLInput) (*SignURLOutput, error) + //This action used to set download bandwidth limit speed. + //Refer https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2106 + UpdateDownloadBandwidthRateLimit(context.Context, *UpdateBandwidthRateLimitInput) (*emptypb.Empty, error) + //This action used to set upload bandwidth limit speed. 
+ //Refer https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client.go#L2096 + UpdateUploadBandwidthRateLimit(context.Context, *UpdateBandwidthRateLimitInput) (*emptypb.Empty, error) + //This action is used to append object. + //Refer https://help.aliyun.com/document_detail/31981.html or https://github.com/minio/minio-java/issues/980 + AppendObject(ObjectStorageService_AppendObjectServer) error + //Restores an archived copy of an object back. + //Refer https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_RestoreObject.html + RestoreObject(context.Context, *RestoreObjectInput) (*RestoreObjectOutput, error) +} + +// UnimplementedObjectStorageServiceServer should be embedded to have forward compatible implementations. +type UnimplementedObjectStorageServiceServer struct { +} + +func (UnimplementedObjectStorageServiceServer) PutObject(ObjectStorageService_PutObjectServer) error { + return status.Errorf(codes.Unimplemented, "method PutObject not implemented") +} +func (UnimplementedObjectStorageServiceServer) GetObject(*GetObjectInput, ObjectStorageService_GetObjectServer) error { + return status.Errorf(codes.Unimplemented, "method GetObject not implemented") +} +func (UnimplementedObjectStorageServiceServer) DeleteObject(context.Context, *DeleteObjectInput) (*DeleteObjectOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") +} +func (UnimplementedObjectStorageServiceServer) CopyObject(context.Context, *CopyObjectInput) (*CopyObjectOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method CopyObject not implemented") +} +func (UnimplementedObjectStorageServiceServer) DeleteObjects(context.Context, *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteObjects not implemented") +} +func (UnimplementedObjectStorageServiceServer) ListObjects(context.Context, *ListObjectsInput) (*ListObjectsOutput, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method ListObjects not implemented") +} +func (UnimplementedObjectStorageServiceServer) HeadObject(context.Context, *HeadObjectInput) (*HeadObjectOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method HeadObject not implemented") +} +func (UnimplementedObjectStorageServiceServer) IsObjectExist(context.Context, *IsObjectExistInput) (*IsObjectExistOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method IsObjectExist not implemented") +} +func (UnimplementedObjectStorageServiceServer) PutObjectTagging(context.Context, *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method PutObjectTagging not implemented") +} +func (UnimplementedObjectStorageServiceServer) DeleteObjectTagging(context.Context, *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteObjectTagging not implemented") +} +func (UnimplementedObjectStorageServiceServer) GetObjectTagging(context.Context, *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetObjectTagging not implemented") +} +func (UnimplementedObjectStorageServiceServer) GetObjectCannedAcl(context.Context, *GetObjectCannedAclInput) (*GetObjectCannedAclOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetObjectCannedAcl not implemented") +} +func (UnimplementedObjectStorageServiceServer) PutObjectCannedAcl(context.Context, *PutObjectCannedAclInput) (*PutObjectCannedAclOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method PutObjectCannedAcl not implemented") +} +func (UnimplementedObjectStorageServiceServer) CreateMultipartUpload(context.Context, *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateMultipartUpload not implemented") +} +func 
(UnimplementedObjectStorageServiceServer) UploadPart(ObjectStorageService_UploadPartServer) error { + return status.Errorf(codes.Unimplemented, "method UploadPart not implemented") +} +func (UnimplementedObjectStorageServiceServer) UploadPartCopy(context.Context, *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method UploadPartCopy not implemented") +} +func (UnimplementedObjectStorageServiceServer) CompleteMultipartUpload(context.Context, *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method CompleteMultipartUpload not implemented") +} +func (UnimplementedObjectStorageServiceServer) AbortMultipartUpload(context.Context, *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method AbortMultipartUpload not implemented") +} +func (UnimplementedObjectStorageServiceServer) ListMultipartUploads(context.Context, *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMultipartUploads not implemented") +} +func (UnimplementedObjectStorageServiceServer) ListParts(context.Context, *ListPartsInput) (*ListPartsOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListParts not implemented") +} +func (UnimplementedObjectStorageServiceServer) ListObjectVersions(context.Context, *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListObjectVersions not implemented") +} +func (UnimplementedObjectStorageServiceServer) SignURL(context.Context, *SignURLInput) (*SignURLOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method SignURL not implemented") +} +func (UnimplementedObjectStorageServiceServer) UpdateDownloadBandwidthRateLimit(context.Context, *UpdateBandwidthRateLimitInput) (*emptypb.Empty, error) { + return 
nil, status.Errorf(codes.Unimplemented, "method UpdateDownloadBandwidthRateLimit not implemented") +} +func (UnimplementedObjectStorageServiceServer) UpdateUploadBandwidthRateLimit(context.Context, *UpdateBandwidthRateLimitInput) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateUploadBandwidthRateLimit not implemented") +} +func (UnimplementedObjectStorageServiceServer) AppendObject(ObjectStorageService_AppendObjectServer) error { + return status.Errorf(codes.Unimplemented, "method AppendObject not implemented") +} +func (UnimplementedObjectStorageServiceServer) RestoreObject(context.Context, *RestoreObjectInput) (*RestoreObjectOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented") +} + +// UnsafeObjectStorageServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ObjectStorageServiceServer will +// result in compilation errors. 
+type UnsafeObjectStorageServiceServer interface { + mustEmbedUnimplementedObjectStorageServiceServer() +} + +func RegisterObjectStorageServiceServer(s grpc.ServiceRegistrar, srv ObjectStorageServiceServer) { + s.RegisterService(&ObjectStorageService_ServiceDesc, srv) +} + +func _ObjectStorageService_PutObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ObjectStorageServiceServer).PutObject(&objectStorageServicePutObjectServer{stream}) +} + +type ObjectStorageService_PutObjectServer interface { + SendAndClose(*PutObjectOutput) error + Recv() (*PutObjectInput, error) + grpc.ServerStream +} + +type objectStorageServicePutObjectServer struct { + grpc.ServerStream +} + +func (x *objectStorageServicePutObjectServer) SendAndClose(m *PutObjectOutput) error { + return x.ServerStream.SendMsg(m) +} + +func (x *objectStorageServicePutObjectServer) Recv() (*PutObjectInput, error) { + m := new(PutObjectInput) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ObjectStorageService_GetObject_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetObjectInput) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ObjectStorageServiceServer).GetObject(m, &objectStorageServiceGetObjectServer{stream}) +} + +type ObjectStorageService_GetObjectServer interface { + Send(*GetObjectOutput) error + grpc.ServerStream +} + +type objectStorageServiceGetObjectServer struct { + grpc.ServerStream +} + +func (x *objectStorageServiceGetObjectServer) Send(m *GetObjectOutput) error { + return x.ServerStream.SendMsg(m) +} + +func _ObjectStorageService_DeleteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteObjectInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).DeleteObject(ctx, in) + } + info 
:= &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/DeleteObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).DeleteObject(ctx, req.(*DeleteObjectInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_CopyObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CopyObjectInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).CopyObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/CopyObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).CopyObject(ctx, req.(*CopyObjectInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_DeleteObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteObjectsInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).DeleteObjects(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/DeleteObjects", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).DeleteObjects(ctx, req.(*DeleteObjectsInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListObjectsInput) + if err := dec(in); err != nil 
{ + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).ListObjects(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/ListObjects", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).ListObjects(ctx, req.(*ListObjectsInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_HeadObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HeadObjectInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).HeadObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/HeadObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).HeadObject(ctx, req.(*HeadObjectInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_IsObjectExist_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IsObjectExistInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).IsObjectExist(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/IsObjectExist", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).IsObjectExist(ctx, req.(*IsObjectExistInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_PutObjectTagging_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutObjectTaggingInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).PutObjectTagging(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/PutObjectTagging", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).PutObjectTagging(ctx, req.(*PutObjectTaggingInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_DeleteObjectTagging_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteObjectTaggingInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).DeleteObjectTagging(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/DeleteObjectTagging", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).DeleteObjectTagging(ctx, req.(*DeleteObjectTaggingInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_GetObjectTagging_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetObjectTaggingInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).GetObjectTagging(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/GetObjectTagging", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ObjectStorageServiceServer).GetObjectTagging(ctx, req.(*GetObjectTaggingInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_GetObjectCannedAcl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetObjectCannedAclInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).GetObjectCannedAcl(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/GetObjectCannedAcl", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).GetObjectCannedAcl(ctx, req.(*GetObjectCannedAclInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_PutObjectCannedAcl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutObjectCannedAclInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).PutObjectCannedAcl(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/PutObjectCannedAcl", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).PutObjectCannedAcl(ctx, req.(*PutObjectCannedAclInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_CreateMultipartUpload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMultipartUploadInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ObjectStorageServiceServer).CreateMultipartUpload(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/CreateMultipartUpload", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).CreateMultipartUpload(ctx, req.(*CreateMultipartUploadInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_UploadPart_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ObjectStorageServiceServer).UploadPart(&objectStorageServiceUploadPartServer{stream}) +} + +type ObjectStorageService_UploadPartServer interface { + SendAndClose(*UploadPartOutput) error + Recv() (*UploadPartInput, error) + grpc.ServerStream +} + +type objectStorageServiceUploadPartServer struct { + grpc.ServerStream +} + +func (x *objectStorageServiceUploadPartServer) SendAndClose(m *UploadPartOutput) error { + return x.ServerStream.SendMsg(m) +} + +func (x *objectStorageServiceUploadPartServer) Recv() (*UploadPartInput, error) { + m := new(UploadPartInput) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ObjectStorageService_UploadPartCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UploadPartCopyInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).UploadPartCopy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/UploadPartCopy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).UploadPartCopy(ctx, req.(*UploadPartCopyInput)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ObjectStorageService_CompleteMultipartUpload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompleteMultipartUploadInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).CompleteMultipartUpload(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/CompleteMultipartUpload", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).CompleteMultipartUpload(ctx, req.(*CompleteMultipartUploadInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_AbortMultipartUpload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AbortMultipartUploadInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).AbortMultipartUpload(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/AbortMultipartUpload", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).AbortMultipartUpload(ctx, req.(*AbortMultipartUploadInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_ListMultipartUploads_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMultipartUploadsInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).ListMultipartUploads(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/spec.proto.extension.v1.s3.ObjectStorageService/ListMultipartUploads", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).ListMultipartUploads(ctx, req.(*ListMultipartUploadsInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_ListParts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPartsInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).ListParts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/ListParts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).ListParts(ctx, req.(*ListPartsInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_ListObjectVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListObjectVersionsInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).ListObjectVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/ListObjectVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).ListObjectVersions(ctx, req.(*ListObjectVersionsInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_SignURL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignURLInput) + if err := dec(in); err != nil { + return 
nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).SignURL(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/SignURL", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).SignURL(ctx, req.(*SignURLInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_UpdateDownloadBandwidthRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBandwidthRateLimitInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).UpdateDownloadBandwidthRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/UpdateDownloadBandwidthRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).UpdateDownloadBandwidthRateLimit(ctx, req.(*UpdateBandwidthRateLimitInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_UpdateUploadBandwidthRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBandwidthRateLimitInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).UpdateUploadBandwidthRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/UpdateUploadBandwidthRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).UpdateUploadBandwidthRateLimit(ctx, 
req.(*UpdateBandwidthRateLimitInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectStorageService_AppendObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ObjectStorageServiceServer).AppendObject(&objectStorageServiceAppendObjectServer{stream}) +} + +type ObjectStorageService_AppendObjectServer interface { + SendAndClose(*AppendObjectOutput) error + Recv() (*AppendObjectInput, error) + grpc.ServerStream +} + +type objectStorageServiceAppendObjectServer struct { + grpc.ServerStream +} + +func (x *objectStorageServiceAppendObjectServer) SendAndClose(m *AppendObjectOutput) error { + return x.ServerStream.SendMsg(m) +} + +func (x *objectStorageServiceAppendObjectServer) Recv() (*AppendObjectInput, error) { + m := new(AppendObjectInput) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ObjectStorageService_RestoreObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreObjectInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectStorageServiceServer).RestoreObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spec.proto.extension.v1.s3.ObjectStorageService/RestoreObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectStorageServiceServer).RestoreObject(ctx, req.(*RestoreObjectInput)) + } + return interceptor(ctx, in, info, handler) +} + +// ObjectStorageService_ServiceDesc is the grpc.ServiceDesc for ObjectStorageService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ObjectStorageService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "spec.proto.extension.v1.s3.ObjectStorageService", + HandlerType: (*ObjectStorageServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteObject", + Handler: _ObjectStorageService_DeleteObject_Handler, + }, + { + MethodName: "CopyObject", + Handler: _ObjectStorageService_CopyObject_Handler, + }, + { + MethodName: "DeleteObjects", + Handler: _ObjectStorageService_DeleteObjects_Handler, + }, + { + MethodName: "ListObjects", + Handler: _ObjectStorageService_ListObjects_Handler, + }, + { + MethodName: "HeadObject", + Handler: _ObjectStorageService_HeadObject_Handler, + }, + { + MethodName: "IsObjectExist", + Handler: _ObjectStorageService_IsObjectExist_Handler, + }, + { + MethodName: "PutObjectTagging", + Handler: _ObjectStorageService_PutObjectTagging_Handler, + }, + { + MethodName: "DeleteObjectTagging", + Handler: _ObjectStorageService_DeleteObjectTagging_Handler, + }, + { + MethodName: "GetObjectTagging", + Handler: _ObjectStorageService_GetObjectTagging_Handler, + }, + { + MethodName: "GetObjectCannedAcl", + Handler: _ObjectStorageService_GetObjectCannedAcl_Handler, + }, + { + MethodName: "PutObjectCannedAcl", + Handler: _ObjectStorageService_PutObjectCannedAcl_Handler, + }, + { + MethodName: "CreateMultipartUpload", + Handler: _ObjectStorageService_CreateMultipartUpload_Handler, + }, + { + MethodName: "UploadPartCopy", + Handler: _ObjectStorageService_UploadPartCopy_Handler, + }, + { + MethodName: "CompleteMultipartUpload", + Handler: _ObjectStorageService_CompleteMultipartUpload_Handler, + }, + { + MethodName: "AbortMultipartUpload", + Handler: _ObjectStorageService_AbortMultipartUpload_Handler, + }, + { + MethodName: "ListMultipartUploads", + Handler: _ObjectStorageService_ListMultipartUploads_Handler, + }, + { + MethodName: "ListParts", + Handler: 
_ObjectStorageService_ListParts_Handler, + }, + { + MethodName: "ListObjectVersions", + Handler: _ObjectStorageService_ListObjectVersions_Handler, + }, + { + MethodName: "SignURL", + Handler: _ObjectStorageService_SignURL_Handler, + }, + { + MethodName: "UpdateDownloadBandwidthRateLimit", + Handler: _ObjectStorageService_UpdateDownloadBandwidthRateLimit_Handler, + }, + { + MethodName: "UpdateUploadBandwidthRateLimit", + Handler: _ObjectStorageService_UpdateUploadBandwidthRateLimit_Handler, + }, + { + MethodName: "RestoreObject", + Handler: _ObjectStorageService_RestoreObject_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "PutObject", + Handler: _ObjectStorageService_PutObject_Handler, + ClientStreams: true, + }, + { + StreamName: "GetObject", + Handler: _ObjectStorageService_GetObject_Handler, + ServerStreams: true, + }, + { + StreamName: "UploadPart", + Handler: _ObjectStorageService_UploadPart_Handler, + ClientStreams: true, + }, + { + StreamName: "AppendObject", + Handler: _ObjectStorageService_AppendObject_Handler, + ClientStreams: true, + }, + }, + Metadata: "oss.proto", +} diff --git a/spec/proto/runtime/v1/html.tmpl b/spec/proto/runtime/v1/html.tmpl new file mode 100644 index 0000000000..3b8d1d4b7f --- /dev/null +++ b/spec/proto/runtime/v1/html.tmpl @@ -0,0 +1,439 @@ + + + + + Protocol Documentation + + + + + + + + + + +

Protocol Documentation

+ +

Table of Contents

+ +
+
    + {{range .Files}} + {{$file_name := .Name}} +
  • + {{.Name}} +
      + + {{range .Services}} +
    • + S{{.Name}} +
    • + {{end}} +
    +
  • + {{end}} + +
+
+ + {{range .Files}} + {{$file_name := .Name}} +
+

{{.Name}}

Top +
+ {{p .Description}} + + {{range .Services}} +

[gRPC Service] {{.Name}}

+ {{p .Description}} + + + + + + {{range .Methods}} + + + + + + + {{end}} + +
Method NameRequest TypeResponse TypeDescription
{{.Name}}{{.RequestLongType}}{{if .RequestStreaming}} stream{{end}}{{.ResponseLongType}}{{if .ResponseStreaming}} stream{{end}}

{{.Description}}

+ + {{$service := .}} + {{- range .MethodOptions}} + {{$option := .}} + {{if eq . "google.api.http"}} +

Methods with HTTP bindings

+ + + + + + + + + + + {{range $service.MethodsWithOption .}} + {{$name := .Name}} + {{range (.Option $option).Rules}} + + + + + + + {{end}} + {{end}} + +
Method NameMethodPatternBody
{{$name}}{{.Method}}{{.Pattern}}{{.Body}}
+ {{else}} +

Methods with {{.}} option

+ + + + + + + + + {{range $service.MethodsWithOption .}} + + + + + {{end}} + +
Method NameOption
{{.Name}}

{{ printf "%+v" (.Option $option)}}

+ {{end}} + {{end -}} + {{end}} + + {{range .Messages}} +

{{.LongName}}

+ {{p .Description}} + + {{if .HasFields}} + + + + + + {{range .Fields}} + + + + + + + {{end}} + +
FieldTypeLabelDescription
{{.Name}}{{.LongType}}{{.Label}}

{{if (index .Options "deprecated"|default false)}}Deprecated. {{end}}{{.Description}} {{if .DefaultValue}}Default: {{.DefaultValue}}{{end}}

+ + {{$message := .}} + {{- range .FieldOptions}} + {{$option := .}} + {{if eq . "validator.field" "validate.rules" }} +

Validated Fields

+ + + + + + + + + {{range $message.FieldsWithOption .}} + + + + + {{end}} + +
FieldValidations
{{.Name}} +
    + {{range (.Option $option).Rules}} +
  • {{.Name}}: {{.Value}}
  • + {{end}} +
+
+ {{else}} +

Fields with {{.}} option

+ + + + + + + + + {{range $message.FieldsWithOption .}} + + + + + {{end}} + +
NameOption
{{.Name}}

{{ printf "%+v" (.Option $option)}}

+ {{end}} + {{end -}} + {{end}} + + {{if .HasExtensions}} +
+ + + + + + {{range .Extensions}} + + + + + + + + {{end}} + +
ExtensionTypeBaseNumberDescription
{{.Name}}{{.LongType}}{{.ContainingLongType}}{{.Number}}

{{.Description}}{{if .DefaultValue}} Default: {{.DefaultValue}}{{end}}

+ {{end}} + {{end}} + + {{range .Enums}} +

{{.LongName}}

+ {{p .Description}} + + + + + + {{range .Values}} + + + + + + {{end}} + +
NameNumberDescription
{{.Name}}{{.Number}}

{{.Description}}

+ {{end}} + + {{if .HasExtensions}} +

File-level Extensions

+ + + + + + {{range .Extensions}} + + + + + + + + {{end}} + +
ExtensionTypeBaseNumberDescription
{{.Name}}{{.LongType}}{{.ContainingLongType}}{{.Number}}

{{.Description}}{{if .DefaultValue}} Default: {{.DefaultValue}}{{end}}

+ {{end}} + + {{end}} + +

Scalar Value Types

+ + + + + + {{range .Scalars}} + + + + + + + + + + + + {{end}} + +
.proto TypeNotesC++JavaPythonGoC#PHPRuby
{{.ProtoType}}{{.Notes}}{{.CppType}}{{.JavaType}}{{.PythonType}}{{.GoType}}{{.CSharp}}{{.PhpType}}{{.RubyType}}
+ + diff --git a/spec/proto/runtime/v1/runtime.pb.go b/spec/proto/runtime/v1/runtime.pb.go index 33cce02782..60480c17ae 100644 --- a/spec/proto/runtime/v1/runtime.pb.go +++ b/spec/proto/runtime/v1/runtime.pb.go @@ -74,13 +74,18 @@ func (SequencerOptions_AutoIncrement) EnumDescriptor() ([]byte, []int) { return file_runtime_proto_rawDescGZIP(), []int{13, 0} } +// The enum of unlock status type UnlockResponse_Status int32 const ( - UnlockResponse_SUCCESS UnlockResponse_Status = 0 - UnlockResponse_LOCK_UNEXIST UnlockResponse_Status = 1 + // Unlock is success + UnlockResponse_SUCCESS UnlockResponse_Status = 0 + // The lock does not exist + UnlockResponse_LOCK_UNEXIST UnlockResponse_Status = 1 + // The lock belongs to others UnlockResponse_LOCK_BELONG_TO_OTHERS UnlockResponse_Status = 2 - UnlockResponse_INTERNAL_ERROR UnlockResponse_Status = 3 + // Internal error + UnlockResponse_INTERNAL_ERROR UnlockResponse_Status = 3 ) // Enum value maps for UnlockResponse_Status. @@ -126,19 +131,30 @@ func (UnlockResponse_Status) EnumDescriptor() ([]byte, []int) { return file_runtime_proto_rawDescGZIP(), []int{18, 0} } +// The enum of HTTP request method type HTTPExtension_Verb int32 const ( - HTTPExtension_NONE HTTPExtension_Verb = 0 - HTTPExtension_GET HTTPExtension_Verb = 1 - HTTPExtension_HEAD HTTPExtension_Verb = 2 - HTTPExtension_POST HTTPExtension_Verb = 3 - HTTPExtension_PUT HTTPExtension_Verb = 4 - HTTPExtension_DELETE HTTPExtension_Verb = 5 + // NONE + HTTPExtension_NONE HTTPExtension_Verb = 0 + // GET method + HTTPExtension_GET HTTPExtension_Verb = 1 + // HEAD method + HTTPExtension_HEAD HTTPExtension_Verb = 2 + // POST method + HTTPExtension_POST HTTPExtension_Verb = 3 + // PUT method + HTTPExtension_PUT HTTPExtension_Verb = 4 + // DELETE method + HTTPExtension_DELETE HTTPExtension_Verb = 5 + // CONNECT method HTTPExtension_CONNECT HTTPExtension_Verb = 6 + // OPTIONS method HTTPExtension_OPTIONS HTTPExtension_Verb = 7 - HTTPExtension_TRACE HTTPExtension_Verb = 8 - 
HTTPExtension_PATCH HTTPExtension_Verb = 9 + // TRACE method + HTTPExtension_TRACE HTTPExtension_Verb = 8 + // PATCH method + HTTPExtension_PATCH HTTPExtension_Verb = 9 ) // Enum value maps for HTTPExtension_Verb. @@ -203,6 +219,7 @@ func (HTTPExtension_Verb) EnumDescriptor() ([]byte, []int) { type StateOptions_StateConcurrency int32 const ( + // Concurrency state is unspecified StateOptions_CONCURRENCY_UNSPECIFIED StateOptions_StateConcurrency = 0 // First write wins StateOptions_CONCURRENCY_FIRST_WRITE StateOptions_StateConcurrency = 1 @@ -255,6 +272,7 @@ func (StateOptions_StateConcurrency) EnumDescriptor() ([]byte, []int) { type StateOptions_StateConsistency int32 const ( + // Consistency state is unspecified StateOptions_CONSISTENCY_UNSPECIFIED StateOptions_StateConsistency = 0 // The API server assumes data stores are eventually consistent by default.A state store should: // - For read requests, the state store can return data from any of the replicas @@ -307,11 +325,13 @@ func (StateOptions_StateConsistency) EnumDescriptor() ([]byte, []int) { return file_runtime_proto_rawDescGZIP(), []int{42, 1} } +// Get fileMeta request message type GetFileMetaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // File meta request Request *FileRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` } @@ -354,6 +374,7 @@ func (x *GetFileMetaRequest) GetRequest() *FileRequest { return nil } +// Get fileMeta response message type GetFileMetaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -362,8 +383,9 @@ type GetFileMetaResponse struct { // The size of file Size int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` // The modified time of file - LastModified string `protobuf:"bytes,2,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` - Response *FileMeta `protobuf:"bytes,3,opt,name=response,proto3" 
json:"response,omitempty"` + LastModified string `protobuf:"bytes,2,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + // File meta response + Response *FileMeta `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"` } func (x *GetFileMetaResponse) Reset() { @@ -419,11 +441,13 @@ func (x *GetFileMetaResponse) GetResponse() *FileMeta { return nil } +// FileMeta value type FileMetaValue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // File meta value Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` } @@ -466,11 +490,13 @@ func (x *FileMetaValue) GetValue() []string { return nil } +// A map that store FileMetaValue type FileMeta struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // A data structure to store metadata Metadata map[string]*FileMetaValue `protobuf:"bytes,1,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -513,12 +539,13 @@ func (x *FileMeta) GetMetadata() map[string]*FileMetaValue { return nil } +// Get file request message type GetFileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // + // The name of store StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` // The name of the file or object want to get. 
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` @@ -579,11 +606,13 @@ func (x *GetFileRequest) GetMetadata() map[string]string { return nil } +// Get file response message type GetFileResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The data of file Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } @@ -626,11 +655,13 @@ func (x *GetFileResponse) GetData() []byte { return nil } +// Put file request message type PutFileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The name of store StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` // The name of the file or object want to put. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` @@ -700,11 +731,13 @@ func (x *PutFileRequest) GetMetadata() map[string]string { return nil } +// File request message type FileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The name of store StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` // The name of the directory Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` @@ -765,14 +798,18 @@ func (x *FileRequest) GetMetadata() map[string]string { return nil } +// List file request message type ListFileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Request *FileRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - Marker string `protobuf:"bytes,3,opt,name=marker,proto3" json:"marker,omitempty"` + // File request + Request *FileRequest 
`protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + // Page size + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Marker + Marker string `protobuf:"bytes,3,opt,name=marker,proto3" json:"marker,omitempty"` } func (x *ListFileRequest) Reset() { @@ -828,6 +865,7 @@ func (x *ListFileRequest) GetMarker() string { return "" } +// File info message type FileInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -903,14 +941,18 @@ func (x *FileInfo) GetMetadata() map[string]string { return nil } +// List file response message type ListFileResp struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Files []*FileInfo `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` - Marker string `protobuf:"bytes,2,opt,name=marker,proto3" json:"marker,omitempty"` - IsTruncated bool `protobuf:"varint,3,opt,name=is_truncated,json=isTruncated,proto3" json:"is_truncated,omitempty"` + // File info + Files []*FileInfo `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` + // Marker + Marker string `protobuf:"bytes,2,opt,name=marker,proto3" json:"marker,omitempty"` + // Is truncated + IsTruncated bool `protobuf:"varint,3,opt,name=is_truncated,json=isTruncated,proto3" json:"is_truncated,omitempty"` } func (x *ListFileResp) Reset() { @@ -966,11 +1008,13 @@ func (x *ListFileResp) GetIsTruncated() bool { return false } +// Delete file request message type DelFileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // File request Request *FileRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` } @@ -1013,6 +1057,7 @@ func (x *DelFileRequest) GetRequest() *FileRequest { return nil } +// Get next id request message type GetNextIdRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1094,6 +1139,7 @@ type 
SequencerOptions struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Default STRONG auto-increment Increment SequencerOptions_AutoIncrement `protobuf:"varint,1,opt,name=increment,proto3,enum=spec.proto.runtime.v1.SequencerOptions_AutoIncrement" json:"increment,omitempty"` } @@ -1136,6 +1182,7 @@ func (x *SequencerOptions) GetIncrement() SequencerOptions_AutoIncrement { return SequencerOptions_WEAK } +// Get next id response message type GetNextIdResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1185,6 +1232,7 @@ func (x *GetNextIdResponse) GetNextId() int64 { return 0 } +// Lock request message for the distributed lock API, a non-blocking method trying to get a lock with ttl type TryLockRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1272,11 +1320,13 @@ func (x *TryLockRequest) GetExpire() int32 { return 0 } +// Lock response message returns whether the lock is obtained. type TryLockResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Is lock success Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` } @@ -1319,15 +1369,18 @@ func (x *TryLockResponse) GetSuccess() bool { return false } +// UnLock request message type UnlockRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The name of store StoreName string `protobuf:"bytes,1,opt,name=store_name,json=storeName,proto3" json:"store_name,omitempty"` // resource_id is the lock key. 
ResourceId string `protobuf:"bytes,2,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` - LockOwner string `protobuf:"bytes,3,opt,name=lock_owner,json=lockOwner,proto3" json:"lock_owner,omitempty"` + // The owner of the lock + LockOwner string `protobuf:"bytes,3,opt,name=lock_owner,json=lockOwner,proto3" json:"lock_owner,omitempty"` } func (x *UnlockRequest) Reset() { @@ -1383,11 +1436,13 @@ func (x *UnlockRequest) GetLockOwner() string { return "" } +// UnLock response message type UnlockResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The status of unlock Status UnlockResponse_Status `protobuf:"varint,1,opt,name=status,proto3,enum=spec.proto.runtime.v1.UnlockResponse_Status" json:"status,omitempty"` } @@ -1430,13 +1485,16 @@ func (x *UnlockResponse) GetStatus() UnlockResponse_Status { return UnlockResponse_SUCCESS } +// Hello request message type SayHelloRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The name of service ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Request name + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Optional. This field is used to control the packet size during load tests. 
Data *anypb.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` } @@ -1494,13 +1552,16 @@ func (x *SayHelloRequest) GetData() *anypb.Any { return nil } +// Hello response message type SayHelloResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Hello string `protobuf:"bytes,1,opt,name=hello,proto3" json:"hello,omitempty"` - Data *anypb.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Hello + Hello string `protobuf:"bytes,1,opt,name=hello,proto3" json:"hello,omitempty"` + // Hello message of data + Data *anypb.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } func (x *SayHelloResponse) Reset() { @@ -1549,12 +1610,15 @@ func (x *SayHelloResponse) GetData() *anypb.Any { return nil } +// Invoke service request message type InvokeServiceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // The identity of InvokeServiceRequest + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // InvokeServiceRequest message Message *CommonInvokeRequest `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` } @@ -1604,14 +1668,19 @@ func (x *InvokeServiceRequest) GetMessage() *CommonInvokeRequest { return nil } +// Common invoke request message which includes invoke method and data type CommonInvokeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"` - Data *anypb.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - ContentType string `protobuf:"bytes,3,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The method of the request + Method string `protobuf:"bytes,1,opt,name=method,proto3" 
json:"method,omitempty"` + // The request data + Data *anypb.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // The content type of request data + ContentType string `protobuf:"bytes,3,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The extra information of http HttpExtension *HTTPExtension `protobuf:"bytes,4,opt,name=http_extension,json=httpExtension,proto3" json:"http_extension,omitempty"` } @@ -1675,13 +1744,16 @@ func (x *CommonInvokeRequest) GetHttpExtension() *HTTPExtension { return nil } +// Http extension message is about invoke http information type HTTPExtension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Verb HTTPExtension_Verb `protobuf:"varint,1,opt,name=verb,proto3,enum=spec.proto.runtime.v1.HTTPExtension_Verb" json:"verb,omitempty"` - Querystring string `protobuf:"bytes,2,opt,name=querystring,proto3" json:"querystring,omitempty"` + // The method of HTTP request + Verb HTTPExtension_Verb `protobuf:"varint,1,opt,name=verb,proto3,enum=spec.proto.runtime.v1.HTTPExtension_Verb" json:"verb,omitempty"` + // The query information of http + Querystring string `protobuf:"bytes,2,opt,name=querystring,proto3" json:"querystring,omitempty"` } func (x *HTTPExtension) Reset() { @@ -1730,13 +1802,16 @@ func (x *HTTPExtension) GetQuerystring() string { return "" } +// Invoke service response message is result of invoke service request type InvokeResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Data *anypb.Any `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - ContentType string `protobuf:"bytes,2,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The response data + Data *anypb.Any `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // The content type of response data + ContentType string 
`protobuf:"bytes,2,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` } func (x *InvokeResponse) Reset() { @@ -3070,7 +3145,9 @@ type StateOptions struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The state operation of concurrency Concurrency StateOptions_StateConcurrency `protobuf:"varint,1,opt,name=concurrency,proto3,enum=spec.proto.runtime.v1.StateOptions_StateConcurrency" json:"concurrency,omitempty"` + // The state operation of consistency Consistency StateOptions_StateConsistency `protobuf:"varint,2,opt,name=consistency,proto3,enum=spec.proto.runtime.v1.StateOptions_StateConsistency" json:"consistency,omitempty"` } @@ -3707,6 +3784,7 @@ type SecretResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The data struct of secrets Secrets map[string]string `protobuf:"bytes,1,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } diff --git a/spec/proto/runtime/v1/runtime.proto b/spec/proto/runtime/v1/runtime.proto index 9db05e44b2..a5772e4723 100644 --- a/spec/proto/runtime/v1/runtime.proto +++ b/spec/proto/runtime/v1/runtime.proto @@ -9,6 +9,7 @@ option go_package = "mosn.io/layotto/spec/proto/runtime/v1;runtime"; option java_outer_classname = "RuntimeProto"; option java_package = "spec.proto.runtime.v1"; +// Runtime encapsulates various Runtime APIs (such as Configuration API, Pub/Sub API, etc.) service Runtime { //SayHello used for test rpc SayHello(SayHelloRequest) returns (SayHelloResponse) {} @@ -32,6 +33,7 @@ service Runtime { // A non-blocking method trying to get a lock with ttl. rpc TryLock(TryLockRequest)returns (TryLockResponse) {} + // A method trying to unlock. 
rpc Unlock(UnlockRequest)returns (UnlockResponse) {} // Sequencer API @@ -88,28 +90,37 @@ service Runtime { rpc GetBulkSecret(GetBulkSecretRequest) returns (GetBulkSecretResponse) {} } +// Get fileMeta request message message GetFileMetaRequest{ + // File meta request FileRequest request = 1; } +// Get fileMeta response message message GetFileMetaResponse{ // The size of file int64 size = 1; // The modified time of file string last_modified = 2; + // File meta response FileMeta response = 3; } +// FileMeta value message FileMetaValue{ + // File meta value repeated string value = 1; } +// A map that store FileMetaValue message FileMeta{ + // A data structure to store metadata map metadata = 1; } +// Get file request message message GetFileRequest { - // + // The name of store string store_name = 1; // The name of the file or object want to get. string name = 2; @@ -117,11 +128,15 @@ message GetFileRequest { map metadata = 3; } +// Get file response message message GetFileResponse { + // The data of file bytes data = 1; } +// Put file request message message PutFileRequest { + // The name of store string store_name = 1; // The name of the file or object want to put. string name = 2; @@ -131,7 +146,9 @@ message PutFileRequest { map metadata = 4; } +// File request message message FileRequest { + // The name of store string store_name = 1; // The name of the directory string name = 2; @@ -139,12 +156,17 @@ message FileRequest { map metadata = 3; } +// List file request message message ListFileRequest { + // File request FileRequest request = 1; + // Page size int32 page_size = 2; + // Marker string marker = 3; } +// File info message message FileInfo { // The name of file string file_name = 1; @@ -155,16 +177,24 @@ message FileInfo { // The metadata for user extension. 
map metadata = 4; } + +// List file response message message ListFileResp { + // File info repeated FileInfo files = 1; + // Marker string marker = 2; + // Is truncated bool is_truncated = 3; } +// Delete file request message message DelFileRequest { + // File request FileRequest request = 1; } +// Get next id request message message GetNextIdRequest { // Required. Name of sequencer storage string store_name = 1; @@ -188,6 +218,7 @@ message SequencerOptions { STRONG = 1; } + // Default STRONG auto-increment AutoIncrement increment = 1; // We removed Uniqueness enum to make it simple. @@ -204,12 +235,14 @@ message SequencerOptions { // Uniqueness uniqueness=2; } +// Get next id response message message GetNextIdResponse{ // The next unique id // Fixed int64 overflow problems on JavaScript https://github.com/improbable-eng/ts-protoc-gen#gotchas int64 next_id = 1 [jstype = JS_STRING]; } +// Lock request message is a distributed lock API which is a non-blocking method trying to get a lock with ttl message TryLockRequest { // Required. The lock store name,e.g. `redis`. string store_name = 1; @@ -236,74 +269,116 @@ message TryLockRequest { int32 expire = 4; } +// Lock response message returns whether the lock is obtained. message TryLockResponse { + // Is lock success bool success = 1; } +// Unlock request message message UnlockRequest { + // The name of store string store_name = 1; // resource_id is the lock key. string resource_id = 2; - + // The owner of the lock string lock_owner = 3; } +// Unlock response message message UnlockResponse { + // The enum of unlock status enum Status { + // Unlock is success SUCCESS = 0; + // The lock does not exist LOCK_UNEXIST = 1; + // The lock belongs to others LOCK_BELONG_TO_OTHERS = 2; + // Internal error INTERNAL_ERROR = 3; } + // The status of unlock Status status = 1; } +// Hello request message message SayHelloRequest { + // The name of service string service_name = 1; + // Request name string name = 2; // Optional. 
This field is used to control the packet size during load tests. google.protobuf.Any data = 3; } +// Hello response message message SayHelloResponse { + // Hello string hello = 1; + // Hello message of data google.protobuf.Any data = 2; } +// Invoke service request message message InvokeServiceRequest { + // The identifier of InvokeServiceRequest string id = 1; + // InvokeServiceRequest message CommonInvokeRequest message = 3; } +// Common invoke request message which includes invoke method and data message CommonInvokeRequest { + // The method of request string method = 1; + // The request data google.protobuf.Any data = 2; + // The content type of request data string content_type = 3; + // The extra information of http HTTPExtension http_extension = 4; } +// Http extension message is about invoke http information message HTTPExtension { + // The enum of http request method enum Verb { + // NONE NONE = 0; + // GET method GET = 1; + // HEAD method HEAD = 2; + // POST method POST = 3; + // PUT method PUT = 4; + // DELETE method DELETE = 5; + // CONNECT method CONNECT = 6; + // OPTIONS method OPTIONS = 7; + // TRACE method TRACE = 8; + // PATCH method PATCH = 9; } + // The method of http request Verb verb = 1; + // The query information of http string querystring = 2; } +// Invoke service response message is result of invoke service request message InvokeResponse { + // The response data google.protobuf.Any data = 1; + // The content type of response data string content_type = 2; } @@ -575,6 +650,7 @@ message StateOptions { // When an ETag is associated with an save or delete request, the store shall allow the update only if the attached ETag matches with the latest ETag in the database. // But when ETag is missing in the write requests, the state store shall handle the requests in the specified strategy(e.g. a last-write-wins fashion). 
enum StateConcurrency { + // Concurrency state is unspecified CONCURRENCY_UNSPECIFIED = 0; // First write wins CONCURRENCY_FIRST_WRITE = 1; @@ -584,6 +660,7 @@ message StateOptions { // Enum describing the supported consistency for state. enum StateConsistency { + // Consistency state is unspecified CONSISTENCY_UNSPECIFIED = 0; // The API server assumes data stores are eventually consistent by default.A state store should: // - For read requests, the state store can return data from any of the replicas @@ -596,7 +673,9 @@ message StateOptions { CONSISTENCY_STRONG = 2; } + // The state operation of concurrency StateConcurrency concurrency = 1; + // The state operation of consistency StateConsistency consistency = 2; } @@ -714,5 +793,6 @@ message GetBulkSecretResponse { // SecretResponse is a map of decrypted string/string values message SecretResponse { + // The data struct of secrets map secrets = 1; } diff --git a/spec/proto/runtime/v1/runtime_grpc.pb.go b/spec/proto/runtime/v1/runtime_grpc.pb.go index b473524885..3e7ee6de93 100644 --- a/spec/proto/runtime/v1/runtime_grpc.pb.go +++ b/spec/proto/runtime/v1/runtime_grpc.pb.go @@ -39,6 +39,7 @@ type RuntimeClient interface { // Distributed Lock API // A non-blocking method trying to get a lock with ttl. TryLock(ctx context.Context, in *TryLockRequest, opts ...grpc.CallOption) (*TryLockResponse, error) + // A method trying to unlock. Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) // Sequencer API // Get next unique id with some auto-increment guarantee @@ -388,6 +389,7 @@ type RuntimeServer interface { // Distributed Lock API // A non-blocking method trying to get a lock with ttl. TryLock(context.Context, *TryLockRequest) (*TryLockResponse, error) + // A method trying to unlock. Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error) // Sequencer API // Get next unique id with some auto-increment guarantee