diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ccbdb8925ca3..d0e6adc48938 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -390,6 +390,21 @@ "Comment": "v1.6.10", "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface", + "Comment": "v1.6.10", + "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager", + "Comment": "v1.6.10", + "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface", + "Comment": "v1.6.10", + "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + }, { "ImportPath": "github.com/aws/aws-sdk-go/service/sts", "Comment": "v1.6.10", diff --git a/contrib/completions/bash/oc b/contrib/completions/bash/oc index a6f61832f6d4..e63a7dc3321e 100644 --- a/contrib/completions/bash/oc +++ b/contrib/completions/bash/oc @@ -12301,6 +12301,111 @@ _oc_idle() noun_aliases=() } +_oc_image_mirror() +{ + last_command="oc_image_mirror" + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--filter-by-os=") + local_nonpersistent_flags+=("--filter-by-os=") + flags+=("--force") + local_nonpersistent_flags+=("--force") + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") + flags+=("--s3-source-bucket=") + local_nonpersistent_flags+=("--s3-source-bucket=") + flags+=("--skip-mount") + local_nonpersistent_flags+=("--skip-mount") + flags+=("--as=") + flags+=("--as-group=") + flags+=("--certificate-authority=") + flags_with_completion+=("--certificate-authority") + flags_completion+=("_filedir") + flags+=("--client-certificate=") + flags_with_completion+=("--client-certificate") + flags_completion+=("_filedir") + flags+=("--client-key=") + flags_with_completion+=("--client-key") + flags_completion+=("_filedir") + flags+=("--cluster=") + flags+=("--config=") + flags_with_completion+=("--config") + flags_completion+=("_filedir") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_oc_image() +{ + last_command="oc_image" + commands=() + commands+=("mirror") + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--as=") + flags+=("--as-group=") + flags+=("--certificate-authority=") + flags_with_completion+=("--certificate-authority") + flags_completion+=("_filedir") + flags+=("--client-certificate=") + flags_with_completion+=("--client-certificate") + flags_completion+=("_filedir") + flags+=("--client-key=") + flags_with_completion+=("--client-key") + flags_completion+=("_filedir") + flags+=("--cluster=") + flags+=("--config=") + flags_with_completion+=("--config") + flags_completion+=("_filedir") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + 
flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + _oc_import_app.json() { last_command="oc_import_app.json" @@ -18112,6 +18217,7 @@ _oc() commands+=("extract") commands+=("get") commands+=("idle") + commands+=("image") commands+=("import") commands+=("import-image") commands+=("label") diff --git a/contrib/completions/bash/openshift b/contrib/completions/bash/openshift index c7ec92eaad33..3dd9304d5390 100644 --- a/contrib/completions/bash/openshift +++ b/contrib/completions/bash/openshift @@ -17559,6 +17559,111 @@ _openshift_cli_idle() noun_aliases=() } +_openshift_cli_image_mirror() +{ + last_command="openshift_cli_image_mirror" + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--filter-by-os=") + local_nonpersistent_flags+=("--filter-by-os=") + flags+=("--force") + local_nonpersistent_flags+=("--force") + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") + flags+=("--s3-source-bucket=") + local_nonpersistent_flags+=("--s3-source-bucket=") + flags+=("--skip-mount") + local_nonpersistent_flags+=("--skip-mount") + flags+=("--as=") + flags+=("--as-group=") + flags+=("--certificate-authority=") + flags_with_completion+=("--certificate-authority") + flags_completion+=("_filedir") + flags+=("--client-certificate=") + flags_with_completion+=("--client-certificate") + flags_completion+=("_filedir") + flags+=("--client-key=") + flags_with_completion+=("--client-key") + flags_completion+=("_filedir") + flags+=("--cluster=") + flags+=("--config=") + flags_with_completion+=("--config") + flags_completion+=("_filedir") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_openshift_cli_image() +{ + last_command="openshift_cli_image" + commands=() + commands+=("mirror") + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--as=") + flags+=("--as-group=") + flags+=("--certificate-authority=") + flags_with_completion+=("--certificate-authority") + flags_completion+=("_filedir") + flags+=("--client-certificate=") + flags_with_completion+=("--client-certificate") + flags_completion+=("_filedir") + flags+=("--client-key=") + flags_with_completion+=("--client-key") + flags_completion+=("_filedir") + flags+=("--cluster=") + flags+=("--config=") + flags_with_completion+=("--config") + flags_completion+=("_filedir") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + 
flags+=("--server=") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + _openshift_cli_import_app.json() { last_command="openshift_cli_import_app.json" @@ -23323,6 +23428,7 @@ _openshift_cli() commands+=("extract") commands+=("get") commands+=("idle") + commands+=("image") commands+=("import") commands+=("import-image") commands+=("label") diff --git a/contrib/completions/zsh/oc b/contrib/completions/zsh/oc index 3d98860339c7..677e1cb7c779 100644 --- a/contrib/completions/zsh/oc +++ b/contrib/completions/zsh/oc @@ -12450,6 +12450,111 @@ _oc_idle() noun_aliases=() } +_oc_image_mirror() +{ + last_command="oc_image_mirror" + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--filter-by-os=") + local_nonpersistent_flags+=("--filter-by-os=") + flags+=("--force") + local_nonpersistent_flags+=("--force") + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") + flags+=("--s3-source-bucket=") + local_nonpersistent_flags+=("--s3-source-bucket=") + flags+=("--skip-mount") + local_nonpersistent_flags+=("--skip-mount") + flags+=("--as=") + flags+=("--as-group=") + flags+=("--certificate-authority=") + flags_with_completion+=("--certificate-authority") + flags_completion+=("_filedir") + flags+=("--client-certificate=") + flags_with_completion+=("--client-certificate") + flags_completion+=("_filedir") + flags+=("--client-key=") + flags_with_completion+=("--client-key") + flags_completion+=("_filedir") + flags+=("--cluster=") + flags+=("--config=") + flags_with_completion+=("--config") + flags_completion+=("_filedir") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_oc_image() +{ + last_command="oc_image" + commands=() + commands+=("mirror") + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--as=") + flags+=("--as-group=") + flags+=("--certificate-authority=") + flags_with_completion+=("--certificate-authority") + flags_completion+=("_filedir") + flags+=("--client-certificate=") + flags_with_completion+=("--client-certificate") + flags_completion+=("_filedir") + flags+=("--client-key=") + flags_with_completion+=("--client-key") + flags_completion+=("_filedir") + flags+=("--cluster=") + flags+=("--config=") + flags_with_completion+=("--config") + flags_completion+=("_filedir") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + _oc_import_app.json() { last_command="oc_import_app.json" @@ -18261,6 +18366,7 @@ _oc() 
commands+=("extract") commands+=("get") commands+=("idle") + commands+=("image") commands+=("import") commands+=("import-image") commands+=("label") diff --git a/contrib/completions/zsh/openshift b/contrib/completions/zsh/openshift index b19123164a1c..c444e215bad9 100644 --- a/contrib/completions/zsh/openshift +++ b/contrib/completions/zsh/openshift @@ -17708,6 +17708,111 @@ _openshift_cli_idle() noun_aliases=() } +_openshift_cli_image_mirror() +{ + last_command="openshift_cli_image_mirror" + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--filter-by-os=") + local_nonpersistent_flags+=("--filter-by-os=") + flags+=("--force") + local_nonpersistent_flags+=("--force") + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") + flags+=("--s3-source-bucket=") + local_nonpersistent_flags+=("--s3-source-bucket=") + flags+=("--skip-mount") + local_nonpersistent_flags+=("--skip-mount") + flags+=("--as=") + flags+=("--as-group=") + flags+=("--certificate-authority=") + flags_with_completion+=("--certificate-authority") + flags_completion+=("_filedir") + flags+=("--client-certificate=") + flags_with_completion+=("--client-certificate") + flags_completion+=("_filedir") + flags+=("--client-key=") + flags_with_completion+=("--client-key") + flags_completion+=("_filedir") + flags+=("--cluster=") + flags+=("--config=") + flags_with_completion+=("--config") + flags_completion+=("_filedir") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_openshift_cli_image() +{ + last_command="openshift_cli_image" + commands=() + commands+=("mirror") + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--as=") + flags+=("--as-group=") + flags+=("--certificate-authority=") + flags_with_completion+=("--certificate-authority") + flags_completion+=("_filedir") + flags+=("--client-certificate=") + flags_with_completion+=("--client-certificate") + flags_completion+=("_filedir") + flags+=("--client-key=") + flags_with_completion+=("--client-key") + flags_completion+=("_filedir") + flags+=("--cluster=") + flags+=("--config=") + flags_with_completion+=("--config") + flags_completion+=("_filedir") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + _openshift_cli_import_app.json() { last_command="openshift_cli_import_app.json" @@ -23472,6 +23577,7 @@ _openshift_cli() commands+=("extract") commands+=("get") commands+=("idle") + commands+=("image") commands+=("import") commands+=("import-image") commands+=("label") 
diff --git a/docs/man/man1/.files_generated_oc b/docs/man/man1/.files_generated_oc index 8e6e50f73ab6..450ace5b618f 100644 --- a/docs/man/man1/.files_generated_oc +++ b/docs/man/man1/.files_generated_oc @@ -176,6 +176,8 @@ oc-expose.1 oc-extract.1 oc-get.1 oc-idle.1 +oc-image-mirror.1 +oc-image.1 oc-import-app.json.1 oc-import-image.1 oc-import.1 diff --git a/docs/man/man1/.files_generated_openshift b/docs/man/man1/.files_generated_openshift index de28807761ff..994d69fb20e4 100644 --- a/docs/man/man1/.files_generated_openshift +++ b/docs/man/man1/.files_generated_openshift @@ -274,6 +274,8 @@ openshift-cli-expose.1 openshift-cli-extract.1 openshift-cli-get.1 openshift-cli-idle.1 +openshift-cli-image-mirror.1 +openshift-cli-image.1 openshift-cli-import-app.json.1 openshift-cli-import-image.1 openshift-cli-import.1 diff --git a/docs/man/man1/oc-image-mirror.1 b/docs/man/man1/oc-image-mirror.1 new file mode 100644 index 000000000000..b6fd7a0f9896 --- /dev/null +++ b/docs/man/man1/oc-image-mirror.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/oc-image.1 b/docs/man/man1/oc-image.1 new file mode 100644 index 000000000000..b6fd7a0f9896 --- /dev/null +++ b/docs/man/man1/oc-image.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/openshift-cli-image-mirror.1 b/docs/man/man1/openshift-cli-image-mirror.1 new file mode 100644 index 000000000000..b6fd7a0f9896 --- /dev/null +++ b/docs/man/man1/openshift-cli-image-mirror.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/openshift-cli-image.1 b/docs/man/man1/openshift-cli-image.1 new file mode 100644 index 000000000000..b6fd7a0f9896 --- /dev/null +++ b/docs/man/man1/openshift-cli-image.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. 
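The new man page entries are placeholders; as the files themselves note, `hack/generate-docs.sh` populates them. That kind of generation is typically a `cobra/doc` walk over the same command tree; a minimal sketch under that assumption (the exact invocation in the hack script may differ, and the command tree here is the same illustrative one as above):

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "oc"}
	image := &cobra.Command{Use: "image", Short: "Useful commands for managing images"}
	image.AddCommand(&cobra.Command{
		Use:   "mirror SRC DST [DST ...]",
		Short: "Mirror images from one repository to another",
		Run:   func(cmd *cobra.Command, args []string) {},
	})
	root.AddCommand(image)

	// Writes oc.1, oc-image.1 and oc-image-mirror.1 into the target directory,
	// matching the names added to .files_generated_oc above.
	header := &doc.GenManHeader{Title: "OC", Section: "1"}
	if err := doc.GenManTree(root, header, "docs/man/man1"); err != nil {
		log.Fatal(err)
	}
}
```

`GenManTree` names each page after the command path joined with dashes, which is why `oc-image.1` and `oc-image-mirror.1` show up in the generated file lists.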
diff --git a/pkg/image/importer/client.go b/pkg/image/importer/client.go index 476e670c2b9a..075ca50a7f42 100644 --- a/pkg/image/importer/client.go +++ b/pkg/image/importer/client.go @@ -48,6 +48,7 @@ func NewContext(transport, insecureTransport http.RoundTripper) Context { InsecureTransport: insecureTransport, Challenges: challenge.NewSimpleManager(), Actions: []string{"pull"}, + Retries: 2, } } @@ -57,6 +58,7 @@ type Context struct { Challenges challenge.Manager Scopes []auth.Scope Actions []string + Retries int } func (c Context) WithScopes(scopes ...auth.Scope) Context { @@ -120,6 +122,9 @@ func (r *repositoryRetriever) Repository(ctx gocontext.Context, registry *url.UR t = r.context.InsecureTransport } src := *registry + if len(src.Scheme) == 0 { + src.Scheme = "https" + } // ping the registry to get challenge headers if err, ok := r.pings[src]; ok { if err != nil { @@ -154,7 +159,10 @@ func (r *repositoryRetriever) Repository(ctx gocontext.Context, registry *url.UR if err != nil { return nil, err } - return NewRetryRepository(repo, 2, 3/2*time.Second), nil + if r.context.Retries > 0 { + return NewRetryRepository(repo, r.context.Retries, 3/2*time.Second), nil + } + return repo, nil } func (r *repositoryRetriever) ping(registry url.URL, insecure bool, transport http.RoundTripper) (*url.URL, error) { diff --git a/pkg/oc/cli/cli.go b/pkg/oc/cli/cli.go index ac9a76088c19..68797546c626 100644 --- a/pkg/oc/cli/cli.go +++ b/pkg/oc/cli/cli.go @@ -22,6 +22,7 @@ import ( "github.com/openshift/origin/pkg/oc/admin" "github.com/openshift/origin/pkg/oc/cli/cmd" "github.com/openshift/origin/pkg/oc/cli/cmd/cluster" + "github.com/openshift/origin/pkg/oc/cli/cmd/image" "github.com/openshift/origin/pkg/oc/cli/cmd/importer" "github.com/openshift/origin/pkg/oc/cli/cmd/login" "github.com/openshift/origin/pkg/oc/cli/cmd/observe" @@ -164,6 +165,7 @@ func NewCommandCLI(name, fullName string, in io.Reader, out, errout io.Writer) * cmd.NewCmdAuth(fullName, f, out, errout), cmd.NewCmdConvert(fullName, f, out), importer.NewCmdImport(fullName, f, in, out, errout), + image.NewCmdImage(fullName, f, in, out, errout), }, }, { diff --git a/pkg/oc/cli/cmd/image/image.go b/pkg/oc/cli/cmd/image/image.go new file mode 100644 index 000000000000..abc09d8fb501 --- /dev/null +++ b/pkg/oc/cli/cmd/image/image.go @@ -0,0 +1,45 @@ +package image + +import ( + "fmt" + "io" + + "github.com/spf13/cobra" + ktemplates "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/openshift/origin/pkg/cmd/templates" + "github.com/openshift/origin/pkg/cmd/util/clientcmd" + "github.com/openshift/origin/pkg/oc/cli/cmd/image/mirror" +) + +var ( + imageLong = ktemplates.LongDesc(` + Manage images on OpenShift + + These commands help you manage images on OpenShift.`) +) + +// NewCmdImage exposes commands for modifying images. +func NewCmdImage(fullName string, f *clientcmd.Factory, in io.Reader, out, errout io.Writer) *cobra.Command { + image := &cobra.Command{ + Use: "image COMMAND", + Short: "Useful commands for managing images", + Long: imageLong, + Run: cmdutil.DefaultSubCommandRun(errout), + } + + name := fmt.Sprintf("%s image", fullName) + + groups := ktemplates.CommandGroups{ + { + Message: "Advanced commands:", + Commands: []*cobra.Command{ + mirror.NewCmdMirrorImage(name, out, errout), + }, + }, + } + groups.Add(image) + templates.ActsAsRootCommand(image, []string{"options"}, groups...) 
+ return image +} diff --git a/pkg/oc/cli/cmd/image/mirror/mirror.go b/pkg/oc/cli/cmd/image/mirror/mirror.go new file mode 100644 index 000000000000..2c699c7ed7ef --- /dev/null +++ b/pkg/oc/cli/cmd/image/mirror/mirror.go @@ -0,0 +1,677 @@ +package mirror + +import ( + "fmt" + "io" + "regexp" + "strings" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + units "github.com/docker/go-units" + "github.com/golang/glog" + "github.com/spf13/cobra" + "k8s.io/client-go/rest" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + imageapi "github.com/openshift/origin/pkg/image/apis/image" + "github.com/openshift/origin/pkg/image/importer" +) + +var ( + mirrorDesc = templates.LongDesc(` + Mirror images from one image repository to another. + + Accepts a list of arguments defining source images that should be pushed to the provided + destination image tag. The images are streamed from registry to registry without being stored + locally. The default docker credentials are used for authenticating to the registries. + + When using S3 mirroring the region and bucket must be the first two segments after the host. + Mirroring will create the necessary metadata so that images can be pulled via tag or digest, + but listing manifests and tags will not be possible. You may also specify one or more + --s3-source-bucket parameters (as /) to designate buckets to look in to find + blobs (instead of uploading). The source bucket also supports the suffix "/[store]", which + will transform blob identifiers into the form the Docker registry uses on disk, allowing + you to mirror directly from an existing S3-backed Docker registry. Credentials for S3 + may be stored in your docker credential file and looked up by host. + + Images in manifest list format will be copied as-is unless you use --filter-by-os to restrict + the allowed images to copy in a manifest list. This flag has no effect on regular images. 
+ + Experimental: This command is under active development and may change without notice.`) + + mirrorExample = templates.Examples(` +# Copy image to another tag +%[1]s myregistry.com/myimage:latest myregistry.com/myimage:stable + +# Copy image to another registry +%[1]s myregistry.com/myimage:latest docker.io/myrepository/myimage:stable + +# Copy image to S3 (pull from .s3.amazonaws.com/image:latest) +%[1]s myregistry.com/myimage:latest s3://s3.amazonaws.com///image:latest + +# Copy image to S3 without setting a tag (pull via @) +%[1]s myregistry.com/myimage:latest s3://s3.amazonaws.com///image + +# Copy image to multiple locations +%[1]s myregistry.com/myimage:latest docker.io/myrepository/myimage:stable \ + docker.io/myrepository/myimage:dev + +# Copy multiple images +%[1]s myregistry.com/myimage:latest=myregistry.com/other:test \ + myregistry.com/myimage:new=myregistry.com/other:target +`) +) + +type DestinationType string + +var ( + DestinationRegistry DestinationType = "docker" + DestinationS3 DestinationType = "s3" +) + +type Mapping struct { + Source imageapi.DockerImageReference + Destination imageapi.DockerImageReference + Type DestinationType +} + +type pushOptions struct { + Out, ErrOut io.Writer + + Mappings []Mapping + OSFilter *regexp.Regexp + + FilterByOS string + + Insecure bool + SkipMount bool + Force bool + + AttemptS3BucketCopy []string +} + +// NewCommandMirrorImage copies images from one location to another. +func NewCmdMirrorImage(name string, out, errOut io.Writer) *cobra.Command { + o := &pushOptions{} + + cmd := &cobra.Command{ + Use: "mirror SRC DST [DST ...]", + Short: "Mirror images from one repository to another", + Long: mirrorDesc, + Example: fmt.Sprintf(mirrorExample, name+" mirror"), + Run: func(c *cobra.Command, args []string) { + o.Out = out + o.ErrOut = errOut + kcmdutil.CheckErr(o.Complete(args)) + kcmdutil.CheckErr(o.Run()) + }, + } + + flag := cmd.Flags() + flag.BoolVar(&o.Insecure, "insecure", o.Insecure, "If true, connections may be made over HTTP") + flag.BoolVar(&o.SkipMount, "skip-mount", o.SkipMount, "If true, always push layers instead of cross-mounting them") + flag.StringVar(&o.FilterByOS, "filter-by-os", o.FilterByOS, "A regular expression to control which images are mirrored. Images will be passed as '/[/]'.") + flag.BoolVar(&o.Force, "force", o.Force, "If true, attempt to write all contents.") + flag.StringSliceVar(&o.AttemptS3BucketCopy, "s3-source-bucket", o.AttemptS3BucketCopy, "A list of bucket/path locations on S3 that may contain already uploaded blobs. 
Add [store] to the end to use the Docker registry path convention.") + + return cmd +} + +func parseSource(ref string) (imageapi.DockerImageReference, error) { + src, err := imageapi.ParseDockerImageReference(ref) + if err != nil { + return src, fmt.Errorf("%q is not a valid image reference: %v", ref, err) + } + if len(src.Tag) == 0 && len(src.ID) == 0 { + return src, fmt.Errorf("you must specify a tag or digest for SRC") + } + return src, nil +} + +func parseDestination(ref string) (imageapi.DockerImageReference, DestinationType, error) { + dstType := DestinationRegistry + switch { + case strings.HasPrefix(ref, "s3://"): + dstType = DestinationS3 + ref = strings.TrimPrefix(ref, "s3://") + } + dst, err := imageapi.ParseDockerImageReference(ref) + if err != nil { + return dst, dstType, fmt.Errorf("%q is not a valid image reference: %v", ref, err) + } + if len(dst.ID) != 0 { + return dst, dstType, fmt.Errorf("you must specify a tag for DST or leave it blank to only push by digest") + } + return dst, dstType, nil +} + +func (o *pushOptions) Complete(args []string) error { + var remainingArgs []string + overlap := make(map[string]string) + for _, s := range args { + parts := strings.SplitN(s, "=", 2) + if len(parts) != 2 { + remainingArgs = append(remainingArgs, s) + continue + } + if len(parts[0]) == 0 || len(parts[1]) == 0 { + return fmt.Errorf("all arguments must be valid SRC=DST mappings") + } + src, err := parseSource(parts[0]) + if err != nil { + return err + } + dst, dstType, err := parseDestination(parts[1]) + if err != nil { + return err + } + if _, ok := overlap[dst.String()]; ok { + return fmt.Errorf("each destination tag may only be specified once: %s", dst.String()) + } + overlap[dst.String()] = src.String() + + o.Mappings = append(o.Mappings, Mapping{Source: src, Destination: dst, Type: dstType}) + } + + switch { + case len(remainingArgs) == 0 && len(o.Mappings) > 0: + // user has input arguments + case len(remainingArgs) > 1 && len(o.Mappings) == 0: + src, err := parseSource(remainingArgs[0]) + if err != nil { + return err + } + for i := 1; i < len(remainingArgs); i++ { + dst, dstType, err := parseDestination(remainingArgs[i]) + if err != nil { + return err + } + if _, ok := overlap[dst.String()]; ok { + return fmt.Errorf("each destination tag may only be specified once: %s", dst.String()) + } + overlap[dst.String()] = src.String() + o.Mappings = append(o.Mappings, Mapping{Source: src, Destination: dst, Type: dstType}) + } + case len(remainingArgs) == 1 && len(o.Mappings) == 0: + return fmt.Errorf("all arguments must be valid SRC=DST mappings, or you must specify one SRC argument and one or more DST arguments") + default: + return fmt.Errorf("you must specify at least one source image to pull and the destination to push to as SRC=DST or SRC DST [DST2 DST3 ...]") + } + + for _, mapping := range o.Mappings { + if mapping.Source.Equal(mapping.Destination) { + return fmt.Errorf("SRC and DST may not be the same") + } + } + + pattern := o.FilterByOS + if len(pattern) > 0 { + re, err := regexp.Compile(pattern) + if err != nil { + return fmt.Errorf("--filter-by-os was not a valid regular expression: %v", err) + } + o.OSFilter = re + } + + return nil +} + +type key struct { + registry string + repository string +} + +type destination struct { + t DestinationType + ref imageapi.DockerImageReference + tags []string +} + +type pushTargets map[key]destination + +type destinations struct { + ref imageapi.DockerImageReference + tags map[string]pushTargets + digests map[string]pushTargets +} + 
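`Complete` above accepts two argument shapes: explicit `SRC=DST` pairs, or a single `SRC` followed by one or more `DST` arguments, with mixed forms and duplicate destinations rejected; each destination is additionally tagged as a plain registry or an `s3://` target by `parseDestination`. A standalone sketch of just the grouping step (it re-implements the convention on plain strings rather than calling the unexported helpers, so treat it as an illustration, not the package's code):

```go
package main

import (
	"fmt"
	"strings"
)

// mapping pairs one source reference with one destination, echoing the shape
// of the Mapping type in the diff (string-typed here for brevity).
type mapping struct{ src, dst string }

// groupArgs accepts either "SRC=DST [SRC=DST ...]" or "SRC DST [DST ...]",
// the two forms Complete recognizes, and rejects anything else.
func groupArgs(args []string) ([]mapping, error) {
	var pairs []mapping
	var plain []string
	for _, a := range args {
		if !strings.Contains(a, "=") {
			plain = append(plain, a)
			continue
		}
		parts := strings.SplitN(a, "=", 2)
		if parts[0] == "" || parts[1] == "" {
			return nil, fmt.Errorf("%q is not a valid SRC=DST mapping", a)
		}
		pairs = append(pairs, mapping{src: parts[0], dst: parts[1]})
	}
	switch {
	case len(plain) == 0 && len(pairs) > 0:
		return pairs, nil // explicit SRC=DST pairs only
	case len(plain) > 1 && len(pairs) == 0:
		// one source fanned out to every remaining destination
		for _, d := range plain[1:] {
			pairs = append(pairs, mapping{src: plain[0], dst: d})
		}
		return pairs, nil
	default:
		return nil, fmt.Errorf("arguments must be SRC=DST pairs, or one SRC followed by one or more DST arguments")
	}
}

func main() {
	m, err := groupArgs([]string{
		"myregistry.com/myimage:latest",
		"docker.io/myrepository/myimage:stable",
		"s3://s3.amazonaws.com/region/bucket/myimage:stable", // region and bucket are placeholders
	})
	if err != nil {
		panic(err)
	}
	for _, p := range m {
		fmt.Printf("%s -> %s\n", p.src, p.dst)
	}
}
```

Running it prints one source-to-destination line per mapping, the same fan-out a single `SRC` with several `DST` arguments produces in the real command.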
+func (d destinations) mergeIntoDigests(srcDigest digest.Digest, target pushTargets) { + srcKey := srcDigest.String() + current, ok := d.digests[srcKey] + if !ok { + d.digests[srcKey] = target + return + } + for repo, dst := range target { + existing, ok := current[repo] + if !ok { + current[repo] = dst + continue + } + existing.tags = append(existing.tags, dst.tags...) + } +} + +type targetTree map[key]destinations + +func buildTargetTree(mappings []Mapping) targetTree { + tree := make(targetTree) + for _, m := range mappings { + srcKey := key{registry: m.Source.Registry, repository: m.Source.RepositoryName()} + dstKey := key{registry: m.Destination.Registry, repository: m.Destination.RepositoryName()} + + src, ok := tree[srcKey] + if !ok { + src.ref = m.Source.AsRepository() + src.digests = make(map[string]pushTargets) + src.tags = make(map[string]pushTargets) + tree[srcKey] = src + } + + var current pushTargets + if tag := m.Source.Tag; len(tag) != 0 { + current = src.tags[tag] + if current == nil { + current = make(pushTargets) + src.tags[tag] = current + } + } else { + current = src.digests[m.Source.ID] + if current == nil { + current = make(pushTargets) + src.digests[m.Source.ID] = current + } + } + + dst, ok := current[dstKey] + if !ok { + dst.ref = m.Destination.AsRepository() + dst.t = m.Type + } + if len(m.Destination.Tag) > 0 { + dst.tags = append(dst.tags, m.Destination.Tag) + } + current[dstKey] = dst + } + return tree +} + +type retrieverError struct { + src, dst imageapi.DockerImageReference + err error +} + +func (e retrieverError) Error() string { + return e.err.Error() +} + +func (o *pushOptions) Repository(ctx apirequest.Context, context importer.Context, creds auth.CredentialStore, t DestinationType, ref imageapi.DockerImageReference) (distribution.Repository, error) { + switch t { + case DestinationRegistry: + toClient := context.WithCredentials(creds) + return toClient.Repository(ctx, ref.DockerClientDefaults().RegistryURL(), ref.RepositoryName(), o.Insecure) + case DestinationS3: + driver := &s3Driver{ + Creds: creds, + CopyFrom: o.AttemptS3BucketCopy, + } + url := ref.DockerClientDefaults().RegistryURL() + return driver.Repository(ctx, url, ref.RepositoryName(), o.Insecure) + default: + return nil, fmt.Errorf("unrecognized destination type %s", t) + } +} + +// includeDescriptor returns true if the provided manifest should be included. +func (o *pushOptions) includeDescriptor(d *manifestlist.ManifestDescriptor) bool { + if o.OSFilter == nil { + return true + } + if len(d.Platform.Variant) > 0 { + return o.OSFilter.MatchString(fmt.Sprintf("%s/%s/%s", d.Platform.OS, d.Platform.Architecture, d.Platform.Variant)) + } + return o.OSFilter.MatchString(fmt.Sprintf("%s/%s", d.Platform.OS, d.Platform.Architecture)) +} + +// ErrAlreadyExists may be returned by the blob Create function to indicate that the blob already exists. 
+var ErrAlreadyExists = fmt.Errorf("blob already exists in the target location") + +var schema2ManifestOnly = distribution.WithManifestMediaTypes([]string{ + manifestlist.MediaTypeManifestList, + schema2.MediaTypeManifest, +}) + +func (o *pushOptions) Run() error { + tree := buildTargetTree(o.Mappings) + + creds := importer.NewLocalCredentials() + ctx := apirequest.NewContext() + + rt, err := rest.TransportFor(&rest.Config{}) + if err != nil { + return err + } + insecureRT, err := rest.TransportFor(&rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}}) + if err != nil { + return err + } + srcClient := importer.NewContext(rt, insecureRT).WithCredentials(creds) + toContext := importer.NewContext(rt, insecureRT).WithActions("pull", "push") + + var errs []error + for _, src := range tree { + srcRepo, err := srcClient.Repository(ctx, src.ref.DockerClientDefaults().RegistryURL(), src.ref.RepositoryName(), o.Insecure) + if err != nil { + errs = append(errs, retrieverError{err: fmt.Errorf("unable to connect to %s: %v", src.ref, err), src: src.ref}) + continue + } + + manifests, err := srcRepo.Manifests(ctx) + if err != nil { + errs = append(errs, retrieverError{src: src.ref, err: fmt.Errorf("unable to access source image %s manifests: %v", src.ref, err)}) + continue + } + + var tagErrs []retrieverError + var digestErrs []retrieverError + + // convert source tags to digests + for srcTag, pushTargets := range src.tags { + desc, err := srcRepo.Tags(ctx).Get(ctx, srcTag) + if err != nil { + tagErrs = append(tagErrs, retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s by tag: %v", src.ref, err)}) + continue + } + srcDigest := desc.Digest + glog.V(3).Infof("Resolved source image %s:%s to %s\n", src.ref, srcTag, srcDigest) + src.mergeIntoDigests(srcDigest, pushTargets) + } + + canonicalFrom := srcRepo.Named() + + for srcDigestString, pushTargets := range src.digests { + // load the manifest + srcDigest := digest.Digest(srcDigestString) + // var contentDigest digest.Digest / client.ReturnContentDigest(&contentDigest), + srcManifest, err := manifests.Get(ctx, digest.Digest(srcDigest), schema2ManifestOnly) + if err != nil { + digestErrs = append(digestErrs, retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s manifest: %v", src.ref, err)}) + continue + } + + // filter or load manifest list as appropriate + srcManifests, srcManifest, srcDigest, err := processManifestList(ctx, srcDigest, srcManifest, manifests, src.ref, o.includeDescriptor) + if err != nil { + digestErrs = append(digestErrs, retrieverError{src: src.ref, err: err}) + continue + } + if len(srcManifests) == 0 { + fmt.Fprintf(o.ErrOut, "info: Filtered all images from %s, skipping\n", src.ref) + continue + } + + for _, dst := range pushTargets { + // if we are going to be using cross repository mount, get a token that covers the src + if src.ref.Registry == dst.ref.Registry { + toContext = toContext.WithScopes(auth.RepositoryScope{Repository: src.ref.RepositoryName(), Actions: []string{"pull"}}) + } + + toRepo, err := o.Repository(ctx, toContext, creds, dst.t, dst.ref) + if err != nil { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to connect to %s: %v", dst.ref, err)}) + continue + } + + canonicalTo := toRepo.Named() + toManifests, err := toRepo.Manifests(ctx) + if err != nil { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to access destination image %s manifests: %v", 
src.ref, err)}) + continue + } + + var mustCopyLayers bool + switch { + case o.Force: + mustCopyLayers = true + case src.ref.Registry == dst.ref.Registry && canonicalFrom.String() == canonicalTo.String(): + // if the source and destination repos are the same, we don't need to copy layers unless forced + default: + if _, err := toManifests.Get(ctx, srcDigest); err != nil { + mustCopyLayers = true + } else { + glog.V(4).Infof("Manifest exists in %s, no need to copy layers without --force", dst.ref) + } + } + if mustCopyLayers { + // upload all the blobs + toBlobs := toRepo.Blobs(ctx) + srcBlobs := srcRepo.Blobs(ctx) + + // upload the each manifest + for _, srcManifest := range srcManifests { + switch srcManifest.(type) { + case *schema2.DeserializedManifest: + case *manifestlist.DeserializedManifestList: + // we do not need to upload layers in a manifestlist + continue + default: + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("the manifest type %T is not supported", srcManifest)}) + continue + } + + for _, blob := range srcManifest.References() { + blobSource, err := reference.WithDigest(canonicalFrom, blob.Digest) + if err != nil { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unexpected error building named digest: %v", err)}) + continue + } + + // if we aren't forcing upload, skip the blob copy + if !o.Force { + _, err := toBlobs.Stat(ctx, blob.Digest) + if err == nil { + // blob exists, skip + glog.V(5).Infof("Server reports blob exists %#v", blob) + continue + } + if err != distribution.ErrBlobUnknown { + glog.V(5).Infof("Server was unable to check whether blob exists %s: %v", blob.Digest, err) + } + } + + var options []distribution.BlobCreateOption + if !o.SkipMount { + options = append(options, client.WithMountFrom(blobSource), WithDescriptor(blob)) + } + w, err := toBlobs.Create(ctx, options...) 
+ // no-op + if err == ErrAlreadyExists { + glog.V(5).Infof("Blob already exists %#v", blob) + continue + } + // mount successful + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + glog.V(5).Infof("Blob mounted %#v", blob) + if ebm.From.Digest() != blob.Digest { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to push %s: tried to mount blob %s src source and got back a different digest %s", src.ref, blob.Digest, ebm.From.Digest())}) + break + } + continue + } + if err != nil { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to upload blob %s to %s: %v", blob.Digest, dst.ref, err)}) + break + } + + err = func() error { + glog.V(5).Infof("Uploading blob %s", blob.Digest) + defer w.Cancel(ctx) + r, err := srcBlobs.Open(ctx, blob.Digest) + if err != nil { + return fmt.Errorf("unable to open source layer %s to copy to %s: %v", blob.Digest, dst.ref, err) + } + defer r.Close() + + switch dst.t { + case DestinationS3: + fmt.Fprintf(o.ErrOut, "uploading: s3://%s %s %s\n", dst.ref, blob.Digest, units.BytesSize(float64(blob.Size))) + default: + fmt.Fprintf(o.ErrOut, "uploading: %s %s %s\n", dst.ref, blob.Digest, units.BytesSize(float64(blob.Size))) + } + + n, err := w.ReadFrom(r) + if err != nil { + return fmt.Errorf("unable to copy layer %s to %s: %v", blob.Digest, dst.ref, err) + } + if n != blob.Size { + fmt.Fprintf(o.ErrOut, "warning: Layer size mismatch for %s: had %d, wrote %d\n", blob.Digest, blob.Size, n) + } + _, err = w.Commit(ctx, blob) + return err + }() + if err != nil { + _, srcBody, _ := srcManifest.Payload() + srcManifestDigest := digest.Canonical.FromBytes(srcBody) + if srcManifestDigest == srcDigest { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("failed to commit blob %s from manifest %s to %s: %v", blob.Digest, srcManifestDigest, dst.ref, err)}) + } else { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("failed to commit blob %s from manifest %s in manifest list %s to %s: %v", blob.Digest, srcManifestDigest, srcDigest, dst.ref, err)}) + } + break + } + } + } + } + + if len(digestErrs) > 0 { + continue + } + + // upload and tag the manifest + for _, tag := range dst.tags { + toDigest, err := toManifests.Put(ctx, srcManifest, distribution.WithTag(tag)) + if err != nil { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to push manifest to %s: %v", dst.ref, err)}) + continue + } + switch dst.t { + case DestinationS3: + fmt.Fprintf(o.Out, "%s s3://%s:%s\n", toDigest, dst.ref, tag) + default: + fmt.Fprintf(o.Out, "%s %s:%s\n", toDigest, dst.ref, tag) + } + } + if len(dst.tags) == 0 { + toDigest, err := toManifests.Put(ctx, srcManifest) + if err != nil { + digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to push manifest to %s: %v", dst.ref, err)}) + continue + } + switch dst.t { + case DestinationS3: + fmt.Fprintf(o.Out, "%s s3://%s\n", toDigest, dst.ref) + default: + fmt.Fprintf(o.Out, "%s %s\n", toDigest, dst.ref) + } + } + } + } + for _, err := range append(tagErrs, digestErrs...) 
{ + errs = append(errs, err) + } + } + return kerrors.NewAggregate(errs) +} + +func processManifestList(ctx apirequest.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imageapi.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor) bool) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) { + var srcManifests []distribution.Manifest + switch t := srcManifest.(type) { + case *manifestlist.DeserializedManifestList: + manifestDigest := srcDigest + manifestList := t + + filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests)) + for _, manifest := range t.Manifests { + if !filterFn(&manifest) { + glog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, ref) + continue + } + glog.V(5).Infof("Including image for %#v from %s", manifest.Platform, ref) + filtered = append(filtered, manifest) + } + + if len(filtered) == 0 { + return nil, nil, "", nil + } + + // if we're filtering the manifest list, update the source manifest and digest + if len(filtered) != len(t.Manifests) { + var err error + t, err = manifestlist.FromDescriptors(filtered) + if err != nil { + return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err) + } + _, body, err := t.Payload() + if err != nil { + return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err) + } + manifestList = t + manifestDigest = srcDigest.Algorithm().FromBytes(body) + glog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body) + } + + for i, manifest := range t.Manifests { + childManifest, err := manifests.Get(ctx, manifest.Digest, schema2ManifestOnly) + if err != nil { + return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err) + } + srcManifests = append(srcManifests, childManifest) + } + + switch { + case len(srcManifests) == 1: + _, body, err := srcManifests[0].Payload() + if err != nil { + return nil, nil, "", fmt.Errorf("unable to convert source image %s manifest list to single manifest: %v", ref, err) + } + manifestDigest := srcDigest.Algorithm().FromBytes(body) + glog.V(5).Infof("Used only one manifest from the list %s:\n%s", manifestDigest, body) + return srcManifests, srcManifests[0], manifestDigest, nil + default: + return append(srcManifests, manifestList), manifestList, manifestDigest, nil + } + + default: + return []distribution.Manifest{srcManifest}, srcManifest, srcDigest, nil + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithDescriptor returns a BlobCreateOption which provides the expected blob metadata. 
+func WithDescriptor(desc distribution.Descriptor) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + if opts.Mount.Stat == nil { + opts.Mount.Stat = &desc + } + return nil + }) +} diff --git a/pkg/oc/cli/cmd/image/mirror/s3.go b/pkg/oc/cli/cmd/image/mirror/s3.go new file mode 100644 index 000000000000..2e9601acbbe0 --- /dev/null +++ b/pkg/oc/cli/cmd/image/mirror/s3.go @@ -0,0 +1,454 @@ +package mirror + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + "sync" + "time" + + "github.com/golang/glog" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" +) + +type s3Driver struct { + UserAgent string + Region string + Creds auth.CredentialStore + CopyFrom []string + + repositories map[string]*s3.S3 +} + +type s3CredentialStore struct { + store auth.CredentialStore + url *url.URL + retrieved bool +} + +func (s *s3CredentialStore) IsExpired() bool { return !s.retrieved } + +func (s *s3CredentialStore) Retrieve() (credentials.Value, error) { + s.retrieved = false + accessKeyID, secretAccessKey := s.store.Basic(s.url) + if len(accessKeyID) == 0 || len(secretAccessKey) == 0 { + return credentials.Value{}, fmt.Errorf("no AWS credentials located for %s", s.url) + } + s.retrieved = true + glog.V(4).Infof("found credentials for %s", s.url) + return credentials.Value{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + ProviderName: "DockerCfg", + }, nil +} + +func (d *s3Driver) newObject(server *url.URL, region string, insecure bool, securityDomain *url.URL) (*s3.S3, error) { + key := fmt.Sprintf("%s:%s:%t:%s", server, region, insecure, securityDomain) + s3obj, ok := d.repositories[key] + if ok { + return s3obj, nil + } + + awsConfig := aws.NewConfig() + + var creds *credentials.Credentials + creds = credentials.NewChainCredentials([]credentials.Provider{ + &s3CredentialStore{store: d.Creds, url: securityDomain}, + &credentials.EnvProvider{}, + }) + + awsConfig.WithS3ForcePathStyle(true) + awsConfig.WithEndpoint(server.String()) + awsConfig.WithCredentials(creds) + awsConfig.WithRegion(region) + awsConfig.WithDisableSSL(insecure) + if glog.V(6) { + awsConfig.WithLogLevel(aws.LogDebug) + } + + if d.UserAgent != "" { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{d.UserAgent}})), + }) + } + s, err := session.NewSession(awsConfig) + if err != nil { + return nil, err + } + s3obj = s3.New(s) + if d.repositories == nil { + d.repositories = make(map[string]*s3.S3) + } + d.repositories[key] = s3obj + return s3obj, nil +} + +func (d *s3Driver) Repository(ctx context.Context, server *url.URL, repoName string, insecure bool) (distribution.Repository, error) { + parts := strings.SplitN(repoName, "/", 3) + if len(parts) < 3 { + return nil, fmt.Errorf("you must pass a three segment repository name for 
s3 uploads, where the first segment is the region and the second segment is the bucket") + } + s3obj, err := d.newObject(server, parts[0], insecure, &url.URL{Scheme: server.Scheme, Host: server.Host, Path: "/" + repoName}) + if err != nil { + return nil, err + } + named, err := reference.ParseNamed(parts[2]) + if err != nil { + return nil, err + } + repo := &s3Repository{ + ctx: ctx, + s3: s3obj, + bucket: parts[1], + repoName: named, + copyFrom: d.CopyFrom, + } + return repo, nil +} + +type s3Repository struct { + ctx context.Context + s3 *s3.S3 + bucket string + once sync.Once + initErr error + copyFrom []string + + repoName reference.Named +} + +// Named returns the name of the repository. +func (r *s3Repository) Named() reference.Named { + return r.repoName +} + +// Manifests returns a reference to this repository's manifest service. +// with the supplied options applied. +func (r *s3Repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + return &s3ManifestService{r: r}, nil +} + +// Blobs returns a reference to this repository's blob service. +func (r *s3Repository) Blobs(ctx context.Context) distribution.BlobStore { + return &s3BlobStore{r: r} +} + +// Tags returns a reference to this repositories tag service +func (r *s3Repository) Tags(ctx context.Context) distribution.TagService { + return nil +} + +func (r *s3Repository) attemptCopy(id string, bucket, key string) bool { + if _, err := r.s3.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }); err == nil { + return true + } + if len(id) == 0 { + return false + } + for _, copyFrom := range r.copyFrom { + var sourceKey string + if strings.HasSuffix(copyFrom, "[store]") { + sourceKey = strings.TrimSuffix(copyFrom, "[store]") + d, err := digest.ParseDigest(id) + if err != nil { + glog.V(4).Infof("Object %q is not a valid digest, cannot perform [store] copy: %v", id, err) + continue + } + sourceKey = fmt.Sprintf("%s%s/%s/%s/data", sourceKey, d.Algorithm().String(), d.Hex()[:2], d.Hex()) + } else { + sourceKey = path.Join(copyFrom, id) + } + _, err := r.s3.CopyObject(&s3.CopyObjectInput{ + CopySource: aws.String(sourceKey), + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err == nil { + glog.V(4).Infof("Copied existing object from %s to %s", sourceKey, key) + return true + } + if a, ok := err.(awserr.Error); ok && a.Code() == "NoSuchKey" { + glog.V(4).Infof("No existing object matches source %s", sourceKey) + continue + } + glog.V(4).Infof("Unable to copy from %s to %s: %v", sourceKey, key, err) + } + return false +} + +func (r *s3Repository) conditionalUpload(input *s3manager.UploadInput, id string) error { + if r.attemptCopy(id, *input.Bucket, *input.Key) { + return nil + } + _, err := s3manager.NewUploaderWithClient(r.s3).Upload(input) + return err +} + +func (r *s3Repository) init() error { + r.once.Do(func() { + r.initErr = r.conditionalUpload(&s3manager.UploadInput{ + Bucket: aws.String(r.bucket), + Metadata: map[string]*string{"X-Docker-Distribution-API-Version": aws.String("registry/2.0")}, + Body: bytes.NewBufferString(""), + Key: aws.String("/v2/"), + }, "") + }) + return r.initErr +} + +type noSeekReader struct { + io.Reader +} + +var _ io.ReadSeeker = noSeekReader{} + +func (noSeekReader) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("unable to seek to %d via %d", offset, whence) +} + +type s3ManifestService struct { + r *s3Repository +} + +// Exists returns true if the 
manifest exists. +func (s *s3ManifestService) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + return false, fmt.Errorf("unimplemented") +} + +// Get retrieves the manifest specified by the given digest +func (s *s3ManifestService) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + return nil, fmt.Errorf("unimplemented") +} + +// Put creates or updates the given manifest returning the manifest digest +func (s *s3ManifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + if err := s.r.init(); err != nil { + return "", err + } + mediaType, payload, err := manifest.Payload() + if err != nil { + return "", err + } + dgst := digest.FromBytes(payload) + blob := fmt.Sprintf("/v2/%s/blobs/%s", s.r.repoName, dgst) + + if err := s.r.conditionalUpload(&s3manager.UploadInput{ + Bucket: aws.String(s.r.bucket), + ContentType: aws.String(mediaType), + Body: bytes.NewBuffer(payload), + Key: aws.String(blob), + }, dgst.String()); err != nil { + return "", err + } + + // set manifests + tags := []string{dgst.String()} + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + tags = append(tags, opt.Tag) + } + } + for _, tag := range tags { + if _, err := s.r.s3.CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String(s.r.bucket), + ContentType: aws.String(mediaType), + CopySource: aws.String(path.Join(s.r.bucket, blob)), + Key: aws.String(fmt.Sprintf("/v2/%s/manifests/%s", s.r.repoName, tag)), + }); err != nil { + return "", err + } + } + return dgst, nil +} + +// Delete removes the manifest specified by the given digest. Deleting +// a manifest that doesn't exist will return ErrManifestNotFound +func (s *s3ManifestService) Delete(ctx context.Context, dgst digest.Digest) error { + return fmt.Errorf("unimplemented") +} + +type s3BlobStore struct { + r *s3Repository +} + +func (s *s3BlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, fmt.Errorf("unimplemented") +} + +func (s *s3BlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + return fmt.Errorf("unimplemented") +} + +func (s *s3BlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (s *s3BlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (s *s3BlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + return fmt.Errorf("unimplemented") +} + +func (s *s3BlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + if err := s.r.init(); err != nil { + return distribution.Descriptor{}, err + } + d := digest.FromBytes(p) + if err := s.r.conditionalUpload(&s3manager.UploadInput{ + Bucket: aws.String(s.r.bucket), + ContentType: aws.String(mediaType), + Body: bytes.NewBuffer(p), + Key: aws.String(fmt.Sprintf("/v2/%s/blobs/%s", s.r.repoName, d)), + }, d.String()); err != nil { + return distribution.Descriptor{}, err + } + return distribution.Descriptor{MediaType: mediaType, Size: int64(len(p)), Digest: d}, nil +} + +func (s *s3BlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts distribution.CreateOptions + for _, option := range 
options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + if opts.Mount.Stat == nil || len(opts.Mount.Stat.Digest) == 0 { + return nil, fmt.Errorf("S3 target blob store requires blobs to have mount stats that include a digest") + } + d := opts.Mount.Stat.Digest + + // attempt to copy before returning a writer + key := fmt.Sprintf("/v2/%s/blobs/%s", s.r.repoName, d) + if s.r.attemptCopy(d.String(), s.r.bucket, key) { + return nil, ErrAlreadyExists + } + + return s.r.newWriter(key, d.String(), opts.Mount.Stat.Size), nil +} + +func (s *s3BlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + return nil, fmt.Errorf("unimplemented") +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. +type writer struct { + driver *s3Repository + key string + uploadID string + closed bool + committed bool + cancelled bool + size int64 + startedAt time.Time +} + +func (d *s3Repository) newWriter(key, uploadID string, size int64) distribution.BlobWriter { + return &writer{ + driver: d, + key: key, + uploadID: uploadID, + size: size, + } +} + +func (w *writer) ID() string { + return w.uploadID +} + +func (w *writer) StartedAt() time.Time { + return w.startedAt +} + +func (w *writer) ReadFrom(r io.Reader) (int64, error) { + switch { + case w.closed: + return 0, fmt.Errorf("already closed") + case w.committed: + return 0, fmt.Errorf("already committed") + case w.cancelled: + return 0, fmt.Errorf("already cancelled") + } + if w.startedAt.IsZero() { + w.startedAt = time.Now() + } + _, err := s3manager.NewUploaderWithClient(w.driver.s3).Upload(&s3manager.UploadInput{ + Bucket: aws.String(w.driver.bucket), + ContentType: aws.String("application/octet-stream"), + Key: aws.String(w.key), + Body: r, + }) + if err != nil { + return 0, err + } + return w.size, nil +} + +func (w *writer) Write(p []byte) (int, error) { + return 0, fmt.Errorf("already closed") +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + switch { + case w.closed: + return fmt.Errorf("already closed") + } + w.closed = true + return nil +} + +func (w *writer) Cancel(ctx context.Context) error { + switch { + case w.closed: + return fmt.Errorf("already closed") + case w.committed: + return fmt.Errorf("already committed") + } + w.cancelled = true + return nil +} + +// TODO: verify uploaded descriptor matches +func (w *writer) Commit(ctx context.Context, descriptor distribution.Descriptor) (distribution.Descriptor, error) { + desc := descriptor + switch { + case w.closed: + return desc, fmt.Errorf("already closed") + case w.committed: + return desc, fmt.Errorf("already committed") + case w.cancelled: + return desc, fmt.Errorf("already cancelled") + } + w.committed = true + return desc, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go new file mode 100644 index 000000000000..8006412c5742 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -0,0 +1,377 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client +// for testing your code. 
+// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package s3iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3API provides an interface to enable mocking the +// s3.S3 service client's API operations, +// paginators, and waiters. This makes unit testing your code that calls out +// to the SDK's service client easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // Amazon Simple Storage Service. +// func myFunc(svc s3iface.S3API) bool { +// // Make svc.AbortMultipartUpload request +// } +// +// func main() { +// sess := session.New() +// svc := s3.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockS3Client struct { +// s3iface.S3API +// } +// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockS3Client{} +// +// myFunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. It's suggested to use the pattern above for testing, or to use +// tooling to generate mocks to satisfy the interfaces.
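As a concrete reference for the stubbing pattern the comment above describes, here is a standalone sketch (not part of the vendored file; mockS3Client and checkObject are hypothetical names) that overrides a single S3API method:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3"
    	"github.com/aws/aws-sdk-go/service/s3/s3iface"
    )

    // mockS3Client embeds s3iface.S3API so only the calls under test need stubs.
    type mockS3Client struct {
    	s3iface.S3API
    }

    // HeadObject returns a canned response instead of calling S3.
    func (m *mockS3Client) HeadObject(in *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
    	return &s3.HeadObjectOutput{ContentLength: aws.Int64(42)}, nil
    }

    // checkObject is a hypothetical function that depends only on the interface,
    // so it can be exercised with either the real client or the mock.
    func checkObject(svc s3iface.S3API, bucket, key string) (int64, error) {
    	out, err := svc.HeadObject(&s3.HeadObjectInput{
    		Bucket: aws.String(bucket),
    		Key:    aws.String(key),
    	})
    	if err != nil {
    		return 0, err
    	}
    	return aws.Int64Value(out.ContentLength), nil
    }

    func main() {
    	size, _ := checkObject(&mockS3Client{}, "bucket", "key")
    	fmt.Println(size) // 42
    }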
+type S3API interface { + AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) + + AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) + + CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) + + CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) + + CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) + + CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) + + CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) + + CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error) + + CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) + + CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) + + DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) + + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) + + DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput) + + DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) + + DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + + DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) + + DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) + + DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) + + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) + + DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) + + DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) + + DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) + + DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) + + DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) + + DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) + + DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) + + DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) + + DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) + + DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) + + DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) + + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) + + DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) + + 
DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) + + DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + + DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) + + GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) + + GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) + + GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) + + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) + + GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) + + GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) + + GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + + GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) + + GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) + + GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) + + GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) + + GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) + + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) + + GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) + + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) + + GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + + GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) + + GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) + + GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) + + GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) + + GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) + + GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) + + GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) + + GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + + GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) + + GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) + + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) + + GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, 
*s3.GetBucketRequestPaymentOutput) + + GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) + + GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) + + GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) + + GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) + + GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) + + GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) + + GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) + + GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) + + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) + + GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + + GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) + + GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) + + GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) + + GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + + GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) + + HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) + + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) + + HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) + + HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) + + ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) + + ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) + + ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) + + ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) + + ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) + + ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) + + ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + + ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) + + ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) + + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) + + ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error + + ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) + + ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) + + ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error + + ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) + + ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) + + ListObjectsPages(*s3.ListObjectsInput, 
func(*s3.ListObjectsOutput, bool) bool) error + + ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) + + ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) + + ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error + + ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) + + ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) + + ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error + + PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) + + PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) + + PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) + + PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) + + PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) + + PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) + + PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + + PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) + + PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) + + PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) + + PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) + + PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) + + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + + PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + + PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) + + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) + + PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) + + PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) + + PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) + + PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) + + PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) + + PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) + + PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) + + PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) + + PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) + + PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) + + 
PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) + + PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) + + PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) + + PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) + + PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) + + PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) + + PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) + + PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) + + PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) + + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) + + PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + + PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) + + PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) + + PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) + + RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) + + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) + + UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) + + UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) + + UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) + + UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) + + WaitUntilBucketExists(*s3.HeadBucketInput) error + + WaitUntilBucketNotExists(*s3.HeadBucketInput) error + + WaitUntilObjectExists(*s3.HeadObjectInput) error + + WaitUntilObjectNotExists(*s3.HeadObjectInput) error +} + +var _ S3API = (*s3.S3)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go new file mode 100644 index 000000000000..229c0d63bdaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go @@ -0,0 +1,3 @@ +// Package s3manager provides utilities to upload and download objects from +// S3 concurrently. Helpful for when working with large objects. +package s3manager diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go new file mode 100644 index 000000000000..dfdee18817d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go @@ -0,0 +1,395 @@ +package s3manager + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// DefaultDownloadPartSize is the default range of bytes to get at a time when +// using Download(). +const DefaultDownloadPartSize = 1024 * 1024 * 5 + +// DefaultDownloadConcurrency is the default number of goroutines to spin up +// when using Download(). +const DefaultDownloadConcurrency = 5 + +// The Downloader structure that calls Download(). 
It is safe to call Download() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Downloader's properties is not safe to be done concurrently. +type Downloader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultDownloadConcurrency value will be used. + Concurrency int + + // An S3 client to use when performing downloads. + S3 s3iface.S3API +} + +// NewDownloader creates a new Downloader instance to downloads objects from +// S3 in concurrent chunks. Pass in additional functional options to customize +// the downloader behavior. Requires a client.ConfigProvider in order to create +// a S3 service client. The session.Session satisfies the client.ConfigProvider +// interface. +// +// Example: +// // The session the S3 Downloader will use +// sess, err := session.NewSession() +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a downloader with the session and custom options +// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader { + d := &Downloader{ + S3: s3.New(c), + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + } + for _, option := range options { + option(d) + } + + return d +} + +// NewDownloaderWithClient creates a new Downloader instance to downloads +// objects from S3 in concurrent chunks. Pass in additional functional +// options to customize the downloader behavior. Requires a S3 service client +// to make S3 API calls. +// +// Example: +// // The session the S3 Downloader will use +// sess, err := session.NewSession() +// +// // The S3 client the S3 Downloader will use +// s3Svc := s3.new(sess) +// +// // Create a downloader with the s3 client and default options +// downloader := s3manager.NewDownloaderWithClient(s3Svc) +// +// // Create a downloader with the s3 client and custom options +// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader { + d := &Downloader{ + S3: svc, + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + } + for _, option := range options { + option(d) + } + + return d +} + +type maxRetrier interface { + MaxRetries() int +} + +// Download downloads an object in S3 and writes the payload into w using +// concurrent GET requests. +// +// Additional functional options can be provided to configure the individual +// download. These options are copies of the Downloader instance Download is called from. +// Modifying the options will not impact the original Downloader instance. +// +// It is safe to call this method concurrently across goroutines. +// +// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent +// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. 
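To illustrate the io.WriterAt point above, a usage sketch (bucket and key are placeholders) that downloads an object into an in-memory aws.WriteAtBuffer with a custom part size; an *os.File would work the same way:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	sess, err := session.NewSession()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// aws.WriteAtBuffer satisfies io.WriterAt, so the object lands in memory.
    	buf := &aws.WriteAtBuffer{}

    	downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
    		d.PartSize = 10 * 1024 * 1024 // 10MB ranged GETs
    		d.Concurrency = 3
    	})

    	n, err := downloader.Download(buf, &s3.GetObjectInput{
    		Bucket: aws.String("example-bucket"), // placeholder
    		Key:    aws.String("example-key"),    // placeholder
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("downloaded %d bytes\n", n)
    }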
+func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { + impl := downloader{w: w, in: input, ctx: d} + + for _, option := range options { + option(&impl.ctx) + } + + if s, ok := d.S3.(maxRetrier); ok { + impl.partBodyMaxRetries = s.MaxRetries() + } + + impl.totalBytes = -1 + if impl.ctx.Concurrency == 0 { + impl.ctx.Concurrency = DefaultDownloadConcurrency + } + + if impl.ctx.PartSize == 0 { + impl.ctx.PartSize = DefaultDownloadPartSize + } + + return impl.download() +} + +// downloader is the implementation structure used internally by Downloader. +type downloader struct { + ctx Downloader + + in *s3.GetObjectInput + w io.WriterAt + + wg sync.WaitGroup + m sync.Mutex + + pos int64 + totalBytes int64 + written int64 + err error + + partBodyMaxRetries int +} + +// download performs the implementation of the object download across ranged +// GETs. +func (d *downloader) download() (n int64, err error) { + // Spin off first worker to check additional header information + d.getChunk() + + if total := d.getTotalBytes(); total >= 0 { + // Spin up workers + ch := make(chan dlchunk, d.ctx.Concurrency) + + for i := 0; i < d.ctx.Concurrency; i++ { + d.wg.Add(1) + go d.downloadPart(ch) + } + + // Assign work + for d.getErr() == nil { + if d.pos >= total { + break // We're finished queuing chunks + } + + // Queue the next range of bytes to read. + ch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize} + d.pos += d.ctx.PartSize + } + + // Wait for completion + close(ch) + d.wg.Wait() + } else { + // Checking if we read anything new + for d.err == nil { + d.getChunk() + } + + // We expect a 416 error letting us know we are done downloading the + // total bytes. Since we do not know the content's length, this will + // keep grabbing chunks of data until the range of bytes specified in + // the request is out of range of the content. Once, this happens, a + // 416 should occur. + e, ok := d.err.(awserr.RequestFailure) + if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable { + d.err = nil + } + } + + // Return error + return d.written, d.err +} + +// downloadPart is an individual goroutine worker reading from the ch channel +// and performing a GetObject request on the data with a given byte range. +// +// If this is the first worker, this operation also resolves the total number +// of bytes to be read so that the worker manager knows when it is finished. +func (d *downloader) downloadPart(ch chan dlchunk) { + defer d.wg.Done() + for { + chunk, ok := <-ch + if !ok || d.getErr() != nil { + break + } + + if err := d.downloadChunk(chunk); err != nil { + d.setErr(err) + break + } + } +} + +// getChunk grabs a chunk of data from the body. +// Not thread safe. Should only used when grabbing data on a single thread. 
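The chunk fan-out above boils down to queuing fixed-size byte ranges until the total reported by the first response is reached. The toy sketch below (not SDK code) prints the Range headers a 12MB object produces with the default 5MB part size, matching the ranges asserted in the download tests later in this patch:

    package main

    import "fmt"

    // Illustrates how download() slices an object into ranged GETs: it queues
    // fixed-size chunks until pos reaches the total size of the object.
    func main() {
    	const partSize = int64(5 * 1024 * 1024) // DefaultDownloadPartSize
    	total := int64(12 * 1024 * 1024)        // e.g. a 12MB object

    	for pos := int64(0); pos < total; pos += partSize {
    		// Ranges are inclusive; the last range may extend past EOF and
    		// S3 simply returns the bytes that exist.
    		end := pos + partSize - 1
    		fmt.Printf("bytes=%d-%d\n", pos, end)
    	}
    	// Output:
    	// bytes=0-5242879
    	// bytes=5242880-10485759
    	// bytes=10485760-15728639
    }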
+func (d *downloader) getChunk() { + if d.getErr() != nil { + return + } + + chunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize} + d.pos += d.ctx.PartSize + + if err := d.downloadChunk(chunk); err != nil { + d.setErr(err) + } +} + +// downloadChunk downloads the chunk froom s3 +func (d *downloader) downloadChunk(chunk dlchunk) error { + in := &s3.GetObjectInput{} + awsutil.Copy(in, d.in) + + // Get the next byte range of data + rng := fmt.Sprintf("bytes=%d-%d", chunk.start, chunk.start+chunk.size-1) + in.Range = &rng + + var n int64 + var err error + for retry := 0; retry <= d.partBodyMaxRetries; retry++ { + req, resp := d.ctx.S3.GetObjectRequest(in) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + + err = req.Send() + if err != nil { + return err + } + d.setTotalBytes(resp) // Set total if not yet set. + + n, err = io.Copy(&chunk, resp.Body) + resp.Body.Close() + if err == nil { + break + } + + chunk.cur = 0 + logMessage(d.ctx.S3, aws.LogDebugWithRequestRetries, + fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d", + aws.StringValue(in.Key), err, retry)) + } + + d.incrWritten(n) + + return err +} + +func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) { + s, ok := svc.(*s3.S3) + if !ok { + return + } + + if s.Config.Logger == nil { + return + } + + if s.Config.LogLevel.Matches(level) { + s.Config.Logger.Log(msg) + } +} + +// getTotalBytes is a thread-safe getter for retrieving the total byte status. +func (d *downloader) getTotalBytes() int64 { + d.m.Lock() + defer d.m.Unlock() + + return d.totalBytes +} + +// setTotalBytes is a thread-safe setter for setting the total byte status. +// Will extract the object's total bytes from the Content-Range if the file +// will be chunked, or Content-Length. Content-Length is used when the response +// does not include a Content-Range. Meaning the object was not chunked. This +// occurs when the full file fits within the PartSize directive. +func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { + d.m.Lock() + defer d.m.Unlock() + + if d.totalBytes >= 0 { + return + } + + if resp.ContentRange == nil { + // ContentRange is nil when the full file contents is provied, and + // is not chunked. Use ContentLength instead. + if resp.ContentLength != nil { + d.totalBytes = *resp.ContentLength + return + } + } else { + parts := strings.Split(*resp.ContentRange, "/") + + total := int64(-1) + var err error + // Checking for whether or not a numbered total exists + // If one does not exist, we will assume the total to be -1, undefined, + // and sequentially download each chunk until hitting a 416 error + totalStr := parts[len(parts)-1] + if totalStr != "*" { + total, err = strconv.ParseInt(totalStr, 10, 64) + if err != nil { + d.err = err + return + } + } + + d.totalBytes = total + } +} + +func (d *downloader) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + + d.written += n +} + +// getErr is a thread-safe getter for the error object +func (d *downloader) getErr() error { + d.m.Lock() + defer d.m.Unlock() + + return d.err +} + +// setErr is a thread-safe setter for the error object +func (d *downloader) setErr(e error) { + d.m.Lock() + defer d.m.Unlock() + + d.err = e +} + +// dlchunk represents a single chunk of data to write by the worker routine. +// This structure also implements an io.SectionReader style interface for +// io.WriterAt, effectively making it an io.SectionWriter (which does not +// exist). 
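The Content-Range handling in setTotalBytes above can be summarized by this small sketch (totalFromContentRange is a hypothetical helper, not part of the package): a numeric total fixes the object size, while a "*" total leaves it at -1 so the downloader keeps issuing ranged GETs until a 416 response signals the end of the object:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // totalFromContentRange mirrors the parsing in setTotalBytes: the total is
    // the last "/"-separated field of the Content-Range header.
    func totalFromContentRange(cr string) (int64, error) {
    	parts := strings.Split(cr, "/")
    	totalStr := parts[len(parts)-1]
    	if totalStr == "*" {
    		return -1, nil // total unknown; download sequentially until 416
    	}
    	return strconv.ParseInt(totalStr, 10, 64)
    }

    func main() {
    	for _, cr := range []string{"bytes 0-5242879/12582912", "bytes 0-5242879/*"} {
    		total, err := totalFromContentRange(cr)
    		fmt.Println(cr, "=>", total, err)
    	}
    }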
+type dlchunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 +} + +// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start +// position to its end (or EOF). +func (c *dlchunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur) + c.cur += int64(n) + + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go new file mode 100644 index 000000000000..2114d311bd6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go @@ -0,0 +1,427 @@ +package s3manager_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) { + var m sync.Mutex + names := []string{} + ranges := []string{} + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) + + rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) + rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) + start, _ := strconv.ParseInt(rng[1], 10, 64) + fin, _ := strconv.ParseInt(rng[2], 10, 64) + fin++ + + if fin > int64(len(data)) { + fin = int64(len(data)) + } + + bodyBytes := data[start:fin] + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", + start, fin-1, len(data))) + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", len(bodyBytes))) + }) + + return svc, &names, &ranges +} + +func dlLoggingSvcNoChunk(data []byte) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader(data[:])), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", len(data))) + }) + + return svc, &names +} + +func dlLoggingSvcNoContentRangeLength(data []byte, states []int) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + var index int = 0 + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + r.HTTPResponse = &http.Response{ + StatusCode: states[index], + Body: ioutil.NopCloser(bytes.NewReader(data[:])), + Header: http.Header{}, + } + index++ + }) + + return svc, &names +} + +func dlLoggingSvcContentRangeTotalAny(data []byte, states []int) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + ranges := []string{} + var index int = 0 + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = 
append(names, r.Operation.Name) + ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) + + rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) + rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) + start, _ := strconv.ParseInt(rng[1], 10, 64) + fin, _ := strconv.ParseInt(rng[2], 10, 64) + fin++ + + if fin >= int64(len(data)) { + fin = int64(len(data)) + } + + // Setting start and finish to 0 because this state of 1 is suppose to + // be an error state of 416 + if index == len(states)-1 { + start = 0 + fin = 0 + } + + bodyBytes := data[start:fin] + + r.HTTPResponse = &http.Response{ + StatusCode: states[index], + Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", + start, fin-1)) + index++ + }) + + return svc, &names +} + +func dlLoggingSvcWithErrReader(cases []testErrReader) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + var index int = 0 + + svc := s3.New(unit.Session, &aws.Config{ + MaxRetries: aws.Int(len(cases) - 1), + }) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + c := cases[index] + + r.HTTPResponse = &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(&c), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Range", + fmt.Sprintf("bytes %d-%d/%d", 0, c.Len-1, c.Len)) + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", c.Len)) + index++ + }) + + return svc, &names +} + +func TestDownloadOrder(t *testing.T) { + s, names, ranges := dlLoggingSvc(buf12MB) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf12MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadZero(t *testing.T) { + s, names, ranges := dlLoggingSvc([]byte{}) + + d := s3manager.NewDownloaderWithClient(s) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(0), n) + assert.Equal(t, []string{"GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-5242879"}, *ranges) +} + +func TestDownloadSetPartSize(t *testing.T) { + s, names, ranges := dlLoggingSvc([]byte{1, 2, 3}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + d.PartSize = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(3), n) + assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges) + assert.Equal(t, []byte{1, 2, 3}, w.Bytes()) +} + +func TestDownloadError(t *testing.T) { + s, names, _ := dlLoggingSvc([]byte{1, 2, 3}) + + num := 0 + s.Handlers.Send.PushBack(func(r *request.Request) { + num++ + if num > 1 { + r.HTTPResponse.StatusCode = 400 + r.HTTPResponse.Body = 
ioutil.NopCloser(bytes.NewReader([]byte{})) + } + }) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + d.PartSize = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.NotNil(t, err) + assert.Equal(t, int64(1), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + assert.Equal(t, []byte{1}, w.Bytes()) +} + +func TestDownloadNonChunk(t *testing.T) { + s, names := dlLoggingSvcNoChunk(buf2MB) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadNoContentRangeLength(t *testing.T) { + s, names := dlLoggingSvcNoContentRangeLength(buf2MB, []int{200, 416}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadContentRangeTotalAny(t *testing.T) { + s, names := dlLoggingSvcContentRangeTotalAny(buf2MB, []int{200, 416}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadPartBodyRetry_SuccessRetry(t *testing.T) { + s, names := dlLoggingSvcWithErrReader([]testErrReader{ + {Buf: []byte("ab"), Len: 3, Err: io.ErrUnexpectedEOF}, + {Buf: []byte("123"), Len: 3, Err: io.EOF}, + }) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(3), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + assert.Equal(t, []byte("123"), w.Bytes()) +} + +func TestDownloadPartBodyRetry_SuccessNoRetry(t *testing.T) { + s, names := dlLoggingSvcWithErrReader([]testErrReader{ + {Buf: []byte("abc"), Len: 3, Err: io.EOF}, + }) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(3), n) + assert.Equal(t, []string{"GetObject"}, *names) + assert.Equal(t, []byte("abc"), w.Bytes()) +} + +func TestDownloadPartBodyRetry_FailRetry(t *testing.T) { + s, names := dlLoggingSvcWithErrReader([]testErrReader{ + {Buf: []byte("ab"), Len: 3, Err: 
io.ErrUnexpectedEOF}, + }) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Error(t, err) + assert.Equal(t, int64(2), n) + assert.Equal(t, []string{"GetObject"}, *names) + assert.Equal(t, []byte("ab"), w.Bytes()) +} + +type testErrReader struct { + Buf []byte + Err error + Len int64 + + off int +} + +func (r *testErrReader) Read(p []byte) (int, error) { + to := len(r.Buf) - r.off + + n := copy(p, r.Buf[r.off:to]) + r.off += n + + if n < len(p) { + return n, r.Err + + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go new file mode 100644 index 000000000000..b7d0a1256eb1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go @@ -0,0 +1,23 @@ +// Package s3manageriface provides an interface for the s3manager package +package s3manageriface + +import ( + "io" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +// DownloaderAPI is the interface type for s3manager.Downloader. +type DownloaderAPI interface { + Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error) +} + +var _ DownloaderAPI = (*s3manager.Downloader)(nil) + +// UploaderAPI is the interface type for s3manager.Uploader. +type UploaderAPI interface { + Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) +} + +var _ UploaderAPI = (*s3manager.Uploader)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go new file mode 100644 index 000000000000..b5b613143365 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go @@ -0,0 +1,4 @@ +package s3manager_test + +var buf12MB = make([]byte, 1024*1024*12) +var buf2MB = make([]byte, 1024*1024*2) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go new file mode 100644 index 000000000000..5ae59bc1fe2f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -0,0 +1,686 @@ +package s3manager + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// MaxUploadParts is the maximum allowed number of parts in a multi-part upload +// on Amazon S3. +const MaxUploadParts = 10000 + +// MinUploadPartSize is the minimum allowed part size when uploading a part to +// Amazon S3. +const MinUploadPartSize int64 = 1024 * 1024 * 5 + +// DefaultUploadPartSize is the default part size to buffer chunks of a +// payload into. +const DefaultUploadPartSize = MinUploadPartSize + +// DefaultUploadConcurrency is the default number of goroutines to spin up when +// using Upload(). +const DefaultUploadConcurrency = 5 + +// A MultiUploadFailure wraps a failed S3 multipart upload. 
An error returned +// will satisfy this interface when a multi part upload failed to upload all +// chucks to S3. In the case of a failure the UploadID is needed to operate on +// the chunks, if any, which were uploaded. +// +// Example: +// +// u := s3manager.NewUploader(opts) +// output, err := u.upload(input) +// if err != nil { +// if multierr, ok := err.(s3manager.MultiUploadFailure); ok { +// // Process error and its associated uploadID +// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID()) +// } else { +// // Process error generically +// fmt.Println("Error:", err.Error()) +// } +// } +// +type MultiUploadFailure interface { + awserr.Error + + // Returns the upload id for the S3 multipart upload that failed. + UploadID() string +} + +// So that the Error interface type can be included as an anonymous field +// in the multiUploadError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// A multiUploadError wraps the upload ID of a failed s3 multipart upload. +// Composed of BaseError for code, message, and original error +// +// Should be used for an error that occurred failing a S3 multipart upload, +// and a upload ID is available. If an uploadID is not available a more relevant +type multiUploadError struct { + awsError + + // ID for multipart upload which failed. + uploadID string +} + +// Error returns the string representation of the error. +// +// See apierr.BaseError ErrorWithExtra for output format +// +// Satisfies the error interface. +func (m multiUploadError) Error() string { + extra := fmt.Sprintf("upload id: %s", m.uploadID) + return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (m multiUploadError) String() string { + return m.Error() +} + +// UploadID returns the id of the S3 upload which failed. +func (m multiUploadError) UploadID() string { + return m.uploadID +} + +// UploadInput contains all input for upload requests to Amazon S3. +type UploadInput struct { + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256, + // aws:kms). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + + // The readable body payload to send to S3. + Body io.Reader +} + +// UploadOutput represents a response from the Upload() call. +type UploadOutput struct { + // The URL where the object was uploaded to. + Location string + + // The version of the object that was uploaded. Will only be populated if + // the S3 Bucket is versioned. If the bucket is not versioned this field + // will not be set. + VersionID *string + + // The ID for a multipart upload to S3. In the case of an error the error + // can be cast to the MultiUploadFailure interface to extract the upload ID. + UploadID string +} + +// The Uploader structure that calls Upload(). It is safe to call Upload() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Uploader's properties is not safe to be done concurrently. +type Uploader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultUploadConcurrency value will be used. + Concurrency int + + // Setting this value to true will cause the SDK to avoid calling + // AbortMultipartUpload on a failure, leaving all successfully uploaded + // parts on S3 for manual recovery. + // + // Note that storing parts of an incomplete multipart upload counts towards + // space usage on S3 and will add additional costs if not cleaned up. + LeavePartsOnError bool + + // MaxUploadParts is the max number of parts which will be uploaded to S3. + // Will be used to calculate the partsize of the object to be uploaded. + // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file + // as 100, 50MB parts. + // With a limited of s3.MaxUploadParts (10,000 parts). + MaxUploadParts int + + // The client to use when uploading to S3. + S3 s3iface.S3API +} + +// NewUploader creates a new Uploader instance to upload objects to S3. Pass In +// additional functional options to customize the uploader's behavior. Requires a +// client.ConfigProvider in order to create a S3 service client. The session.Session +// satisfies the client.ConfigProvider interface. +// +// Example: +// // The session the S3 Uploader will use +// sess, err := session.NewSession() +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// // Create an uploader with the session and custom options +// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: s3.New(c), + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + } + + for _, option := range options { + option(u) + } + + return u +} + +// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in +// additional functional options to customize the uploader's behavior. Requires +// a S3 service client to make S3 API calls. 
+// +// Example: +// // The session the S3 Uploader will use +// sess, err := session.NewSession() +// +// // S3 service client the Upload manager will use. +// s3Svc := s3.New(sess) +// +// // Create an uploader with S3 client and default options +// uploader := s3manager.NewUploaderWithClient(s3Svc) +// +// // Create an uploader with S3 client and custom options +// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: svc, + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + } + + for _, option := range options { + option(u) + } + + return u +} + +// Upload uploads an object to S3, intelligently buffering large files into +// smaller chunks and sending them in parallel across multiple goroutines. You +// can configure the buffer size and concurrency through the Uploader's parameters. +// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// It is safe to call this method concurrently across goroutines. +// +// Example: +// // Upload input parameters +// upParams := &s3manager.UploadInput{ +// Bucket: &bucketName, +// Key: &keyName, +// Body: file, +// } +// +// // Perform an upload. +// result, err := uploader.Upload(upParams) +// +// // Perform upload with options different than the those in the Uploader. +// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) { +// u.PartSize = 10 * 1024 * 1024 // 10MB part size +// u.LeavePartsOnError = true // Don't delete the parts if the upload fails. +// }) +func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) { + i := uploader{in: input, ctx: u} + + for _, option := range options { + option(&i.ctx) + } + + return i.upload() +} + +// internal structure to manage an upload to S3. +type uploader struct { + ctx Uploader + + in *UploadInput + + readerPos int64 // current reader position + totalSize int64 // set to -1 if the size is not known +} + +// internal logic for deciding whether to upload a single part or use a +// multipart upload. +func (u *uploader) upload() (*UploadOutput, error) { + u.init() + + if u.ctx.PartSize < MinUploadPartSize { + msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) + return nil, awserr.New("ConfigError", msg, nil) + } + + // Do one read to determine if we have more than one part + reader, _, err := u.nextReader() + if err == io.EOF { // single part + return u.singlePart(reader) + } else if err != nil { + return nil, awserr.New("ReadRequestBody", "read upload data failed", err) + } + + mu := multiuploader{uploader: u} + return mu.upload(reader) +} + +// init will initialize all default options. +func (u *uploader) init() { + if u.ctx.Concurrency == 0 { + u.ctx.Concurrency = DefaultUploadConcurrency + } + if u.ctx.PartSize == 0 { + u.ctx.PartSize = DefaultUploadPartSize + } + + // Try to get the total size for some optimizations + u.initSize() +} + +// initSize tries to detect the total stream size, setting u.totalSize. If +// the size is not known, totalSize is set to -1. 
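The part-size adjustment that initSize performs for seekable bodies (described in the comment above) amounts to the arithmetic below; adjustedPartSize is a hypothetical helper used only for illustration:

    package main

    import "fmt"

    // adjustedPartSize mirrors the adjustment in initSize: when the body is
    // seekable and totalSize/partSize would exceed maxParts, the part size is
    // grown so the upload fits within the part limit.
    func adjustedPartSize(totalSize, partSize int64, maxParts int) int64 {
    	if totalSize/partSize >= int64(maxParts) {
    		// +1 absorbs any remainder left over from integer division.
    		return totalSize/int64(maxParts) + 1
    	}
    	return partSize
    }

    func main() {
    	const minPart = int64(5 * 1024 * 1024)   // MinUploadPartSize
    	total := int64(100) * 1024 * 1024 * 1024 // a hypothetical 100GB stream

    	// Prints 10737419: roughly 10.7MB parts instead of the 5MB default,
    	// keeping the upload under 10,000 parts.
    	fmt.Println(adjustedPartSize(total, minPart, 10000))
    }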
+func (u *uploader) initSize() { + u.totalSize = -1 + + switch r := u.in.Body.(type) { + case io.Seeker: + pos, _ := r.Seek(0, 1) + defer r.Seek(pos, 0) + + n, err := r.Seek(0, 2) + if err != nil { + return + } + u.totalSize = n + + // Try to adjust partSize if it is too small and account for + // integer division truncation. + if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) { + // Add one to the part size to account for remainders + // during the size calculation. e.g odd number of bytes. + u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1 + } + } +} + +// nextReader returns a seekable reader representing the next packet of data. +// This operation increases the shared u.readerPos counter, but note that it +// does not need to be wrapped in a mutex because nextReader is only called +// from the main thread. +func (u *uploader) nextReader() (io.ReadSeeker, int, error) { + type readerAtSeeker interface { + io.ReaderAt + io.ReadSeeker + } + switch r := u.in.Body.(type) { + case readerAtSeeker: + var err error + + n := u.ctx.PartSize + if u.totalSize >= 0 { + bytesLeft := u.totalSize - u.readerPos + + if bytesLeft <= u.ctx.PartSize { + err = io.EOF + n = bytesLeft + } + } + + reader := io.NewSectionReader(r, u.readerPos, n) + u.readerPos += n + + return reader, int(n), err + + default: + part := make([]byte, u.ctx.PartSize) + n, err := readFillBuf(r, part) + u.readerPos += int64(n) + + return bytes.NewReader(part[0:n]), n, err + } +} + +func readFillBuf(r io.Reader, b []byte) (offset int, err error) { + for offset < len(b) && err == nil { + var n int + n, err = r.Read(b[offset:]) + offset += n + } + + return offset, err +} + +// singlePart contains upload logic for uploading a single chunk via +// a regular PutObject request. Multipart requests require at least two +// parts, or at least 5MB of data. +func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) { + params := &s3.PutObjectInput{} + awsutil.Copy(params, u.in) + params.Body = buf + + req, out := u.ctx.S3.PutObjectRequest(params) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return nil, err + } + + url := req.HTTPRequest.URL.String() + return &UploadOutput{ + Location: url, + VersionID: out.VersionId, + }, nil +} + +// internal structure to manage a specific multipart upload to S3. +type multiuploader struct { + *uploader + wg sync.WaitGroup + m sync.Mutex + err error + uploadID string + parts completedParts +} + +// keeps track of a single chunk of data being sent to S3. +type chunk struct { + buf io.ReadSeeker + num int64 +} + +// completedParts is a wrapper to make parts sortable by their part number, +// since S3 required this list to be sent in sorted order. +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +// upload will perform a multipart upload using the firstBuf buffer containing +// the first chunk of data. 
+func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) { + params := &s3.CreateMultipartUploadInput{} + awsutil.Copy(params, u.in) + + // Create the multipart + req, resp := u.ctx.S3.CreateMultipartUploadRequest(params) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return nil, err + } + u.uploadID = *resp.UploadId + + // Create the workers + ch := make(chan chunk, u.ctx.Concurrency) + for i := 0; i < u.ctx.Concurrency; i++ { + u.wg.Add(1) + go u.readChunk(ch) + } + + // Send part 1 to the workers + var num int64 = 1 + ch <- chunk{buf: firstBuf, num: num} + + // Read and queue the rest of the parts + var err error + for u.geterr() == nil && err == nil { + num++ + // This upload exceeded maximum number of supported parts, error now. + if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) { + var msg string + if num > int64(u.ctx.MaxUploadParts) { + msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", + u.ctx.MaxUploadParts) + } else { + msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", + MaxUploadParts) + } + u.seterr(awserr.New("TotalPartsExceeded", msg, nil)) + break + } + + var reader io.ReadSeeker + var nextChunkLen int + reader, nextChunkLen, err = u.nextReader() + + if err != nil && err != io.EOF { + u.seterr(awserr.New( + "ReadRequestBody", + "read multipart upload data failed", + err)) + break + } + + if nextChunkLen == 0 { + // No need to upload empty part, if file was empty to start + // with empty single part would of been created and never + // started multipart upload. + break + } + + ch <- chunk{buf: reader, num: num} + } + + // Close the channel, wait for workers, and complete upload + close(ch) + u.wg.Wait() + complete := u.complete() + + if err := u.geterr(); err != nil { + return nil, &multiUploadError{ + awsError: awserr.New( + "MultipartUpload", + "upload multipart failed", + err), + uploadID: u.uploadID, + } + } + return &UploadOutput{ + Location: aws.StringValue(complete.Location), + VersionID: complete.VersionId, + UploadID: u.uploadID, + }, nil +} + +// readChunk runs in worker goroutines to pull chunks off of the ch channel +// and send() them as UploadPart requests. +func (u *multiuploader) readChunk(ch chan chunk) { + defer u.wg.Done() + for { + data, ok := <-ch + + if !ok { + break + } + + if u.geterr() == nil { + if err := u.send(data); err != nil { + u.seterr(err) + } + } + } +} + +// send performs an UploadPart request and keeps track of the completed +// part information. 
+func (u *multiuploader) send(c chunk) error { + req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + Body: c.buf, + UploadId: &u.uploadID, + PartNumber: &c.num, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return err + } + + n := c.num + completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n} + + u.m.Lock() + u.parts = append(u.parts, completed) + u.m.Unlock() + + return nil +} + +// geterr is a thread-safe getter for the error object +func (u *multiuploader) geterr() error { + u.m.Lock() + defer u.m.Unlock() + + return u.err +} + +// seterr is a thread-safe setter for the error object +func (u *multiuploader) seterr(e error) { + u.m.Lock() + defer u.m.Unlock() + + u.err = e +} + +// fail will abort the multipart unless LeavePartsOnError is set to true. +func (u *multiuploader) fail() { + if u.ctx.LeavePartsOnError { + return + } + + req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + req.Send() +} + +// complete successfully completes a multipart upload and returns the response. +func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { + if u.geterr() != nil { + u.fail() + return nil + } + + // Parts must be sorted in PartNumber order. + sort.Sort(u.parts) + + req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + u.seterr(err) + u.fail() + } + + return resp +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go new file mode 100644 index 000000000000..6b46e31c1126 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go @@ -0,0 +1,672 @@ +package s3manager_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "sync" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/stretchr/testify/assert" +) + +var emptyList = []string{} + +func val(i interface{}, s string) interface{} { + v, err := awsutil.ValuesAtPath(i, s) + if err != nil || len(v) == 0 { + return nil + } + if _, ok := v[0].(io.Reader); ok { + return v[0] + } + + if rv := reflect.ValueOf(v[0]); rv.Kind() == reflect.Ptr { + return rv.Elem().Interface() + } + + return v[0] +} + +func contains(src []string, s string) bool { + for _, v := range src { + if s == v { + return true + } + } + return false +} + +func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) { + var m sync.Mutex + partNum := 0 + names := []string{} + params := []interface{}{} + svc := s3.New(unit.Session) + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.UnmarshalError.Clear() + svc.Handlers.Send.Clear() + 
svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + if !contains(ignoreOps, r.Operation.Name) { + names = append(names, r.Operation.Name) + params = append(params, r.Params) + } + + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + + switch data := r.Data.(type) { + case *s3.CreateMultipartUploadOutput: + data.UploadId = aws.String("UPLOAD-ID") + case *s3.UploadPartOutput: + partNum++ + data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum)) + case *s3.CompleteMultipartUploadOutput: + data.Location = aws.String("https://location") + data.VersionId = aws.String("VERSION-ID") + case *s3.PutObjectOutput: + data.VersionId = aws.String("VERSION-ID") + } + }) + + return svc, &names, ¶ms +} + +func buflen(i interface{}) int { + r := i.(io.Reader) + b, _ := ioutil.ReadAll(r) + return len(b) +} + +func TestUploadOrderMulti(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + u := s3manager.NewUploaderWithClient(s) + + resp, err := u.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + ServerSideEncryption: aws.String("aws:kms"), + SSEKMSKeyId: aws.String("KmsId"), + ContentType: aws.String("content/type"), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + assert.Equal(t, "https://location", resp.Location) + assert.Equal(t, "UPLOAD-ID", resp.UploadID) + assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID) + + // Validate input values + + // UploadPart + assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadId")) + assert.Equal(t, "UPLOAD-ID", val((*args)[2], "UploadId")) + assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadId")) + + // CompleteMultipartUpload + assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadId")) + assert.Equal(t, int64(1), val((*args)[4], "MultipartUpload.Parts[0].PartNumber")) + assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber")) + assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag")) + + // Custom headers + assert.Equal(t, "aws:kms", val((*args)[0], "ServerSideEncryption")) + assert.Equal(t, "KmsId", val((*args)[0], "SSEKMSKeyId")) + assert.Equal(t, "content/type", val((*args)[0], "ContentType")) +} + +func TestUploadOrderMultiDifferentPartSize(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.PartSize = 1024 * 1024 * 7 + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body"))) + assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body"))) +} + +func TestUploadIncreasePartSize(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.MaxUploadParts = 2 + }) + _, err := 
mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.NoError(t, err) + assert.Equal(t, int64(s3manager.DefaultDownloadPartSize), mgr.PartSize) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + assert.Equal(t, (1024*1024*6)+1, buflen(val((*args)[1], "Body"))) + assert.Equal(t, (1024*1024*6)-1, buflen(val((*args)[2], "Body"))) +} + +func TestUploadFailIfPartSizeTooSmall(t *testing.T) { + mgr := s3manager.NewUploader(unit.Session, func(u *s3manager.Uploader) { + u.PartSize = 5 + }) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Nil(t, resp) + assert.NotNil(t, err) + + aerr := err.(awserr.Error) + assert.Equal(t, "ConfigError", aerr.Code()) + assert.Contains(t, aerr.Message(), "part size must be at least") +} + +func TestUploadOrderSingle(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf2MB), + ServerSideEncryption: aws.String("aws:kms"), + SSEKMSKeyId: aws.String("KmsId"), + ContentType: aws.String("content/type"), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID) + assert.Equal(t, "", resp.UploadID) + assert.Equal(t, "aws:kms", val((*args)[0], "ServerSideEncryption")) + assert.Equal(t, "KmsId", val((*args)[0], "SSEKMSKeyId")) + assert.Equal(t, "content/type", val((*args)[0], "ContentType")) +} + +func TestUploadOrderSingleFailure(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse.StatusCode = 400 + }) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf2MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.Nil(t, resp) +} + +func TestUploadOrderZero(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 0)), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) + assert.Equal(t, 0, buflen(val((*args)[0], "Body"))) +} + +func TestUploadOrderMultiFailure(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch t := r.Data.(type) { + case *s3.UploadPartOutput: + if *t.ETag == "ETAG2" { + r.HTTPResponse.StatusCode = 400 + } + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureOnComplete(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + 
s.Handlers.Send.PushBack(func(r *request.Request) { + switch r.Data.(type) { + case *s3.CompleteMultipartUploadOutput: + r.HTTPResponse.StatusCode = 400 + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", + "UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureOnCreate(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch r.Data.(type) { + case *s3.CreateMultipartUploadOutput: + r.HTTPResponse.StatusCode = 400 + } + }) + + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 1024*1024*12)), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureLeaveParts(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch data := r.Data.(type) { + case *s3.UploadPartOutput: + if *data.ETag == "ETAG2" { + r.HTTPResponse.StatusCode = 400 + } + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.LeavePartsOnError = true + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 1024*1024*12)), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops) +} + +type failreader struct { + times int + failCount int +} + +func (f *failreader) Read(b []byte) (int, error) { + f.failCount++ + if f.failCount >= f.times { + return 0, fmt.Errorf("random failure") + } + return len(b), nil +} + +func TestUploadOrderReadFail1(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &failreader{times: 1}, + }) + + assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code()) + assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure") + assert.Equal(t, []string{}, *ops) +} + +func TestUploadOrderReadFail2(t *testing.T) { + s, ops, _ := loggingSvc([]string{"UploadPart"}) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &failreader{times: 2}, + }) + + assert.Equal(t, "MultipartUpload", err.(awserr.Error).Code()) + assert.Equal(t, "ReadRequestBody", err.(awserr.Error).OrigErr().(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).OrigErr().Error(), "random failure") + assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) +} + +type sizedReader struct { + size int + cur int + err error +} + +func (s *sizedReader) Read(p []byte) (n int, err error) { + if s.cur >= s.size { + if s.err == nil { + s.err = io.EOF + } + return 0, s.err + } + + n = len(p) + s.cur += len(p) + if s.cur > s.size { + n -= s.cur - s.size + } + + return +} + +func TestUploadOrderMultiBufferedReader(t *testing.T) { + s, ops, 
args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + parts := []int{ + buflen(val((*args)[1], "Body")), + buflen(val((*args)[2], "Body")), + buflen(val((*args)[3], "Body")), + } + sort.Ints(parts) + assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts) +} + +func TestUploadOrderMultiBufferedReaderPartial(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12, err: io.EOF}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + parts := []int{ + buflen(val((*args)[1], "Body")), + buflen(val((*args)[2], "Body")), + buflen(val((*args)[3], "Body")), + } + sort.Ints(parts) + assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts) +} + +// TestUploadOrderMultiBufferedReaderEOF tests the edge case where the +// file size is the same as part size. +func TestUploadOrderMultiBufferedReaderEOF(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 10, err: io.EOF}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + parts := []int{ + buflen(val((*args)[1], "Body")), + buflen(val((*args)[2], "Body")), + } + sort.Ints(parts) + assert.Equal(t, []int{1024 * 1024 * 5, 1024 * 1024 * 5}, parts) +} + +func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) { + s, ops, _ := loggingSvc([]string{"UploadPart"}) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.MaxUploadParts = 2 + }) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12}, + }) + + assert.Error(t, err) + assert.Nil(t, resp) + assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) + + aerr := err.(awserr.Error) + assert.Equal(t, "MultipartUpload", aerr.Code()) + assert.Equal(t, "TotalPartsExceeded", aerr.OrigErr().(awserr.Error).Code()) + assert.Contains(t, aerr.Error(), "configured MaxUploadParts (2)") +} + +func TestUploadOrderSingleBufferedReader(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 2}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) +} + +func TestUploadZeroLenObject(t *testing.T) { + requestMade := false + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestMade = true + 
w.WriteHeader(http.StatusOK) + })) + mgr := s3manager.NewUploaderWithClient(s3.New(unit.Session, &aws.Config{ + Endpoint: aws.String(server.URL), + })) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: strings.NewReader(""), + }) + + assert.NoError(t, err) + assert.True(t, requestMade) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) +} + +func TestUploadInputS3PutObjectInputPairity(t *testing.T) { + matchings := compareStructType(reflect.TypeOf(s3.PutObjectInput{}), + reflect.TypeOf(s3manager.UploadInput{})) + aOnly := []string{} + bOnly := []string{} + + for k, c := range matchings { + if c == 1 && k != "ContentLength" { + aOnly = append(aOnly, k) + } else if c == 2 { + bOnly = append(bOnly, k) + } + } + assert.Empty(t, aOnly, "s3.PutObjectInput") + assert.Empty(t, bOnly, "s3Manager.UploadInput") +} + +type testIncompleteReader struct { + Buf []byte + Count int +} + +func (r *testIncompleteReader) Read(p []byte) (n int, err error) { + if r.Count < 0 { + return 0, io.ErrUnexpectedEOF + } + + r.Count-- + return copy(p, r.Buf), nil +} + +func TestUploadUnexpectedEOF(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &testIncompleteReader{ + Buf: make([]byte, 1024*1024*5), + Count: 1, + }, + }) + + assert.Error(t, err) + assert.Equal(t, "CreateMultipartUpload", (*ops)[0]) + assert.Equal(t, "UploadPart", (*ops)[1]) + assert.Equal(t, "AbortMultipartUpload", (*ops)[len(*ops)-1]) + + // Part lengths + assert.Equal(t, 1024*1024*5, buflen(val((*args)[1], "Body"))) +} + +func compareStructType(a, b reflect.Type) map[string]int { + if a.Kind() != reflect.Struct || b.Kind() != reflect.Struct { + panic(fmt.Sprintf("types must both be structs, got %v and %v", a.Kind(), b.Kind())) + } + + aFields := enumFields(a) + bFields := enumFields(b) + + matchings := map[string]int{} + + for i := 0; i < len(aFields) || i < len(bFields); i++ { + if i < len(aFields) { + c := matchings[aFields[i].Name] + matchings[aFields[i].Name] = c + 1 + } + if i < len(bFields) { + c := matchings[bFields[i].Name] + matchings[bFields[i].Name] = c + 2 + } + } + + return matchings +} + +func enumFields(v reflect.Type) []reflect.StructField { + fields := []reflect.StructField{} + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + // Ignoreing anon fields + if field.PkgPath != "" { + // Ignore unexported fields + continue + } + + fields = append(fields, field) + } + + return fields +} + +type fooReaderAt struct{} + +func (r *fooReaderAt) Read(p []byte) (n int, err error) { + return 12, io.EOF +} + +func (r *fooReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + return 12, io.EOF +} + +func TestReaderAt(t *testing.T) { + svc := s3.New(unit.Session) + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.UnmarshalError.Clear() + svc.Handlers.Send.Clear() + + contentLen := "" + svc.Handlers.Send.PushBack(func(r *request.Request) { + contentLen = r.HTTPRequest.Header.Get("Content-Length") + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + }) + + mgr := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + 
Key: aws.String("Key"), + Body: &fooReaderAt{}, + }) + + assert.NoError(t, err) + assert.Equal(t, contentLen, "12") +} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go index 1ede31ebb63c..5aefb4a962e9 100644 --- a/vendor/github.com/docker/distribution/registry.go +++ b/vendor/github.com/docker/distribution/registry.go @@ -72,6 +72,21 @@ func (o WithTagOption) Apply(m ManifestService) error { return nil } +// WithManifestMediaTypesOption lists the media types the client wishes +// the server to provide. +func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { + return WithManifestMediaTypesOption{mediaTypes} +} + +// WithManifestMediaTypesOption holds a list of accepted media types +type WithManifestMediaTypesOption struct{ MediaTypes []string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { + // no implementation + return nil +} + // Repository is a named collection of manifests and layers. type Repository interface { // Named returns the name of the repository. diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go index d6d884ffd1dd..5e2ae1c93933 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -253,6 +253,15 @@ func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]st return nil } +func hasScope(scopes []string, scope string) bool { + for _, s := range scopes { + if s == scope { + return true + } + } + return false +} + func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { th.tokenLock.Lock() defer th.tokenLock.Unlock() @@ -262,6 +271,9 @@ func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...s } var addedScopes bool for _, scope := range additionalScopes { + if hasScope(scopes, scope) { + continue + } scopes = append(scopes, scope) addedScopes = true } diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index 1ebd0b183946..bf67465f14b8 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -321,7 +321,8 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er defer resp.Body.Close() switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: + case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: + // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers return descriptorFromResponse(resp) default: // if the response is an error - there will be no body to decode. 
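For readers following the docker/distribution changes above: a minimal sketch of how a caller could exercise the new WithManifestMediaTypes option once this patch is applied. The registry URL, repository name, tag, and media type string are illustrative placeholders, not values taken from this change; the client wiring (reference.WithName, client.NewRepository, Repository.Manifests, ManifestService.Get) follows the same pattern as the repository_test.go code later in this diff.

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()

	// Placeholder repository and registry endpoint; substitute real values.
	named, err := reference.WithName("example.com/library/busybox")
	if err != nil {
		panic(err)
	}
	repo, err := client.NewRepository(ctx, named, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	ms, err := repo.Manifests(ctx)
	if err != nil {
		panic(err)
	}

	// Restrict the Accept header to a single media type instead of the
	// full distribution.ManifestMediaTypes() list.
	m, err := ms.Get(ctx, "", distribution.WithTag("latest"),
		distribution.WithManifestMediaTypes([]string{
			"application/vnd.docker.distribution.manifest.v2+json",
		}))
	if err != nil {
		panic(err)
	}
	fmt.Println(m)
}

Note the fallback wired into the next hunk: when the supplied media type list is empty, manifests.Get uses the full distribution.ManifestMediaTypes() set, so callers that do not pass the new option keep their current Accept headers.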
@@ -421,18 +422,22 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis ref reference.Named err error contentDgst *digest.Digest + mediaTypes []string ) for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { + switch opt := option.(type) { + case distribution.WithTagOption: digestOrTag = opt.Tag ref, err = reference.WithTag(ms.name, opt.Tag) if err != nil { return nil, err } - } else if opt, ok := option.(contentDigestOption); ok { + case contentDigestOption: contentDgst = opt.digest - } else { + case distribution.WithManifestMediaTypesOption: + mediaTypes = opt.MediaTypes + default: err := option.Apply(ms) if err != nil { return nil, err @@ -448,6 +453,10 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis } } + if len(mediaTypes) == 0 { + mediaTypes = distribution.ManifestMediaTypes() + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return nil, err @@ -458,7 +467,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis return nil, err } - for _, t := range distribution.ManifestMediaTypes() { + for _, t := range mediaTypes { req.Header.Add("Accept", t) } diff --git a/vendor/github.com/docker/distribution/registry/client/repository_test.go b/vendor/github.com/docker/distribution/registry/client/repository_test.go index a232e03ec7ec..ce342182cedd 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository_test.go +++ b/vendor/github.com/docker/distribution/registry/client/repository_test.go @@ -9,6 +9,8 @@ import ( "log" "net/http" "net/http/httptest" + "reflect" + "sort" "strconv" "strings" "testing" @@ -784,6 +786,65 @@ func TestManifestFetchWithEtag(t *testing.T) { } } +func TestManifestFetchWithAccept(t *testing.T) { + ctx := context.Background() + repo, _ := reference.WithName("test.example.com/repo") + _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + headers := make(chan []string, 1) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + headers <- req.Header["Accept"] + })) + defer close(headers) + defer s.Close() + + r, err := NewRepository(context.Background(), repo, s.URL, nil) + if err != nil { + t.Fatal(err) + } + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + // the media types we send + mediaTypes []string + // the expected Accept headers the server should receive + expect []string + // whether to sort the request and response values for comparison + sort bool + }{ + { + mediaTypes: []string{}, + expect: distribution.ManifestMediaTypes(), + sort: true, + }, + { + mediaTypes: []string{"test1", "test2"}, + expect: []string{"test1", "test2"}, + }, + { + mediaTypes: []string{"test1"}, + expect: []string{"test1"}, + }, + { + mediaTypes: []string{""}, + expect: []string{""}, + }, + } + for _, testCase := range testCases { + ms.Get(ctx, dgst, distribution.WithManifestMediaTypes(testCase.mediaTypes)) + actual := <-headers + if testCase.sort { + sort.Strings(actual) + sort.Strings(testCase.expect) + } + if !reflect.DeepEqual(actual, testCase.expect) { + t.Fatalf("unexpected Accept header values: %v", actual) + } + } +} + func TestManifestDelete(t *testing.T) { repo, _ := reference.ParseNamed("test.example.com/repo/delete") _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6)